author    Jeff Garzik <jgarzik@pobox.com>  2005-11-07 22:51:47 -0500
committer Jeff Garzik <jgarzik@pobox.com>  2005-11-07 22:51:47 -0500
commit    6b995751c2e851d2bc9c277b5884d0adb519e31d (patch)
tree      7a15b41b5d8ce612915584a0773c670d5c0ab5b8 /drivers/mtd/chips
parent    6c2f4267833f453156f8f439cc32eb4c92f357b4 (diff)
parent    d27ba47e7e8c466c18983a1779d611f82d6a354f (diff)

Merge branch 'master'
Diffstat (limited to 'drivers/mtd/chips')
-rw-r--r--  drivers/mtd/chips/Kconfig            |  22
-rw-r--r--  drivers/mtd/chips/Makefile           |   4
-rw-r--r--  drivers/mtd/chips/amd_flash.c        |  80
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  | 487
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  | 160
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  | 183
-rw-r--r--  drivers/mtd/chips/cfi_probe.c        |  98
-rw-r--r--  drivers/mtd/chips/cfi_util.c         |  25
-rw-r--r--  drivers/mtd/chips/chipreg.c          |   6
-rw-r--r--  drivers/mtd/chips/fwh_lock.h         |   6
-rw-r--r--  drivers/mtd/chips/gen_probe.c        |  33
-rw-r--r--  drivers/mtd/chips/jedec.c            | 206
-rw-r--r--  drivers/mtd/chips/jedec_probe.c      |  48
-rw-r--r--  drivers/mtd/chips/map_absent.c       |   8
-rw-r--r--  drivers/mtd/chips/sharp.c            |  23
15 files changed, 746 insertions(+), 643 deletions(-)
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index df95d2158b16..eafa23f5cbd6 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,5 +1,5 @@
 # drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.15 2005/06/06 23:04:35 tpoynor Exp $
+# $Id: Kconfig,v 1.18 2005/11/07 11:14:22 gleixner Exp $

 menu "RAM/ROM/Flash chip drivers"
 depends on MTD!=n
@@ -39,7 +39,7 @@ config MTD_CFI_ADV_OPTIONS
 If you need to specify a specific endianness for access to flash
 chips, or if you wish to reduce the size of the kernel by including
 support for only specific arrangements of flash chips, say 'Y'. This
 option does not directly affect the code, but will enable other
 configuration options which allow you to do so.

 If unsure, say 'N'.
@@ -56,7 +56,7 @@ config MTD_CFI_NOSWAP
 data bits when writing the 'magic' commands to the chips. Saying
 'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
 enabled, means that the CPU will not do any swapping; the chips
 are expected to be wired to the CPU in 'host-endian' form.
 Specific arrangements are possible with the BIG_ENDIAN_BYTE and
 LITTLE_ENDIAN_BYTE, if the bytes are reversed.

@@ -79,10 +79,10 @@ config MTD_CFI_GEOMETRY
 bool "Specific CFI Flash geometry selection"
 depends on MTD_CFI_ADV_OPTIONS
 help
 This option does not affect the code directly, but will enable
 some other configuration options which would allow you to reduce
 the size of the kernel by including support for only certain
 arrangements of CFI chips. If unsure, say 'N' and all options
 which are supported by the current code will be enabled.

 config MTD_MAP_BANK_WIDTH_1
@@ -197,7 +197,7 @@ config MTD_CFI_AMDSTD
 help
 The Common Flash Interface defines a number of different command
 sets which a CFI-compliant chip may claim to implement. This code
 provides support for one of those command sets, used on chips
 including the AMD Am29LV320.

 config MTD_CFI_AMDSTD_RETRY
@@ -237,14 +237,14 @@ config MTD_RAM
 tristate "Support for RAM chips in bus mapping"
 depends on MTD
 help
 This option enables basic support for RAM chips accessed through
 a bus mapping driver.

 config MTD_ROM
 tristate "Support for ROM chips in bus mapping"
 depends on MTD
 help
 This option enables basic support for ROM chips accessed through
 a bus mapping driver.

 config MTD_ABSENT
@@ -275,7 +275,7 @@ config MTD_AMDSTD
 depends on MTD && MTD_OBSOLETE_CHIPS
 help
 This option enables support for flash chips using AMD-compatible
 commands, including some which are not CFI-compatible and hence
 cannot be used with the CONFIG_MTD_CFI_AMDSTD option.

 It also works on AMD compatible chips that do conform to CFI.
@@ -285,7 +285,7 @@ config MTD_SHARP
 depends on MTD && MTD_OBSOLETE_CHIPS
 help
 This option enables support for flash chips using Sharp-compatible
 commands, including some which are not CFI-compatible and hence
 cannot be used with the CONFIG_MTD_CFI_INTELxxx options.

 config MTD_JEDEC
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 6830489828c6..8afe3092c4e3 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,7 +1,7 @@
 #
 # linux/drivers/chips/Makefile
 #
-# $Id: Makefile.common,v 1.4 2004/07/12 16:07:30 dwmw2 Exp $
+# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $

 # *** BIG UGLY NOTE ***
 #
@@ -11,7 +11,7 @@
 # the CFI command set drivers are linked before gen_probe.o

 obj-$(CONFIG_MTD) += chipreg.o
 obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
 obj-$(CONFIG_MTD_CFI) += cfi_probe.o
 obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
 obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index 2dafeba3f3d5..fdb91b6f1d97 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -3,7 +3,7 @@
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
- * $Id: amd_flash.c,v 1.27 2005/02/04 07:43:09 jonashg Exp $
+ * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
@@ -93,9 +93,9 @@
 #define D6_MASK 0x40

 struct amd_flash_private {
 int device_type;
 int interleave;
 int numchips;
 unsigned long chipshift;
 // const char *im_name;
 struct flchip chips[0];
@@ -253,7 +253,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
 int i;
 int retval = 0;
 int lock_status;

 map = mtd->priv;

 /* Pass the whole chip through sector by sector and check for each
@@ -273,7 +273,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
 unlock_sector(map, eraseoffset, is_unlock);

 lock_status = is_sector_locked(map, eraseoffset);

 if (is_unlock && lock_status) {
 printk("Cannot unlock sector at address %x length %xx\n",
 eraseoffset, merip->erasesize);
@@ -305,7 +305,7 @@ static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 /*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 */
 static int probe_new_chip(struct mtd_info *mtd, __u32 base,
 struct flchip *chips,
 struct amd_flash_private *private,
@@ -636,7 +636,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
 { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
 }
 }
 };

 struct mtd_info *mtd;
@@ -701,7 +701,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)

 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
 mtd->numeraseregions, GFP_KERNEL);
 if (!mtd->eraseregions) {
 printk(KERN_WARNING "%s: Failed to allocate "
 "memory for MTD erase region info\n", map->name);
 kfree(mtd);
@@ -739,12 +739,12 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
 mtd->type = MTD_NORFLASH;
 mtd->flags = MTD_CAP_NORFLASH;
 mtd->name = map->name;
 mtd->erase = amd_flash_erase;
 mtd->read = amd_flash_read;
 mtd->write = amd_flash_write;
 mtd->sync = amd_flash_sync;
 mtd->suspend = amd_flash_suspend;
 mtd->resume = amd_flash_resume;
 mtd->lock = amd_flash_lock;
 mtd->unlock = amd_flash_unlock;

@@ -789,7 +789,7 @@ retry:
 map->name, chip->state);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);

 spin_unlock_bh(chip->mutex);

 schedule();
@@ -802,7 +802,7 @@ retry:
 timeo = jiffies + HZ;

 goto retry;
 }

 adr += chip->start;

@@ -889,7 +889,7 @@ retry:
 map->name, chip->state);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);

 spin_unlock_bh(chip->mutex);

 schedule();
@@ -901,7 +901,7 @@ retry:
 timeo = jiffies + HZ;

 goto retry;
 }

 chip->state = FL_WRITING;

@@ -911,7 +911,7 @@ retry:
 wide_write(map, datum, adr);

 times_left = 500000;
 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
 if (need_resched()) {
 spin_unlock_bh(chip->mutex);
 schedule();
@@ -989,7 +989,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
 if (ret) {
 return ret;
 }

 ofs += n;
 buf += n;
 (*retlen) += n;
@@ -1002,7 +1002,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
 }
 }
 }

 /* We are now aligned, write as much as possible. */
 while(len >= map->buswidth) {
 __u32 datum;
@@ -1063,7 +1063,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
 if (ret) {
 return ret;
 }

 (*retlen) += n;
 }

@@ -1085,7 +1085,7 @@ retry:
 if (chip->state != FL_READY){
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);

 spin_unlock_bh(chip->mutex);

 schedule();
@@ -1098,7 +1098,7 @@ retry:
 timeo = jiffies + HZ;

 goto retry;
 }

 chip->state = FL_ERASING;

@@ -1106,30 +1106,30 @@ retry:
 ENABLE_VPP(map);
 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

 timeo = jiffies + (HZ * 20);

 spin_unlock_bh(chip->mutex);
 msleep(1000);
 spin_lock_bh(chip->mutex);

 while (flash_is_busy(map, adr, private->interleave)) {

 if (chip->state != FL_ERASING) {
 /* Someone's suspended the erase. Sleep */
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);

 spin_unlock_bh(chip->mutex);
 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
 map->name);
 schedule();
 remove_wait_queue(&chip->wq, &wait);

 if (signal_pending(current)) {
 return -EINTR;
 }

 timeo = jiffies + (HZ*2); /* FIXME */
 spin_lock_bh(chip->mutex);
 continue;
@@ -1145,7 +1145,7 @@ retry:

 return -EIO;
 }

 /* Latency issues. Drop the lock, wait a while and retry */
 spin_unlock_bh(chip->mutex);

@@ -1153,7 +1153,7 @@ retry:
 schedule();
 else
 udelay(1);

 spin_lock_bh(chip->mutex);
 }

@@ -1180,7 +1180,7 @@ retry:
 return -EIO;
 }
 }

 DISABLE_VPP(map);
 chip->state = FL_READY;
 wake_up(&chip->wq);
@@ -1246,7 +1246,7 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
 * with the erase region at that address.
 */

 while ((i < mtd->numeraseregions) &&
 ((instr->addr + instr->len) >= regions[i].offset)) {
 i++;
 }
@@ -1293,10 +1293,10 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
 }
 }
 }

 instr->state = MTD_ERASE_DONE;
 mtd_erase_callback(instr);

 return 0;
 }

@@ -1324,7 +1324,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
 case FL_JEDEC_QUERY:
 chip->oldstate = chip->state;
 chip->state = FL_SYNCING;
 /* No need to wake_up() on this state change -
 * as the whole point is that nobody can do anything
 * with the chip now anyway.
 */
@@ -1335,13 +1335,13 @@ static void amd_flash_sync(struct mtd_info *mtd)
 default:
 /* Not an idle state */
 add_wait_queue(&chip->wq, &wait);

 spin_unlock_bh(chip->mutex);

 schedule();

 remove_wait_queue(&chip->wq, &wait);

 goto retry;
 }
 }
@@ -1351,7 +1351,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
 chip = &private->chips[i];

 spin_lock_bh(chip->mutex);

 if (chip->state == FL_SYNCING) {
 chip->state = chip->oldstate;
 wake_up(&chip->wq);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 0cfcd88468e0..143f01a4c170 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,9 +4,9 @@
 *
 * (C) 2000 Red Hat. GPL'd
 *
- * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $
+ *
 *
- *
 * 10/10/2000 Nicolas Pitre <nico@cam.org>
 * - completely revamped method functions so they are aware and
 * independent of the flash geometry (buswidth, interleave, etc.)
@@ -51,6 +51,7 @@
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_intelext_sync (struct mtd_info *);
 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
@@ -105,6 +106,7 @@ static struct mtd_chip_driver cfi_intelext_chipdrv = {
 static void cfi_tell_features(struct cfi_pri_intelext *extp)
 {
 int i;
+printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
@@ -116,36 +118,43 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
-for (i=10; i<32; i++) {
-if (extp->FeatureSupport & (1<<i))
+printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
+for (i=11; i<32; i++) {
+if (extp->FeatureSupport & (1<<i))
 printk(" - Unknown Bit %X: supported\n", i);
 }

 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
 for (i=1; i<8; i++) {
 if (extp->SuspendCmdSupport & (1<<i))
 printk(" - Unknown Bit %X: supported\n", i);
 }

 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
-printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
-for (i=2; i<16; i++) {
+printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+for (i=2; i<3; i++) {
 if (extp->BlkStatusRegMask & (1<<i))
 printk(" - Unknown Bit %X Active: yes\n",i);
 }
-
-printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
+printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
+for (i=6; i<16; i++) {
+if (extp->BlkStatusRegMask & (1<<i))
+printk(" - Unknown Bit %X Active: yes\n",i);
+}
+
+printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
 if (extp->VppOptimal)
 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
 }
 #endif

 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
 {
 struct map_info *map = mtd->priv;
@@ -176,7 +185,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
 {
 struct map_info *map = mtd->priv;
 struct cfi_private *cfi = map->fldrv_priv;

 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
 }
@@ -185,7 +194,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
 {
 struct map_info *map = mtd->priv;
 struct cfi_private *cfi = map->fldrv_priv;

 /* Note this is done after the region info is endian swapped */
 cfi->cfiq->EraseRegionInfo[1] =
 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
@@ -207,12 +216,13 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
 if (cfi->cfiq->BufWriteTimeoutTyp) {
 printk(KERN_INFO "Using buffer write method\n" );
 mtd->write = cfi_intelext_write_buffers;
+mtd->writev = cfi_intelext_writev;
 }
 }

 static struct cfi_fixup cfi_fixup_table[] = {
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
 #endif
 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
@@ -252,12 +262,21 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 if (!extp)
 return NULL;

+if (extp->MajorVersion != '1' ||
+(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
+"version %c.%c.\n", extp->MajorVersion,
+extp->MinorVersion);
+kfree(extp);
+return NULL;
+}
+
 /* Do some byteswapping if necessary */
 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

-if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
+if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
 unsigned int extra_size = 0;
 int nb_parts, i;

@@ -266,7 +285,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 sizeof(struct cfi_intelext_otpinfo);

 /* Burst Read info */
-extra_size += 6;
+extra_size += 2;
+if (extp_size < sizeof(*extp) + extra_size)
+goto need_more;
+extra_size += extp->extra[extra_size-1];

 /* Number of hardware-partitions */
 extra_size += 1;
@@ -274,6 +296,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 goto need_more;
 nb_parts = extp->extra[extra_size - 1];

+/* skip the sizeof(partregion) field in CFI 1.4 */
+if (extp->MinorVersion >= '4')
+extra_size += 2;
+
 for (i = 0; i < nb_parts; i++) {
 struct cfi_intelext_regioninfo *rinfo;
 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
@@ -285,6 +311,9 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 * sizeof(struct cfi_intelext_blockinfo);
 }

+if (extp->MinorVersion >= '4')
+extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
+
 if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
 extp_size = sizeof(*extp) + extra_size;
@@ -298,7 +327,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
 goto again;
 }
 }

 return extp;
 }

@@ -339,7 +368,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

 if (cfi->cfi_mode == CFI_MODE_CFI) {
 /*
 * It's a real CFI chip, not one for which the probe
 * routine faked a CFI structure. So we read the feature
 * table from it.
@@ -354,14 +383,14 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 }

 /* Install our own private info structure */
 cfi->cmdset_priv = extp;

 cfi_fixup(mtd, cfi_fixup_table);

 #ifdef DEBUG_CFI_FEATURES
 /* Tell the user about it in lots of lovely detail */
 cfi_tell_features(extp);
 #endif

 if(extp->SuspendCmdSupport & 1) {
 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
@@ -379,10 +408,10 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
 cfi->chips[i].ref_point_counter = 0;
 }

 map->fldrv = &cfi_intelext_chipdrv;

 return cfi_intelext_setup(mtd);
 }

@@ -399,13 +428,13 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
 mtd->size = devsize * cfi->numchips;

 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
 * mtd->numeraseregions, GFP_KERNEL);
 if (!mtd->eraseregions) {
 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
 goto setup_err;
 }

 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
 unsigned long ernum, ersize;
 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -429,7 +458,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
 }

 for (i=0; i<mtd->numeraseregions;i++){
-printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
+printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
 i,mtd->eraseregions[i].offset,
 mtd->eraseregions[i].erasesize,
 mtd->eraseregions[i].numblocks);
@@ -455,8 +484,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)

 setup_err:
 if(mtd) {
-if(mtd->eraseregions)
-kfree(mtd->eraseregions);
+kfree(mtd->eraseregions);
 kfree(mtd);
 }
 kfree(cfi->cmdset_priv);
@@ -481,7 +509,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
 * arrangement at this point. This can be rearranged in the future
 * if someone feels motivated enough. --nico
 */
-if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
+if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
 && extp->FeatureSupport & (1 << 9)) {
 struct cfi_private *newcfi;
 struct flchip *chip;
@@ -493,12 +521,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
 sizeof(struct cfi_intelext_otpinfo);

 /* Burst Read info */
-offs += 6;
+offs += extp->extra[offs+1]+2;

 /* Number of partition regions */
 numregions = extp->extra[offs];
 offs += 1;

+/* skip the sizeof(partregion) field in CFI 1.4 */
+if (extp->MinorVersion >= '4')
+offs += 2;
+
 /* Number of hardware partitions */
 numparts = 0;
 for (i = 0; i < numregions; i++) {
@@ -510,6 +542,20 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
 sizeof(struct cfi_intelext_blockinfo);
 }

+/* Programming Region info */
+if (extp->MinorVersion >= '4') {
+struct cfi_intelext_programming_regioninfo *prinfo;
+prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
+MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
+MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
+MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
+mtd->flags |= MTD_PROGRAM_REGIONS;
+printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
+map->name, MTD_PROGREGION_SIZE(mtd),
+MTD_PROGREGION_CTRLMODE_VALID(mtd),
+MTD_PROGREGION_CTRLMODE_INVALID(mtd));
+}
+
 /*
 * All functions below currently rely on all chips having
 * the same geometry so we'll just assume that all hardware
@@ -654,8 +700,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 break;

 if (time_after(jiffies, timeo)) {
-printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
-status.x[0]);
+printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
+map->name, status.x[0]);
 return -EIO;
 }
 spin_unlock(chip->mutex);
@@ -664,7 +710,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 /* Someone else might have been playing with it. */
 goto retry;
 }

 case FL_READY:
 case FL_CFI_QUERY:
 case FL_JEDEC_QUERY:
@@ -702,8 +748,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 map_write(map, CMD(0x70), adr);
 chip->state = FL_ERASING;
 chip->oldstate = FL_READY;
-printk(KERN_ERR "Chip not ready after erase "
-"suspended: status = 0x%lx\n", status.x[0]);
+printk(KERN_ERR "%s: Chip not ready after erase "
+"suspended: status = 0x%lx\n", map->name, status.x[0]);
 return -EIO;
 }

@@ -783,14 +829,14 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 switch(chip->oldstate) {
 case FL_ERASING:
 chip->state = chip->oldstate;
 /* What if one interleaved chip has finished and the
 other hasn't? The old code would leave the finished
 one in READY mode. That's bad, and caused -EROFS
 errors to be returned from do_erase_oneblock because
 that's the only bit it checked for at the time.
 As the state machine appears to explicitly allow
 sending the 0x70 (Read Status) command to an erasing
 chip and expecting it to be ignored, that's what we
 do. */
 map_write(map, CMD(0xd0), adr);
 map_write(map, CMD(0x70), adr);
@@ -810,7 +856,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 DISABLE_VPP(map);
 break;
 default:
-printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
+printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
 }
 wake_up(&chip->wq);
 }
@@ -1026,8 +1072,8 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a

 adr += chip->start;

 /* Ensure cmd read/writes are aligned. */
 cmd_addr = adr & ~(map_bankwidth(map)-1);

 spin_lock(chip->mutex);

@@ -1055,7 +1101,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si

 if (!map->virt || (from + len > mtd->size))
 return -EINVAL;

 *mtdbuf = (void *)map->virt + from;
 *retlen = 0;

@@ -1082,7 +1128,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si

 *retlen += thislen;
 len -= thislen;

 ofs = 0;
 chipnum++;
 }
@@ -1121,7 +1167,7 @@ static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t fro
 if(chip->ref_point_counter == 0)
 chip->state = FL_READY;
 } else
-printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
+printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

 put_chip(map, chip, chip->start);
 spin_unlock(chip->mutex);
@@ -1140,8 +1186,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof

 adr += chip->start;

 /* Ensure cmd read/writes are aligned. */
 cmd_addr = adr & ~(map_bankwidth(map)-1);

 spin_lock(chip->mutex);
 ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -1196,7 +1242,7 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
 *retlen += thislen;
 len -= thislen;
 buf += thislen;

 ofs = 0;
 chipnum++;
 }
@@ -1213,12 +1259,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 adr += chip->start;

-/* Let's determine this according to the interleave only once */
+/* Let's determine those according to the interleave only once */
 status_OK = CMD(0x80);
 switch (mode) {
-case FL_WRITING: write_cmd = CMD(0x40); break;
-case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
-default: return -EINVAL;
+case FL_WRITING:
+write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
+break;
+case FL_OTP_WRITE:
+write_cmd = CMD(0xc0);
+break;
+default:
+return -EINVAL;
 }

 spin_lock(chip->mutex);
@@ -1259,12 +1310,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 status = map_read(map, adr);
 if (map_word_andequal(map, status, status_OK, status_OK))
 break;

 /* OK Still waiting */
 if (time_after(jiffies, timeo)) {
+map_write(map, CMD(0x70), adr);
 chip->state = FL_STATUS;
 xip_enable(map, chip, adr);
-printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
+printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
 ret = -EIO;
 goto out;
 }
@@ -1276,27 +1328,39 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 if (!z) {
 chip->word_write_time--;
 if (!chip->word_write_time)
-chip->word_write_time++;
+chip->word_write_time = 1;
 }
 if (z > 1)
 chip->word_write_time++;

 /* Done and happy. */
 chip->state = FL_STATUS;

-/* check for lock bit */
-if (map_word_bitsset(map, status, CMD(0x02))) {
-/* clear status */
+/* check for errors */
+if (map_word_bitsset(map, status, CMD(0x1a))) {
+unsigned long chipstatus = MERGESTATUS(status);
+
+/* reset status */
 map_write(map, CMD(0x50), adr);
-/* put back into read status register mode */
 map_write(map, CMD(0x70), adr);
-ret = -EROFS;
+xip_enable(map, chip, adr);
+
+if (chipstatus & 0x02) {
+ret = -EROFS;
+} else if (chipstatus & 0x08) {
+printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
+ret = -EIO;
+} else {
+printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
+ret = -EINVAL;
+}
+
+goto out;
 }

 xip_enable(map, chip, adr);
 out: put_chip(map, chip, adr);
 spin_unlock(chip->mutex);
-
 return ret;
 }

@@ -1329,7 +1393,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le

 ret = do_write_oneword(map, &cfi->chips[chipnum],
 bus_ofs, datum, FL_WRITING);
 if (ret)
 return ret;

 len -= n;
@@ -1338,13 +1402,13 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
 (*retlen) += n;

 if (ofs >> cfi->chipshift) {
 chipnum ++;
 ofs = 0;
 if (chipnum == cfi->numchips)
 return 0;
 }
 }

 while(len >= map_bankwidth(map)) {
 map_word datum = map_word_load(map, buf);

@@ -1359,7 +1423,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
 len -= map_bankwidth(map);

 if (ofs >> cfi->chipshift) {
 chipnum ++;
 ofs = 0;
 if (chipnum == cfi->numchips)
 return 0;
@@ -1374,9 +1438,9 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le

 ret = do_write_oneword(map, &cfi->chips[chipnum],
 ofs, datum, FL_WRITING);
 if (ret)
 return ret;

 (*retlen) += len;
 }

@@ -1384,20 +1448,24 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
 }


 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
-unsigned long adr, const u_char *buf, int len)
+unsigned long adr, const struct kvec **pvec,
+unsigned long *pvec_seek, int len)
 {
 struct cfi_private *cfi = map->fldrv_priv;
-map_word status, status_OK;
+map_word status, status_OK, write_cmd, datum;
 unsigned long cmd_adr, timeo;
-int wbufsize, z, ret=0, bytes, words;
+int wbufsize, z, ret=0, word_gap, words;
+const struct kvec *vec;
+unsigned long vec_seek;

 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
 adr += chip->start;
 cmd_adr = adr & ~(wbufsize-1);

 /* Let's determine this according to the interleave only once */
 status_OK = CMD(0x80);
+write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

 spin_lock(chip->mutex);
 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
@@ -1411,7 +1479,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 xip_disable(map, chip, cmd_adr);

 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
 [...], the device will not accept any more Write to Buffer commands".
 So we must check here and reset those bits if they're set. Otherwise
 we're just pissing in the wind */
 if (chip->state != FL_STATUS)
@@ -1429,7 +1497,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,

 z = 0;
 for (;;) {
-map_write(map, CMD(0xe8), cmd_adr);
+map_write(map, write_cmd, cmd_adr);

 status = map_read(map, cmd_adr);
 if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1447,41 +1515,66 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 map_write(map, CMD(0x50), cmd_adr);
 map_write(map, CMD(0x70), cmd_adr);
 xip_enable(map, chip, cmd_adr);
-printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
-status.x[0], Xstatus.x[0]);
+printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
+map->name, status.x[0], Xstatus.x[0]);
 ret = -EIO;
 goto out;
 }
 }

+/* Figure out the number of words to write */
+word_gap = (-adr & (map_bankwidth(map)-1));
+words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+if (!word_gap) {
+words--;
+} else {
+word_gap = map_bankwidth(map) - word_gap;
+adr -= word_gap;
+datum = map_word_ff(map);
+}
+
 /* Write length of data to come */
-bytes = len & (map_bankwidth(map)-1);
-words = len / map_bankwidth(map);
-map_write(map, CMD(words - !bytes), cmd_adr );
+map_write(map, CMD(words), cmd_adr );

 /* Write data */
-z = 0;
-while(z < words * map_bankwidth(map)) {
-map_word datum = map_word_load(map, buf);
-map_write(map, datum, adr+z);
+vec = *pvec;
+vec_seek = *pvec_seek;
+do {
+int n = map_bankwidth(map) - word_gap;
+if (n > vec->iov_len - vec_seek)
+n = vec->iov_len - vec_seek;
+if (n > len)
+n = len;

-z += map_bankwidth(map);
-buf += map_bankwidth(map);
-}
+if (!word_gap && len < map_bankwidth(map))
+datum = map_word_ff(map);

-if (bytes) {
-map_word datum;
+datum = map_word_load_partial(map, datum,
+vec->iov_base + vec_seek,
+word_gap, n);

-datum = map_word_ff(map);
-datum = map_word_load_partial(map, datum, buf, 0, bytes);
-map_write(map, datum, adr+z);
-}
+len -= n;
+word_gap += n;
+if (!len || word_gap == map_bankwidth(map)) {
+map_write(map, datum, adr);
+adr += map_bankwidth(map);
+word_gap = 0;
+}
+
+vec_seek += n;
+if (vec_seek == vec->iov_len) {
+vec++;
+vec_seek = 0;
+}
+} while (len);
+*pvec = vec;
+*pvec_seek = vec_seek;

 /* GO GO GO */
 map_write(map, CMD(0xd0), cmd_adr);
 chip->state = FL_WRITING;

 INVALIDATE_CACHE_UDELAY(map, chip,
 cmd_adr, len,
 chip->buffer_write_time);

@@ -1507,13 +1600,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,

 /* OK Still waiting */
 if (time_after(jiffies, timeo)) {
+map_write(map, CMD(0x70), cmd_adr);
 chip->state = FL_STATUS;
 xip_enable(map, chip, cmd_adr);
-printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
 ret = -EIO;
 goto out;
 }

 /* Latency issues. Drop the lock, wait a while and retry */
 z++;
 UDELAY(map, chip, cmd_adr, 1);
@@ -1521,21 +1615,34 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 if (!z) {
 chip->buffer_write_time--;
 if (!chip->buffer_write_time)
-chip->buffer_write_time++;
+chip->buffer_write_time = 1;
 }
 if (z > 1)
 chip->buffer_write_time++;

 /* Done and happy. */
 chip->state = FL_STATUS;

-/* check for lock bit */
-if (map_word_bitsset(map, status, CMD(0x02))) {
-/* clear status */
+/* check for errors */
+if (map_word_bitsset(map, status, CMD(0x1a))) {
+unsigned long chipstatus = MERGESTATUS(status);
+
+/* reset status */
 map_write(map, CMD(0x50), cmd_adr);
-/* put back into read status register mode */
-map_write(map, CMD(0x70), adr);
-ret = -EROFS;
+map_write(map, CMD(0x70), cmd_adr);
+xip_enable(map, chip, cmd_adr);
+
+if (chipstatus & 0x02) {
+ret = -EROFS;
+} else if (chipstatus & 0x08) {
+printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
+ret = -EIO;
+} else {
+printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
+ret = -EINVAL;
+}
+
+goto out;
 }

 xip_enable(map, chip, cmd_adr);
@@ -1544,70 +1651,65 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1544 return ret; 1651 return ret;
1545} 1652}
1546 1653
1547static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 1654static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1548 size_t len, size_t *retlen, const u_char *buf) 1655 unsigned long count, loff_t to, size_t *retlen)
1549{ 1656{
1550 struct map_info *map = mtd->priv; 1657 struct map_info *map = mtd->priv;
1551 struct cfi_private *cfi = map->fldrv_priv; 1658 struct cfi_private *cfi = map->fldrv_priv;
1552 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1659 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1553 int ret = 0; 1660 int ret = 0;
1554 int chipnum; 1661 int chipnum;
1555 unsigned long ofs; 1662 unsigned long ofs, vec_seek, i;
1663 size_t len = 0;
1664
1665 for (i = 0; i < count; i++)
1666 len += vecs[i].iov_len;
1556 1667
1557 *retlen = 0; 1668 *retlen = 0;
1558 if (!len) 1669 if (!len)
1559 return 0; 1670 return 0;
1560 1671
1561 chipnum = to >> cfi->chipshift; 1672 chipnum = to >> cfi->chipshift;
1562 ofs = to - (chipnum << cfi->chipshift); 1673 ofs = to - (chipnum << cfi->chipshift);
1563 1674 vec_seek = 0;
1564 /* If it's not bus-aligned, do the first word write */
1565 if (ofs & (map_bankwidth(map)-1)) {
1566 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1567 if (local_len > len)
1568 local_len = len;
1569 ret = cfi_intelext_write_words(mtd, to, local_len,
1570 retlen, buf);
1571 if (ret)
1572 return ret;
1573 ofs += local_len;
1574 buf += local_len;
1575 len -= local_len;
1576
1577 if (ofs >> cfi->chipshift) {
1578 chipnum ++;
1579 ofs = 0;
1580 if (chipnum == cfi->numchips)
1581 return 0;
1582 }
1583 }
1584 1675
1585 while(len) { 1676 do {
1586 /* We must not cross write block boundaries */ 1677 /* We must not cross write block boundaries */
1587 int size = wbufsize - (ofs & (wbufsize-1)); 1678 int size = wbufsize - (ofs & (wbufsize-1));
1588 1679
1589 if (size > len) 1680 if (size > len)
1590 size = len; 1681 size = len;
1591 ret = do_write_buffer(map, &cfi->chips[chipnum], 1682 ret = do_write_buffer(map, &cfi->chips[chipnum],
1592 ofs, buf, size); 1683 ofs, &vecs, &vec_seek, size);
1593 if (ret) 1684 if (ret)
1594 return ret; 1685 return ret;
1595 1686
1596 ofs += size; 1687 ofs += size;
1597 buf += size;
1598 (*retlen) += size; 1688 (*retlen) += size;
1599 len -= size; 1689 len -= size;
1600 1690
1601 if (ofs >> cfi->chipshift) { 1691 if (ofs >> cfi->chipshift) {
1602 chipnum ++; 1692 chipnum ++;
1603 ofs = 0; 1693 ofs = 0;
1604 if (chipnum == cfi->numchips) 1694 if (chipnum == cfi->numchips)
1605 return 0; 1695 return 0;
1606 } 1696 }
1607 } 1697 } while (len);
1698
1608 return 0; 1699 return 0;
1609} 1700}
1610 1701
1702static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1703 size_t len, size_t *retlen, const u_char *buf)
1704{
1705 struct kvec vec;
1706
1707 vec.iov_base = (void *) buf;
1708 vec.iov_len = len;
1709
1710 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1711}
1712
1611static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, 1713static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1612 unsigned long adr, int len, void *thunk) 1714 unsigned long adr, int len, void *thunk)
1613{ 1715{
@@ -1673,23 +1775,17 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1673 status = map_read(map, adr); 1775 status = map_read(map, adr);
1674 if (map_word_andequal(map, status, status_OK, status_OK)) 1776 if (map_word_andequal(map, status, status_OK, status_OK))
1675 break; 1777 break;
1676 1778
1677 /* OK Still waiting */ 1779 /* OK Still waiting */
1678 if (time_after(jiffies, timeo)) { 1780 if (time_after(jiffies, timeo)) {
1679 map_word Xstatus;
1680 map_write(map, CMD(0x70), adr); 1781 map_write(map, CMD(0x70), adr);
1681 chip->state = FL_STATUS; 1782 chip->state = FL_STATUS;
1682 Xstatus = map_read(map, adr);
1683 /* Clear status bits */
1684 map_write(map, CMD(0x50), adr);
1685 map_write(map, CMD(0x70), adr);
1686 xip_enable(map, chip, adr); 1783 xip_enable(map, chip, adr);
1687 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n", 1784 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1688 adr, status.x[0], Xstatus.x[0]);
1689 ret = -EIO; 1785 ret = -EIO;
1690 goto out; 1786 goto out;
1691 } 1787 }
1692 1788
1693 /* Latency issues. Drop the lock, wait a while and retry */ 1789 /* Latency issues. Drop the lock, wait a while and retry */
1694 UDELAY(map, chip, adr, 1000000/HZ); 1790 UDELAY(map, chip, adr, 1000000/HZ);
1695 } 1791 }
@@ -1699,43 +1795,40 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1699 chip->state = FL_STATUS; 1795 chip->state = FL_STATUS;
1700 status = map_read(map, adr); 1796 status = map_read(map, adr);
1701 1797
1702 /* check for lock bit */ 1798 /* check for errors */
1703 if (map_word_bitsset(map, status, CMD(0x3a))) { 1799 if (map_word_bitsset(map, status, CMD(0x3a))) {
1704 unsigned long chipstatus; 1800 unsigned long chipstatus = MERGESTATUS(status);
1705 1801
1706 /* Reset the error bits */ 1802 /* Reset the error bits */
1707 map_write(map, CMD(0x50), adr); 1803 map_write(map, CMD(0x50), adr);
1708 map_write(map, CMD(0x70), adr); 1804 map_write(map, CMD(0x70), adr);
1709 xip_enable(map, chip, adr); 1805 xip_enable(map, chip, adr);
1710 1806
1711 chipstatus = MERGESTATUS(status);
1712
1713 if ((chipstatus & 0x30) == 0x30) { 1807 if ((chipstatus & 0x30) == 0x30) {
1714 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus); 1808 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1715 ret = -EIO; 1809 ret = -EINVAL;
1716 } else if (chipstatus & 0x02) { 1810 } else if (chipstatus & 0x02) {
1717 /* Protection bit set */ 1811 /* Protection bit set */
1718 ret = -EROFS; 1812 ret = -EROFS;
1719 } else if (chipstatus & 0x8) { 1813 } else if (chipstatus & 0x8) {
1720 /* Voltage */ 1814 /* Voltage */
1721 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus); 1815 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1722 ret = -EIO; 1816 ret = -EIO;
1723 } else if (chipstatus & 0x20) { 1817 } else if (chipstatus & 0x20 && retries--) {
1724 if (retries--) { 1818 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1725 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1819 timeo = jiffies + HZ;
1726 timeo = jiffies + HZ; 1820 put_chip(map, chip, adr);
1727 put_chip(map, chip, adr); 1821 spin_unlock(chip->mutex);
1728 spin_unlock(chip->mutex); 1822 goto retry;
1729 goto retry; 1823 } else {
1730 } 1824 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1731 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
1732 ret = -EIO; 1825 ret = -EIO;
1733 } 1826 }
1734 } else { 1827
1735 xip_enable(map, chip, adr); 1828 goto out;
1736 ret = 0;
1737 } 1829 }
1738 1830
1831 xip_enable(map, chip, adr);
1739 out: put_chip(map, chip, adr); 1832 out: put_chip(map, chip, adr);
1740 spin_unlock(chip->mutex); 1833 spin_unlock(chip->mutex);
1741 return ret; 1834 return ret;
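
The erase path above now decodes the merged status register once (MERGESTATUS) and maps each error bit to a distinct errno, retrying only on the erase-failure bit. The following stand-alone sketch models that decode outside the driver; the bit positions follow the Intel status register as used in the hunk above, and the function name and main() harness are illustrative only.

#include <errno.h>
#include <stdio.h>

/* Map Intel-style status register error bits to errno values, in the
 * same priority order as the erase path above: bits 4+5 together mean
 * a bad command sequence, bit 1 a locked block, bit 3 a VPP fault,
 * bit 5 alone an erase failure (which the driver retries first). */
static int decode_erase_status(unsigned long chipstatus)
{
    if ((chipstatus & 0x30) == 0x30)
        return -EINVAL;        /* command sequence error */
    if (chipstatus & 0x02)
        return -EROFS;         /* block is locked */
    if (chipstatus & 0x08)
        return -EIO;           /* VPP out of range */
    if (chipstatus & 0x20)
        return -EIO;           /* erase failed */
    return 0;
}

int main(void)
{
    printf("locked block -> %d\n", decode_erase_status(0x02));
    printf("bad sequence -> %d\n", decode_erase_status(0x30));
    printf("clean status -> %d\n", decode_erase_status(0x00));
    return 0;
}
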
@@ -1755,7 +1848,7 @@ int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1755 1848
1756 instr->state = MTD_ERASE_DONE; 1849 instr->state = MTD_ERASE_DONE;
1757 mtd_erase_callback(instr); 1850 mtd_erase_callback(instr);
1758 1851
1759 return 0; 1852 return 0;
1760} 1853}
1761 1854
@@ -1776,7 +1869,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1776 if (!ret) { 1869 if (!ret) {
1777 chip->oldstate = chip->state; 1870 chip->oldstate = chip->state;
1778 chip->state = FL_SYNCING; 1871 chip->state = FL_SYNCING;
1779 /* No need to wake_up() on this state change - 1872 /* No need to wake_up() on this state change -
1780 * as the whole point is that nobody can do anything 1873 * as the whole point is that nobody can do anything
1781 * with the chip now anyway. 1874 * with the chip now anyway.
1782 */ 1875 */
@@ -1790,7 +1883,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1790 chip = &cfi->chips[i]; 1883 chip = &cfi->chips[i];
1791 1884
1792 spin_lock(chip->mutex); 1885 spin_lock(chip->mutex);
1793 1886
1794 if (chip->state == FL_SYNCING) { 1887 if (chip->state == FL_SYNCING) {
1795 chip->state = chip->oldstate; 1888 chip->state = chip->oldstate;
1796 chip->oldstate = FL_READY; 1889 chip->oldstate = FL_READY;
@@ -1847,7 +1940,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1847 1940
1848 ENABLE_VPP(map); 1941 ENABLE_VPP(map);
1849 xip_disable(map, chip, adr); 1942 xip_disable(map, chip, adr);
1850 1943
1851 map_write(map, CMD(0x60), adr); 1944 map_write(map, CMD(0x60), adr);
1852 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 1945 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1853 map_write(map, CMD(0x01), adr); 1946 map_write(map, CMD(0x01), adr);
@@ -1875,25 +1968,22 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1875 status = map_read(map, adr); 1968 status = map_read(map, adr);
1876 if (map_word_andequal(map, status, status_OK, status_OK)) 1969 if (map_word_andequal(map, status, status_OK, status_OK))
1877 break; 1970 break;
1878 1971
1879 /* OK Still waiting */ 1972 /* OK Still waiting */
1880 if (time_after(jiffies, timeo)) { 1973 if (time_after(jiffies, timeo)) {
1881 map_word Xstatus;
1882 map_write(map, CMD(0x70), adr); 1974 map_write(map, CMD(0x70), adr);
1883 chip->state = FL_STATUS; 1975 chip->state = FL_STATUS;
1884 Xstatus = map_read(map, adr);
1885 xip_enable(map, chip, adr); 1976 xip_enable(map, chip, adr);
1886 printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n", 1977 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1887 status.x[0], Xstatus.x[0]);
1888 put_chip(map, chip, adr); 1978 put_chip(map, chip, adr);
1889 spin_unlock(chip->mutex); 1979 spin_unlock(chip->mutex);
1890 return -EIO; 1980 return -EIO;
1891 } 1981 }
1892 1982
1893 /* Latency issues. Drop the lock, wait a while and retry */ 1983 /* Latency issues. Drop the lock, wait a while and retry */
1894 UDELAY(map, chip, adr, 1); 1984 UDELAY(map, chip, adr, 1);
1895 } 1985 }
1896 1986
1897 /* Done and happy. */ 1987 /* Done and happy. */
1898 chip->state = FL_STATUS; 1988 chip->state = FL_STATUS;
1899 xip_enable(map, chip, adr); 1989 xip_enable(map, chip, adr);
@@ -1913,9 +2003,9 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1913 ofs, len, 0); 2003 ofs, len, 0);
1914#endif 2004#endif
1915 2005
1916 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 2006 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1917 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK); 2007 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1918 2008
1919#ifdef DEBUG_LOCK_BITS 2009#ifdef DEBUG_LOCK_BITS
1920 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2010 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1921 __FUNCTION__, ret); 2011 __FUNCTION__, ret);
@@ -1939,20 +2029,20 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1939 2029
1940 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 2030 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1941 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK); 2031 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1942 2032
1943#ifdef DEBUG_LOCK_BITS 2033#ifdef DEBUG_LOCK_BITS
1944 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2034 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1945 __FUNCTION__, ret); 2035 __FUNCTION__, ret);
1946 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2036 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1947 ofs, len, 0); 2037 ofs, len, 0);
1948#endif 2038#endif
1949 2039
1950 return ret; 2040 return ret;
1951} 2041}
1952 2042
1953#ifdef CONFIG_MTD_OTP 2043#ifdef CONFIG_MTD_OTP
1954 2044
1955typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 2045typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1956 u_long data_offset, u_char *buf, u_int size, 2046 u_long data_offset, u_char *buf, u_int size,
1957 u_long prot_offset, u_int groupno, u_int groupsize); 2047 u_long prot_offset, u_int groupno, u_int groupsize);
1958 2048
@@ -2003,7 +2093,7 @@ do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2003 2093
2004 datum = map_word_load_partial(map, datum, buf, gap, n); 2094 datum = map_word_load_partial(map, datum, buf, gap, n);
2005 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 2095 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2006 if (ret) 2096 if (ret)
2007 return ret; 2097 return ret;
2008 2098
2009 offset += n; 2099 offset += n;
@@ -2196,7 +2286,7 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2196 NULL, do_otp_lock, 1); 2286 NULL, do_otp_lock, 1);
2197} 2287}
2198 2288
2199static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, 2289static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2200 struct otp_info *buf, size_t len) 2290 struct otp_info *buf, size_t len)
2201{ 2291{
2202 size_t retlen; 2292 size_t retlen;
@@ -2239,7 +2329,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2239 if (chip->oldstate == FL_READY) { 2329 if (chip->oldstate == FL_READY) {
2240 chip->oldstate = chip->state; 2330 chip->oldstate = chip->state;
2241 chip->state = FL_PM_SUSPENDED; 2331 chip->state = FL_PM_SUSPENDED;
2242 /* No need to wake_up() on this state change - 2332 /* No need to wake_up() on this state change -
2243 * as the whole point is that nobody can do anything 2333 * as the whole point is that nobody can do anything
2244 * with the chip now anyway. 2334 * with the chip now anyway.
2245 */ 2335 */
@@ -2267,9 +2357,9 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2267 if (ret) { 2357 if (ret) {
2268 for (i--; i >=0; i--) { 2358 for (i--; i >=0; i--) {
2269 chip = &cfi->chips[i]; 2359 chip = &cfi->chips[i];
2270 2360
2271 spin_lock(chip->mutex); 2361 spin_lock(chip->mutex);
2272 2362
2273 if (chip->state == FL_PM_SUSPENDED) { 2363 if (chip->state == FL_PM_SUSPENDED) {
2274 /* No need to force it into a known state here, 2364 /* No need to force it into a known state here,
2275 because we're returning failure, and it didn't 2365 because we're returning failure, and it didn't
@@ -2280,8 +2370,8 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2280 } 2370 }
2281 spin_unlock(chip->mutex); 2371 spin_unlock(chip->mutex);
2282 } 2372 }
2283 } 2373 }
2284 2374
2285 return ret; 2375 return ret;
2286} 2376}
2287 2377
@@ -2293,11 +2383,11 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2293 struct flchip *chip; 2383 struct flchip *chip;
2294 2384
2295 for (i=0; i<cfi->numchips; i++) { 2385 for (i=0; i<cfi->numchips; i++) {
2296 2386
2297 chip = &cfi->chips[i]; 2387 chip = &cfi->chips[i];
2298 2388
2299 spin_lock(chip->mutex); 2389 spin_lock(chip->mutex);
2300 2390
2301 /* Go to known state. Chip may have been power cycled */ 2391 /* Go to known state. Chip may have been power cycled */
2302 if (chip->state == FL_PM_SUSPENDED) { 2392 if (chip->state == FL_PM_SUSPENDED) {
2303 map_write(map, CMD(0xFF), cfi->chips[i].start); 2393 map_write(map, CMD(0xFF), cfi->chips[i].start);
@@ -2319,7 +2409,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2319 struct flchip *chip = &cfi->chips[i]; 2409 struct flchip *chip = &cfi->chips[i];
2320 2410
2321 /* force the completion of any ongoing operation 2411 /* force the completion of any ongoing operation
2322 and switch to array mode so any bootloader in 2412 and switch to array mode so any bootloader in
2323 flash is accessible for soft reboot. */ 2413 flash is accessible for soft reboot. */
2324 spin_lock(chip->mutex); 2414 spin_lock(chip->mutex);
2325 ret = get_chip(map, chip, chip->start, FL_SYNCING); 2415 ret = get_chip(map, chip, chip->start, FL_SYNCING);
@@ -2356,20 +2446,23 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
2356 kfree(mtd->eraseregions); 2446 kfree(mtd->eraseregions);
2357} 2447}
2358 2448
2359static char im_name_1[]="cfi_cmdset_0001"; 2449static char im_name_0001[] = "cfi_cmdset_0001";
2360static char im_name_3[]="cfi_cmdset_0003"; 2450static char im_name_0003[] = "cfi_cmdset_0003";
2451static char im_name_0200[] = "cfi_cmdset_0200";
2361 2452
2362static int __init cfi_intelext_init(void) 2453static int __init cfi_intelext_init(void)
2363{ 2454{
2364 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001); 2455 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2365 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001); 2456 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2457 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2366 return 0; 2458 return 0;
2367} 2459}
2368 2460
2369static void __exit cfi_intelext_exit(void) 2461static void __exit cfi_intelext_exit(void)
2370{ 2462{
2371 inter_module_unregister(im_name_1); 2463 inter_module_unregister(im_name_0001);
2372 inter_module_unregister(im_name_3); 2464 inter_module_unregister(im_name_0003);
2465 inter_module_unregister(im_name_0200);
2373} 2466}
2374 2467
2375module_init(cfi_intelext_init); 2468module_init(cfi_intelext_init);
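
The buffered-write conversion above gathers bytes from an array of kvec segments into bus-width words, padding any leading or trailing gap with 0xff so bytes outside the request stay erased, and flushing a word whenever it fills or the data runs out. A minimal stand-alone sketch of that gathering step follows, using a plain byte array in place of map_word and memcpy in place of map_word_load_partial(); BANKWIDTH, gather_words and the sample kvec contents are illustrative, not part of the patch.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BANKWIDTH 4   /* illustrative bus width in bytes */

struct kvec { void *iov_base; size_t iov_len; };

/* Gather 'len' bytes spread across 'vecs' into BANKWIDTH-sized words,
 * starting 'word_gap' bytes into the first word; gap bytes stay 0xff. */
static void gather_words(const struct kvec *vecs, unsigned count,
                         size_t word_gap, size_t len)
{
    unsigned char word[BANKWIDTH];
    size_t vec_seek = 0;
    unsigned v = 0;

    memset(word, 0xff, sizeof(word));
    while (len && v < count) {
        size_t n = BANKWIDTH - word_gap;
        if (n > vecs[v].iov_len - vec_seek)
            n = vecs[v].iov_len - vec_seek;
        if (n > len)
            n = len;

        memcpy(word + word_gap,
               (const unsigned char *)vecs[v].iov_base + vec_seek, n);
        len -= n;
        word_gap += n;

        if (!len || word_gap == BANKWIDTH) {   /* word full or data exhausted: flush */
            printf("write word: %02x %02x %02x %02x\n",
                   word[0], word[1], word[2], word[3]);
            memset(word, 0xff, sizeof(word));
            word_gap = 0;
        }

        vec_seek += n;
        if (vec_seek == vecs[v].iov_len) {     /* advance to the next segment */
            v++;
            vec_seek = 0;
        }
    }
}

int main(void)
{
    unsigned char a[] = { 1, 2, 3 }, b[] = { 4, 5, 6, 7 };
    struct kvec vecs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

    /* start one byte into the first word, as for an unaligned destination */
    gather_words(vecs, 2, 1, sizeof(a) + sizeof(b));
    return 0;
}
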
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 8505f118f2db..aed10bd5c3c3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -10,14 +10,14 @@
10 * 10 *
11 * 4_by_16 work by Carolyn J. Smith 11 * 4_by_16 work by Carolyn J. Smith
12 * 12 *
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
14 * by Nicolas Pitre) 14 * by Nicolas Pitre)
15 * 15 *
16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17 * 17 *
18 * This code is GPL 18 * This code is GPL
19 * 19 *
20 * $Id: cfi_cmdset_0002.c,v 1.118 2005/07/04 22:34:29 gleixner Exp $ 20 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
21 * 21 *
22 */ 22 */
23 23
@@ -93,7 +93,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
93 }; 93 };
94 94
95 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 95 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
96 printk(" Address sensitive unlock: %s\n", 96 printk(" Address sensitive unlock: %s\n",
97 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 97 (extp->SiliconRevision & 1) ? "Not required" : "Required");
98 98
99 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 99 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
@@ -118,9 +118,9 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
118 else 118 else
119 printk(" Page mode: %d word page\n", extp->PageMode << 2); 119 printk(" Page mode: %d word page\n", extp->PageMode << 2);
120 120
121 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 121 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
122 extp->VppMin >> 4, extp->VppMin & 0xf); 122 extp->VppMin >> 4, extp->VppMin & 0xf);
123 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 123 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
124 extp->VppMax >> 4, extp->VppMax & 0xf); 124 extp->VppMax >> 4, extp->VppMax & 0xf);
125 125
126 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 126 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
@@ -177,7 +177,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
177 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 177 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
178 mtd->erase = cfi_amdstd_erase_chip; 178 mtd->erase = cfi_amdstd_erase_chip;
179 } 179 }
180 180
181} 181}
182 182
183static struct cfi_fixup cfi_fixup_table[] = { 183static struct cfi_fixup cfi_fixup_table[] = {
@@ -239,7 +239,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
239 239
240 if (cfi->cfi_mode==CFI_MODE_CFI){ 240 if (cfi->cfi_mode==CFI_MODE_CFI){
241 unsigned char bootloc; 241 unsigned char bootloc;
242 /* 242 /*
243 * It's a real CFI chip, not one for which the probe 243 * It's a real CFI chip, not one for which the probe
244 * routine faked a CFI structure. So we read the feature 244 * routine faked a CFI structure. So we read the feature
245 * table from it. 245 * table from it.
@@ -253,8 +253,18 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
253 return NULL; 253 return NULL;
254 } 254 }
255 255
256 if (extp->MajorVersion != '1' ||
257 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
258 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
259 "version %c.%c.\n", extp->MajorVersion,
260 extp->MinorVersion);
261 kfree(extp);
262 kfree(mtd);
263 return NULL;
264 }
265
256 /* Install our own private info structure */ 266 /* Install our own private info structure */
257 cfi->cmdset_priv = extp; 267 cfi->cmdset_priv = extp;
258 268
259 /* Apply cfi device specific fixups */ 269 /* Apply cfi device specific fixups */
260 cfi_fixup(mtd, cfi_fixup_table); 270 cfi_fixup(mtd, cfi_fixup_table);
@@ -262,7 +272,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
262#ifdef DEBUG_CFI_FEATURES 272#ifdef DEBUG_CFI_FEATURES
263 /* Tell the user about it in lots of lovely detail */ 273 /* Tell the user about it in lots of lovely detail */
264 cfi_tell_features(extp); 274 cfi_tell_features(extp);
265#endif 275#endif
266 276
267 bootloc = extp->TopBottom; 277 bootloc = extp->TopBottom;
268 if ((bootloc != 2) && (bootloc != 3)) { 278 if ((bootloc != 2) && (bootloc != 3)) {
@@ -273,11 +283,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
273 283
274 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 284 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
275 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); 285 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
276 286
277 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 287 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
278 int j = (cfi->cfiq->NumEraseRegions-1)-i; 288 int j = (cfi->cfiq->NumEraseRegions-1)-i;
279 __u32 swap; 289 __u32 swap;
280 290
281 swap = cfi->cfiq->EraseRegionInfo[i]; 291 swap = cfi->cfiq->EraseRegionInfo[i];
282 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 292 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
283 cfi->cfiq->EraseRegionInfo[j] = swap; 293 cfi->cfiq->EraseRegionInfo[j] = swap;
@@ -288,11 +298,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
288 cfi->addr_unlock2 = 0x2aa; 298 cfi->addr_unlock2 = 0x2aa;
289 /* Modify the unlock address if we are in compatibility mode */ 299 /* Modify the unlock address if we are in compatibility mode */
290 if ( /* x16 in x8 mode */ 300 if ( /* x16 in x8 mode */
291 ((cfi->device_type == CFI_DEVICETYPE_X8) && 301 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
292 (cfi->cfiq->InterfaceDesc == 2)) || 302 (cfi->cfiq->InterfaceDesc == 2)) ||
293 /* x32 in x16 mode */ 303 /* x32 in x16 mode */
294 ((cfi->device_type == CFI_DEVICETYPE_X16) && 304 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
295 (cfi->cfiq->InterfaceDesc == 4))) 305 (cfi->cfiq->InterfaceDesc == 4)))
296 { 306 {
297 cfi->addr_unlock1 = 0xaaa; 307 cfi->addr_unlock1 = 0xaaa;
298 cfi->addr_unlock2 = 0x555; 308 cfi->addr_unlock2 = 0x555;
@@ -310,10 +320,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
310 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 320 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
311 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 321 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
312 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 322 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
313 } 323 }
314 324
315 map->fldrv = &cfi_amdstd_chipdrv; 325 map->fldrv = &cfi_amdstd_chipdrv;
316 326
317 return cfi_amdstd_setup(mtd); 327 return cfi_amdstd_setup(mtd);
318} 328}
319 329
@@ -326,24 +336,24 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
326 unsigned long offset = 0; 336 unsigned long offset = 0;
327 int i,j; 337 int i,j;
328 338
329 printk(KERN_NOTICE "number of %s chips: %d\n", 339 printk(KERN_NOTICE "number of %s chips: %d\n",
330 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 340 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
331 /* Select the correct geometry setup */ 341 /* Select the correct geometry setup */
332 mtd->size = devsize * cfi->numchips; 342 mtd->size = devsize * cfi->numchips;
333 343
334 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 344 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
335 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 345 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
336 * mtd->numeraseregions, GFP_KERNEL); 346 * mtd->numeraseregions, GFP_KERNEL);
337 if (!mtd->eraseregions) { 347 if (!mtd->eraseregions) {
338 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); 348 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
339 goto setup_err; 349 goto setup_err;
340 } 350 }
341 351
342 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 352 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
343 unsigned long ernum, ersize; 353 unsigned long ernum, ersize;
344 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 354 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
345 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 355 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
346 356
347 if (mtd->erasesize < ersize) { 357 if (mtd->erasesize < ersize) {
348 mtd->erasesize = ersize; 358 mtd->erasesize = ersize;
349 } 359 }
@@ -378,8 +388,7 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
378 388
379 setup_err: 389 setup_err:
380 if(mtd) { 390 if(mtd) {
381 if(mtd->eraseregions) 391 kfree(mtd->eraseregions);
382 kfree(mtd->eraseregions);
383 kfree(mtd); 392 kfree(mtd);
384 } 393 }
385 kfree(cfi->cmdset_priv); 394 kfree(cfi->cmdset_priv);
@@ -430,7 +439,7 @@ static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word
430 oldd = map_read(map, addr); 439 oldd = map_read(map, addr);
431 curd = map_read(map, addr); 440 curd = map_read(map, addr);
432 441
433 return map_word_equal(map, oldd, curd) && 442 return map_word_equal(map, oldd, curd) &&
434 map_word_equal(map, curd, expected); 443 map_word_equal(map, curd, expected);
435} 444}
436 445
@@ -462,7 +471,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
462 /* Someone else might have been playing with it. */ 471 /* Someone else might have been playing with it. */
463 goto retry; 472 goto retry;
464 } 473 }
465 474
466 case FL_READY: 475 case FL_READY:
467 case FL_CFI_QUERY: 476 case FL_CFI_QUERY:
468 case FL_JEDEC_QUERY: 477 case FL_JEDEC_QUERY:
@@ -505,7 +514,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
505 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 514 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
506 return -EIO; 515 return -EIO;
507 } 516 }
508 517
509 spin_unlock(chip->mutex); 518 spin_unlock(chip->mutex);
510 cfi_udelay(1); 519 cfi_udelay(1);
511 spin_lock(chip->mutex); 520 spin_lock(chip->mutex);
@@ -608,7 +617,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
608 * When a delay is required for the flash operation to complete, the 617 * When a delay is required for the flash operation to complete, the
609 * xip_udelay() function is polling for both the given timeout and pending 618 * xip_udelay() function is polling for both the given timeout and pending
610 * (but still masked) hardware interrupts. Whenever there is an interrupt 619 * (but still masked) hardware interrupts. Whenever there is an interrupt
611 * pending then the flash erase operation is suspended, array mode restored 620 * pending then the flash erase operation is suspended, array mode restored
612 * and interrupts unmasked. Task scheduling might also happen at that 621 * and interrupts unmasked. Task scheduling might also happen at that
613 * point. The CPU eventually returns from the interrupt or the call to 622 * point. The CPU eventually returns from the interrupt or the call to
614 * schedule() and the suspended flash operation is resumed for the remaining 623 * schedule() and the suspended flash operation is resumed for the remaining
@@ -632,9 +641,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
632 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 641 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
633 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 642 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
634 /* 643 /*
635 * Let's suspend the erase operation when supported. 644 * Let's suspend the erase operation when supported.
636 * Note that we currently don't try to suspend 645 * Note that we currently don't try to suspend
637 * interleaved chips if there is already another 646 * interleaved chips if there is already another
638 * operation suspended (imagine what happens 647 * operation suspended (imagine what happens
639 * when one chip was already done with the current 648 * when one chip was already done with the current
640 * operation while another chip suspended it, then 649 * operation while another chip suspended it, then
@@ -770,8 +779,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
770 779
771 adr += chip->start; 780 adr += chip->start;
772 781
773 /* Ensure cmd read/writes are aligned. */ 782 /* Ensure cmd read/writes are aligned. */
774 cmd_addr = adr & ~(map_bankwidth(map)-1); 783 cmd_addr = adr & ~(map_bankwidth(map)-1);
775 784
776 spin_lock(chip->mutex); 785 spin_lock(chip->mutex);
777 ret = get_chip(map, chip, cmd_addr, FL_READY); 786 ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -851,7 +860,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
851#endif 860#endif
852 set_current_state(TASK_UNINTERRUPTIBLE); 861 set_current_state(TASK_UNINTERRUPTIBLE);
853 add_wait_queue(&chip->wq, &wait); 862 add_wait_queue(&chip->wq, &wait);
854 863
855 spin_unlock(chip->mutex); 864 spin_unlock(chip->mutex);
856 865
857 schedule(); 866 schedule();
@@ -863,7 +872,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
863 timeo = jiffies + HZ; 872 timeo = jiffies + HZ;
864 873
865 goto retry; 874 goto retry;
866 } 875 }
867 876
868 adr += chip->start; 877 adr += chip->start;
869 878
@@ -872,14 +881,14 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
872 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 881 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
873 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 882 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
874 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 883 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
875 884
876 map_copy_from(map, buf, adr, len); 885 map_copy_from(map, buf, adr, len);
877 886
878 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 887 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
879 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 888 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
880 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 889 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
881 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 890 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
882 891
883 wake_up(&chip->wq); 892 wake_up(&chip->wq);
884 spin_unlock(chip->mutex); 893 spin_unlock(chip->mutex);
885 894
@@ -988,7 +997,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
988 chip->word_write_time); 997 chip->word_write_time);
989 998
990 /* See comment above for timeout value. */ 999 /* See comment above for timeout value. */
991 timeo = jiffies + uWriteTimeout; 1000 timeo = jiffies + uWriteTimeout;
992 for (;;) { 1001 for (;;) {
993 if (chip->state != FL_WRITING) { 1002 if (chip->state != FL_WRITING) {
994 /* Someone's suspended the write. Sleep */ 1003 /* Someone's suspended the write. Sleep */
@@ -1004,16 +1013,16 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1004 continue; 1013 continue;
1005 } 1014 }
1006 1015
1007 if (chip_ready(map, adr)) 1016 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1008 break;
1009
1010 if (time_after(jiffies, timeo)) {
1011 xip_enable(map, chip, adr); 1017 xip_enable(map, chip, adr);
1012 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1018 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1013 xip_disable(map, chip, adr); 1019 xip_disable(map, chip, adr);
1014 break; 1020 break;
1015 } 1021 }
1016 1022
1023 if (chip_ready(map, adr))
1024 break;
1025
1017 /* Latency issues. Drop the lock, wait a while and retry */ 1026 /* Latency issues. Drop the lock, wait a while and retry */
1018 UDELAY(map, chip, adr, 1); 1027 UDELAY(map, chip, adr, 1);
1019 } 1028 }
@@ -1023,7 +1032,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1023 map_write( map, CMD(0xF0), chip->start ); 1032 map_write( map, CMD(0xF0), chip->start );
1024 /* FIXME - should have reset delay before continuing */ 1033 /* FIXME - should have reset delay before continuing */
1025 1034
1026 if (++retry_cnt <= MAX_WORD_RETRIES) 1035 if (++retry_cnt <= MAX_WORD_RETRIES)
1027 goto retry; 1036 goto retry;
1028 1037
1029 ret = -EIO; 1038 ret = -EIO;
@@ -1091,27 +1100,27 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1091 1100
1092 /* Number of bytes to copy from buffer */ 1101 /* Number of bytes to copy from buffer */
1093 n = min_t(int, len, map_bankwidth(map)-i); 1102 n = min_t(int, len, map_bankwidth(map)-i);
1094 1103
1095 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1104 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1096 1105
1097 ret = do_write_oneword(map, &cfi->chips[chipnum], 1106 ret = do_write_oneword(map, &cfi->chips[chipnum],
1098 bus_ofs, tmp_buf); 1107 bus_ofs, tmp_buf);
1099 if (ret) 1108 if (ret)
1100 return ret; 1109 return ret;
1101 1110
1102 ofs += n; 1111 ofs += n;
1103 buf += n; 1112 buf += n;
1104 (*retlen) += n; 1113 (*retlen) += n;
1105 len -= n; 1114 len -= n;
1106 1115
1107 if (ofs >> cfi->chipshift) { 1116 if (ofs >> cfi->chipshift) {
1108 chipnum ++; 1117 chipnum ++;
1109 ofs = 0; 1118 ofs = 0;
1110 if (chipnum == cfi->numchips) 1119 if (chipnum == cfi->numchips)
1111 return 0; 1120 return 0;
1112 } 1121 }
1113 } 1122 }
1114 1123
1115 /* We are now aligned, write as much as possible */ 1124 /* We are now aligned, write as much as possible */
1116 while(len >= map_bankwidth(map)) { 1125 while(len >= map_bankwidth(map)) {
1117 map_word datum; 1126 map_word datum;
@@ -1129,7 +1138,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1129 len -= map_bankwidth(map); 1138 len -= map_bankwidth(map);
1130 1139
1131 if (ofs >> cfi->chipshift) { 1140 if (ofs >> cfi->chipshift) {
1132 chipnum ++; 1141 chipnum ++;
1133 ofs = 0; 1142 ofs = 0;
1134 if (chipnum == cfi->numchips) 1143 if (chipnum == cfi->numchips)
1135 return 0; 1144 return 0;
@@ -1167,12 +1176,12 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1167 spin_unlock(cfi->chips[chipnum].mutex); 1176 spin_unlock(cfi->chips[chipnum].mutex);
1168 1177
1169 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1178 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1170 1179
1171 ret = do_write_oneword(map, &cfi->chips[chipnum], 1180 ret = do_write_oneword(map, &cfi->chips[chipnum],
1172 ofs, tmp_buf); 1181 ofs, tmp_buf);
1173 if (ret) 1182 if (ret)
1174 return ret; 1183 return ret;
1175 1184
1176 (*retlen) += len; 1185 (*retlen) += len;
1177 } 1186 }
1178 1187
@@ -1184,7 +1193,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1184 * FIXME: interleaved mode not tested, and probably not supported! 1193 * FIXME: interleaved mode not tested, and probably not supported!
1185 */ 1194 */
1186static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1195static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1187 unsigned long adr, const u_char *buf, 1196 unsigned long adr, const u_char *buf,
1188 int len) 1197 int len)
1189{ 1198{
1190 struct cfi_private *cfi = map->fldrv_priv; 1199 struct cfi_private *cfi = map->fldrv_priv;
@@ -1214,7 +1223,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1214 XIP_INVAL_CACHED_RANGE(map, adr, len); 1223 XIP_INVAL_CACHED_RANGE(map, adr, len);
1215 ENABLE_VPP(map); 1224 ENABLE_VPP(map);
1216 xip_disable(map, chip, cmd_adr); 1225 xip_disable(map, chip, cmd_adr);
1217 1226
1218 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1227 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1219 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1228 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1220 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1229 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -1248,8 +1257,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1248 adr, map_bankwidth(map), 1257 adr, map_bankwidth(map),
1249 chip->word_write_time); 1258 chip->word_write_time);
1250 1259
1251 timeo = jiffies + uWriteTimeout; 1260 timeo = jiffies + uWriteTimeout;
1252 1261
1253 for (;;) { 1262 for (;;) {
1254 if (chip->state != FL_WRITING) { 1263 if (chip->state != FL_WRITING) {
1255 /* Someone's suspended the write. Sleep */ 1264 /* Someone's suspended the write. Sleep */
@@ -1265,13 +1274,13 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1265 continue; 1274 continue;
1266 } 1275 }
1267 1276
1277 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1278 break;
1279
1268 if (chip_ready(map, adr)) { 1280 if (chip_ready(map, adr)) {
1269 xip_enable(map, chip, adr); 1281 xip_enable(map, chip, adr);
1270 goto op_done; 1282 goto op_done;
1271 } 1283 }
1272
1273 if( time_after(jiffies, timeo))
1274 break;
1275 1284
1276 /* Latency issues. Drop the lock, wait a while and retry */ 1285 /* Latency issues. Drop the lock, wait a while and retry */
1277 UDELAY(map, chip, adr, 1); 1286 UDELAY(map, chip, adr, 1);
@@ -1343,7 +1352,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1343 if (size % map_bankwidth(map)) 1352 if (size % map_bankwidth(map))
1344 size -= size % map_bankwidth(map); 1353 size -= size % map_bankwidth(map);
1345 1354
1346 ret = do_write_buffer(map, &cfi->chips[chipnum], 1355 ret = do_write_buffer(map, &cfi->chips[chipnum],
1347 ofs, buf, size); 1356 ofs, buf, size);
1348 if (ret) 1357 if (ret)
1349 return ret; 1358 return ret;
@@ -1354,7 +1363,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1354 len -= size; 1363 len -= size;
1355 1364
1356 if (ofs >> cfi->chipshift) { 1365 if (ofs >> cfi->chipshift) {
1357 chipnum ++; 1366 chipnum ++;
1358 ofs = 0; 1367 ofs = 0;
1359 if (chipnum == cfi->numchips) 1368 if (chipnum == cfi->numchips)
1360 return 0; 1369 return 0;
@@ -1571,7 +1580,7 @@ int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1571 1580
1572 instr->state = MTD_ERASE_DONE; 1581 instr->state = MTD_ERASE_DONE;
1573 mtd_erase_callback(instr); 1582 mtd_erase_callback(instr);
1574 1583
1575 return 0; 1584 return 0;
1576} 1585}
1577 1586
@@ -1594,7 +1603,7 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1594 1603
1595 instr->state = MTD_ERASE_DONE; 1604 instr->state = MTD_ERASE_DONE;
1596 mtd_erase_callback(instr); 1605 mtd_erase_callback(instr);
1597 1606
1598 return 0; 1607 return 0;
1599} 1608}
1600 1609
@@ -1621,7 +1630,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1621 case FL_JEDEC_QUERY: 1630 case FL_JEDEC_QUERY:
1622 chip->oldstate = chip->state; 1631 chip->oldstate = chip->state;
1623 chip->state = FL_SYNCING; 1632 chip->state = FL_SYNCING;
1624 /* No need to wake_up() on this state change - 1633 /* No need to wake_up() on this state change -
1625 * as the whole point is that nobody can do anything 1634 * as the whole point is that nobody can do anything
1626 * with the chip now anyway. 1635 * with the chip now anyway.
1627 */ 1636 */
@@ -1632,13 +1641,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1632 default: 1641 default:
1633 /* Not an idle state */ 1642 /* Not an idle state */
1634 add_wait_queue(&chip->wq, &wait); 1643 add_wait_queue(&chip->wq, &wait);
1635 1644
1636 spin_unlock(chip->mutex); 1645 spin_unlock(chip->mutex);
1637 1646
1638 schedule(); 1647 schedule();
1639 1648
1640 remove_wait_queue(&chip->wq, &wait); 1649 remove_wait_queue(&chip->wq, &wait);
1641 1650
1642 goto retry; 1651 goto retry;
1643 } 1652 }
1644 } 1653 }
@@ -1649,7 +1658,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1649 chip = &cfi->chips[i]; 1658 chip = &cfi->chips[i];
1650 1659
1651 spin_lock(chip->mutex); 1660 spin_lock(chip->mutex);
1652 1661
1653 if (chip->state == FL_SYNCING) { 1662 if (chip->state == FL_SYNCING) {
1654 chip->state = chip->oldstate; 1663 chip->state = chip->oldstate;
1655 wake_up(&chip->wq); 1664 wake_up(&chip->wq);
@@ -1679,7 +1688,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1679 case FL_JEDEC_QUERY: 1688 case FL_JEDEC_QUERY:
1680 chip->oldstate = chip->state; 1689 chip->oldstate = chip->state;
1681 chip->state = FL_PM_SUSPENDED; 1690 chip->state = FL_PM_SUSPENDED;
1682 /* No need to wake_up() on this state change - 1691 /* No need to wake_up() on this state change -
1683 * as the whole point is that nobody can do anything 1692 * as the whole point is that nobody can do anything
1684 * with the chip now anyway. 1693 * with the chip now anyway.
1685 */ 1694 */
@@ -1700,7 +1709,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1700 chip = &cfi->chips[i]; 1709 chip = &cfi->chips[i];
1701 1710
1702 spin_lock(chip->mutex); 1711 spin_lock(chip->mutex);
1703 1712
1704 if (chip->state == FL_PM_SUSPENDED) { 1713 if (chip->state == FL_PM_SUSPENDED) {
1705 chip->state = chip->oldstate; 1714 chip->state = chip->oldstate;
1706 wake_up(&chip->wq); 1715 wake_up(&chip->wq);
@@ -1708,7 +1717,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1708 spin_unlock(chip->mutex); 1717 spin_unlock(chip->mutex);
1709 } 1718 }
1710 } 1719 }
1711 1720
1712 return ret; 1721 return ret;
1713} 1722}
1714 1723
@@ -1721,11 +1730,11 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
1721 struct flchip *chip; 1730 struct flchip *chip;
1722 1731
1723 for (i=0; i<cfi->numchips; i++) { 1732 for (i=0; i<cfi->numchips; i++) {
1724 1733
1725 chip = &cfi->chips[i]; 1734 chip = &cfi->chips[i];
1726 1735
1727 spin_lock(chip->mutex); 1736 spin_lock(chip->mutex);
1728 1737
1729 if (chip->state == FL_PM_SUSPENDED) { 1738 if (chip->state == FL_PM_SUSPENDED) {
1730 chip->state = FL_READY; 1739 chip->state = FL_READY;
1731 map_write(map, CMD(0xF0), chip->start); 1740 map_write(map, CMD(0xF0), chip->start);
@@ -1742,6 +1751,7 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
1742{ 1751{
1743 struct map_info *map = mtd->priv; 1752 struct map_info *map = mtd->priv;
1744 struct cfi_private *cfi = map->fldrv_priv; 1753 struct cfi_private *cfi = map->fldrv_priv;
1754
1745 kfree(cfi->cmdset_priv); 1755 kfree(cfi->cmdset_priv);
1746 kfree(cfi->cfiq); 1756 kfree(cfi->cfiq);
1747 kfree(cfi); 1757 kfree(cfi);
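
Both the 0002 and (below) 0020 command sets now reject primary extended query tables they do not understand. The MajorVersion/MinorVersion fields hold ASCII digits, so the guard compares characters, not integers; the accepted window is 1.0 through 1.4 for 0002 and 1.0 through 1.3 for 0020, per the hunks in this commit. A stand-alone sketch of the same check follows; the struct layout and helper name are illustrative only.

#include <stdio.h>

struct ext_query { char MajorVersion, MinorVersion; };

/* Accept only extended query versions 1.0 through 1.4 (the 0002 window);
 * the fields are ASCII digit characters, as in the driver. */
static int ext_query_supported(const struct ext_query *extp)
{
    return extp->MajorVersion == '1' &&
           extp->MinorVersion >= '0' && extp->MinorVersion <= '4';
}

int main(void)
{
    struct ext_query ok = { '1', '3' }, bad = { '2', '0' };

    printf("1.3 supported: %d\n", ext_query_supported(&ok));
    printf("2.0 supported: %d\n", ext_query_supported(&bad));
    return 0;
}
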
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index c894f8801578..c4a19d2dc67f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -4,8 +4,8 @@
4 * 4 *
5 * (C) 2000 Red Hat. GPL'd 5 * (C) 2000 Red Hat. GPL'd
6 * 6 *
7 * $Id: cfi_cmdset_0020.c,v 1.19 2005/07/13 15:52:45 dwmw2 Exp $ 7 * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
8 * 8 *
9 * 10/10/2000 Nicolas Pitre <nico@cam.org> 9 * 10/10/2000 Nicolas Pitre <nico@cam.org>
10 * - completely revamped method functions so they are aware and 10 * - completely revamped method functions so they are aware and
11 * independent of the flash geometry (buswidth, interleave, etc.) 11 * independent of the flash geometry (buswidth, interleave, etc.)
@@ -81,17 +81,17 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
81 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported"); 81 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
82 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported"); 82 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
83 for (i=9; i<32; i++) { 83 for (i=9; i<32; i++) {
84 if (extp->FeatureSupport & (1<<i)) 84 if (extp->FeatureSupport & (1<<i))
85 printk(" - Unknown Bit %X: supported\n", i); 85 printk(" - Unknown Bit %X: supported\n", i);
86 } 86 }
87 87
88 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport); 88 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
89 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported"); 89 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
90 for (i=1; i<8; i++) { 90 for (i=1; i<8; i++) {
91 if (extp->SuspendCmdSupport & (1<<i)) 91 if (extp->SuspendCmdSupport & (1<<i))
92 printk(" - Unknown Bit %X: supported\n", i); 92 printk(" - Unknown Bit %X: supported\n", i);
93 } 93 }
94 94
95 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask); 95 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
96 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no"); 96 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
97 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); 97 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
@@ -99,11 +99,11 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
99 if (extp->BlkStatusRegMask & (1<<i)) 99 if (extp->BlkStatusRegMask & (1<<i))
100 printk(" - Unknown Bit %X Active: yes\n",i); 100 printk(" - Unknown Bit %X Active: yes\n",i);
101 } 101 }
102 102
103 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 103 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
104 extp->VccOptimal >> 8, extp->VccOptimal & 0xf); 104 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
105 if (extp->VppOptimal) 105 if (extp->VppOptimal)
106 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 106 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
107 extp->VppOptimal >> 8, extp->VppOptimal & 0xf); 107 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
108} 108}
109#endif 109#endif
@@ -121,7 +121,7 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
121 int i; 121 int i;
122 122
123 if (cfi->cfi_mode) { 123 if (cfi->cfi_mode) {
124 /* 124 /*
125 * It's a real CFI chip, not one for which the probe 125 * It's a real CFI chip, not one for which the probe
126 * routine faked a CFI structure. So we read the feature 126 * routine faked a CFI structure. So we read the feature
127 * table from it. 127 * table from it.
@@ -133,24 +133,33 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
133 if (!extp) 133 if (!extp)
134 return NULL; 134 return NULL;
135 135
136 if (extp->MajorVersion != '1' ||
137 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
138 printk(KERN_ERR " Unknown ST Microelectronics"
139 " Extended Query version %c.%c.\n",
140 extp->MajorVersion, extp->MinorVersion);
141 kfree(extp);
142 return NULL;
143 }
144
136 /* Do some byteswapping if necessary */ 145 /* Do some byteswapping if necessary */
137 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport); 146 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
138 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask); 147 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
139 148
140#ifdef DEBUG_CFI_FEATURES 149#ifdef DEBUG_CFI_FEATURES
141 /* Tell the user about it in lots of lovely detail */ 150 /* Tell the user about it in lots of lovely detail */
142 cfi_tell_features(extp); 151 cfi_tell_features(extp);
143#endif 152#endif
144 153
145 /* Install our own private info structure */ 154 /* Install our own private info structure */
146 cfi->cmdset_priv = extp; 155 cfi->cmdset_priv = extp;
147 } 156 }
148 157
149 for (i=0; i< cfi->numchips; i++) { 158 for (i=0; i< cfi->numchips; i++) {
150 cfi->chips[i].word_write_time = 128; 159 cfi->chips[i].word_write_time = 128;
151 cfi->chips[i].buffer_write_time = 128; 160 cfi->chips[i].buffer_write_time = 128;
152 cfi->chips[i].erase_time = 1024; 161 cfi->chips[i].erase_time = 1024;
153 } 162 }
154 163
155 return cfi_staa_setup(map); 164 return cfi_staa_setup(map);
156} 165}
@@ -178,15 +187,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
178 mtd->size = devsize * cfi->numchips; 187 mtd->size = devsize * cfi->numchips;
179 188
180 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 189 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
181 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 190 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
182 * mtd->numeraseregions, GFP_KERNEL); 191 * mtd->numeraseregions, GFP_KERNEL);
183 if (!mtd->eraseregions) { 192 if (!mtd->eraseregions) {
184 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); 193 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
185 kfree(cfi->cmdset_priv); 194 kfree(cfi->cmdset_priv);
186 kfree(mtd); 195 kfree(mtd);
187 return NULL; 196 return NULL;
188 } 197 }
189 198
190 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 199 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
191 unsigned long ernum, ersize; 200 unsigned long ernum, ersize;
192 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 201 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -219,7 +228,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
219 mtd->eraseregions[i].numblocks); 228 mtd->eraseregions[i].numblocks);
220 } 229 }
221 230
222 /* Also select the correct geometry setup too */ 231 /* Also select the correct geometry setup too */
223 mtd->erase = cfi_staa_erase_varsize; 232 mtd->erase = cfi_staa_erase_varsize;
224 mtd->read = cfi_staa_read; 233 mtd->read = cfi_staa_read;
225 mtd->write = cfi_staa_write_buffers; 234 mtd->write = cfi_staa_write_buffers;
@@ -250,8 +259,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
250 259
251 adr += chip->start; 260 adr += chip->start;
252 261
253 /* Ensure cmd read/writes are aligned. */ 262 /* Ensure cmd read/writes are aligned. */
254 cmd_addr = adr & ~(map_bankwidth(map)-1); 263 cmd_addr = adr & ~(map_bankwidth(map)-1);
255 264
256 /* Let's determine this according to the interleave only once */ 265 /* Let's determine this according to the interleave only once */
257 status_OK = CMD(0x80); 266 status_OK = CMD(0x80);
@@ -267,7 +276,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
267 case FL_ERASING: 276 case FL_ERASING:
268 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)) 277 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
269 goto sleep; /* We don't support erase suspend */ 278 goto sleep; /* We don't support erase suspend */
270 279
271 map_write (map, CMD(0xb0), cmd_addr); 280 map_write (map, CMD(0xb0), cmd_addr);
272 /* If the flash has finished erasing, then 'erase suspend' 281 /* If the flash has finished erasing, then 'erase suspend'
273 * appears to make some (28F320) flash devices switch to 282 * appears to make some (28F320) flash devices switch to
@@ -282,7 +291,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
282 status = map_read(map, cmd_addr); 291 status = map_read(map, cmd_addr);
283 if (map_word_andequal(map, status, status_OK, status_OK)) 292 if (map_word_andequal(map, status, status_OK, status_OK))
284 break; 293 break;
285 294
286 if (time_after(jiffies, timeo)) { 295 if (time_after(jiffies, timeo)) {
287 /* Urgh */ 296 /* Urgh */
288 map_write(map, CMD(0xd0), cmd_addr); 297 map_write(map, CMD(0xd0), cmd_addr);
@@ -294,17 +303,17 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
294 "suspended: status = 0x%lx\n", status.x[0]); 303 "suspended: status = 0x%lx\n", status.x[0]);
295 return -EIO; 304 return -EIO;
296 } 305 }
297 306
298 spin_unlock_bh(chip->mutex); 307 spin_unlock_bh(chip->mutex);
299 cfi_udelay(1); 308 cfi_udelay(1);
300 spin_lock_bh(chip->mutex); 309 spin_lock_bh(chip->mutex);
301 } 310 }
302 311
303 suspended = 1; 312 suspended = 1;
304 map_write(map, CMD(0xff), cmd_addr); 313 map_write(map, CMD(0xff), cmd_addr);
305 chip->state = FL_READY; 314 chip->state = FL_READY;
306 break; 315 break;
307 316
308#if 0 317#if 0
309 case FL_WRITING: 318 case FL_WRITING:
310 /* Not quite yet */ 319 /* Not quite yet */
@@ -325,7 +334,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
325 chip->state = FL_READY; 334 chip->state = FL_READY;
326 break; 335 break;
327 } 336 }
328 337
329 /* Urgh. Chip not yet ready to talk to us. */ 338 /* Urgh. Chip not yet ready to talk to us. */
330 if (time_after(jiffies, timeo)) { 339 if (time_after(jiffies, timeo)) {
331 spin_unlock_bh(chip->mutex); 340 spin_unlock_bh(chip->mutex);
@@ -355,17 +364,17 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
355 364
356 if (suspended) { 365 if (suspended) {
357 chip->state = chip->oldstate; 366 chip->state = chip->oldstate;
358 /* What if one interleaved chip has finished and the 367 /* What if one interleaved chip has finished and the
359 other hasn't? The old code would leave the finished 368 other hasn't? The old code would leave the finished
360 one in READY mode. That's bad, and caused -EROFS 369 one in READY mode. That's bad, and caused -EROFS
361 errors to be returned from do_erase_oneblock because 370 errors to be returned from do_erase_oneblock because
362 that's the only bit it checked for at the time. 371 that's the only bit it checked for at the time.
363 As the state machine appears to explicitly allow 372 As the state machine appears to explicitly allow
364 sending the 0x70 (Read Status) command to an erasing 373 sending the 0x70 (Read Status) command to an erasing
365 chip and expecting it to be ignored, that's what we 374 chip and expecting it to be ignored, that's what we
366 do. */ 375 do. */
367 map_write(map, CMD(0xd0), cmd_addr); 376 map_write(map, CMD(0xd0), cmd_addr);
368 map_write(map, CMD(0x70), cmd_addr); 377 map_write(map, CMD(0x70), cmd_addr);
369 } 378 }
370 379
371 wake_up(&chip->wq); 380 wake_up(&chip->wq);
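For clarity, a minimal sketch of the suspend/read/resume flow the hunk above implements (illustrative only, not part of this patch; the helper name is invented and locking/status polling are elided):

static void staa_read_during_erase_sketch(struct map_info *map,
					  struct flchip *chip,
					  unsigned long cmd_addr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	map_write(map, CMD(0xb0), cmd_addr);	/* Erase Suspend */
	/* ...poll the status register until the 0x80 'ready' bit is set... */
	map_write(map, CMD(0xff), cmd_addr);	/* Read Array: data is now readable */
	chip->state = FL_READY;

	/* ...perform the read... */

	/* Resume unconditionally; a chip that is still erasing is expected to
	   ignore the Read Status command, so interleaved chips stay consistent. */
	map_write(map, CMD(0xd0), cmd_addr);	/* Erase Resume */
	map_write(map, CMD(0x70), cmd_addr);	/* Read Status */
}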
@@ -405,14 +414,14 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
405 *retlen += thislen; 414 *retlen += thislen;
406 len -= thislen; 415 len -= thislen;
407 buf += thislen; 416 buf += thislen;
408 417
409 ofs = 0; 418 ofs = 0;
410 chipnum++; 419 chipnum++;
411 } 420 }
412 return ret; 421 return ret;
413} 422}
414 423
415static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 424static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
416 unsigned long adr, const u_char *buf, int len) 425 unsigned long adr, const u_char *buf, int len)
417{ 426{
418 struct cfi_private *cfi = map->fldrv_priv; 427 struct cfi_private *cfi = map->fldrv_priv;
@@ -420,7 +429,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
420 unsigned long cmd_adr, timeo; 429 unsigned long cmd_adr, timeo;
421 DECLARE_WAITQUEUE(wait, current); 430 DECLARE_WAITQUEUE(wait, current);
422 int wbufsize, z; 431 int wbufsize, z;
423 432
424 /* M58LW064A requires bus alignment for buffer writes -- saw */ 433 /* M58LW064A requires bus alignment for buffer writes -- saw */
425 if (adr & (map_bankwidth(map)-1)) 434 if (adr & (map_bankwidth(map)-1))
426 return -EINVAL; 435 return -EINVAL;
@@ -428,10 +437,10 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
428 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 437 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
429 adr += chip->start; 438 adr += chip->start;
430 cmd_adr = adr & ~(wbufsize-1); 439 cmd_adr = adr & ~(wbufsize-1);
431 440
432 /* Let's determine this according to the interleave only once */ 441 /* Let's determine this according to the interleave only once */
433 status_OK = CMD(0x80); 442 status_OK = CMD(0x80);
434 443
435 timeo = jiffies + HZ; 444 timeo = jiffies + HZ;
436 retry: 445 retry:
437 446
@@ -439,7 +448,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
439 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state); 448 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
440#endif 449#endif
441 spin_lock_bh(chip->mutex); 450 spin_lock_bh(chip->mutex);
442 451
443 /* Check that the chip's ready to talk to us. 452 /* Check that the chip's ready to talk to us.
444 * Later, we can actually think about interrupting it 453 * Later, we can actually think about interrupting it
445 * if it's in FL_ERASING state. 454 * if it's in FL_ERASING state.
@@ -448,7 +457,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
448 switch (chip->state) { 457 switch (chip->state) {
449 case FL_READY: 458 case FL_READY:
450 break; 459 break;
451 460
452 case FL_CFI_QUERY: 461 case FL_CFI_QUERY:
453 case FL_JEDEC_QUERY: 462 case FL_JEDEC_QUERY:
454 map_write(map, CMD(0x70), cmd_adr); 463 map_write(map, CMD(0x70), cmd_adr);
@@ -513,7 +522,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
513 522
514 /* Write length of data to come */ 523 /* Write length of data to come */
515 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr ); 524 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
516 525
517 /* Write data */ 526 /* Write data */
518 for (z = 0; z < len; 527 for (z = 0; z < len;
519 z += map_bankwidth(map), buf += map_bankwidth(map)) { 528 z += map_bankwidth(map), buf += map_bankwidth(map)) {
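A hedged outline of the complete buffered-write command flow this function follows; the visible hunk shows only the count and data phase, so the 0xE8 (Write to Buffer) and 0xD0 (Confirm) opcodes and the helper name below are assumptions based on the usual CFI sequence, not taken from this patch:

static void staa_buffer_write_sketch(struct map_info *map, unsigned long adr,
				     unsigned long cmd_adr,
				     const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word d;
	int z;

	map_write(map, CMD(0xe8), cmd_adr);	/* Write to Buffer (assumed opcode) */
	/* ...poll status until the chip reports the write buffer is available... */

	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);	/* word count - 1 */

	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		d = map_word_load(map, buf);
		map_write(map, d, adr + z);
	}

	map_write(map, CMD(0xd0), cmd_adr);	/* Confirm: start programming (assumed) */
	/* ...poll status for completion, then check and clear any error bits... */
}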
@@ -560,7 +569,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
560 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); 569 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
561 return -EIO; 570 return -EIO;
562 } 571 }
563 572
564 /* Latency issues. Drop the lock, wait a while and retry */ 573 /* Latency issues. Drop the lock, wait a while and retry */
565 spin_unlock_bh(chip->mutex); 574 spin_unlock_bh(chip->mutex);
566 cfi_udelay(1); 575 cfi_udelay(1);
@@ -572,9 +581,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
572 if (!chip->buffer_write_time) 581 if (!chip->buffer_write_time)
573 chip->buffer_write_time++; 582 chip->buffer_write_time++;
574 } 583 }
575 if (z > 1) 584 if (z > 1)
576 chip->buffer_write_time++; 585 chip->buffer_write_time++;
577 586
578 /* Done and happy. */ 587 /* Done and happy. */
579 DISABLE_VPP(map); 588 DISABLE_VPP(map);
580 chip->state = FL_STATUS; 589 chip->state = FL_STATUS;
@@ -598,7 +607,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
598 return 0; 607 return 0;
599} 608}
600 609
601static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, 610static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
602 size_t len, size_t *retlen, const u_char *buf) 611 size_t len, size_t *retlen, const u_char *buf)
603{ 612{
604 struct map_info *map = mtd->priv; 613 struct map_info *map = mtd->priv;
@@ -620,7 +629,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
620 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize); 629 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
621 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len); 630 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
622#endif 631#endif
623 632
624 /* Write buffer is worth it only if more than one word to write... */ 633 /* Write buffer is worth it only if more than one word to write... */
625 while (len > 0) { 634 while (len > 0) {
626 /* We must not cross write block boundaries */ 635 /* We must not cross write block boundaries */
@@ -629,7 +638,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
629 if (size > len) 638 if (size > len)
630 size = len; 639 size = len;
631 640
632 ret = do_write_buffer(map, &cfi->chips[chipnum], 641 ret = do_write_buffer(map, &cfi->chips[chipnum],
633 ofs, buf, size); 642 ofs, buf, size);
634 if (ret) 643 if (ret)
635 return ret; 644 return ret;
@@ -640,13 +649,13 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
640 len -= size; 649 len -= size;
641 650
642 if (ofs >> cfi->chipshift) { 651 if (ofs >> cfi->chipshift) {
643 chipnum ++; 652 chipnum ++;
644 ofs = 0; 653 ofs = 0;
645 if (chipnum == cfi->numchips) 654 if (chipnum == cfi->numchips)
646 return 0; 655 return 0;
647 } 656 }
648 } 657 }
649 658
650 return 0; 659 return 0;
651} 660}
652 661
@@ -756,7 +765,7 @@ retry:
756 status = map_read(map, adr); 765 status = map_read(map, adr);
757 if (map_word_andequal(map, status, status_OK, status_OK)) 766 if (map_word_andequal(map, status, status_OK, status_OK))
758 break; 767 break;
759 768
760 /* Urgh. Chip not yet ready to talk to us. */ 769 /* Urgh. Chip not yet ready to talk to us. */
761 if (time_after(jiffies, timeo)) { 770 if (time_after(jiffies, timeo)) {
762 spin_unlock_bh(chip->mutex); 771 spin_unlock_bh(chip->mutex);
@@ -789,7 +798,7 @@ retry:
789 map_write(map, CMD(0x20), adr); 798 map_write(map, CMD(0x20), adr);
790 map_write(map, CMD(0xD0), adr); 799 map_write(map, CMD(0xD0), adr);
791 chip->state = FL_ERASING; 800 chip->state = FL_ERASING;
792 801
793 spin_unlock_bh(chip->mutex); 802 spin_unlock_bh(chip->mutex);
794 msleep(1000); 803 msleep(1000);
795 spin_lock_bh(chip->mutex); 804 spin_lock_bh(chip->mutex);
@@ -814,7 +823,7 @@ retry:
814 status = map_read(map, adr); 823 status = map_read(map, adr);
815 if (map_word_andequal(map, status, status_OK, status_OK)) 824 if (map_word_andequal(map, status, status_OK, status_OK))
816 break; 825 break;
817 826
818 /* OK Still waiting */ 827 /* OK Still waiting */
819 if (time_after(jiffies, timeo)) { 828 if (time_after(jiffies, timeo)) {
820 map_write(map, CMD(0x70), adr); 829 map_write(map, CMD(0x70), adr);
@@ -824,13 +833,13 @@ retry:
824 spin_unlock_bh(chip->mutex); 833 spin_unlock_bh(chip->mutex);
825 return -EIO; 834 return -EIO;
826 } 835 }
827 836
828 /* Latency issues. Drop the lock, wait a while and retry */ 837 /* Latency issues. Drop the lock, wait a while and retry */
829 spin_unlock_bh(chip->mutex); 838 spin_unlock_bh(chip->mutex);
830 cfi_udelay(1); 839 cfi_udelay(1);
831 spin_lock_bh(chip->mutex); 840 spin_lock_bh(chip->mutex);
832 } 841 }
833 842
834 DISABLE_VPP(map); 843 DISABLE_VPP(map);
835 ret = 0; 844 ret = 0;
836 845
@@ -855,7 +864,7 @@ retry:
855 /* Reset the error bits */ 864 /* Reset the error bits */
856 map_write(map, CMD(0x50), adr); 865 map_write(map, CMD(0x50), adr);
857 map_write(map, CMD(0x70), adr); 866 map_write(map, CMD(0x70), adr);
858 867
859 if ((chipstatus & 0x30) == 0x30) { 868 if ((chipstatus & 0x30) == 0x30) {
860 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus); 869 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
861 ret = -EIO; 870 ret = -EIO;
@@ -904,17 +913,17 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
904 913
905 i = 0; 914 i = 0;
906 915
907 /* Skip all erase regions which are ended before the start of 916 /* Skip all erase regions which are ended before the start of
908 the requested erase. Actually, to save on the calculations, 917 the requested erase. Actually, to save on the calculations,
909 we skip to the first erase region which starts after the 918 we skip to the first erase region which starts after the
910 start of the requested erase, and then go back one. 919 start of the requested erase, and then go back one.
911 */ 920 */
912 921
913 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset) 922 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
914 i++; 923 i++;
915 i--; 924 i--;
916 925
917 /* OK, now i is pointing at the erase region in which this 926 /* OK, now i is pointing at the erase region in which this
918 erase request starts. Check the start of the requested 927 erase request starts. Check the start of the requested
919 erase range is aligned with the erase size which is in 928 erase range is aligned with the erase size which is in
920 effect here. 929 effect here.
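The region lookup described in the comment above, pulled out into a stand-alone helper for illustration (not part of this patch; the function name is invented):

static int find_erase_region(struct mtd_info *mtd, unsigned long addr)
{
	struct mtd_erase_region_info *regions = mtd->eraseregions;
	int i = 0;

	/* Skip every erase region that starts at or below addr, then step
	   back one, so the result indexes the region containing addr. */
	while (i < mtd->numeraseregions && addr >= regions[i].offset)
		i++;

	return i - 1;
}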
@@ -937,7 +946,7 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
937 the address actually falls 946 the address actually falls
938 */ 947 */
939 i--; 948 i--;
940 949
941 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) 950 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
942 return -EINVAL; 951 return -EINVAL;
943 952
@@ -949,7 +958,7 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
949 958
950 while(len) { 959 while(len) {
951 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr); 960 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
952 961
953 if (ret) 962 if (ret)
954 return ret; 963 return ret;
955 964
@@ -962,15 +971,15 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
962 if (adr >> cfi->chipshift) { 971 if (adr >> cfi->chipshift) {
963 adr = 0; 972 adr = 0;
964 chipnum++; 973 chipnum++;
965 974
966 if (chipnum >= cfi->numchips) 975 if (chipnum >= cfi->numchips)
967 break; 976 break;
968 } 977 }
969 } 978 }
970 979
971 instr->state = MTD_ERASE_DONE; 980 instr->state = MTD_ERASE_DONE;
972 mtd_erase_callback(instr); 981 mtd_erase_callback(instr);
973 982
974 return 0; 983 return 0;
975} 984}
976 985
@@ -996,7 +1005,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
996 case FL_JEDEC_QUERY: 1005 case FL_JEDEC_QUERY:
997 chip->oldstate = chip->state; 1006 chip->oldstate = chip->state;
998 chip->state = FL_SYNCING; 1007 chip->state = FL_SYNCING;
999 /* No need to wake_up() on this state change - 1008 /* No need to wake_up() on this state change -
1000 * as the whole point is that nobody can do anything 1009 * as the whole point is that nobody can do anything
1001 * with the chip now anyway. 1010 * with the chip now anyway.
1002 */ 1011 */
@@ -1007,11 +1016,11 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1007 default: 1016 default:
1008 /* Not an idle state */ 1017 /* Not an idle state */
1009 add_wait_queue(&chip->wq, &wait); 1018 add_wait_queue(&chip->wq, &wait);
1010 1019
1011 spin_unlock_bh(chip->mutex); 1020 spin_unlock_bh(chip->mutex);
1012 schedule(); 1021 schedule();
1013 remove_wait_queue(&chip->wq, &wait); 1022 remove_wait_queue(&chip->wq, &wait);
1014 1023
1015 goto retry; 1024 goto retry;
1016 } 1025 }
1017 } 1026 }
@@ -1022,7 +1031,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1022 chip = &cfi->chips[i]; 1031 chip = &cfi->chips[i];
1023 1032
1024 spin_lock_bh(chip->mutex); 1033 spin_lock_bh(chip->mutex);
1025 1034
1026 if (chip->state == FL_SYNCING) { 1035 if (chip->state == FL_SYNCING) {
1027 chip->state = chip->oldstate; 1036 chip->state = chip->oldstate;
1028 wake_up(&chip->wq); 1037 wake_up(&chip->wq);
@@ -1057,9 +1066,9 @@ retry:
1057 1066
1058 case FL_STATUS: 1067 case FL_STATUS:
1059 status = map_read(map, adr); 1068 status = map_read(map, adr);
1060 if (map_word_andequal(map, status, status_OK, status_OK)) 1069 if (map_word_andequal(map, status, status_OK, status_OK))
1061 break; 1070 break;
1062 1071
1063 /* Urgh. Chip not yet ready to talk to us. */ 1072 /* Urgh. Chip not yet ready to talk to us. */
1064 if (time_after(jiffies, timeo)) { 1073 if (time_after(jiffies, timeo)) {
1065 spin_unlock_bh(chip->mutex); 1074 spin_unlock_bh(chip->mutex);
@@ -1088,7 +1097,7 @@ retry:
1088 map_write(map, CMD(0x60), adr); 1097 map_write(map, CMD(0x60), adr);
1089 map_write(map, CMD(0x01), adr); 1098 map_write(map, CMD(0x01), adr);
1090 chip->state = FL_LOCKING; 1099 chip->state = FL_LOCKING;
1091 1100
1092 spin_unlock_bh(chip->mutex); 1101 spin_unlock_bh(chip->mutex);
1093 msleep(1000); 1102 msleep(1000);
1094 spin_lock_bh(chip->mutex); 1103 spin_lock_bh(chip->mutex);
@@ -1102,7 +1111,7 @@ retry:
1102 status = map_read(map, adr); 1111 status = map_read(map, adr);
1103 if (map_word_andequal(map, status, status_OK, status_OK)) 1112 if (map_word_andequal(map, status, status_OK, status_OK))
1104 break; 1113 break;
1105 1114
1106 /* OK Still waiting */ 1115 /* OK Still waiting */
1107 if (time_after(jiffies, timeo)) { 1116 if (time_after(jiffies, timeo)) {
1108 map_write(map, CMD(0x70), adr); 1117 map_write(map, CMD(0x70), adr);
@@ -1112,13 +1121,13 @@ retry:
1112 spin_unlock_bh(chip->mutex); 1121 spin_unlock_bh(chip->mutex);
1113 return -EIO; 1122 return -EIO;
1114 } 1123 }
1115 1124
1116 /* Latency issues. Drop the lock, wait a while and retry */ 1125 /* Latency issues. Drop the lock, wait a while and retry */
1117 spin_unlock_bh(chip->mutex); 1126 spin_unlock_bh(chip->mutex);
1118 cfi_udelay(1); 1127 cfi_udelay(1);
1119 spin_lock_bh(chip->mutex); 1128 spin_lock_bh(chip->mutex);
1120 } 1129 }
1121 1130
1122 /* Done and happy. */ 1131 /* Done and happy. */
1123 chip->state = FL_STATUS; 1132 chip->state = FL_STATUS;
1124 DISABLE_VPP(map); 1133 DISABLE_VPP(map);
@@ -1162,8 +1171,8 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1162 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); 1171 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1163 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); 1172 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1164 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); 1173 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1165#endif 1174#endif
1166 1175
1167 if (ret) 1176 if (ret)
1168 return ret; 1177 return ret;
1169 1178
@@ -1173,7 +1182,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1173 if (adr >> cfi->chipshift) { 1182 if (adr >> cfi->chipshift) {
1174 adr = 0; 1183 adr = 0;
1175 chipnum++; 1184 chipnum++;
1176 1185
1177 if (chipnum >= cfi->numchips) 1186 if (chipnum >= cfi->numchips)
1178 break; 1187 break;
1179 } 1188 }
@@ -1208,7 +1217,7 @@ retry:
1208 status = map_read(map, adr); 1217 status = map_read(map, adr);
1209 if (map_word_andequal(map, status, status_OK, status_OK)) 1218 if (map_word_andequal(map, status, status_OK, status_OK))
1210 break; 1219 break;
1211 1220
1212 /* Urgh. Chip not yet ready to talk to us. */ 1221 /* Urgh. Chip not yet ready to talk to us. */
1213 if (time_after(jiffies, timeo)) { 1222 if (time_after(jiffies, timeo)) {
1214 spin_unlock_bh(chip->mutex); 1223 spin_unlock_bh(chip->mutex);
@@ -1237,7 +1246,7 @@ retry:
1237 map_write(map, CMD(0x60), adr); 1246 map_write(map, CMD(0x60), adr);
1238 map_write(map, CMD(0xD0), adr); 1247 map_write(map, CMD(0xD0), adr);
1239 chip->state = FL_UNLOCKING; 1248 chip->state = FL_UNLOCKING;
1240 1249
1241 spin_unlock_bh(chip->mutex); 1250 spin_unlock_bh(chip->mutex);
1242 msleep(1000); 1251 msleep(1000);
1243 spin_lock_bh(chip->mutex); 1252 spin_lock_bh(chip->mutex);
@@ -1251,7 +1260,7 @@ retry:
1251 status = map_read(map, adr); 1260 status = map_read(map, adr);
1252 if (map_word_andequal(map, status, status_OK, status_OK)) 1261 if (map_word_andequal(map, status, status_OK, status_OK))
1253 break; 1262 break;
1254 1263
1255 /* OK Still waiting */ 1264 /* OK Still waiting */
1256 if (time_after(jiffies, timeo)) { 1265 if (time_after(jiffies, timeo)) {
1257 map_write(map, CMD(0x70), adr); 1266 map_write(map, CMD(0x70), adr);
@@ -1261,13 +1270,13 @@ retry:
1261 spin_unlock_bh(chip->mutex); 1270 spin_unlock_bh(chip->mutex);
1262 return -EIO; 1271 return -EIO;
1263 } 1272 }
1264 1273
1265 /* Latency issues. Drop the lock, wait a while and retry */ 1274 /* Latency issues. Drop the lock, wait a while and retry */
1266 spin_unlock_bh(chip->mutex); 1275 spin_unlock_bh(chip->mutex);
1267 cfi_udelay(1); 1276 cfi_udelay(1);
1268 spin_lock_bh(chip->mutex); 1277 spin_lock_bh(chip->mutex);
1269 } 1278 }
1270 1279
1271 /* Done and happy. */ 1280 /* Done and happy. */
1272 chip->state = FL_STATUS; 1281 chip->state = FL_STATUS;
1273 DISABLE_VPP(map); 1282 DISABLE_VPP(map);
@@ -1292,7 +1301,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1292 { 1301 {
1293 unsigned long temp_adr = adr; 1302 unsigned long temp_adr = adr;
1294 unsigned long temp_len = len; 1303 unsigned long temp_len = len;
1295 1304
1296 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); 1305 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1297 while (temp_len) { 1306 while (temp_len) {
1298 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor))); 1307 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
@@ -1310,7 +1319,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1310 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); 1319 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1311 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); 1320 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1312#endif 1321#endif
1313 1322
1314 return ret; 1323 return ret;
1315} 1324}
1316 1325
@@ -1334,7 +1343,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1334 case FL_JEDEC_QUERY: 1343 case FL_JEDEC_QUERY:
1335 chip->oldstate = chip->state; 1344 chip->oldstate = chip->state;
1336 chip->state = FL_PM_SUSPENDED; 1345 chip->state = FL_PM_SUSPENDED;
1337 /* No need to wake_up() on this state change - 1346 /* No need to wake_up() on this state change -
1338 * as the whole point is that nobody can do anything 1347 * as the whole point is that nobody can do anything
1339 * with the chip now anyway. 1348 * with the chip now anyway.
1340 */ 1349 */
@@ -1353,9 +1362,9 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1353 if (ret) { 1362 if (ret) {
1354 for (i--; i >=0; i--) { 1363 for (i--; i >=0; i--) {
1355 chip = &cfi->chips[i]; 1364 chip = &cfi->chips[i];
1356 1365
1357 spin_lock_bh(chip->mutex); 1366 spin_lock_bh(chip->mutex);
1358 1367
1359 if (chip->state == FL_PM_SUSPENDED) { 1368 if (chip->state == FL_PM_SUSPENDED) {
1360 /* No need to force it into a known state here, 1369 /* No need to force it into a known state here,
1361 because we're returning failure, and it didn't 1370 because we're returning failure, and it didn't
@@ -1365,8 +1374,8 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1365 } 1374 }
1366 spin_unlock_bh(chip->mutex); 1375 spin_unlock_bh(chip->mutex);
1367 } 1376 }
1368 } 1377 }
1369 1378
1370 return ret; 1379 return ret;
1371} 1380}
1372 1381
@@ -1378,11 +1387,11 @@ static void cfi_staa_resume(struct mtd_info *mtd)
1378 struct flchip *chip; 1387 struct flchip *chip;
1379 1388
1380 for (i=0; i<cfi->numchips; i++) { 1389 for (i=0; i<cfi->numchips; i++) {
1381 1390
1382 chip = &cfi->chips[i]; 1391 chip = &cfi->chips[i];
1383 1392
1384 spin_lock_bh(chip->mutex); 1393 spin_lock_bh(chip->mutex);
1385 1394
1386 /* Go to known state. Chip may have been power cycled */ 1395 /* Go to known state. Chip may have been power cycled */
1387 if (chip->state == FL_PM_SUSPENDED) { 1396 if (chip->state == FL_PM_SUSPENDED) {
1388 map_write(map, CMD(0xFF), 0); 1397 map_write(map, CMD(0xFF), 0);
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index cf750038ce6a..90eb30e06b7c 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Common Flash Interface probe code. 2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd. 3 (C) 2000 Red Hat. GPL'd.
4 $Id: cfi_probe.c,v 1.83 2004/11/16 18:19:02 nico Exp $ 4 $Id: cfi_probe.c,v 1.84 2005/11/07 11:14:23 gleixner Exp $
5*/ 5*/
6 6
7#include <linux/config.h> 7#include <linux/config.h>
@@ -20,7 +20,7 @@
20#include <linux/mtd/cfi.h> 20#include <linux/mtd/cfi.h>
21#include <linux/mtd/gen_probe.h> 21#include <linux/mtd/gen_probe.h>
22 22
23//#define DEBUG_CFI 23//#define DEBUG_CFI
24 24
25#ifdef DEBUG_CFI 25#ifdef DEBUG_CFI
26static void print_cfi_ident(struct cfi_ident *); 26static void print_cfi_ident(struct cfi_ident *);
@@ -103,7 +103,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
103 unsigned long *chip_map, struct cfi_private *cfi) 103 unsigned long *chip_map, struct cfi_private *cfi)
104{ 104{
105 int i; 105 int i;
106 106
107 if ((base + 0) >= map->size) { 107 if ((base + 0) >= map->size) {
108 printk(KERN_NOTICE 108 printk(KERN_NOTICE
109 "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n", 109 "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
@@ -128,7 +128,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
128 } 128 }
129 129
130 if (!cfi->numchips) { 130 if (!cfi->numchips) {
131 /* This is the first time we're called. Set up the CFI 131 /* This is the first time we're called. Set up the CFI
132 stuff accordingly and return */ 132 stuff accordingly and return */
133 return cfi_chip_setup(map, cfi); 133 return cfi_chip_setup(map, cfi);
134 } 134 }
@@ -138,13 +138,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
138 unsigned long start; 138 unsigned long start;
139 if(!test_bit(i, chip_map)) { 139 if(!test_bit(i, chip_map)) {
140 /* Skip location; no valid chip at this address */ 140 /* Skip location; no valid chip at this address */
141 continue; 141 continue;
142 } 142 }
143 start = i << cfi->chipshift; 143 start = i << cfi->chipshift;
144 /* This chip should be in read mode if it's one 144 /* This chip should be in read mode if it's one
145 we've already touched. */ 145 we've already touched. */
146 if (qry_present(map, start, cfi)) { 146 if (qry_present(map, start, cfi)) {
147 /* Eep. This chip also had the QRY marker. 147 /* Eep. This chip also had the QRY marker.
148 * Is it an alias for the new one? */ 148 * Is it an alias for the new one? */
149 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL); 149 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
150 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); 150 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
@@ -156,13 +156,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
156 map->name, base, start); 156 map->name, base, start);
157 return 0; 157 return 0;
158 } 158 }
159 /* Yes, it's actually got QRY for data. Most 159 /* Yes, it's actually got QRY for data. Most
160 * unfortunate. Stick the new chip in read mode 160 * unfortunate. Stick the new chip in read mode
161 * too and if it's the same, assume it's an alias. */ 161 * too and if it's the same, assume it's an alias. */
162 /* FIXME: Use other modes to do a proper check */ 162 /* FIXME: Use other modes to do a proper check */
163 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 163 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
164 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); 164 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
165 165
166 if (qry_present(map, base, cfi)) { 166 if (qry_present(map, base, cfi)) {
167 xip_allowed(base, map); 167 xip_allowed(base, map);
168 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", 168 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
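A simplified sketch of the alias test being performed here (illustrative, not from the patch): reset the chip we already know about and see whether the QRY marker at the new base disappears with it; the real code additionally handles the case where the old chip genuinely holds 'QRY' as data.

static int probably_alias(struct map_info *map, struct cfi_private *cfi,
			  __u32 base, unsigned long start)
{
	/* Put the previously found chip back into read mode... */
	cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);

	/* ...if QRY vanished from the new base as well, 'base' was only
	   an alias of the chip at 'start'. */
	return !qry_present(map, base, cfi);
}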
@@ -171,12 +171,12 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
171 } 171 }
172 } 172 }
173 } 173 }
174 174
175 /* OK, if we got to here, then none of the previous chips appear to 175 /* OK, if we got to here, then none of the previous chips appear to
176 be aliases for the current one. */ 176 be aliases for the current one. */
177 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ 177 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
178 cfi->numchips++; 178 cfi->numchips++;
179 179
180 /* Put it back into Read Mode */ 180 /* Put it back into Read Mode */
181 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 181 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
182 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); 182 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
@@ -185,11 +185,11 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
185 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", 185 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
186 map->name, cfi->interleave, cfi->device_type*8, base, 186 map->name, cfi->interleave, cfi->device_type*8, base,
187 map->bankwidth*8); 187 map->bankwidth*8);
188 188
189 return 1; 189 return 1;
190} 190}
191 191
192static int __xipram cfi_chip_setup(struct map_info *map, 192static int __xipram cfi_chip_setup(struct map_info *map,
193 struct cfi_private *cfi) 193 struct cfi_private *cfi)
194{ 194{
195 int ofs_factor = cfi->interleave*cfi->device_type; 195 int ofs_factor = cfi->interleave*cfi->device_type;
@@ -209,11 +209,11 @@ static int __xipram cfi_chip_setup(struct map_info *map,
209 printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 209 printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
210 return 0; 210 return 0;
211 } 211 }
212 212
213 memset(cfi->cfiq,0,sizeof(struct cfi_ident)); 213 memset(cfi->cfiq,0,sizeof(struct cfi_ident));
214 214
215 cfi->cfi_mode = CFI_MODE_CFI; 215 cfi->cfi_mode = CFI_MODE_CFI;
216 216
217 /* Read the CFI info structure */ 217 /* Read the CFI info structure */
218 xip_disable_qry(base, map, cfi); 218 xip_disable_qry(base, map, cfi);
219 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) 219 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
@@ -231,7 +231,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
231 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); 231 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
232 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); 232 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
233 cfi->mfr = cfi_read_query(map, base); 233 cfi->mfr = cfi_read_query(map, base);
234 cfi->id = cfi_read_query(map, base + ofs_factor); 234 cfi->id = cfi_read_query(map, base + ofs_factor);
235 235
236 /* Put it back into Read Mode */ 236 /* Put it back into Read Mode */
237 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 237 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
@@ -255,10 +255,10 @@ static int __xipram cfi_chip_setup(struct map_info *map,
255 255
256 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 256 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
257 cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]); 257 cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
258 258
259#ifdef DEBUG_CFI 259#ifdef DEBUG_CFI
260 printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n", 260 printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
261 i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff, 261 i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
262 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1); 262 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
263#endif 263#endif
264 } 264 }
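For reference, how the packed EraseRegionInfo words used in the debug printk above decode (a sketch; the helper is invented): the low 16 bits carry the block count minus one, the upper 16 bits the block size in 256-byte units.

static void decode_erase_region(__u32 info, unsigned long *blocksize,
				unsigned int *numblocks)
{
	*blocksize = (info >> 8) & ~0xff;	/* equivalent to (info >> 16) * 256 bytes */
	*numblocks = (info & 0xffff) + 1;
}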
@@ -271,33 +271,33 @@ static int __xipram cfi_chip_setup(struct map_info *map,
271} 271}
272 272
273#ifdef DEBUG_CFI 273#ifdef DEBUG_CFI
274static char *vendorname(__u16 vendor) 274static char *vendorname(__u16 vendor)
275{ 275{
276 switch (vendor) { 276 switch (vendor) {
277 case P_ID_NONE: 277 case P_ID_NONE:
278 return "None"; 278 return "None";
279 279
280 case P_ID_INTEL_EXT: 280 case P_ID_INTEL_EXT:
281 return "Intel/Sharp Extended"; 281 return "Intel/Sharp Extended";
282 282
283 case P_ID_AMD_STD: 283 case P_ID_AMD_STD:
284 return "AMD/Fujitsu Standard"; 284 return "AMD/Fujitsu Standard";
285 285
286 case P_ID_INTEL_STD: 286 case P_ID_INTEL_STD:
287 return "Intel/Sharp Standard"; 287 return "Intel/Sharp Standard";
288 288
289 case P_ID_AMD_EXT: 289 case P_ID_AMD_EXT:
290 return "AMD/Fujitsu Extended"; 290 return "AMD/Fujitsu Extended";
291 291
292 case P_ID_WINBOND: 292 case P_ID_WINBOND:
293 return "Winbond Standard"; 293 return "Winbond Standard";
294 294
295 case P_ID_ST_ADV: 295 case P_ID_ST_ADV:
296 return "ST Advanced"; 296 return "ST Advanced";
297 297
298 case P_ID_MITSUBISHI_STD: 298 case P_ID_MITSUBISHI_STD:
299 return "Mitsubishi Standard"; 299 return "Mitsubishi Standard";
300 300
301 case P_ID_MITSUBISHI_EXT: 301 case P_ID_MITSUBISHI_EXT:
302 return "Mitsubishi Extended"; 302 return "Mitsubishi Extended";
303 303
@@ -306,13 +306,13 @@ static char *vendorname(__u16 vendor)
306 306
307 case P_ID_INTEL_PERFORMANCE: 307 case P_ID_INTEL_PERFORMANCE:
308 return "Intel Performance Code"; 308 return "Intel Performance Code";
309 309
310 case P_ID_INTEL_DATA: 310 case P_ID_INTEL_DATA:
311 return "Intel Data"; 311 return "Intel Data";
312 312
313 case P_ID_RESERVED: 313 case P_ID_RESERVED:
314 return "Not Allowed / Reserved for Future Use"; 314 return "Not Allowed / Reserved for Future Use";
315 315
316 default: 316 default:
317 return "Unknown"; 317 return "Unknown";
318 } 318 }
@@ -325,21 +325,21 @@ static void print_cfi_ident(struct cfi_ident *cfip)
325 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') { 325 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
326 printk("Invalid CFI ident structure.\n"); 326 printk("Invalid CFI ident structure.\n");
327 return; 327 return;
328 } 328 }
329#endif 329#endif
330 printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID)); 330 printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
331 if (cfip->P_ADR) 331 if (cfip->P_ADR)
332 printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR); 332 printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
333 else 333 else
334 printk("No Primary Algorithm Table\n"); 334 printk("No Primary Algorithm Table\n");
335 335
336 printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID)); 336 printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
337 if (cfip->A_ADR) 337 if (cfip->A_ADR)
338 printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR); 338 printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
339 else 339 else
340 printk("No Alternate Algorithm Table\n"); 340 printk("No Alternate Algorithm Table\n");
341 341
342 342
343 printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf); 343 printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
344 printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf); 344 printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
345 if (cfip->VppMin) { 345 if (cfip->VppMin) {
@@ -348,61 +348,61 @@ static void print_cfi_ident(struct cfi_ident *cfip)
348 } 348 }
349 else 349 else
350 printk("No Vpp line\n"); 350 printk("No Vpp line\n");
351 351
352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); 352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); 353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
354 354
355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { 355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); 356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); 357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
358 } 358 }
359 else 359 else
360 printk("Full buffer write not supported\n"); 360 printk("Full buffer write not supported\n");
361 361
362 printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp); 362 printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
363 printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp)); 363 printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
364 if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) { 364 if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
365 printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp); 365 printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
366 printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp)); 366 printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
367 } 367 }
368 else 368 else
369 printk("Chip erase not supported\n"); 369 printk("Chip erase not supported\n");
370 370
371 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); 371 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
372 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); 372 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
373 switch(cfip->InterfaceDesc) { 373 switch(cfip->InterfaceDesc) {
374 case 0: 374 case 0:
375 printk(" - x8-only asynchronous interface\n"); 375 printk(" - x8-only asynchronous interface\n");
376 break; 376 break;
377 377
378 case 1: 378 case 1:
379 printk(" - x16-only asynchronous interface\n"); 379 printk(" - x16-only asynchronous interface\n");
380 break; 380 break;
381 381
382 case 2: 382 case 2:
383 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); 383 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
384 break; 384 break;
385 385
386 case 3: 386 case 3:
387 printk(" - x32-only asynchronous interface\n"); 387 printk(" - x32-only asynchronous interface\n");
388 break; 388 break;
389 389
390 case 4: 390 case 4:
391 printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); 391 printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
392 break; 392 break;
393 393
394 case 65535: 394 case 65535:
395 printk(" - Not Allowed / Reserved\n"); 395 printk(" - Not Allowed / Reserved\n");
396 break; 396 break;
397 397
398 default: 398 default:
399 printk(" - Unknown\n"); 399 printk(" - Unknown\n");
400 break; 400 break;
401 } 401 }
402 402
403 printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize); 403 printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
404 printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions); 404 printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
405 405
406} 406}
407#endif /* DEBUG_CFI */ 407#endif /* DEBUG_CFI */
408 408
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 2b2ede2bfcca..d8e7a026ba5a 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * This code is covered by the GPL. 8 * This code is covered by the GPL.
9 * 9 *
10 * $Id: cfi_util.c,v 1.8 2004/12/14 19:55:56 nico Exp $ 10 * $Id: cfi_util.c,v 1.10 2005/11/07 11:14:23 gleixner Exp $
11 * 11 *
12 */ 12 */
13 13
@@ -56,7 +56,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
56 56
57 /* Read in the Extended Query Table */ 57 /* Read in the Extended Query Table */
58 for (i=0; i<size; i++) { 58 for (i=0; i<size; i++) {
59 ((unsigned char *)extp)[i] = 59 ((unsigned char *)extp)[i] =
60 cfi_read_query(map, base+((adr+i)*ofs_factor)); 60 cfi_read_query(map, base+((adr+i)*ofs_factor));
61 } 61 }
62 62
@@ -70,15 +70,6 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
70 local_irq_enable(); 70 local_irq_enable();
71#endif 71#endif
72 72
73 if (extp->MajorVersion != '1' ||
74 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
75 printk(KERN_WARNING " Unknown %s Extended Query "
76 "version %c.%c.\n", name, extp->MajorVersion,
77 extp->MinorVersion);
78 kfree(extp);
79 extp = NULL;
80 }
81
82 out: return extp; 73 out: return extp;
83} 74}
84 75
@@ -122,17 +113,17 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
122 113
123 i = 0; 114 i = 0;
124 115
125 /* Skip all erase regions which are ended before the start of 116 /* Skip all erase regions which are ended before the start of
126 the requested erase. Actually, to save on the calculations, 117 the requested erase. Actually, to save on the calculations,
127 we skip to the first erase region which starts after the 118 we skip to the first erase region which starts after the
128 start of the requested erase, and then go back one. 119 start of the requested erase, and then go back one.
129 */ 120 */
130 121
131 while (i < mtd->numeraseregions && ofs >= regions[i].offset) 122 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
132 i++; 123 i++;
133 i--; 124 i--;
134 125
135 /* OK, now i is pointing at the erase region in which this 126 /* OK, now i is pointing at the erase region in which this
136 erase request starts. Check the start of the requested 127 erase request starts. Check the start of the requested
137 erase range is aligned with the erase size which is in 128 erase range is aligned with the erase size which is in
138 effect here. 129 effect here.
@@ -155,7 +146,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
155 the address actually falls 146 the address actually falls
156 */ 147 */
157 i--; 148 i--;
158 149
159 if ((ofs + len) & (regions[i].erasesize-1)) 150 if ((ofs + len) & (regions[i].erasesize-1))
160 return -EINVAL; 151 return -EINVAL;
161 152
@@ -168,7 +159,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
168 int size = regions[i].erasesize; 159 int size = regions[i].erasesize;
169 160
170 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk); 161 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
171 162
172 if (ret) 163 if (ret)
173 return ret; 164 return ret;
174 165
@@ -182,7 +173,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
182 if (adr >> cfi->chipshift) { 173 if (adr >> cfi->chipshift) {
183 adr = 0; 174 adr = 0;
184 chipnum++; 175 chipnum++;
185 176
186 if (chipnum >= cfi->numchips) 177 if (chipnum >= cfi->numchips)
187 break; 178 break;
188 } 179 }
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index d7d739a108ae..c2127840a183 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -41,7 +41,7 @@ static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
41 41
42 list_for_each(pos, &chip_drvs_list) { 42 list_for_each(pos, &chip_drvs_list) {
43 this = list_entry(pos, typeof(*this), list); 43 this = list_entry(pos, typeof(*this), list);
44 44
45 if (!strcmp(this->name, name)) { 45 if (!strcmp(this->name, name)) {
46 ret = this; 46 ret = this;
47 break; 47 break;
@@ -73,7 +73,7 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map)
73 73
74 ret = drv->probe(map); 74 ret = drv->probe(map);
75 75
76 /* We decrease the use count here. It may have been a 76 /* We decrease the use count here. It may have been a
77 probe-only module, which is no longer required from this 77 probe-only module, which is no longer required from this
78 point, having given us a handle on (and increased the use 78 point, having given us a handle on (and increased the use
79 count of) the actual driver code. 79 count of) the actual driver code.
@@ -82,7 +82,7 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map)
82 82
83 if (ret) 83 if (ret)
84 return ret; 84 return ret;
85 85
86 return NULL; 86 return NULL;
87} 87}
88/* 88/*
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index e1a5b76596c5..77303ce5dcf1 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -25,7 +25,7 @@ struct fwh_xxlock_thunk {
25 * so this code has not been tested with interleaved chips, 25 * so this code has not been tested with interleaved chips,
26 * and will likely fail in that context. 26 * and will likely fail in that context.
27 */ 27 */
28static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, 28static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
29 unsigned long adr, int len, void *thunk) 29 unsigned long adr, int len, void *thunk)
30{ 30{
31 struct cfi_private *cfi = map->fldrv_priv; 31 struct cfi_private *cfi = map->fldrv_priv;
@@ -44,7 +44,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
44 * - on 64k boundaries and 44 * - on 64k boundaries and
45 * - bit 1 set high 45 * - bit 1 set high
46 * - block lock registers are 4MiB lower - overflow subtract (danger) 46 * - block lock registers are 4MiB lower - overflow subtract (danger)
47 * 47 *
48 * The address manipulation is first done on the logical address 48 * The address manipulation is first done on the logical address
49 * which is 0 at the start of the chip, and then the offset of 49 * which is 0 at the start of the chip, and then the offset of
50 * the individual chip is added to it. Any other order a weird 50 * the individual chip is added to it. Any other order a weird
@@ -93,7 +93,7 @@ static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
93 93
94 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len, 94 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
95 (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK); 95 (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
96 96
97 return ret; 97 return ret;
98} 98}
99 99
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index dc065b22f79e..41bd59d20d85 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -2,7 +2,7 @@
2 * Routines common to all CFI-type probes. 2 * Routines common to all CFI-type probes.
3 * (C) 2001-2003 Red Hat, Inc. 3 * (C) 2001-2003 Red Hat, Inc.
4 * GPL'd 4 * GPL'd
5 * $Id: gen_probe.c,v 1.22 2005/01/24 23:49:50 rmk Exp $ 5 * $Id: gen_probe.c,v 1.24 2005/11/07 11:14:23 gleixner Exp $
6 */ 6 */
7 7
8#include <linux/kernel.h> 8#include <linux/kernel.h>
@@ -26,7 +26,7 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
26 26
27 /* First probe the map to see if we have CFI stuff there. */ 27 /* First probe the map to see if we have CFI stuff there. */
28 cfi = genprobe_ident_chips(map, cp); 28 cfi = genprobe_ident_chips(map, cp);
29 29
30 if (!cfi) 30 if (!cfi)
31 return NULL; 31 return NULL;
32 32
@@ -36,12 +36,12 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
36 mtd = check_cmd_set(map, 1); /* First the primary cmdset */ 36 mtd = check_cmd_set(map, 1); /* First the primary cmdset */
37 if (!mtd) 37 if (!mtd)
38 mtd = check_cmd_set(map, 0); /* Then the secondary */ 38 mtd = check_cmd_set(map, 0); /* Then the secondary */
39 39
40 if (mtd) 40 if (mtd)
41 return mtd; 41 return mtd;
42 42
43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n"); 43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
44 44
45 kfree(cfi->cfiq); 45 kfree(cfi->cfiq);
46 kfree(cfi); 46 kfree(cfi);
47 map->fldrv_priv = NULL; 47 map->fldrv_priv = NULL;
@@ -60,14 +60,14 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
60 60
61 memset(&cfi, 0, sizeof(cfi)); 61 memset(&cfi, 0, sizeof(cfi));
62 62
63 /* Call the probetype-specific code with all permutations of 63 /* Call the probetype-specific code with all permutations of
64 interleave and device type, etc. */ 64 interleave and device type, etc. */
65 if (!genprobe_new_chip(map, cp, &cfi)) { 65 if (!genprobe_new_chip(map, cp, &cfi)) {
66 /* The probe didn't like it */ 66 /* The probe didn't like it */
67 printk(KERN_DEBUG "%s: Found no %s device at location zero\n", 67 printk(KERN_DEBUG "%s: Found no %s device at location zero\n",
68 cp->name, map->name); 68 cp->name, map->name);
69 return NULL; 69 return NULL;
70 } 70 }
71 71
72#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD 72#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
73 probe routines won't ever return a broken CFI structure anyway, 73 probe routines won't ever return a broken CFI structure anyway,
@@ -92,13 +92,13 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
92 } else { 92 } else {
93 BUG(); 93 BUG();
94 } 94 }
95 95
96 cfi.numchips = 1; 96 cfi.numchips = 1;
97 97
98 /* 98 /*
99 * Allocate memory for bitmap of valid chips. 99 * Allocate memory for bitmap of valid chips.
100 * Align bitmap storage size to full byte. 100 * Align bitmap storage size to full byte.
101 */ 101 */
102 max_chips = map->size >> cfi.chipshift; 102 max_chips = map->size >> cfi.chipshift;
103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0); 103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);
104 chip_map = kmalloc(mapsize, GFP_KERNEL); 104 chip_map = kmalloc(mapsize, GFP_KERNEL);
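The mapsize arithmetic above simply rounds one bit per possible chip location up to whole bytes; an equivalent helper, shown only for illustration:

static size_t chip_map_bytes(unsigned long max_chips)
{
	/* One bit per chip location, rounded up to a full byte; identical to
	   (max_chips / 8) + ((max_chips % 8) ? 1 : 0). */
	return (max_chips + 7) / 8;
}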
@@ -122,7 +122,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
122 } 122 }
123 123
124 /* 124 /*
125 * Now allocate the space for the structures we need to return to 125 * Now allocate the space for the structures we need to return to
126 * our caller, and copy the appropriate data into them. 126 * our caller, and copy the appropriate data into them.
127 */ 127 */
128 128
@@ -154,7 +154,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
154 return retcfi; 154 return retcfi;
155} 155}
156 156
157 157
158static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp, 158static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
159 struct cfi_private *cfi) 159 struct cfi_private *cfi)
160{ 160{
@@ -189,7 +189,7 @@ extern cfi_cmdset_fn_t cfi_cmdset_0001;
189extern cfi_cmdset_fn_t cfi_cmdset_0002; 189extern cfi_cmdset_fn_t cfi_cmdset_0002;
190extern cfi_cmdset_fn_t cfi_cmdset_0020; 190extern cfi_cmdset_fn_t cfi_cmdset_0020;
191 191
192static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map, 192static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
193 int primary) 193 int primary)
194{ 194{
195 struct cfi_private *cfi = map->fldrv_priv; 195 struct cfi_private *cfi = map->fldrv_priv;
@@ -199,7 +199,7 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
199 cfi_cmdset_fn_t *probe_function; 199 cfi_cmdset_fn_t *probe_function;
200 200
201 sprintf(probename, "cfi_cmdset_%4.4X", type); 201 sprintf(probename, "cfi_cmdset_%4.4X", type);
202 202
203 probe_function = inter_module_get_request(probename, probename); 203 probe_function = inter_module_get_request(probename, probename);
204 204
205 if (probe_function) { 205 if (probe_function) {
@@ -221,7 +221,7 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
221{ 221{
222 struct cfi_private *cfi = map->fldrv_priv; 222 struct cfi_private *cfi = map->fldrv_priv;
223 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; 223 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
224 224
225 if (type == P_ID_NONE || type == P_ID_RESERVED) 225 if (type == P_ID_NONE || type == P_ID_RESERVED)
226 return NULL; 226 return NULL;
227 227
@@ -235,6 +235,7 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
235#ifdef CONFIG_MTD_CFI_INTELEXT 235#ifdef CONFIG_MTD_CFI_INTELEXT
236 case 0x0001: 236 case 0x0001:
237 case 0x0003: 237 case 0x0003:
238 case 0x0200:
238 return cfi_cmdset_0001(map, primary); 239 return cfi_cmdset_0001(map, primary);
239#endif 240#endif
240#ifdef CONFIG_MTD_CFI_AMDSTD 241#ifdef CONFIG_MTD_CFI_AMDSTD
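The newly added 0x0200 case in context, annotated for clarity (0x0200 is P_ID_INTEL_PERFORMANCE, listed as "Intel Performance Code" in the vendorname() table earlier in this diff); a sketch of the resulting dispatch, not a further change:

#ifdef CONFIG_MTD_CFI_INTELEXT
	case 0x0001:
	case 0x0003:
	case 0x0200:	/* P_ID_INTEL_PERFORMANCE: "Intel Performance Code" */
		return cfi_cmdset_0001(map, primary);
#endif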
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
index 4f6778f3ee3e..c40b48dabed3 100644
--- a/drivers/mtd/chips/jedec.c
+++ b/drivers/mtd/chips/jedec.c
@@ -1,6 +1,6 @@
1 1
2/* JEDEC Flash Interface. 2/* JEDEC Flash Interface.
3 * This is an older type of interface for self programming flash. It is 3 * This is an older type of interface for self programming flash. It is
4 * commonly used in older AMD chips and is obsolete compared with CFI. 4 * commonly used in older AMD chips and is obsolete compared with CFI.
5 * It is called JEDEC because the JEDEC association distributes the ID codes 5 * It is called JEDEC because the JEDEC association distributes the ID codes
6 * for the chips. 6 * for the chips.
@@ -88,9 +88,9 @@ static const struct JEDECTable JEDEC_table[] = {
88 88
89static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id); 89static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
90static void jedec_sync(struct mtd_info *mtd) {}; 90static void jedec_sync(struct mtd_info *mtd) {};
91static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len, 91static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
92 size_t *retlen, u_char *buf); 92 size_t *retlen, u_char *buf);
93static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len, 93static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
94 size_t *retlen, u_char *buf); 94 size_t *retlen, u_char *buf);
95 95
96static struct mtd_info *jedec_probe(struct map_info *map); 96static struct mtd_info *jedec_probe(struct map_info *map);
@@ -122,7 +122,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
122 122
123 memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private)); 123 memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
124 priv = (struct jedec_private *)&MTD[1]; 124 priv = (struct jedec_private *)&MTD[1];
125 125
126 my_bank_size = map->size; 126 my_bank_size = map->size;
127 127
128 if (map->size/my_bank_size > MAX_JEDEC_CHIPS) 128 if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
@@ -131,13 +131,13 @@ static struct mtd_info *jedec_probe(struct map_info *map)
131 kfree(MTD); 131 kfree(MTD);
132 return NULL; 132 return NULL;
133 } 133 }
134 134
135 for (Base = 0; Base < map->size; Base += my_bank_size) 135 for (Base = 0; Base < map->size; Base += my_bank_size)
136 { 136 {
137 // Perhaps zero could designate all tests? 137 // Perhaps zero could designate all tests?
138 if (map->buswidth == 0) 138 if (map->buswidth == 0)
139 map->buswidth = 1; 139 map->buswidth = 1;
140 140
141 if (map->buswidth == 1){ 141 if (map->buswidth == 1){
142 if (jedec_probe8(map,Base,priv) == 0) { 142 if (jedec_probe8(map,Base,priv) == 0) {
143 printk("did recognize jedec chip\n"); 143 printk("did recognize jedec chip\n");
@@ -150,7 +150,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
150 if (map->buswidth == 4) 150 if (map->buswidth == 4)
151 jedec_probe32(map,Base,priv); 151 jedec_probe32(map,Base,priv);
152 } 152 }
153 153
154 // Get the biggest sector size 154 // Get the biggest sector size
155 SectorSize = 0; 155 SectorSize = 0;
156 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 156 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
@@ -160,7 +160,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
160 if (priv->chips[I].sectorsize > SectorSize) 160 if (priv->chips[I].sectorsize > SectorSize)
161 SectorSize = priv->chips[I].sectorsize; 161 SectorSize = priv->chips[I].sectorsize;
162 } 162 }
163 163
164 // Quickly ensure that the other sector sizes are factors of the largest 164 // Quickly ensure that the other sector sizes are factors of the largest
165 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 165 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
166 { 166 {
@@ -169,9 +169,9 @@ static struct mtd_info *jedec_probe(struct map_info *map)
169 printk("mtd: Failed. Device has incompatible mixed sector sizes\n"); 169 printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
170 kfree(MTD); 170 kfree(MTD);
171 return NULL; 171 return NULL;
172 } 172 }
173 } 173 }
174 174
175 /* Generate a part name that includes the number of different chips and 175 /* Generate a part name that includes the number of different chips and
176 other configuration information */ 176 other configuration information */
177 count = 1; 177 count = 1;
@@ -181,13 +181,13 @@ static struct mtd_info *jedec_probe(struct map_info *map)
181 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 181 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
182 { 182 {
183 const struct JEDECTable *JEDEC; 183 const struct JEDECTable *JEDEC;
184 184
185 if (priv->chips[I+1].jedec == priv->chips[I].jedec) 185 if (priv->chips[I+1].jedec == priv->chips[I].jedec)
186 { 186 {
187 count++; 187 count++;
188 continue; 188 continue;
189 } 189 }
190 190
191 // Locate the chip in the jedec table 191 // Locate the chip in the jedec table
192 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec); 192 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
193 if (JEDEC == 0) 193 if (JEDEC == 0)
@@ -196,11 +196,11 @@ static struct mtd_info *jedec_probe(struct map_info *map)
196 kfree(MTD); 196 kfree(MTD);
197 return NULL; 197 return NULL;
198 } 198 }
199 199
200 if (Uniq != 0) 200 if (Uniq != 0)
201 strcat(Part,","); 201 strcat(Part,",");
202 Uniq++; 202 Uniq++;
203 203
204 if (count != 1) 204 if (count != 1)
205 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name); 205 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
206 else 206 else
@@ -208,7 +208,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
208 if (strlen(Part) > sizeof(Part)*2/3) 208 if (strlen(Part) > sizeof(Part)*2/3)
209 break; 209 break;
210 count = 1; 210 count = 1;
211 } 211 }
212 212
213 /* Determine if the chips are organized in a linear fashion, or if there 213 /* Determine if the chips are organized in a linear fashion, or if there
214 are empty banks. Note, the last bank does not count here, only the 214 are empty banks. Note, the last bank does not count here, only the
@@ -233,7 +233,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
233 { 233 {
234 if (priv->bank_fill[I] != my_bank_size) 234 if (priv->bank_fill[I] != my_bank_size)
235 priv->is_banked = 1; 235 priv->is_banked = 1;
236 236
237 /* This even could be eliminated, but new de-optimized read/write 237 /* This even could be eliminated, but new de-optimized read/write
238 functions have to be written */ 238 functions have to be written */
239 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]); 239 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
@@ -242,7 +242,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
242 printk("mtd: Failed. Cannot handle unsymmetric banking\n"); 242 printk("mtd: Failed. Cannot handle unsymmetric banking\n");
243 kfree(MTD); 243 kfree(MTD);
244 return NULL; 244 return NULL;
245 } 245 }
246 } 246 }
247 } 247 }
248 } 248 }
@@ -250,7 +250,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
250 strcat(Part,", banked"); 250 strcat(Part,", banked");
251 251
252 // printk("Part: '%s'\n",Part); 252 // printk("Part: '%s'\n",Part);
253 253
254 memset(MTD,0,sizeof(*MTD)); 254 memset(MTD,0,sizeof(*MTD));
255 // strlcpy(MTD->name,Part,sizeof(MTD->name)); 255 // strlcpy(MTD->name,Part,sizeof(MTD->name));
256 MTD->name = map->name; 256 MTD->name = map->name;
@@ -291,7 +291,7 @@ static int checkparity(u_char C)
291 291
292/* Take an array of JEDEC numbers that represent interleved flash chips 292/* Take an array of JEDEC numbers that represent interleved flash chips
293 and process them. Check to make sure they are good JEDEC numbers, look 293 and process them. Check to make sure they are good JEDEC numbers, look
294 them up and then add them to the chip list */ 294 them up and then add them to the chip list */
295static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count, 295static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
296 unsigned long base,struct jedec_private *priv) 296 unsigned long base,struct jedec_private *priv)
297{ 297{
@@ -306,16 +306,16 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
306 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0) 306 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
307 return 0; 307 return 0;
308 } 308 }
309 309
310 // Finally, just make sure all the chip sizes are the same 310 // Finally, just make sure all the chip sizes are the same
311 JEDEC = jedec_idtoinf(Mfg[0],Id[0]); 311 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
312 312
313 if (JEDEC == 0) 313 if (JEDEC == 0)
314 { 314 {
315 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]); 315 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
316 return 0; 316 return 0;
317 } 317 }
318 318
319 Size = JEDEC->size; 319 Size = JEDEC->size;
320 SectorSize = JEDEC->sectorsize; 320 SectorSize = JEDEC->sectorsize;
321 for (I = 0; I != Count; I++) 321 for (I = 0; I != Count; I++)
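
Side note on the validation above: legacy JEDEC manufacturer and device codes carry odd parity, which gives a cheap sanity check before the table lookup. A minimal standalone sketch, on the assumption that this is the property checkparity() verifies (its body lies outside the hunks shown):

#include <stdint.h>

/* Returns 1 if the byte has odd parity, the property legacy JEDEC
 * manufacturer/device codes are expected to satisfy.  Assumption: this
 * models what the checkparity() calls above rely on; the function body
 * is not part of the hunks shown. */
static int has_odd_parity(uint8_t c)
{
	int parity = 0;

	while (c) {
		parity ^= c & 1;
		c >>= 1;
	}
	return parity;
}
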
@@ -331,7 +331,7 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
331 { 331 {
332 printk("mtd: Failed. Interleved flash does not have matching characteristics\n"); 332 printk("mtd: Failed. Interleved flash does not have matching characteristics\n");
333 return 0; 333 return 0;
334 } 334 }
335 } 335 }
336 336
337 // Load the Chips 337 // Load the Chips
@@ -345,13 +345,13 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
345 { 345 {
346 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n"); 346 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
347 return 0; 347 return 0;
348 } 348 }
349 349
350 // Add them to the table 350 // Add them to the table
351 for (J = 0; J != Count; J++) 351 for (J = 0; J != Count; J++)
352 { 352 {
353 unsigned long Bank; 353 unsigned long Bank;
354 354
355 JEDEC = jedec_idtoinf(Mfg[J],Id[J]); 355 JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
356 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J]; 356 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
357 priv->chips[I].size = JEDEC->size; 357 priv->chips[I].size = JEDEC->size;
@@ -364,17 +364,17 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
364 // log2 n :| 364 // log2 n :|
365 priv->chips[I].addrshift = 0; 365 priv->chips[I].addrshift = 0;
366 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++); 366 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
367 367
368 // Determine how filled this bank is. 368 // Determine how filled this bank is.
369 Bank = base & (~(my_bank_size-1)); 369 Bank = base & (~(my_bank_size-1));
370 if (priv->bank_fill[Bank/my_bank_size] < base + 370 if (priv->bank_fill[Bank/my_bank_size] < base +
371 (JEDEC->size << priv->chips[I].addrshift) - Bank) 371 (JEDEC->size << priv->chips[I].addrshift) - Bank)
372 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank; 372 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
373 I++; 373 I++;
374 } 374 }
375 375
376 priv->size += priv->chips[I-1].size*Count; 376 priv->size += priv->chips[I-1].size*Count;
377 377
378 return priv->chips[I-1].size; 378 return priv->chips[I-1].size;
379} 379}
380 380
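
The loop above that derives addrshift is an integer log2 of the interleave count: with N chips sharing the data bus, consecutive bytes of one chip sit N map bytes apart, so chip offsets are turned into map offsets with "<< addrshift". The same computation as a standalone helper, sketched for clarity:

#include <stdint.h>

/* log2 of the interleave count (assumed to be a power of two, as the
 * shifting loop above also assumes). */
static unsigned int interleave_shift(uint32_t nchips)
{
	unsigned int shift = 0;

	while (nchips > 1) {
		nchips >>= 1;
		shift++;
	}
	return shift;
}
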
@@ -392,7 +392,7 @@ static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
392// Look for flash using an 8 bit bus interface 392// Look for flash using an 8 bit bus interface
393static int jedec_probe8(struct map_info *map,unsigned long base, 393static int jedec_probe8(struct map_info *map,unsigned long base,
394 struct jedec_private *priv) 394 struct jedec_private *priv)
395{ 395{
396 #define flread(x) map_read8(map,base+x) 396 #define flread(x) map_read8(map,base+x)
397 #define flwrite(v,x) map_write8(map,v,base+x) 397 #define flwrite(v,x) map_write8(map,v,base+x)
398 398
@@ -410,20 +410,20 @@ static int jedec_probe8(struct map_info *map,unsigned long base,
410 OldVal = flread(base); 410 OldVal = flread(base);
411 for (I = 0; OldVal != flread(base) && I < 10000; I++) 411 for (I = 0; OldVal != flread(base) && I < 10000; I++)
412 OldVal = flread(base); 412 OldVal = flread(base);
413 413
414 // Reset the chip 414 // Reset the chip
415 flwrite(Reset,0x555); 415 flwrite(Reset,0x555);
416 416
417 // Send the sequence 417 // Send the sequence
418 flwrite(AutoSel1,0x555); 418 flwrite(AutoSel1,0x555);
419 flwrite(AutoSel2,0x2AA); 419 flwrite(AutoSel2,0x2AA);
420 flwrite(AutoSel3,0x555); 420 flwrite(AutoSel3,0x555);
421 421
422 // Get the JEDEC numbers 422 // Get the JEDEC numbers
423 Mfg[0] = flread(0); 423 Mfg[0] = flread(0);
424 Id[0] = flread(1); 424 Id[0] = flread(1);
425 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]); 425 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
426 426
427 Size = handle_jedecs(map,Mfg,Id,1,base,priv); 427 Size = handle_jedecs(map,Mfg,Id,1,base,priv);
428 // printk("handle_jedecs Size is %x\n",(unsigned int)Size); 428 // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
429 if (Size == 0) 429 if (Size == 0)
@@ -431,13 +431,13 @@ static int jedec_probe8(struct map_info *map,unsigned long base,
431 flwrite(Reset,0x555); 431 flwrite(Reset,0x555);
432 return 0; 432 return 0;
433 } 433 }
434 434
435 435
436 // Reset. 436 // Reset.
437 flwrite(Reset,0x555); 437 flwrite(Reset,0x555);
438 438
439 return 1; 439 return 1;
440 440
441 #undef flread 441 #undef flread
442 #undef flwrite 442 #undef flwrite
443} 443}
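
For reference, jedec_probe8() above drives the usual three-cycle JEDEC/AMD autoselect sequence and then reads the manufacturer and device codes at offsets 0 and 1. A standalone sketch against a memory-mapped 8-bit window; the 0xAA/0x55/0x90 command values and the 0xF0 reset are assumed from the common AMD command set, since the driver's AutoSel1..3 and Reset constants are defined outside the hunks shown:

#include <stdint.h>

/* Read the JEDEC manufacturer and device codes from an 8-bit NOR window.
 * 'base' is the chip's memory-mapped base address.  Command values are
 * the conventional AMD ones (assumption, see above). */
static void jedec_autoselect_ids(volatile uint8_t *base,
				 uint8_t *mfr, uint8_t *id)
{
	base[0x555] = 0xF0;	/* reset to read mode */
	base[0x555] = 0xAA;	/* unlock cycle 1 */
	base[0x2AA] = 0x55;	/* unlock cycle 2 */
	base[0x555] = 0x90;	/* autoselect */

	*mfr = base[0];		/* manufacturer code */
	*id  = base[1];		/* device code */

	base[0x555] = 0xF0;	/* back to read mode */
}
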
@@ -470,17 +470,17 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
470 OldVal = flread(base); 470 OldVal = flread(base);
471 for (I = 0; OldVal != flread(base) && I < 10000; I++) 471 for (I = 0; OldVal != flread(base) && I < 10000; I++)
472 OldVal = flread(base); 472 OldVal = flread(base);
473 473
474 // Reset the chip 474 // Reset the chip
475 flwrite(Reset,0x555); 475 flwrite(Reset,0x555);
476 476
477 // Send the sequence 477 // Send the sequence
478 flwrite(AutoSel1,0x555); 478 flwrite(AutoSel1,0x555);
479 flwrite(AutoSel2,0x2AA); 479 flwrite(AutoSel2,0x2AA);
480 flwrite(AutoSel3,0x555); 480 flwrite(AutoSel3,0x555);
481 481
482 // Test #1, JEDEC numbers are readable from 0x??00/0x??01 482 // Test #1, JEDEC numbers are readable from 0x??00/0x??01
483 if (flread(0) != flread(0x100) || 483 if (flread(0) != flread(0x100) ||
484 flread(1) != flread(0x101)) 484 flread(1) != flread(0x101))
485 { 485 {
486 flwrite(Reset,0x555); 486 flwrite(Reset,0x555);
@@ -494,14 +494,14 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
494 OldVal = flread(1); 494 OldVal = flread(1);
495 for (I = 0; I != 4; I++) 495 for (I = 0; I != 4; I++)
496 Id[I] = (OldVal >> (I*8)); 496 Id[I] = (OldVal >> (I*8));
497 497
498 Size = handle_jedecs(map,Mfg,Id,4,base,priv); 498 Size = handle_jedecs(map,Mfg,Id,4,base,priv);
499 if (Size == 0) 499 if (Size == 0)
500 { 500 {
501 flwrite(Reset,0x555); 501 flwrite(Reset,0x555);
502 return 0; 502 return 0;
503 } 503 }
504 504
505 /* Check if there is address wrap around within a single bank, if this 505 /* Check if there is address wrap around within a single bank, if this
506 returns JEDEC numbers then we assume that it is wrap around. Notice 506 returns JEDEC numbers then we assume that it is wrap around. Notice
507 we call this routine with the JEDEC return still enabled, if two or 507 we call this routine with the JEDEC return still enabled, if two or
@@ -519,27 +519,27 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
519 519
520 // Reset. 520 // Reset.
521 flwrite(0xF0F0F0F0,0x555); 521 flwrite(0xF0F0F0F0,0x555);
522 522
523 return 1; 523 return 1;
524 524
525 #undef flread 525 #undef flread
526 #undef flwrite 526 #undef flwrite
527} 527}
528 528
529/* Linear read. */ 529/* Linear read. */
530static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len, 530static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
531 size_t *retlen, u_char *buf) 531 size_t *retlen, u_char *buf)
532{ 532{
533 struct map_info *map = mtd->priv; 533 struct map_info *map = mtd->priv;
534 534
535 map_copy_from(map, buf, from, len); 535 map_copy_from(map, buf, from, len);
536 *retlen = len; 536 *retlen = len;
537 return 0; 537 return 0;
538} 538}
539 539
540/* Banked read. Take special care to jump past the holes in the bank 540/* Banked read. Take special care to jump past the holes in the bank
541 mapping. This version assumes symetry in the holes.. */ 541 mapping. This version assumes symetry in the holes.. */
542static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len, 542static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
543 size_t *retlen, u_char *buf) 543 size_t *retlen, u_char *buf)
544{ 544{
545 struct map_info *map = mtd->priv; 545 struct map_info *map = mtd->priv;
@@ -555,17 +555,17 @@ static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
555 if (priv->bank_fill[0] - offset < len) 555 if (priv->bank_fill[0] - offset < len)
556 get = priv->bank_fill[0] - offset; 556 get = priv->bank_fill[0] - offset;
557 557
558 bank /= priv->bank_fill[0]; 558 bank /= priv->bank_fill[0];
559 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get); 559 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
560 560
561 len -= get; 561 len -= get;
562 *retlen += get; 562 *retlen += get;
563 from += get; 563 from += get;
564 } 564 }
565 return 0; 565 return 0;
566} 566}
567 567
568/* Pass the flags value that the flash return before it re-entered read 568/* Pass the flags value that the flash return before it re-entered read
569 mode. */ 569 mode. */
570static void jedec_flash_failed(unsigned char code) 570static void jedec_flash_failed(unsigned char code)
571{ 571{
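
jedec_read_banked() above walks the request one bank at a time: bank_fill[0] is how much real flash each bank holds, my_bank_size is the bank stride in the map, and the space between the two is a hole to be skipped. A simplified standalone model of that address translation (names follow the hunk; bounds checking omitted):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy 'len' bytes starting at logical offset 'from', skipping the
 * unpopulated tail of each bank.  bank_fill is the populated size of a
 * bank, bank_size the bank stride in the mapped window. */
static void banked_copy(const uint8_t *map, uint8_t *buf,
			size_t from, size_t len,
			size_t bank_fill, size_t bank_size)
{
	while (len) {
		size_t bank   = from / bank_fill;	/* logical bank index */
		size_t offset = from % bank_fill;	/* offset inside it */
		size_t get    = len;

		if (bank_fill - offset < len)
			get = bank_fill - offset;

		memcpy(buf, map + bank * bank_size + offset, get);

		buf  += get;
		from += get;
		len  -= get;
	}
}
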
@@ -579,17 +579,17 @@ static void jedec_flash_failed(unsigned char code)
579 printk("mtd: Programming didn't take\n"); 579 printk("mtd: Programming didn't take\n");
580} 580}
581 581
582/* This uses the erasure function described in the AMD Flash Handbook, 582/* This uses the erasure function described in the AMD Flash Handbook,
583 it will work for flashes with a fixed sector size only. Flashes with 583 it will work for flashes with a fixed sector size only. Flashes with
584 a selection of sector sizes (ie the AMD Am29F800B) will need a different 584 a selection of sector sizes (ie the AMD Am29F800B) will need a different
585 routine. This routine tries to parallize erasing multiple chips/sectors 585 routine. This routine tries to parallize erasing multiple chips/sectors
586 where possible */ 586 where possible */
587static int flash_erase(struct mtd_info *mtd, struct erase_info *instr) 587static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
588{ 588{
589 // Does IO to the currently selected chip 589 // Does IO to the currently selected chip
590 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift)) 590 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
591 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift)) 591 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
592 592
593 unsigned long Time = 0; 593 unsigned long Time = 0;
594 unsigned long NoTime = 0; 594 unsigned long NoTime = 0;
595 unsigned long start = instr->addr, len = instr->len; 595 unsigned long start = instr->addr, len = instr->len;
@@ -603,7 +603,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
603 (len % mtd->erasesize) != 0 || 603 (len % mtd->erasesize) != 0 ||
604 (len/mtd->erasesize) == 0) 604 (len/mtd->erasesize) == 0)
605 return -EINVAL; 605 return -EINVAL;
606 606
607 jedec_flash_chip_scan(priv,start,len); 607 jedec_flash_chip_scan(priv,start,len);
608 608
609 // Start the erase sequence on each chip 609 // Start the erase sequence on each chip
@@ -611,16 +611,16 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
611 { 611 {
612 unsigned long off; 612 unsigned long off;
613 struct jedec_flash_chip *chip = priv->chips + I; 613 struct jedec_flash_chip *chip = priv->chips + I;
614 614
615 if (chip->length == 0) 615 if (chip->length == 0)
616 continue; 616 continue;
617 617
618 if (chip->start + chip->length > chip->size) 618 if (chip->start + chip->length > chip->size)
619 { 619 {
620 printk("DIE\n"); 620 printk("DIE\n");
621 return -EIO; 621 return -EIO;
622 } 622 }
623 623
624 flwrite(0xF0,chip->start + 0x555); 624 flwrite(0xF0,chip->start + 0x555);
625 flwrite(0xAA,chip->start + 0x555); 625 flwrite(0xAA,chip->start + 0x555);
626 flwrite(0x55,chip->start + 0x2AA); 626 flwrite(0x55,chip->start + 0x2AA);
@@ -628,8 +628,8 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
628 flwrite(0xAA,chip->start + 0x555); 628 flwrite(0xAA,chip->start + 0x555);
629 flwrite(0x55,chip->start + 0x2AA); 629 flwrite(0x55,chip->start + 0x2AA);
630 630
631 /* Once we start selecting the erase sectors the delay between each 631 /* Once we start selecting the erase sectors the delay between each
632 command must not exceed 50us or it will immediately start erasing 632 command must not exceed 50us or it will immediately start erasing
633 and ignore the other sectors */ 633 and ignore the other sectors */
634 for (off = 0; off < len; off += chip->sectorsize) 634 for (off = 0; off < len; off += chip->sectorsize)
635 { 635 {
@@ -641,19 +641,19 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
641 { 641 {
642 printk("mtd: Ack! We timed out the erase timer!\n"); 642 printk("mtd: Ack! We timed out the erase timer!\n");
643 return -EIO; 643 return -EIO;
644 } 644 }
645 } 645 }
646 } 646 }
647 647
648 /* We could split this into a timer routine and return early, performing 648 /* We could split this into a timer routine and return early, performing
649 background erasure.. Maybe later if the need warrents */ 649 background erasure.. Maybe later if the need warrents */
650 650
651 /* Poll the flash for erasure completion, specs say this can take as long 651 /* Poll the flash for erasure completion, specs say this can take as long
652 as 480 seconds to do all the sectors (for a 2 meg flash). 652 as 480 seconds to do all the sectors (for a 2 meg flash).
653 Erasure time is dependent on chip age, temp and wear.. */ 653 Erasure time is dependent on chip age, temp and wear.. */
654 654
655 /* This being a generic routine assumes a 32 bit bus. It does read32s 655 /* This being a generic routine assumes a 32 bit bus. It does read32s
656 and bundles interleved chips into the same grouping. This will work 656 and bundles interleved chips into the same grouping. This will work
657 for all bus widths */ 657 for all bus widths */
658 Time = 0; 658 Time = 0;
659 NoTime = 0; 659 NoTime = 0;
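
The erase path above issues the AMD six-cycle erase setup and then one sector-select write per sector, with the comment noting the 50 us window between those selects. A standalone sketch of the command sequence; the 0x80 setup cycle and the per-sector 0x30 command fall in lines not shown in the hunks and are assumed from the common AMD command set:

#include <stdint.h>

/* Start an AMD-style sector erase on a memory-mapped 8-bit NOR chip.
 * Every additional sector-select write must follow within ~50us of the
 * previous one, otherwise the chip begins erasing with only the sectors
 * selected so far (the constraint described in the comment above). */
static void start_sector_erase(volatile uint8_t *chip,
			       const unsigned long *sectors, int nsectors)
{
	int i;

	chip[0x555] = 0xF0;	/* reset */
	chip[0x555] = 0xAA;
	chip[0x2AA] = 0x55;
	chip[0x555] = 0x80;	/* erase setup (assumed, not in the hunks) */
	chip[0x555] = 0xAA;
	chip[0x2AA] = 0x55;

	for (i = 0; i < nsectors; i++)
		chip[sectors[i]] = 0x30;	/* select sector (assumed value) */
}
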
@@ -664,20 +664,20 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
664 unsigned todo[4] = {0,0,0,0}; 664 unsigned todo[4] = {0,0,0,0};
665 unsigned todo_left = 0; 665 unsigned todo_left = 0;
666 unsigned J; 666 unsigned J;
667 667
668 if (chip->length == 0) 668 if (chip->length == 0)
669 continue; 669 continue;
670 670
671 /* Find all chips in this data line, realistically this is all 671 /* Find all chips in this data line, realistically this is all
672 or nothing up to the interleve count */ 672 or nothing up to the interleve count */
673 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++) 673 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
674 { 674 {
675 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) == 675 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
676 (chip->base & (~((1<<chip->addrshift)-1)))) 676 (chip->base & (~((1<<chip->addrshift)-1))))
677 { 677 {
678 todo_left++; 678 todo_left++;
679 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1; 679 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
680 } 680 }
681 } 681 }
682 682
683 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1], 683 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
@@ -687,7 +687,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
687 { 687 {
688 __u32 Last[4]; 688 __u32 Last[4];
689 unsigned long Count = 0; 689 unsigned long Count = 0;
690 690
691 /* During erase bit 7 is held low and bit 6 toggles, we watch this, 691 /* During erase bit 7 is held low and bit 6 toggles, we watch this,
692 should it stop toggling or go high then the erase is completed, 692 should it stop toggling or go high then the erase is completed,
693 or this is not really flash ;> */ 693 or this is not really flash ;> */
@@ -718,23 +718,23 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
718 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF; 718 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
719 if (todo[J] == 0) 719 if (todo[J] == 0)
720 continue; 720 continue;
721 721
722 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2) 722 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
723 { 723 {
724// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2); 724// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
725 continue; 725 continue;
726 } 726 }
727 727
728 if (Byte1 == Byte2) 728 if (Byte1 == Byte2)
729 { 729 {
730 jedec_flash_failed(Byte3); 730 jedec_flash_failed(Byte3);
731 return -EIO; 731 return -EIO;
732 } 732 }
733 733
734 todo[J] = 0; 734 todo[J] = 0;
735 todo_left--; 735 todo_left--;
736 } 736 }
737 737
738/* if (NoTime == 0) 738/* if (NoTime == 0)
739 Time += HZ/10 - schedule_timeout(HZ/10);*/ 739 Time += HZ/10 - schedule_timeout(HZ/10);*/
740 NoTime = 0; 740 NoTime = 0;
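
Completion is detected with the toggle-bit scheme described in the comments above: while the embedded operation runs, bit 7 reads back low and bit 6 toggles on every read. The datasheet form of that algorithm as a standalone sketch (the driver's own loop keeps a small history in Last[] and differs in detail):

#include <stdint.h>

/* Textbook AMD DQ6 toggle-bit wait.  Returns 0 once the operation has
 * finished, -1 if the chip reports an internal time-out via DQ5. */
static int toggle_wait(volatile const uint8_t *status)
{
	for (;;) {
		uint8_t a = *status;
		uint8_t b = *status;

		if (((a ^ b) & 0x40) == 0)	/* DQ6 stopped toggling */
			return 0;		/* operation complete */

		if (b & 0x20) {			/* DQ5 set: internal time-out */
			a = *status;
			b = *status;
			if (((a ^ b) & 0x40) == 0)
				return 0;	/* finished just in time */
			return -1;		/* genuine failure */
		}
	}
}
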
@@ -751,7 +751,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
751 break; 751 break;
752 } 752 }
753 Count++; 753 Count++;
754 754
755/* // Count time, max of 15s per sector (according to AMD) 755/* // Count time, max of 15s per sector (according to AMD)
756 if (Time > 15*len/mtd->erasesize*HZ) 756 if (Time > 15*len/mtd->erasesize*HZ)
757 { 757 {
@@ -759,38 +759,38 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
759 return -EIO; 759 return -EIO;
760 } */ 760 } */
761 } 761 }
762 762
763 // Skip to the next chip if we used chip erase 763 // Skip to the next chip if we used chip erase
764 if (chip->length == chip->size) 764 if (chip->length == chip->size)
765 off = chip->size; 765 off = chip->size;
766 else 766 else
767 off += chip->sectorsize; 767 off += chip->sectorsize;
768 768
769 if (off >= chip->length) 769 if (off >= chip->length)
770 break; 770 break;
771 NoTime = 1; 771 NoTime = 1;
772 } 772 }
773 773
774 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++) 774 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
775 { 775 {
776 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) == 776 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
777 (chip->base & (~((1<<chip->addrshift)-1)))) 777 (chip->base & (~((1<<chip->addrshift)-1))))
778 priv->chips[J].length = 0; 778 priv->chips[J].length = 0;
779 } 779 }
780 } 780 }
781 781
782 //printk("done\n"); 782 //printk("done\n");
783 instr->state = MTD_ERASE_DONE; 783 instr->state = MTD_ERASE_DONE;
784 mtd_erase_callback(instr); 784 mtd_erase_callback(instr);
785 return 0; 785 return 0;
786 786
787 #undef flread 787 #undef flread
788 #undef flwrite 788 #undef flwrite
789} 789}
790 790
791/* This is the simple flash writing function. It writes to every byte, in 791/* This is the simple flash writing function. It writes to every byte, in
792 sequence. It takes care of how to properly address the flash if 792 sequence. It takes care of how to properly address the flash if
793 the flash is interleved. It can only be used if all the chips in the 793 the flash is interleved. It can only be used if all the chips in the
794 array are identical!*/ 794 array are identical!*/
795static int flash_write(struct mtd_info *mtd, loff_t start, size_t len, 795static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
796 size_t *retlen, const u_char *buf) 796 size_t *retlen, const u_char *buf)
@@ -800,25 +800,25 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
800 of addrshift (interleave index) and then adds the control register index. */ 800 of addrshift (interleave index) and then adds the control register index. */
801 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift)) 801 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
802 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift)) 802 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
803 803
804 struct map_info *map = mtd->priv; 804 struct map_info *map = mtd->priv;
805 struct jedec_private *priv = map->fldrv_priv; 805 struct jedec_private *priv = map->fldrv_priv;
806 unsigned long base; 806 unsigned long base;
807 unsigned long off; 807 unsigned long off;
808 size_t save_len = len; 808 size_t save_len = len;
809 809
810 if (start + len > mtd->size) 810 if (start + len > mtd->size)
811 return -EIO; 811 return -EIO;
812 812
813 //printk("Here"); 813 //printk("Here");
814 814
815 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len); 815 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
816 while (len != 0) 816 while (len != 0)
817 { 817 {
818 struct jedec_flash_chip *chip = priv->chips; 818 struct jedec_flash_chip *chip = priv->chips;
819 unsigned long bank; 819 unsigned long bank;
820 unsigned long boffset; 820 unsigned long boffset;
821 821
822 // Compute the base of the flash. 822 // Compute the base of the flash.
823 off = ((unsigned long)start) % (chip->size << chip->addrshift); 823 off = ((unsigned long)start) % (chip->size << chip->addrshift);
824 base = start - off; 824 base = start - off;
@@ -828,10 +828,10 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
828 boffset = base & (priv->bank_fill[0]-1); 828 boffset = base & (priv->bank_fill[0]-1);
829 bank = (bank/priv->bank_fill[0])*my_bank_size; 829 bank = (bank/priv->bank_fill[0])*my_bank_size;
830 base = bank + boffset; 830 base = bank + boffset;
831 831
832 // printk("Flasing %X %X %X\n",base,chip->size,len); 832 // printk("Flasing %X %X %X\n",base,chip->size,len);
833 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift); 833 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
834 834
835 // Loop over this page 835 // Loop over this page
836 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++) 836 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
837 { 837 {
@@ -845,7 +845,7 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
845 } 845 }
846 if (((~oldbyte) & *buf) != 0) 846 if (((~oldbyte) & *buf) != 0)
847 printk("mtd: warn: Trying to set a 0 to a 1\n"); 847 printk("mtd: warn: Trying to set a 0 to a 1\n");
848 848
849 // Write 849 // Write
850 flwrite(0xAA,0x555); 850 flwrite(0xAA,0x555);
851 flwrite(0x55,0x2AA); 851 flwrite(0x55,0x2AA);
@@ -854,10 +854,10 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
854 Last[0] = map_read8(map,base + off); 854 Last[0] = map_read8(map,base + off);
855 Last[1] = map_read8(map,base + off); 855 Last[1] = map_read8(map,base + off);
856 Last[2] = map_read8(map,base + off); 856 Last[2] = map_read8(map,base + off);
857 857
858 /* Wait for the flash to finish the operation. We store the last 4 858 /* Wait for the flash to finish the operation. We store the last 4
859 status bytes that have been retrieved so we can determine why 859 status bytes that have been retrieved so we can determine why
860 it failed. The toggle bits keep toggling when there is a 860 it failed. The toggle bits keep toggling when there is a
861 failure */ 861 failure */
862 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && 862 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
863 Count < 10000; Count++) 863 Count < 10000; Count++)
@@ -866,7 +866,7 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
866 { 866 {
867 jedec_flash_failed(Last[(Count - 3) % 4]); 867 jedec_flash_failed(Last[(Count - 3) % 4]);
868 return -EIO; 868 return -EIO;
869 } 869 }
870 } 870 }
871 } 871 }
872 *retlen = save_len; 872 *retlen = save_len;
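
Each byte in flash_write() above is programmed with the four-cycle AMD sequence (unlock, unlock, program command, data) and the location is then polled until two successive reads agree. A standalone sketch of one byte; the 0xA0 program command and the data write sit in lines outside the hunks and are assumed from the common AMD command set. The "set a 0 to a 1" warning above reflects that NOR programming can only clear bits:

#include <stdint.h>

/* Program one byte of an 8-bit AMD-style NOR chip and wait for the
 * status reads to settle.  Returns 0 on success, -1 if the data never
 * verifies within the polling budget. */
static int program_byte(volatile uint8_t *chip, unsigned long off, uint8_t val)
{
	unsigned int i;

	chip[0x555] = 0xAA;
	chip[0x2AA] = 0x55;
	chip[0x555] = 0xA0;	/* program command (assumed, not in the hunks) */
	chip[off]   = val;

	/* Same criterion as the loop above: done when two consecutive
	 * reads stop toggling, bounded to 10000 polls as there. */
	for (i = 0; i < 10000; i++) {
		uint8_t a = chip[off];
		uint8_t b = chip[off];

		if (a == b)
			return (a == val) ? 0 : -1;
	}
	return -1;
}
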
@@ -885,24 +885,24 @@ static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start
885 // Zero the records 885 // Zero the records
886 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 886 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
887 priv->chips[I].start = priv->chips[I].length = 0; 887 priv->chips[I].start = priv->chips[I].length = 0;
888 888
889 // Intersect the region with each chip 889 // Intersect the region with each chip
890 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 890 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
891 { 891 {
892 struct jedec_flash_chip *chip = priv->chips + I; 892 struct jedec_flash_chip *chip = priv->chips + I;
893 unsigned long ByteStart; 893 unsigned long ByteStart;
894 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift); 894 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
895 895
896 // End is before this chip or the start is after it 896 // End is before this chip or the start is after it
897 if (start+len < chip->offset || 897 if (start+len < chip->offset ||
898 ChipEndByte - (1 << chip->addrshift) < start) 898 ChipEndByte - (1 << chip->addrshift) < start)
899 continue; 899 continue;
900 900
901 if (start < chip->offset) 901 if (start < chip->offset)
902 { 902 {
903 ByteStart = chip->offset; 903 ByteStart = chip->offset;
904 chip->start = 0; 904 chip->start = 0;
905 } 905 }
906 else 906 else
907 { 907 {
908 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift; 908 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
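
jedec_flash_chip_scan() above clips the requested erase range against each chip's window. With interleaving, a chip that owns 'size' bytes occupies 'size << addrshift' bytes of the map but only every 2^addrshift-th byte of it, so map offsets are converted to chip offsets with a right shift. A standalone model of that intersection (field names follow the hunk; the driver's round-up of the start offset is simplified away):

#include <stdint.h>

struct chip_window {
	unsigned long offset;		/* chip's first byte in the map */
	unsigned long size;		/* bytes on the chip itself */
	unsigned int  addrshift;	/* log2(interleave) */
	unsigned long start, length;	/* result, in chip-relative bytes */
};

/* Intersect the map-relative range [start, start+len) with one chip.
 * Returns 0 when the chip is not touched by the range at all. */
static int clip_to_chip(struct chip_window *c,
			unsigned long start, unsigned long len)
{
	unsigned long map_bytes = c->size << c->addrshift;
	unsigned long end = start + len;

	if (end <= c->offset || start >= c->offset + map_bytes)
		return 0;			/* no overlap */

	if (start <= c->offset)
		c->start = 0;
	else
		c->start = (start - c->offset) >> c->addrshift;

	if (end > c->offset + map_bytes)
		end = c->offset + map_bytes;

	c->length = ((end - c->offset) >> c->addrshift) - c->start;
	return 1;
}
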
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 30da428eb7b9..edb306c03c0a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Common Flash Interface probe code. 2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd. 3 (C) 2000 Red Hat. GPL'd.
4 $Id: jedec_probe.c,v 1.63 2005/02/14 16:30:32 bjd Exp $ 4 $Id: jedec_probe.c,v 1.66 2005/11/07 11:14:23 gleixner Exp $
5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5) 5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
6 for the standard this probe goes back to. 6 for the standard this probe goes back to.
7 7
@@ -1719,7 +1719,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
1719 1719
1720static struct mtd_info *jedec_probe(struct map_info *map); 1720static struct mtd_info *jedec_probe(struct map_info *map);
1721 1721
1722static inline u32 jedec_read_mfr(struct map_info *map, __u32 base, 1722static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1723 struct cfi_private *cfi) 1723 struct cfi_private *cfi)
1724{ 1724{
1725 map_word result; 1725 map_word result;
@@ -1730,7 +1730,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1730 return result.x[0] & mask; 1730 return result.x[0] & mask;
1731} 1731}
1732 1732
1733static inline u32 jedec_read_id(struct map_info *map, __u32 base, 1733static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1734 struct cfi_private *cfi) 1734 struct cfi_private *cfi)
1735{ 1735{
1736 map_word result; 1736 map_word result;
@@ -1741,7 +1741,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1741 return result.x[0] & mask; 1741 return result.x[0] & mask;
1742} 1742}
1743 1743
1744static inline void jedec_reset(u32 base, struct map_info *map, 1744static inline void jedec_reset(u32 base, struct map_info *map,
1745 struct cfi_private *cfi) 1745 struct cfi_private *cfi)
1746{ 1746{
1747 /* Reset */ 1747 /* Reset */
@@ -1765,7 +1765,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1765 * so ensure we're in read mode. Send both the Intel and the AMD command 1765 * so ensure we're in read mode. Send both the Intel and the AMD command
1766 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1766 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
1767 * this should be safe. 1767 * this should be safe.
1768 */ 1768 */
1769 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); 1769 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
1770 /* FIXME - should have reset delay before continuing */ 1770 /* FIXME - should have reset delay before continuing */
1771} 1771}
@@ -1807,14 +1807,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1807 printk("Found: %s\n",jedec_table[index].name); 1807 printk("Found: %s\n",jedec_table[index].name);
1808 1808
1809 num_erase_regions = jedec_table[index].NumEraseRegions; 1809 num_erase_regions = jedec_table[index].NumEraseRegions;
1810 1810
1811 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1811 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1812 if (!p_cfi->cfiq) { 1812 if (!p_cfi->cfiq) {
1813 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 1813 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
1814 return 0; 1814 return 0;
1815 } 1815 }
1816 1816
1817 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1817 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
1818 1818
1819 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; 1819 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
1820 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; 1820 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
@@ -1969,7 +1969,7 @@ static inline int jedec_match( __u32 base,
1969 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1969 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1970 /* FIXME - should have a delay before continuing */ 1970 /* FIXME - should have a delay before continuing */
1971 1971
1972 match_done: 1972 match_done:
1973 return rc; 1973 return rc;
1974} 1974}
1975 1975
@@ -1998,23 +1998,23 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
1998 "Probe at base(0x%08x) past the end of the map(0x%08lx)\n", 1998 "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
1999 base, map->size -1); 1999 base, map->size -1);
2000 return 0; 2000 return 0;
2001 2001
2002 } 2002 }
2003 /* Ensure the unlock addresses we try stay inside the map */ 2003 /* Ensure the unlock addresses we try stay inside the map */
2004 probe_offset1 = cfi_build_cmd_addr( 2004 probe_offset1 = cfi_build_cmd_addr(
2005 cfi->addr_unlock1, 2005 cfi->addr_unlock1,
2006 cfi_interleave(cfi), 2006 cfi_interleave(cfi),
2007 cfi->device_type); 2007 cfi->device_type);
2008 probe_offset2 = cfi_build_cmd_addr( 2008 probe_offset2 = cfi_build_cmd_addr(
2009 cfi->addr_unlock1, 2009 cfi->addr_unlock1,
2010 cfi_interleave(cfi), 2010 cfi_interleave(cfi),
2011 cfi->device_type); 2011 cfi->device_type);
2012 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 2012 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
2013 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 2013 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
2014 { 2014 {
2015 goto retry; 2015 goto retry;
2016 } 2016 }
2017 2017
2018 /* Reset */ 2018 /* Reset */
2019 jedec_reset(base, map, cfi); 2019 jedec_reset(base, map, cfi);
2020 2020
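
The bounds check above pre-computes the map offsets of the two unlock cycles with cfi_build_cmd_addr() so that a probe near the end of the map cannot poke past it; as shown, both offsets are built from cfi->addr_unlock1. A standalone sketch of the same check written with both unlock addresses, on the assumption that the second offset is meant to track addr_unlock2, and with cfi_build_cmd_addr() modelled here as plain width-and-interleave scaling:

#include <stdint.h>

/* Scale a chip-relative command offset (0x555, 0x2AA, ...) by the device
 * width and the interleave to get a map offset.  Simplified model of
 * cfi_build_cmd_addr(); the real helper lives in <linux/mtd/cfi.h>. */
static uint32_t build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
	return cmd_ofs * type * interleave;
}

/* Nonzero if both unlock-cycle accesses for a chip at 'base' stay inside
 * a map of 'map_size' bytes when read with the given bank width. */
static int probe_fits_in_map(uint32_t base, unsigned long map_size,
			     unsigned int bankwidth,
			     uint32_t addr_unlock1, uint32_t addr_unlock2,
			     int interleave, int type)
{
	uint32_t off1 = build_cmd_addr(addr_unlock1, interleave, type);
	uint32_t off2 = build_cmd_addr(addr_unlock2, interleave, type);

	return (base + off1 + bankwidth < map_size) &&
	       (base + off2 + bankwidth < map_size);
}
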
@@ -2027,13 +2027,13 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2027 /* FIXME - should have a delay before continuing */ 2027 /* FIXME - should have a delay before continuing */
2028 2028
2029 if (!cfi->numchips) { 2029 if (!cfi->numchips) {
2030 /* This is the first time we're called. Set up the CFI 2030 /* This is the first time we're called. Set up the CFI
2031 stuff accordingly and return */ 2031 stuff accordingly and return */
2032 2032
2033 cfi->mfr = jedec_read_mfr(map, base, cfi); 2033 cfi->mfr = jedec_read_mfr(map, base, cfi);
2034 cfi->id = jedec_read_id(map, base, cfi); 2034 cfi->id = jedec_read_id(map, base, cfi);
2035 DEBUG(MTD_DEBUG_LEVEL3, 2035 DEBUG(MTD_DEBUG_LEVEL3,
2036 "Search for id:(%02x %02x) interleave(%d) type(%d)\n", 2036 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2037 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); 2037 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2038 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) { 2038 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
2039 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { 2039 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
@@ -2062,7 +2062,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2062 return 0; 2062 return 0;
2063 } 2063 }
2064 } 2064 }
2065 2065
2066 /* Check each previous chip locations to see if it's an alias */ 2066 /* Check each previous chip locations to see if it's an alias */
2067 for (i=0; i < (base >> cfi->chipshift); i++) { 2067 for (i=0; i < (base >> cfi->chipshift); i++) {
2068 unsigned long start; 2068 unsigned long start;
@@ -2083,7 +2083,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2083 map->name, base, start); 2083 map->name, base, start);
2084 return 0; 2084 return 0;
2085 } 2085 }
2086 2086
2087 /* Yes, it's actually got the device IDs as data. Most 2087 /* Yes, it's actually got the device IDs as data. Most
2088 * unfortunate. Stick the new chip in read mode 2088 * unfortunate. Stick the new chip in read mode
2089 * too and if it's the same, assume it's an alias. */ 2089 * too and if it's the same, assume it's an alias. */
@@ -2097,20 +2097,20 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2097 } 2097 }
2098 } 2098 }
2099 } 2099 }
2100 2100
2101 /* OK, if we got to here, then none of the previous chips appear to 2101 /* OK, if we got to here, then none of the previous chips appear to
2102 be aliases for the current one. */ 2102 be aliases for the current one. */
2103 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ 2103 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
2104 cfi->numchips++; 2104 cfi->numchips++;
2105 2105
2106ok_out: 2106ok_out:
2107 /* Put it back into Read Mode */ 2107 /* Put it back into Read Mode */
2108 jedec_reset(base, map, cfi); 2108 jedec_reset(base, map, cfi);
2109 2109
2110 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", 2110 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
2111 map->name, cfi_interleave(cfi), cfi->device_type*8, base, 2111 map->name, cfi_interleave(cfi), cfi->device_type*8, base,
2112 map->bankwidth*8); 2112 map->bankwidth*8);
2113 2113
2114 return 1; 2114 return 1;
2115} 2115}
2116 2116
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index c6c83833cc32..a611de9b1515 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * Common code to handle absent "placeholder" devices 2 * Common code to handle absent "placeholder" devices
3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com> 3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
4 * $Id: map_absent.c,v 1.5 2004/11/16 18:29:00 dwmw2 Exp $ 4 * $Id: map_absent.c,v 1.6 2005/11/07 11:14:23 gleixner Exp $
5 * 5 *
6 * This map driver is used to allocate "placeholder" MTD 6 * This map driver is used to allocate "placeholder" MTD
7 * devices on systems that have socketed/removable media. 7 * devices on systems that have socketed/removable media.
8 * Use of this driver as a fallback preserves the expected 8 * Use of this driver as a fallback preserves the expected
9 * registration of MTD device nodes regardless of probe outcome. 9 * registration of MTD device nodes regardless of probe outcome.
10 * A usage example is as follows: 10 * A usage example is as follows:
11 * 11 *
@@ -80,7 +80,7 @@ static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t
80static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 80static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
81{ 81{
82 *retlen = 0; 82 *retlen = 0;
83 return -ENODEV; 83 return -ENODEV;
84} 84}
85 85
86static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr) 86static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
index c3cf0f63bc93..2d26bdef82d5 100644
--- a/drivers/mtd/chips/sharp.c
+++ b/drivers/mtd/chips/sharp.c
@@ -4,7 +4,7 @@
4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org> 4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
5 * 2000,2001 Lineo, Inc. 5 * 2000,2001 Lineo, Inc.
6 * 6 *
7 * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $ 7 * $Id: sharp.c,v 1.16 2005/11/07 11:14:23 gleixner Exp $
8 * 8 *
9 * Devices supported: 9 * Devices supported:
10 * LH28F016SCT Symmetrical block flash memory, 2Mx8 10 * LH28F016SCT Symmetrical block flash memory, 2Mx8
@@ -31,6 +31,7 @@
31#include <linux/mtd/cfi.h> 31#include <linux/mtd/cfi.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/slab.h>
34 35
35#define CMD_RESET 0xffffffff 36#define CMD_RESET 0xffffffff
36#define CMD_READ_ID 0x90909090 37#define CMD_READ_ID 0x90909090
@@ -214,7 +215,7 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
214/* This function returns with the chip->mutex lock held. */ 215/* This function returns with the chip->mutex lock held. */
215static int sharp_wait(struct map_info *map, struct flchip *chip) 216static int sharp_wait(struct map_info *map, struct flchip *chip)
216{ 217{
217 __u16 status; 218 int status, i;
218 unsigned long timeo = jiffies + HZ; 219 unsigned long timeo = jiffies + HZ;
219 DECLARE_WAITQUEUE(wait, current); 220 DECLARE_WAITQUEUE(wait, current);
220 int adr = 0; 221 int adr = 0;
@@ -227,13 +228,11 @@ retry:
227 map_write32(map,CMD_READ_STATUS,adr); 228 map_write32(map,CMD_READ_STATUS,adr);
228 chip->state = FL_STATUS; 229 chip->state = FL_STATUS;
229 case FL_STATUS: 230 case FL_STATUS:
230 status = map_read32(map,adr); 231 for(i=0;i<100;i++){
231//printk("status=%08x\n",status); 232 status = map_read32(map,adr);
232 233 if((status & SR_READY)==SR_READY)
233 udelay(100); 234 break;
234 if((status & SR_READY)!=SR_READY){ 235 udelay(1);
235//printk(".status=%08x\n",status);
236 udelay(100);
237 } 236 }
238 break; 237 break;
239 default: 238 default:
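
The sharp_wait() change above replaces two fixed 100 us delays with a bounded poll of the status register: read the status word, stop as soon as every interleaved chip reports ready, otherwise back off for 1 us, up to 100 tries. The same pattern as a standalone helper, with an explicit time-out return added; the ready mask corresponds to sharp.c's SR_READY, the Intel-style "write state machine ready" bit replicated across the bus:

#include <stdint.h>

/* Poll a 32-bit status word until all chips report ready or max_polls
 * 1-microsecond waits have elapsed.  delay_us() stands in for udelay().
 * Returns 0 when ready, -1 on time-out. */
static int wait_status_ready(volatile const uint32_t *status_reg,
			     uint32_t ready_mask, unsigned int max_polls,
			     void (*delay_us)(unsigned int))
{
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		if ((*status_reg & ready_mask) == ready_mask)
			return 0;
		delay_us(1);
	}
	return -1;
}
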
@@ -460,12 +459,12 @@ static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
460 remove_wait_queue(&chip->wq, &wait); 459 remove_wait_queue(&chip->wq, &wait);
461 460
462 //spin_lock_bh(chip->mutex); 461 //spin_lock_bh(chip->mutex);
463 462
464 if (signal_pending(current)){ 463 if (signal_pending(current)){
465 ret = -EINTR; 464 ret = -EINTR;
466 goto out; 465 goto out;
467 } 466 }
468 467
469 } 468 }
470 ret = -ETIME; 469 ret = -ETIME;
471out: 470out:
@@ -564,7 +563,7 @@ static int sharp_suspend(struct mtd_info *mtd)
564static void sharp_resume(struct mtd_info *mtd) 563static void sharp_resume(struct mtd_info *mtd)
565{ 564{
566 printk("sharp_resume()\n"); 565 printk("sharp_resume()\n");
567 566
568} 567}
569 568
570static void sharp_destroy(struct mtd_info *mtd) 569static void sharp_destroy(struct mtd_info *mtd)