Diffstat (limited to 'drivers/mtd/chips')
-rw-r--r--  drivers/mtd/chips/Kconfig           |  22
-rw-r--r--  drivers/mtd/chips/Makefile          |   4
-rw-r--r--  drivers/mtd/chips/amd_flash.c       |  80
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 484
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 156
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 183
-rw-r--r--  drivers/mtd/chips/cfi_probe.c       |  98
-rw-r--r--  drivers/mtd/chips/cfi_util.c        |  25
-rw-r--r--  drivers/mtd/chips/chipreg.c         |   6
-rw-r--r--  drivers/mtd/chips/fwh_lock.h        |   6
-rw-r--r--  drivers/mtd/chips/gen_probe.c       |  33
-rw-r--r--  drivers/mtd/chips/jedec.c           | 206
-rw-r--r--  drivers/mtd/chips/jedec_probe.c     |  48
-rw-r--r--  drivers/mtd/chips/map_absent.c      |   8
-rw-r--r--  drivers/mtd/chips/sharp.c           |  23
15 files changed, 743 insertions(+), 639 deletions(-)
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index df95d2158b1..eafa23f5cbd 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,5 +1,5 @@
 # drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.15 2005/06/06 23:04:35 tpoynor Exp $
+# $Id: Kconfig,v 1.18 2005/11/07 11:14:22 gleixner Exp $
 
 menu "RAM/ROM/Flash chip drivers"
 depends on MTD!=n
@@ -39,7 +39,7 @@ config MTD_CFI_ADV_OPTIONS
 If you need to specify a specific endianness for access to flash
 chips, or if you wish to reduce the size of the kernel by including
 support for only specific arrangements of flash chips, say 'Y'. This
 option does not directly affect the code, but will enable other
 configuration options which allow you to do so.
 
 If unsure, say 'N'.
@@ -56,7 +56,7 @@ config MTD_CFI_NOSWAP
 data bits when writing the 'magic' commands to the chips. Saying
 'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
 enabled, means that the CPU will not do any swapping; the chips
 are expected to be wired to the CPU in 'host-endian' form.
 Specific arrangements are possible with the BIG_ENDIAN_BYTE and
 LITTLE_ENDIAN_BYTE, if the bytes are reversed.
 
@@ -79,10 +79,10 @@ config MTD_CFI_GEOMETRY
 bool "Specific CFI Flash geometry selection"
 depends on MTD_CFI_ADV_OPTIONS
 help
 This option does not affect the code directly, but will enable
 some other configuration options which would allow you to reduce
 the size of the kernel by including support for only certain
 arrangements of CFI chips. If unsure, say 'N' and all options
 which are supported by the current code will be enabled.
 
 config MTD_MAP_BANK_WIDTH_1
@@ -197,7 +197,7 @@ config MTD_CFI_AMDSTD
 help
 The Common Flash Interface defines a number of different command
 sets which a CFI-compliant chip may claim to implement. This code
 provides support for one of those command sets, used on chips
 including the AMD Am29LV320.
 
 config MTD_CFI_AMDSTD_RETRY
@@ -237,14 +237,14 @@ config MTD_RAM
 tristate "Support for RAM chips in bus mapping"
 depends on MTD
 help
 This option enables basic support for RAM chips accessed through
 a bus mapping driver.
 
 config MTD_ROM
 tristate "Support for ROM chips in bus mapping"
 depends on MTD
 help
 This option enables basic support for ROM chips accessed through
 a bus mapping driver.
 
 config MTD_ABSENT
@@ -275,7 +275,7 @@ config MTD_AMDSTD
 depends on MTD && MTD_OBSOLETE_CHIPS
 help
 This option enables support for flash chips using AMD-compatible
 commands, including some which are not CFI-compatible and hence
 cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
 
 It also works on AMD compatible chips that do conform to CFI.
@@ -285,7 +285,7 @@ config MTD_SHARP
 depends on MTD && MTD_OBSOLETE_CHIPS
 help
 This option enables support for flash chips using Sharp-compatible
 commands, including some which are not CFI-compatible and hence
 cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
 
 config MTD_JEDEC
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 6830489828c..8afe3092c4e 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,7 +1,7 @@
 #
 # linux/drivers/chips/Makefile
 #
-# $Id: Makefile.common,v 1.4 2004/07/12 16:07:30 dwmw2 Exp $
+# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
 
 # *** BIG UGLY NOTE ***
 #
@@ -11,7 +11,7 @@
 # the CFI command set drivers are linked before gen_probe.o
 
 obj-$(CONFIG_MTD) += chipreg.o
 obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
 obj-$(CONFIG_MTD_CFI) += cfi_probe.o
 obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
 obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index 2dafeba3f3d..fdb91b6f1d9 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -3,7 +3,7 @@
  *
  * Author: Jonas Holmberg <jonas.holmberg@axis.com>
  *
- * $Id: amd_flash.c,v 1.27 2005/02/04 07:43:09 jonashg Exp $
+ * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
  *
  * Copyright (c) 2001 Axis Communications AB
  *
@@ -93,9 +93,9 @@
 #define D6_MASK 0x40
 
 struct amd_flash_private {
 int device_type;
 int interleave;
 int numchips;
 unsigned long chipshift;
 // const char *im_name;
 struct flchip chips[0];
@@ -253,7 +253,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
 int i;
 int retval = 0;
 int lock_status;
 
 map = mtd->priv;
 
 /* Pass the whole chip through sector by sector and check for each
@@ -273,7 +273,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
 unlock_sector(map, eraseoffset, is_unlock);
 
 lock_status = is_sector_locked(map, eraseoffset);
 
 if (is_unlock && lock_status) {
 printk("Cannot unlock sector at address %x length %xx\n",
 eraseoffset, merip->erasesize);
@@ -305,7 +305,7 @@ static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 /*
  * Reads JEDEC manufacturer ID and device ID and returns the index of the first
  * matching table entry (-1 if not found or alias for already found chip).
  */
 static int probe_new_chip(struct mtd_info *mtd, __u32 base,
 struct flchip *chips,
 struct amd_flash_private *private,
@@ -636,7 +636,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
 { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
 }
 }
 };
 
 struct mtd_info *mtd;
@@ -701,7 +701,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
 
 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
 mtd->numeraseregions, GFP_KERNEL);
 if (!mtd->eraseregions) {
 printk(KERN_WARNING "%s: Failed to allocate "
 "memory for MTD erase region info\n", map->name);
 kfree(mtd);
@@ -739,12 +739,12 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
 mtd->type = MTD_NORFLASH;
 mtd->flags = MTD_CAP_NORFLASH;
 mtd->name = map->name;
 mtd->erase = amd_flash_erase;
 mtd->read = amd_flash_read;
 mtd->write = amd_flash_write;
 mtd->sync = amd_flash_sync;
 mtd->suspend = amd_flash_suspend;
 mtd->resume = amd_flash_resume;
 mtd->lock = amd_flash_lock;
 mtd->unlock = amd_flash_unlock;
 
@@ -789,7 +789,7 @@ retry:
 map->name, chip->state);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
 
 spin_unlock_bh(chip->mutex);
 
 schedule();
@@ -802,7 +802,7 @@ retry:
 timeo = jiffies + HZ;
 
 goto retry;
 }
 
 adr += chip->start;
 
@@ -889,7 +889,7 @@ retry:
 map->name, chip->state);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
 
 spin_unlock_bh(chip->mutex);
 
 schedule();
@@ -901,7 +901,7 @@ retry:
 timeo = jiffies + HZ;
 
 goto retry;
 }
 
 chip->state = FL_WRITING;
 
@@ -911,7 +911,7 @@ retry:
 wide_write(map, datum, adr);
 
 times_left = 500000;
 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
 if (need_resched()) {
 spin_unlock_bh(chip->mutex);
 schedule();
@@ -989,7 +989,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
 if (ret) {
 return ret;
 }
 
 ofs += n;
 buf += n;
 (*retlen) += n;
@@ -1002,7 +1002,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
 }
 }
 }
 
 /* We are now aligned, write as much as possible. */
 while(len >= map->buswidth) {
 __u32 datum;
@@ -1063,7 +1063,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
 if (ret) {
 return ret;
 }
 
 (*retlen) += n;
 }
 
@@ -1085,7 +1085,7 @@ retry:
 if (chip->state != FL_READY){
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
 
 spin_unlock_bh(chip->mutex);
 
 schedule();
@@ -1098,7 +1098,7 @@ retry:
 timeo = jiffies + HZ;
 
 goto retry;
 }
 
 chip->state = FL_ERASING;
 
@@ -1106,30 +1106,30 @@ retry:
 ENABLE_VPP(map);
 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
 
 timeo = jiffies + (HZ * 20);
 
 spin_unlock_bh(chip->mutex);
 msleep(1000);
 spin_lock_bh(chip->mutex);
 
 while (flash_is_busy(map, adr, private->interleave)) {
 
 if (chip->state != FL_ERASING) {
 /* Someone's suspended the erase. Sleep */
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
 
 spin_unlock_bh(chip->mutex);
 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
 map->name);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
 
 if (signal_pending(current)) {
 return -EINTR;
 }
 
 timeo = jiffies + (HZ*2); /* FIXME */
 spin_lock_bh(chip->mutex);
 continue;
@@ -1145,7 +1145,7 @@ retry:
 
 return -EIO;
 }
 
 /* Latency issues. Drop the lock, wait a while and retry */
 spin_unlock_bh(chip->mutex);
 
@@ -1153,7 +1153,7 @@ retry:
 schedule();
 else
 udelay(1);
 
 spin_lock_bh(chip->mutex);
 }
 
@@ -1180,7 +1180,7 @@ retry:
 return -EIO;
 }
 }
 
 DISABLE_VPP(map);
 chip->state = FL_READY;
 wake_up(&chip->wq);
@@ -1246,7 +1246,7 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
 * with the erase region at that address.
 */
 
 while ((i < mtd->numeraseregions) &&
 ((instr->addr + instr->len) >= regions[i].offset)) {
 i++;
 }
@@ -1293,10 +1293,10 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
 }
 }
 }
 
 instr->state = MTD_ERASE_DONE;
 mtd_erase_callback(instr);
 
 return 0;
 }
 
@@ -1324,7 +1324,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
 case FL_JEDEC_QUERY:
 chip->oldstate = chip->state;
 chip->state = FL_SYNCING;
 /* No need to wake_up() on this state change -
 * as the whole point is that nobody can do anything
 * with the chip now anyway.
 */
@@ -1335,13 +1335,13 @@ static void amd_flash_sync(struct mtd_info *mtd)
 default:
 /* Not an idle state */
 add_wait_queue(&chip->wq, &wait);
 
 spin_unlock_bh(chip->mutex);
 
 schedule();
 
 remove_wait_queue(&chip->wq, &wait);
 
 goto retry;
 }
 }
@@ -1351,7 +1351,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
 chip = &private->chips[i];
 
 spin_lock_bh(chip->mutex);
 
 if (chip->state == FL_SYNCING) {
 chip->state = chip->oldstate;
 wake_up(&chip->wq);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c3fc9b2f21f..143f01a4c17 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,9 +4,9 @@
4 * 4 *
5 * (C) 2000 Red Hat. GPL'd 5 * (C) 2000 Red Hat. GPL'd
6 * 6 *
7 * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $ 7 * $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $
8 *
8 * 9 *
9 *
10 * 10/10/2000 Nicolas Pitre <nico@cam.org> 10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and 11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.) 12 * independent of the flash geometry (buswidth, interleave, etc.)
@@ -51,6 +51,7 @@
51static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 51static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 52static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 53static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
54static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); 55static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
55static void cfi_intelext_sync (struct mtd_info *); 56static void cfi_intelext_sync (struct mtd_info *);
56static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len); 57static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
@@ -105,6 +106,7 @@ static struct mtd_chip_driver cfi_intelext_chipdrv = {
105static void cfi_tell_features(struct cfi_pri_intelext *extp) 106static void cfi_tell_features(struct cfi_pri_intelext *extp)
106{ 107{
107 int i; 108 int i;
109 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
108 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport); 110 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
109 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported"); 111 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
110 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported"); 112 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
@@ -116,36 +118,43 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
116 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported"); 118 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
117 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported"); 119 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
118 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported"); 120 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
119 for (i=10; i<32; i++) { 121 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
120 if (extp->FeatureSupport & (1<<i)) 122 for (i=11; i<32; i++) {
123 if (extp->FeatureSupport & (1<<i))
121 printk(" - Unknown Bit %X: supported\n", i); 124 printk(" - Unknown Bit %X: supported\n", i);
122 } 125 }
123 126
124 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport); 127 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
125 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported"); 128 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
126 for (i=1; i<8; i++) { 129 for (i=1; i<8; i++) {
127 if (extp->SuspendCmdSupport & (1<<i)) 130 if (extp->SuspendCmdSupport & (1<<i))
128 printk(" - Unknown Bit %X: supported\n", i); 131 printk(" - Unknown Bit %X: supported\n", i);
129 } 132 }
130 133
131 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask); 134 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
132 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no"); 135 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
133 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); 136 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
134 for (i=2; i<16; i++) { 137 for (i=2; i<3; i++) {
135 if (extp->BlkStatusRegMask & (1<<i)) 138 if (extp->BlkStatusRegMask & (1<<i))
136 printk(" - Unknown Bit %X Active: yes\n",i); 139 printk(" - Unknown Bit %X Active: yes\n",i);
137 } 140 }
138 141 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
139 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 142 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
143 for (i=6; i<16; i++) {
144 if (extp->BlkStatusRegMask & (1<<i))
145 printk(" - Unknown Bit %X Active: yes\n",i);
146 }
147
148 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
140 extp->VccOptimal >> 4, extp->VccOptimal & 0xf); 149 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
141 if (extp->VppOptimal) 150 if (extp->VppOptimal)
142 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 151 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
143 extp->VppOptimal >> 4, extp->VppOptimal & 0xf); 152 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
144} 153}
145#endif 154#endif
146 155
147#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 156#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
148/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 157/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
149static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 158static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
150{ 159{
151 struct map_info *map = mtd->priv; 160 struct map_info *map = mtd->priv;
@@ -176,7 +185,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
176{ 185{
177 struct map_info *map = mtd->priv; 186 struct map_info *map = mtd->priv;
178 struct cfi_private *cfi = map->fldrv_priv; 187 struct cfi_private *cfi = map->fldrv_priv;
179 188
180 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */ 189 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
181 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */ 190 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
182} 191}
@@ -185,7 +194,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
185{ 194{
186 struct map_info *map = mtd->priv; 195 struct map_info *map = mtd->priv;
187 struct cfi_private *cfi = map->fldrv_priv; 196 struct cfi_private *cfi = map->fldrv_priv;
188 197
189 /* Note this is done after the region info is endian swapped */ 198 /* Note this is done after the region info is endian swapped */
190 cfi->cfiq->EraseRegionInfo[1] = 199 cfi->cfiq->EraseRegionInfo[1] =
191 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 200 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
@@ -207,12 +216,13 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
207 if (cfi->cfiq->BufWriteTimeoutTyp) { 216 if (cfi->cfiq->BufWriteTimeoutTyp) {
208 printk(KERN_INFO "Using buffer write method\n" ); 217 printk(KERN_INFO "Using buffer write method\n" );
209 mtd->write = cfi_intelext_write_buffers; 218 mtd->write = cfi_intelext_write_buffers;
219 mtd->writev = cfi_intelext_writev;
210 } 220 }
211} 221}
212 222
213static struct cfi_fixup cfi_fixup_table[] = { 223static struct cfi_fixup cfi_fixup_table[] = {
214#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 224#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
215 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 225 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
216#endif 226#endif
217#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND 227#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
218 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL }, 228 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
@@ -252,12 +262,21 @@ read_pri_intelext(struct map_info *map, __u16 adr)
252 if (!extp) 262 if (!extp)
253 return NULL; 263 return NULL;
254 264
265 if (extp->MajorVersion != '1' ||
266 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
267 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
268 "version %c.%c.\n", extp->MajorVersion,
269 extp->MinorVersion);
270 kfree(extp);
271 return NULL;
272 }
273
255 /* Do some byteswapping if necessary */ 274 /* Do some byteswapping if necessary */
256 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport); 275 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
257 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask); 276 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
258 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr); 277 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
259 278
260 if (extp->MajorVersion == '1' && extp->MinorVersion == '3') { 279 if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
261 unsigned int extra_size = 0; 280 unsigned int extra_size = 0;
262 int nb_parts, i; 281 int nb_parts, i;
263 282
@@ -266,7 +285,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
266 sizeof(struct cfi_intelext_otpinfo); 285 sizeof(struct cfi_intelext_otpinfo);
267 286
268 /* Burst Read info */ 287 /* Burst Read info */
269 extra_size += 6; 288 extra_size += 2;
289 if (extp_size < sizeof(*extp) + extra_size)
290 goto need_more;
291 extra_size += extp->extra[extra_size-1];
270 292
271 /* Number of hardware-partitions */ 293 /* Number of hardware-partitions */
272 extra_size += 1; 294 extra_size += 1;
@@ -274,6 +296,10 @@ read_pri_intelext(struct map_info *map, __u16 adr)
274 goto need_more; 296 goto need_more;
275 nb_parts = extp->extra[extra_size - 1]; 297 nb_parts = extp->extra[extra_size - 1];
276 298
299 /* skip the sizeof(partregion) field in CFI 1.4 */
300 if (extp->MinorVersion >= '4')
301 extra_size += 2;
302
277 for (i = 0; i < nb_parts; i++) { 303 for (i = 0; i < nb_parts; i++) {
278 struct cfi_intelext_regioninfo *rinfo; 304 struct cfi_intelext_regioninfo *rinfo;
279 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size]; 305 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
@@ -285,6 +311,9 @@ read_pri_intelext(struct map_info *map, __u16 adr)
285 * sizeof(struct cfi_intelext_blockinfo); 311 * sizeof(struct cfi_intelext_blockinfo);
286 } 312 }
287 313
314 if (extp->MinorVersion >= '4')
315 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
316
288 if (extp_size < sizeof(*extp) + extra_size) { 317 if (extp_size < sizeof(*extp) + extra_size) {
289 need_more: 318 need_more:
290 extp_size = sizeof(*extp) + extra_size; 319 extp_size = sizeof(*extp) + extra_size;
@@ -298,7 +327,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
298 goto again; 327 goto again;
299 } 328 }
300 } 329 }
301 330
302 return extp; 331 return extp;
303} 332}
304 333
@@ -339,7 +368,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
339 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; 368 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
340 369
341 if (cfi->cfi_mode == CFI_MODE_CFI) { 370 if (cfi->cfi_mode == CFI_MODE_CFI) {
342 /* 371 /*
343 * It's a real CFI chip, not one for which the probe 372 * It's a real CFI chip, not one for which the probe
344 * routine faked a CFI structure. So we read the feature 373 * routine faked a CFI structure. So we read the feature
345 * table from it. 374 * table from it.
@@ -354,14 +383,14 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
354 } 383 }
355 384
356 /* Install our own private info structure */ 385 /* Install our own private info structure */
357 cfi->cmdset_priv = extp; 386 cfi->cmdset_priv = extp;
358 387
359 cfi_fixup(mtd, cfi_fixup_table); 388 cfi_fixup(mtd, cfi_fixup_table);
360 389
361#ifdef DEBUG_CFI_FEATURES 390#ifdef DEBUG_CFI_FEATURES
362 /* Tell the user about it in lots of lovely detail */ 391 /* Tell the user about it in lots of lovely detail */
363 cfi_tell_features(extp); 392 cfi_tell_features(extp);
364#endif 393#endif
365 394
366 if(extp->SuspendCmdSupport & 1) { 395 if(extp->SuspendCmdSupport & 1) {
367 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n"); 396 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
@@ -379,10 +408,10 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
379 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 408 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
380 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 409 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
381 cfi->chips[i].ref_point_counter = 0; 410 cfi->chips[i].ref_point_counter = 0;
382 } 411 }
383 412
384 map->fldrv = &cfi_intelext_chipdrv; 413 map->fldrv = &cfi_intelext_chipdrv;
385 414
386 return cfi_intelext_setup(mtd); 415 return cfi_intelext_setup(mtd);
387} 416}
388 417
@@ -399,13 +428,13 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
399 mtd->size = devsize * cfi->numchips; 428 mtd->size = devsize * cfi->numchips;
400 429
401 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 430 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
402 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 431 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
403 * mtd->numeraseregions, GFP_KERNEL); 432 * mtd->numeraseregions, GFP_KERNEL);
404 if (!mtd->eraseregions) { 433 if (!mtd->eraseregions) {
405 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); 434 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
406 goto setup_err; 435 goto setup_err;
407 } 436 }
408 437
409 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 438 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
410 unsigned long ernum, ersize; 439 unsigned long ernum, ersize;
411 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 440 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -429,7 +458,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
429 } 458 }
430 459
431 for (i=0; i<mtd->numeraseregions;i++){ 460 for (i=0; i<mtd->numeraseregions;i++){
432 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", 461 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
433 i,mtd->eraseregions[i].offset, 462 i,mtd->eraseregions[i].offset,
434 mtd->eraseregions[i].erasesize, 463 mtd->eraseregions[i].erasesize,
435 mtd->eraseregions[i].numblocks); 464 mtd->eraseregions[i].numblocks);
@@ -480,7 +509,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
480 * arrangement at this point. This can be rearranged in the future 509 * arrangement at this point. This can be rearranged in the future
481 * if someone feels motivated enough. --nico 510 * if someone feels motivated enough. --nico
482 */ 511 */
483 if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3' 512 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
484 && extp->FeatureSupport & (1 << 9)) { 513 && extp->FeatureSupport & (1 << 9)) {
485 struct cfi_private *newcfi; 514 struct cfi_private *newcfi;
486 struct flchip *chip; 515 struct flchip *chip;
@@ -492,12 +521,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
492 sizeof(struct cfi_intelext_otpinfo); 521 sizeof(struct cfi_intelext_otpinfo);
493 522
494 /* Burst Read info */ 523 /* Burst Read info */
495 offs += 6; 524 offs += extp->extra[offs+1]+2;
496 525
497 /* Number of partition regions */ 526 /* Number of partition regions */
498 numregions = extp->extra[offs]; 527 numregions = extp->extra[offs];
499 offs += 1; 528 offs += 1;
500 529
530 /* skip the sizeof(partregion) field in CFI 1.4 */
531 if (extp->MinorVersion >= '4')
532 offs += 2;
533
501 /* Number of hardware partitions */ 534 /* Number of hardware partitions */
502 numparts = 0; 535 numparts = 0;
503 for (i = 0; i < numregions; i++) { 536 for (i = 0; i < numregions; i++) {
@@ -509,6 +542,20 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
509 sizeof(struct cfi_intelext_blockinfo); 542 sizeof(struct cfi_intelext_blockinfo);
510 } 543 }
511 544
545 /* Programming Region info */
546 if (extp->MinorVersion >= '4') {
547 struct cfi_intelext_programming_regioninfo *prinfo;
548 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
549 MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
550 MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
551 MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
552 mtd->flags |= MTD_PROGRAM_REGIONS;
553 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
554 map->name, MTD_PROGREGION_SIZE(mtd),
555 MTD_PROGREGION_CTRLMODE_VALID(mtd),
556 MTD_PROGREGION_CTRLMODE_INVALID(mtd));
557 }
558
512 /* 559 /*
513 * All functions below currently rely on all chips having 560 * All functions below currently rely on all chips having
514 * the same geometry so we'll just assume that all hardware 561 * the same geometry so we'll just assume that all hardware
@@ -653,8 +700,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
653 break; 700 break;
654 701
655 if (time_after(jiffies, timeo)) { 702 if (time_after(jiffies, timeo)) {
656 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n", 703 printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
657 status.x[0]); 704 map->name, status.x[0]);
658 return -EIO; 705 return -EIO;
659 } 706 }
660 spin_unlock(chip->mutex); 707 spin_unlock(chip->mutex);
@@ -663,7 +710,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
663 /* Someone else might have been playing with it. */ 710 /* Someone else might have been playing with it. */
664 goto retry; 711 goto retry;
665 } 712 }
666 713
667 case FL_READY: 714 case FL_READY:
668 case FL_CFI_QUERY: 715 case FL_CFI_QUERY:
669 case FL_JEDEC_QUERY: 716 case FL_JEDEC_QUERY:
@@ -701,8 +748,8 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
701 map_write(map, CMD(0x70), adr); 748 map_write(map, CMD(0x70), adr);
702 chip->state = FL_ERASING; 749 chip->state = FL_ERASING;
703 chip->oldstate = FL_READY; 750 chip->oldstate = FL_READY;
704 printk(KERN_ERR "Chip not ready after erase " 751 printk(KERN_ERR "%s: Chip not ready after erase "
705 "suspended: status = 0x%lx\n", status.x[0]); 752 "suspended: status = 0x%lx\n", map->name, status.x[0]);
706 return -EIO; 753 return -EIO;
707 } 754 }
708 755
@@ -782,14 +829,14 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
782 switch(chip->oldstate) { 829 switch(chip->oldstate) {
783 case FL_ERASING: 830 case FL_ERASING:
784 chip->state = chip->oldstate; 831 chip->state = chip->oldstate;
785 /* What if one interleaved chip has finished and the 832 /* What if one interleaved chip has finished and the
786 other hasn't? The old code would leave the finished 833 other hasn't? The old code would leave the finished
787 one in READY mode. That's bad, and caused -EROFS 834 one in READY mode. That's bad, and caused -EROFS
788 errors to be returned from do_erase_oneblock because 835 errors to be returned from do_erase_oneblock because
789 that's the only bit it checked for at the time. 836 that's the only bit it checked for at the time.
790 As the state machine appears to explicitly allow 837 As the state machine appears to explicitly allow
791 sending the 0x70 (Read Status) command to an erasing 838 sending the 0x70 (Read Status) command to an erasing
792 chip and expecting it to be ignored, that's what we 839 chip and expecting it to be ignored, that's what we
793 do. */ 840 do. */
794 map_write(map, CMD(0xd0), adr); 841 map_write(map, CMD(0xd0), adr);
795 map_write(map, CMD(0x70), adr); 842 map_write(map, CMD(0x70), adr);
@@ -809,7 +856,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
809 DISABLE_VPP(map); 856 DISABLE_VPP(map);
810 break; 857 break;
811 default: 858 default:
812 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate); 859 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
813 } 860 }
814 wake_up(&chip->wq); 861 wake_up(&chip->wq);
815} 862}
@@ -1025,8 +1072,8 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
1025 1072
1026 adr += chip->start; 1073 adr += chip->start;
1027 1074
1028 /* Ensure cmd read/writes are aligned. */ 1075 /* Ensure cmd read/writes are aligned. */
1029 cmd_addr = adr & ~(map_bankwidth(map)-1); 1076 cmd_addr = adr & ~(map_bankwidth(map)-1);
1030 1077
1031 spin_lock(chip->mutex); 1078 spin_lock(chip->mutex);
1032 1079
@@ -1054,7 +1101,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1054 1101
1055 if (!map->virt || (from + len > mtd->size)) 1102 if (!map->virt || (from + len > mtd->size))
1056 return -EINVAL; 1103 return -EINVAL;
1057 1104
1058 *mtdbuf = (void *)map->virt + from; 1105 *mtdbuf = (void *)map->virt + from;
1059 *retlen = 0; 1106 *retlen = 0;
1060 1107
@@ -1081,7 +1128,7 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si
1081 1128
1082 *retlen += thislen; 1129 *retlen += thislen;
1083 len -= thislen; 1130 len -= thislen;
1084 1131
1085 ofs = 0; 1132 ofs = 0;
1086 chipnum++; 1133 chipnum++;
1087 } 1134 }
@@ -1120,7 +1167,7 @@ static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t fro
1120 if(chip->ref_point_counter == 0) 1167 if(chip->ref_point_counter == 0)
1121 chip->state = FL_READY; 1168 chip->state = FL_READY;
1122 } else 1169 } else
1123 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */ 1170 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1124 1171
1125 put_chip(map, chip, chip->start); 1172 put_chip(map, chip, chip->start);
1126 spin_unlock(chip->mutex); 1173 spin_unlock(chip->mutex);
@@ -1139,8 +1186,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
1139 1186
1140 adr += chip->start; 1187 adr += chip->start;
1141 1188
1142 /* Ensure cmd read/writes are aligned. */ 1189 /* Ensure cmd read/writes are aligned. */
1143 cmd_addr = adr & ~(map_bankwidth(map)-1); 1190 cmd_addr = adr & ~(map_bankwidth(map)-1);
1144 1191
1145 spin_lock(chip->mutex); 1192 spin_lock(chip->mutex);
1146 ret = get_chip(map, chip, cmd_addr, FL_READY); 1193 ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -1195,7 +1242,7 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
1195 *retlen += thislen; 1242 *retlen += thislen;
1196 len -= thislen; 1243 len -= thislen;
1197 buf += thislen; 1244 buf += thislen;
1198 1245
1199 ofs = 0; 1246 ofs = 0;
1200 chipnum++; 1247 chipnum++;
1201 } 1248 }
@@ -1212,12 +1259,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1212 1259
1213 adr += chip->start; 1260 adr += chip->start;
1214 1261
1215 /* Let's determine this according to the interleave only once */ 1262 /* Let's determine those according to the interleave only once */
1216 status_OK = CMD(0x80); 1263 status_OK = CMD(0x80);
1217 switch (mode) { 1264 switch (mode) {
1218 case FL_WRITING: write_cmd = CMD(0x40); break; 1265 case FL_WRITING:
1219 case FL_OTP_WRITE: write_cmd = CMD(0xc0); break; 1266 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1220 default: return -EINVAL; 1267 break;
1268 case FL_OTP_WRITE:
1269 write_cmd = CMD(0xc0);
1270 break;
1271 default:
1272 return -EINVAL;
1221 } 1273 }
1222 1274
1223 spin_lock(chip->mutex); 1275 spin_lock(chip->mutex);
@@ -1258,12 +1310,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1258 status = map_read(map, adr); 1310 status = map_read(map, adr);
1259 if (map_word_andequal(map, status, status_OK, status_OK)) 1311 if (map_word_andequal(map, status, status_OK, status_OK))
1260 break; 1312 break;
1261 1313
1262 /* OK Still waiting */ 1314 /* OK Still waiting */
1263 if (time_after(jiffies, timeo)) { 1315 if (time_after(jiffies, timeo)) {
1316 map_write(map, CMD(0x70), adr);
1264 chip->state = FL_STATUS; 1317 chip->state = FL_STATUS;
1265 xip_enable(map, chip, adr); 1318 xip_enable(map, chip, adr);
1266 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n"); 1319 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1267 ret = -EIO; 1320 ret = -EIO;
1268 goto out; 1321 goto out;
1269 } 1322 }
@@ -1275,27 +1328,39 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1275 if (!z) { 1328 if (!z) {
1276 chip->word_write_time--; 1329 chip->word_write_time--;
1277 if (!chip->word_write_time) 1330 if (!chip->word_write_time)
1278 chip->word_write_time++; 1331 chip->word_write_time = 1;
1279 } 1332 }
1280 if (z > 1) 1333 if (z > 1)
1281 chip->word_write_time++; 1334 chip->word_write_time++;
1282 1335
1283 /* Done and happy. */ 1336 /* Done and happy. */
1284 chip->state = FL_STATUS; 1337 chip->state = FL_STATUS;
1285 1338
1286 /* check for lock bit */ 1339 /* check for errors */
1287 if (map_word_bitsset(map, status, CMD(0x02))) { 1340 if (map_word_bitsset(map, status, CMD(0x1a))) {
1288 /* clear status */ 1341 unsigned long chipstatus = MERGESTATUS(status);
1342
1343 /* reset status */
1289 map_write(map, CMD(0x50), adr); 1344 map_write(map, CMD(0x50), adr);
1290 /* put back into read status register mode */
1291 map_write(map, CMD(0x70), adr); 1345 map_write(map, CMD(0x70), adr);
1292 ret = -EROFS; 1346 xip_enable(map, chip, adr);
1347
1348 if (chipstatus & 0x02) {
1349 ret = -EROFS;
1350 } else if (chipstatus & 0x08) {
1351 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1352 ret = -EIO;
1353 } else {
1354 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1355 ret = -EINVAL;
1356 }
1357
1358 goto out;
1293 } 1359 }
1294 1360
1295 xip_enable(map, chip, adr); 1361 xip_enable(map, chip, adr);
1296 out: put_chip(map, chip, adr); 1362 out: put_chip(map, chip, adr);
1297 spin_unlock(chip->mutex); 1363 spin_unlock(chip->mutex);
1298
1299 return ret; 1364 return ret;
1300} 1365}
1301 1366
@@ -1328,7 +1393,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1328 1393
1329 ret = do_write_oneword(map, &cfi->chips[chipnum], 1394 ret = do_write_oneword(map, &cfi->chips[chipnum],
1330 bus_ofs, datum, FL_WRITING); 1395 bus_ofs, datum, FL_WRITING);
1331 if (ret) 1396 if (ret)
1332 return ret; 1397 return ret;
1333 1398
1334 len -= n; 1399 len -= n;
@@ -1337,13 +1402,13 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1337 (*retlen) += n; 1402 (*retlen) += n;
1338 1403
1339 if (ofs >> cfi->chipshift) { 1404 if (ofs >> cfi->chipshift) {
1340 chipnum ++; 1405 chipnum ++;
1341 ofs = 0; 1406 ofs = 0;
1342 if (chipnum == cfi->numchips) 1407 if (chipnum == cfi->numchips)
1343 return 0; 1408 return 0;
1344 } 1409 }
1345 } 1410 }
1346 1411
1347 while(len >= map_bankwidth(map)) { 1412 while(len >= map_bankwidth(map)) {
1348 map_word datum = map_word_load(map, buf); 1413 map_word datum = map_word_load(map, buf);
1349 1414
@@ -1358,7 +1423,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1358 len -= map_bankwidth(map); 1423 len -= map_bankwidth(map);
1359 1424
1360 if (ofs >> cfi->chipshift) { 1425 if (ofs >> cfi->chipshift) {
1361 chipnum ++; 1426 chipnum ++;
1362 ofs = 0; 1427 ofs = 0;
1363 if (chipnum == cfi->numchips) 1428 if (chipnum == cfi->numchips)
1364 return 0; 1429 return 0;
@@ -1373,9 +1438,9 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1373 1438
1374 ret = do_write_oneword(map, &cfi->chips[chipnum], 1439 ret = do_write_oneword(map, &cfi->chips[chipnum],
1375 ofs, datum, FL_WRITING); 1440 ofs, datum, FL_WRITING);
1376 if (ret) 1441 if (ret)
1377 return ret; 1442 return ret;
1378 1443
1379 (*retlen) += len; 1444 (*retlen) += len;
1380 } 1445 }
1381 1446
@@ -1383,20 +1448,24 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1383} 1448}
1384 1449
1385 1450
1386static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1451static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1387 unsigned long adr, const u_char *buf, int len) 1452 unsigned long adr, const struct kvec **pvec,
1453 unsigned long *pvec_seek, int len)
1388{ 1454{
1389 struct cfi_private *cfi = map->fldrv_priv; 1455 struct cfi_private *cfi = map->fldrv_priv;
1390 map_word status, status_OK; 1456 map_word status, status_OK, write_cmd, datum;
1391 unsigned long cmd_adr, timeo; 1457 unsigned long cmd_adr, timeo;
1392 int wbufsize, z, ret=0, bytes, words; 1458 int wbufsize, z, ret=0, word_gap, words;
1459 const struct kvec *vec;
1460 unsigned long vec_seek;
1393 1461
1394 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1462 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1395 adr += chip->start; 1463 adr += chip->start;
1396 cmd_adr = adr & ~(wbufsize-1); 1464 cmd_adr = adr & ~(wbufsize-1);
1397 1465
1398 /* Let's determine this according to the interleave only once */ 1466 /* Let's determine this according to the interleave only once */
1399 status_OK = CMD(0x80); 1467 status_OK = CMD(0x80);
1468 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1400 1469
1401 spin_lock(chip->mutex); 1470 spin_lock(chip->mutex);
1402 ret = get_chip(map, chip, cmd_adr, FL_WRITING); 1471 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
@@ -1410,7 +1479,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1410 xip_disable(map, chip, cmd_adr); 1479 xip_disable(map, chip, cmd_adr);
1411 1480
1412 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set 1481 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1413 [...], the device will not accept any more Write to Buffer commands". 1482 [...], the device will not accept any more Write to Buffer commands".
1414 So we must check here and reset those bits if they're set. Otherwise 1483 So we must check here and reset those bits if they're set. Otherwise
1415 we're just pissing in the wind */ 1484 we're just pissing in the wind */
1416 if (chip->state != FL_STATUS) 1485 if (chip->state != FL_STATUS)
@@ -1428,7 +1497,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1428 1497
1429 z = 0; 1498 z = 0;
1430 for (;;) { 1499 for (;;) {
1431 map_write(map, CMD(0xe8), cmd_adr); 1500 map_write(map, write_cmd, cmd_adr);
1432 1501
1433 status = map_read(map, cmd_adr); 1502 status = map_read(map, cmd_adr);
1434 if (map_word_andequal(map, status, status_OK, status_OK)) 1503 if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1446,41 +1515,66 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1446 map_write(map, CMD(0x50), cmd_adr); 1515 map_write(map, CMD(0x50), cmd_adr);
1447 map_write(map, CMD(0x70), cmd_adr); 1516 map_write(map, CMD(0x70), cmd_adr);
1448 xip_enable(map, chip, cmd_adr); 1517 xip_enable(map, chip, cmd_adr);
1449 printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n", 1518 printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1450 status.x[0], Xstatus.x[0]); 1519 map->name, status.x[0], Xstatus.x[0]);
1451 ret = -EIO; 1520 ret = -EIO;
1452 goto out; 1521 goto out;
1453 } 1522 }
1454 } 1523 }
1455 1524
1525 /* Figure out the number of words to write */
1526 word_gap = (-adr & (map_bankwidth(map)-1));
1527 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1528 if (!word_gap) {
1529 words--;
1530 } else {
1531 word_gap = map_bankwidth(map) - word_gap;
1532 adr -= word_gap;
1533 datum = map_word_ff(map);
1534 }
1535
1456 /* Write length of data to come */ 1536 /* Write length of data to come */
1457 bytes = len & (map_bankwidth(map)-1); 1537 map_write(map, CMD(words), cmd_adr );
1458 words = len / map_bankwidth(map);
1459 map_write(map, CMD(words - !bytes), cmd_adr );
1460 1538
1461 /* Write data */ 1539 /* Write data */
1462 z = 0; 1540 vec = *pvec;
1463 while(z < words * map_bankwidth(map)) { 1541 vec_seek = *pvec_seek;
1464 map_word datum = map_word_load(map, buf); 1542 do {
1465 map_write(map, datum, adr+z); 1543 int n = map_bankwidth(map) - word_gap;
1544 if (n > vec->iov_len - vec_seek)
1545 n = vec->iov_len - vec_seek;
1546 if (n > len)
1547 n = len;
1466 1548
1467 z += map_bankwidth(map); 1549 if (!word_gap && len < map_bankwidth(map))
1468 buf += map_bankwidth(map); 1550 datum = map_word_ff(map);
1469 }
1470 1551
1471 if (bytes) { 1552 datum = map_word_load_partial(map, datum,
1472 map_word datum; 1553 vec->iov_base + vec_seek,
1554 word_gap, n);
1473 1555
1474 datum = map_word_ff(map); 1556 len -= n;
1475 datum = map_word_load_partial(map, datum, buf, 0, bytes); 1557 word_gap += n;
1476 map_write(map, datum, adr+z); 1558 if (!len || word_gap == map_bankwidth(map)) {
1477 } 1559 map_write(map, datum, adr);
1560 adr += map_bankwidth(map);
1561 word_gap = 0;
1562 }
1563
1564 vec_seek += n;
1565 if (vec_seek == vec->iov_len) {
1566 vec++;
1567 vec_seek = 0;
1568 }
1569 } while (len);
1570 *pvec = vec;
1571 *pvec_seek = vec_seek;
1478 1572
1479 /* GO GO GO */ 1573 /* GO GO GO */
1480 map_write(map, CMD(0xd0), cmd_adr); 1574 map_write(map, CMD(0xd0), cmd_adr);
1481 chip->state = FL_WRITING; 1575 chip->state = FL_WRITING;
1482 1576
1483 INVALIDATE_CACHE_UDELAY(map, chip, 1577 INVALIDATE_CACHE_UDELAY(map, chip,
1484 cmd_adr, len, 1578 cmd_adr, len,
1485 chip->buffer_write_time); 1579 chip->buffer_write_time);
1486 1580
@@ -1506,13 +1600,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1506 1600
1507 /* OK Still waiting */ 1601 /* OK Still waiting */
1508 if (time_after(jiffies, timeo)) { 1602 if (time_after(jiffies, timeo)) {
1603 map_write(map, CMD(0x70), cmd_adr);
1509 chip->state = FL_STATUS; 1604 chip->state = FL_STATUS;
1510 xip_enable(map, chip, cmd_adr); 1605 xip_enable(map, chip, cmd_adr);
1511 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); 1606 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1512 ret = -EIO; 1607 ret = -EIO;
1513 goto out; 1608 goto out;
1514 } 1609 }
1515 1610
1516 /* Latency issues. Drop the lock, wait a while and retry */ 1611 /* Latency issues. Drop the lock, wait a while and retry */
1517 z++; 1612 z++;
1518 UDELAY(map, chip, cmd_adr, 1); 1613 UDELAY(map, chip, cmd_adr, 1);
@@ -1520,21 +1615,34 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1520 if (!z) { 1615 if (!z) {
1521 chip->buffer_write_time--; 1616 chip->buffer_write_time--;
1522 if (!chip->buffer_write_time) 1617 if (!chip->buffer_write_time)
1523 chip->buffer_write_time++; 1618 chip->buffer_write_time = 1;
1524 } 1619 }
1525 if (z > 1) 1620 if (z > 1)
1526 chip->buffer_write_time++; 1621 chip->buffer_write_time++;
1527 1622
1528 /* Done and happy. */ 1623 /* Done and happy. */
1529 chip->state = FL_STATUS; 1624 chip->state = FL_STATUS;
1530 1625
1531 /* check for lock bit */ 1626 /* check for errors */
1532 if (map_word_bitsset(map, status, CMD(0x02))) { 1627 if (map_word_bitsset(map, status, CMD(0x1a))) {
1533 /* clear status */ 1628 unsigned long chipstatus = MERGESTATUS(status);
1629
1630 /* reset status */
1534 map_write(map, CMD(0x50), cmd_adr); 1631 map_write(map, CMD(0x50), cmd_adr);
1535 /* put back into read status register mode */ 1632 map_write(map, CMD(0x70), cmd_adr);
1536 map_write(map, CMD(0x70), adr); 1633 xip_enable(map, chip, cmd_adr);
1537 ret = -EROFS; 1634
1635 if (chipstatus & 0x02) {
1636 ret = -EROFS;
1637 } else if (chipstatus & 0x08) {
1638 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1639 ret = -EIO;
1640 } else {
1641 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1642 ret = -EINVAL;
1643 }
1644
1645 goto out;
1538 } 1646 }
1539 1647
1540 xip_enable(map, chip, cmd_adr); 1648 xip_enable(map, chip, cmd_adr);
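Once the buffer program finishes, the code above now looks at the error bits it cares about (CMD(0x1a), i.e. the lock, VPP and program-error bits of the status register, merged across interleaved chips by MERGESTATUS) and turns them into distinct errno values instead of always returning -EROFS. A stand-alone restatement of that mapping follows, with the success case added for illustration; bufwrite_status_to_errno() is not a kernel function.

#include <assert.h>
#include <errno.h>

static int bufwrite_status_to_errno(unsigned long chipstatus)
{
        if (!(chipstatus & 0x1a))       /* no error bits set: success */
                return 0;
        if (chipstatus & 0x02)          /* protected (locked) block */
                return -EROFS;
        if (chipstatus & 0x08)          /* VPP out of range */
                return -EIO;
        return -EINVAL;                 /* anything else, e.g. program error */
}

int main(void)
{
        assert(bufwrite_status_to_errno(0x80) == 0);
        assert(bufwrite_status_to_errno(0x82) == -EROFS);
        assert(bufwrite_status_to_errno(0x88) == -EIO);
        assert(bufwrite_status_to_errno(0x90) == -EINVAL);
        return 0;
}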
@@ -1543,70 +1651,65 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1543 return ret; 1651 return ret;
1544} 1652}
1545 1653
1546static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 1654static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1547 size_t len, size_t *retlen, const u_char *buf) 1655 unsigned long count, loff_t to, size_t *retlen)
1548{ 1656{
1549 struct map_info *map = mtd->priv; 1657 struct map_info *map = mtd->priv;
1550 struct cfi_private *cfi = map->fldrv_priv; 1658 struct cfi_private *cfi = map->fldrv_priv;
1551 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1659 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1552 int ret = 0; 1660 int ret = 0;
1553 int chipnum; 1661 int chipnum;
1554 unsigned long ofs; 1662 unsigned long ofs, vec_seek, i;
1663 size_t len = 0;
1664
1665 for (i = 0; i < count; i++)
1666 len += vecs[i].iov_len;
1555 1667
1556 *retlen = 0; 1668 *retlen = 0;
1557 if (!len) 1669 if (!len)
1558 return 0; 1670 return 0;
1559 1671
1560 chipnum = to >> cfi->chipshift; 1672 chipnum = to >> cfi->chipshift;
1561 ofs = to - (chipnum << cfi->chipshift); 1673 ofs = to - (chipnum << cfi->chipshift);
1562 1674 vec_seek = 0;
1563 /* If it's not bus-aligned, do the first word write */
1564 if (ofs & (map_bankwidth(map)-1)) {
1565 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1566 if (local_len > len)
1567 local_len = len;
1568 ret = cfi_intelext_write_words(mtd, to, local_len,
1569 retlen, buf);
1570 if (ret)
1571 return ret;
1572 ofs += local_len;
1573 buf += local_len;
1574 len -= local_len;
1575
1576 if (ofs >> cfi->chipshift) {
1577 chipnum ++;
1578 ofs = 0;
1579 if (chipnum == cfi->numchips)
1580 return 0;
1581 }
1582 }
1583 1675
1584 while(len) { 1676 do {
1585 /* We must not cross write block boundaries */ 1677 /* We must not cross write block boundaries */
1586 int size = wbufsize - (ofs & (wbufsize-1)); 1678 int size = wbufsize - (ofs & (wbufsize-1));
1587 1679
1588 if (size > len) 1680 if (size > len)
1589 size = len; 1681 size = len;
1590 ret = do_write_buffer(map, &cfi->chips[chipnum], 1682 ret = do_write_buffer(map, &cfi->chips[chipnum],
1591 ofs, buf, size); 1683 ofs, &vecs, &vec_seek, size);
1592 if (ret) 1684 if (ret)
1593 return ret; 1685 return ret;
1594 1686
1595 ofs += size; 1687 ofs += size;
1596 buf += size;
1597 (*retlen) += size; 1688 (*retlen) += size;
1598 len -= size; 1689 len -= size;
1599 1690
1600 if (ofs >> cfi->chipshift) { 1691 if (ofs >> cfi->chipshift) {
1601 chipnum ++; 1692 chipnum ++;
1602 ofs = 0; 1693 ofs = 0;
1603 if (chipnum == cfi->numchips) 1694 if (chipnum == cfi->numchips)
1604 return 0; 1695 return 0;
1605 } 1696 }
1606 } 1697 } while (len);
1698
1607 return 0; 1699 return 0;
1608} 1700}
1609 1701
1702static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1703 size_t len, size_t *retlen, const u_char *buf)
1704{
1705 struct kvec vec;
1706
1707 vec.iov_base = (void *) buf;
1708 vec.iov_len = len;
1709
1710 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1711}
1712
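cfi_intelext_write_buffers() is kept only as a thin compatibility wrapper: it places its single (buf, len) pair in a one-element kvec and hands it to cfi_intelext_writev(), the routine that does the real work. Assuming that routine is wired up as the map driver's writev method (the hookup is not shown in this hunk), a caller holding two discontiguous buffers could submit them in one pass along the lines below; write_header_and_payload() is a hypothetical helper, not part of this patch.

#include <linux/mtd/mtd.h>
#include <linux/uio.h>

/* Hypothetical caller-side helper for illustration only. */
static int write_header_and_payload(struct mtd_info *mtd, loff_t to,
                                    void *hdr, size_t hdrlen,
                                    void *data, size_t datalen,
                                    size_t *retlen)
{
        struct kvec vecs[2];

        vecs[0].iov_base = hdr;
        vecs[0].iov_len  = hdrlen;
        vecs[1].iov_base = data;
        vecs[1].iov_len  = datalen;

        /* the driver gathers both buffers into aligned buffer-write bursts */
        return mtd->writev(mtd, vecs, 2, to, retlen);
}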
1610static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, 1713static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1611 unsigned long adr, int len, void *thunk) 1714 unsigned long adr, int len, void *thunk)
1612{ 1715{
@@ -1672,23 +1775,17 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1672 status = map_read(map, adr); 1775 status = map_read(map, adr);
1673 if (map_word_andequal(map, status, status_OK, status_OK)) 1776 if (map_word_andequal(map, status, status_OK, status_OK))
1674 break; 1777 break;
1675 1778
1676 /* OK Still waiting */ 1779 /* OK Still waiting */
1677 if (time_after(jiffies, timeo)) { 1780 if (time_after(jiffies, timeo)) {
1678 map_word Xstatus;
1679 map_write(map, CMD(0x70), adr); 1781 map_write(map, CMD(0x70), adr);
1680 chip->state = FL_STATUS; 1782 chip->state = FL_STATUS;
1681 Xstatus = map_read(map, adr);
1682 /* Clear status bits */
1683 map_write(map, CMD(0x50), adr);
1684 map_write(map, CMD(0x70), adr);
1685 xip_enable(map, chip, adr); 1783 xip_enable(map, chip, adr);
1686 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n", 1784 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1687 adr, status.x[0], Xstatus.x[0]);
1688 ret = -EIO; 1785 ret = -EIO;
1689 goto out; 1786 goto out;
1690 } 1787 }
1691 1788
1692 /* Latency issues. Drop the lock, wait a while and retry */ 1789 /* Latency issues. Drop the lock, wait a while and retry */
1693 UDELAY(map, chip, adr, 1000000/HZ); 1790 UDELAY(map, chip, adr, 1000000/HZ);
1694 } 1791 }
@@ -1698,43 +1795,40 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1698 chip->state = FL_STATUS; 1795 chip->state = FL_STATUS;
1699 status = map_read(map, adr); 1796 status = map_read(map, adr);
1700 1797
1701 /* check for lock bit */ 1798 /* check for errors */
1702 if (map_word_bitsset(map, status, CMD(0x3a))) { 1799 if (map_word_bitsset(map, status, CMD(0x3a))) {
1703 unsigned long chipstatus; 1800 unsigned long chipstatus = MERGESTATUS(status);
1704 1801
1705 /* Reset the error bits */ 1802 /* Reset the error bits */
1706 map_write(map, CMD(0x50), adr); 1803 map_write(map, CMD(0x50), adr);
1707 map_write(map, CMD(0x70), adr); 1804 map_write(map, CMD(0x70), adr);
1708 xip_enable(map, chip, adr); 1805 xip_enable(map, chip, adr);
1709 1806
1710 chipstatus = MERGESTATUS(status);
1711
1712 if ((chipstatus & 0x30) == 0x30) { 1807 if ((chipstatus & 0x30) == 0x30) {
1713 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus); 1808 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1714 ret = -EIO; 1809 ret = -EINVAL;
1715 } else if (chipstatus & 0x02) { 1810 } else if (chipstatus & 0x02) {
1716 /* Protection bit set */ 1811 /* Protection bit set */
1717 ret = -EROFS; 1812 ret = -EROFS;
1718 } else if (chipstatus & 0x8) { 1813 } else if (chipstatus & 0x8) {
1719 /* Voltage */ 1814 /* Voltage */
1720 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus); 1815 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1721 ret = -EIO; 1816 ret = -EIO;
1722 } else if (chipstatus & 0x20) { 1817 } else if (chipstatus & 0x20 && retries--) {
1723 if (retries--) { 1818 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1724 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1819 timeo = jiffies + HZ;
1725 timeo = jiffies + HZ; 1820 put_chip(map, chip, adr);
1726 put_chip(map, chip, adr); 1821 spin_unlock(chip->mutex);
1727 spin_unlock(chip->mutex); 1822 goto retry;
1728 goto retry; 1823 } else {
1729 } 1824 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1730 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
1731 ret = -EIO; 1825 ret = -EIO;
1732 } 1826 }
1733 } else { 1827
1734 xip_enable(map, chip, adr); 1828 goto out;
1735 ret = 0;
1736 } 1829 }
1737 1830
1831 xip_enable(map, chip, adr);
1738 out: put_chip(map, chip, adr); 1832 out: put_chip(map, chip, adr);
1739 spin_unlock(chip->mutex); 1833 spin_unlock(chip->mutex);
1740 return ret; 1834 return ret;
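The erase completion path above now decodes the merged status word once and maps it to distinct errors: an improper command sequence becomes -EINVAL (it used to be -EIO), a set protection bit -EROFS, a VPP fault -EIO, and an erase failure (status bit 0x20) is retried a bounded number of times before the driver gives up. A stand-alone model of that bounded-retry shape is sketched below; erase_block(), MAX_RETRIES and the simulated transient failure are all made up for the example.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 3

/* Pretend erase that fails transiently on its first two attempts. */
static bool erase_block(int attempt)
{
        return attempt >= 2;
}

static int erase_with_retries(void)
{
        for (int attempt = 0, retries = MAX_RETRIES; ; attempt++) {
                if (erase_block(attempt))
                        return 0;               /* done and happy */
                if (retries-- == 0)
                        return -EIO;            /* give up for real */
                fprintf(stderr, "erase failed, retrying...\n");
        }
}

int main(void)
{
        return erase_with_retries() ? 1 : 0;
}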
@@ -1754,7 +1848,7 @@ int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1754 1848
1755 instr->state = MTD_ERASE_DONE; 1849 instr->state = MTD_ERASE_DONE;
1756 mtd_erase_callback(instr); 1850 mtd_erase_callback(instr);
1757 1851
1758 return 0; 1852 return 0;
1759} 1853}
1760 1854
@@ -1775,7 +1869,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1775 if (!ret) { 1869 if (!ret) {
1776 chip->oldstate = chip->state; 1870 chip->oldstate = chip->state;
1777 chip->state = FL_SYNCING; 1871 chip->state = FL_SYNCING;
1778 /* No need to wake_up() on this state change - 1872 /* No need to wake_up() on this state change -
1779 * as the whole point is that nobody can do anything 1873 * as the whole point is that nobody can do anything
1780 * with the chip now anyway. 1874 * with the chip now anyway.
1781 */ 1875 */
@@ -1789,7 +1883,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
1789 chip = &cfi->chips[i]; 1883 chip = &cfi->chips[i];
1790 1884
1791 spin_lock(chip->mutex); 1885 spin_lock(chip->mutex);
1792 1886
1793 if (chip->state == FL_SYNCING) { 1887 if (chip->state == FL_SYNCING) {
1794 chip->state = chip->oldstate; 1888 chip->state = chip->oldstate;
1795 chip->oldstate = FL_READY; 1889 chip->oldstate = FL_READY;
@@ -1846,7 +1940,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1846 1940
1847 ENABLE_VPP(map); 1941 ENABLE_VPP(map);
1848 xip_disable(map, chip, adr); 1942 xip_disable(map, chip, adr);
1849 1943
1850 map_write(map, CMD(0x60), adr); 1944 map_write(map, CMD(0x60), adr);
1851 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 1945 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1852 map_write(map, CMD(0x01), adr); 1946 map_write(map, CMD(0x01), adr);
@@ -1874,25 +1968,22 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1874 status = map_read(map, adr); 1968 status = map_read(map, adr);
1875 if (map_word_andequal(map, status, status_OK, status_OK)) 1969 if (map_word_andequal(map, status, status_OK, status_OK))
1876 break; 1970 break;
1877 1971
1878 /* OK Still waiting */ 1972 /* OK Still waiting */
1879 if (time_after(jiffies, timeo)) { 1973 if (time_after(jiffies, timeo)) {
1880 map_word Xstatus;
1881 map_write(map, CMD(0x70), adr); 1974 map_write(map, CMD(0x70), adr);
1882 chip->state = FL_STATUS; 1975 chip->state = FL_STATUS;
1883 Xstatus = map_read(map, adr);
1884 xip_enable(map, chip, adr); 1976 xip_enable(map, chip, adr);
1885 printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n", 1977 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1886 status.x[0], Xstatus.x[0]);
1887 put_chip(map, chip, adr); 1978 put_chip(map, chip, adr);
1888 spin_unlock(chip->mutex); 1979 spin_unlock(chip->mutex);
1889 return -EIO; 1980 return -EIO;
1890 } 1981 }
1891 1982
1892 /* Latency issues. Drop the lock, wait a while and retry */ 1983 /* Latency issues. Drop the lock, wait a while and retry */
1893 UDELAY(map, chip, adr, 1); 1984 UDELAY(map, chip, adr, 1);
1894 } 1985 }
1895 1986
1896 /* Done and happy. */ 1987 /* Done and happy. */
1897 chip->state = FL_STATUS; 1988 chip->state = FL_STATUS;
1898 xip_enable(map, chip, adr); 1989 xip_enable(map, chip, adr);
@@ -1912,9 +2003,9 @@ static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1912 ofs, len, 0); 2003 ofs, len, 0);
1913#endif 2004#endif
1914 2005
1915 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 2006 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1916 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK); 2007 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1917 2008
1918#ifdef DEBUG_LOCK_BITS 2009#ifdef DEBUG_LOCK_BITS
1919 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2010 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1920 __FUNCTION__, ret); 2011 __FUNCTION__, ret);
@@ -1938,20 +2029,20 @@ static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1938 2029
1939 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 2030 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1940 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK); 2031 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1941 2032
1942#ifdef DEBUG_LOCK_BITS 2033#ifdef DEBUG_LOCK_BITS
1943 printk(KERN_DEBUG "%s: lock status after, ret=%d\n", 2034 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1944 __FUNCTION__, ret); 2035 __FUNCTION__, ret);
1945 cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 2036 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1946 ofs, len, 0); 2037 ofs, len, 0);
1947#endif 2038#endif
1948 2039
1949 return ret; 2040 return ret;
1950} 2041}
1951 2042
1952#ifdef CONFIG_MTD_OTP 2043#ifdef CONFIG_MTD_OTP
1953 2044
1954typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 2045typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1955 u_long data_offset, u_char *buf, u_int size, 2046 u_long data_offset, u_char *buf, u_int size,
1956 u_long prot_offset, u_int groupno, u_int groupsize); 2047 u_long prot_offset, u_int groupno, u_int groupsize);
1957 2048
@@ -2002,7 +2093,7 @@ do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2002 2093
2003 datum = map_word_load_partial(map, datum, buf, gap, n); 2094 datum = map_word_load_partial(map, datum, buf, gap, n);
2004 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 2095 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2005 if (ret) 2096 if (ret)
2006 return ret; 2097 return ret;
2007 2098
2008 offset += n; 2099 offset += n;
@@ -2195,7 +2286,7 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2195 NULL, do_otp_lock, 1); 2286 NULL, do_otp_lock, 1);
2196} 2287}
2197 2288
2198static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, 2289static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2199 struct otp_info *buf, size_t len) 2290 struct otp_info *buf, size_t len)
2200{ 2291{
2201 size_t retlen; 2292 size_t retlen;
@@ -2238,7 +2329,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2238 if (chip->oldstate == FL_READY) { 2329 if (chip->oldstate == FL_READY) {
2239 chip->oldstate = chip->state; 2330 chip->oldstate = chip->state;
2240 chip->state = FL_PM_SUSPENDED; 2331 chip->state = FL_PM_SUSPENDED;
2241 /* No need to wake_up() on this state change - 2332 /* No need to wake_up() on this state change -
2242 * as the whole point is that nobody can do anything 2333 * as the whole point is that nobody can do anything
2243 * with the chip now anyway. 2334 * with the chip now anyway.
2244 */ 2335 */
@@ -2266,9 +2357,9 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2266 if (ret) { 2357 if (ret) {
2267 for (i--; i >=0; i--) { 2358 for (i--; i >=0; i--) {
2268 chip = &cfi->chips[i]; 2359 chip = &cfi->chips[i];
2269 2360
2270 spin_lock(chip->mutex); 2361 spin_lock(chip->mutex);
2271 2362
2272 if (chip->state == FL_PM_SUSPENDED) { 2363 if (chip->state == FL_PM_SUSPENDED) {
2273 /* No need to force it into a known state here, 2364 /* No need to force it into a known state here,
2274 because we're returning failure, and it didn't 2365 because we're returning failure, and it didn't
@@ -2279,8 +2370,8 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2279 } 2370 }
2280 spin_unlock(chip->mutex); 2371 spin_unlock(chip->mutex);
2281 } 2372 }
2282 } 2373 }
2283 2374
2284 return ret; 2375 return ret;
2285} 2376}
2286 2377
@@ -2292,11 +2383,11 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2292 struct flchip *chip; 2383 struct flchip *chip;
2293 2384
2294 for (i=0; i<cfi->numchips; i++) { 2385 for (i=0; i<cfi->numchips; i++) {
2295 2386
2296 chip = &cfi->chips[i]; 2387 chip = &cfi->chips[i];
2297 2388
2298 spin_lock(chip->mutex); 2389 spin_lock(chip->mutex);
2299 2390
2300 /* Go to known state. Chip may have been power cycled */ 2391 /* Go to known state. Chip may have been power cycled */
2301 if (chip->state == FL_PM_SUSPENDED) { 2392 if (chip->state == FL_PM_SUSPENDED) {
2302 map_write(map, CMD(0xFF), cfi->chips[i].start); 2393 map_write(map, CMD(0xFF), cfi->chips[i].start);
@@ -2318,7 +2409,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2318 struct flchip *chip = &cfi->chips[i]; 2409 struct flchip *chip = &cfi->chips[i];
2319 2410
2320 /* force the completion of any ongoing operation 2411 /* force the completion of any ongoing operation
2321 and switch to array mode so any bootloader in 2412 and switch to array mode so any bootloader in
2322 flash is accessible for soft reboot. */ 2413 flash is accessible for soft reboot. */
2323 spin_lock(chip->mutex); 2414 spin_lock(chip->mutex);
2324 ret = get_chip(map, chip, chip->start, FL_SYNCING); 2415 ret = get_chip(map, chip, chip->start, FL_SYNCING);
@@ -2355,20 +2446,23 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
2355 kfree(mtd->eraseregions); 2446 kfree(mtd->eraseregions);
2356} 2447}
2357 2448
2358static char im_name_1[]="cfi_cmdset_0001"; 2449static char im_name_0001[] = "cfi_cmdset_0001";
2359static char im_name_3[]="cfi_cmdset_0003"; 2450static char im_name_0003[] = "cfi_cmdset_0003";
2451static char im_name_0200[] = "cfi_cmdset_0200";
2360 2452
2361static int __init cfi_intelext_init(void) 2453static int __init cfi_intelext_init(void)
2362{ 2454{
2363 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001); 2455 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2364 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001); 2456 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2457 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2365 return 0; 2458 return 0;
2366} 2459}
2367 2460
2368static void __exit cfi_intelext_exit(void) 2461static void __exit cfi_intelext_exit(void)
2369{ 2462{
2370 inter_module_unregister(im_name_1); 2463 inter_module_unregister(im_name_0001);
2371 inter_module_unregister(im_name_3); 2464 inter_module_unregister(im_name_0003);
2465 inter_module_unregister(im_name_0200);
2372} 2466}
2373 2467
2374module_init(cfi_intelext_init); 2468module_init(cfi_intelext_init);
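With the extra registration above, the same cfi_cmdset_0001() handler now answers to three inter-module names, so chips whose CFI primary command set reads 0x0001, 0x0003 or 0x0200 all end up with the Intel/Sharp code. The probe side is paraphrased in the sketch below (only the three registered names come from this patch): the command-set ID is formatted into a name of this shape and whoever registered that name is asked to drive the chip.

#include <stdio.h>

static void resolve(unsigned int cmdset_id)
{
        char name[32];

        /* e.g. 0x0200 -> "cfi_cmdset_0200", now claimed by cfi_cmdset_0001 */
        snprintf(name, sizeof(name), "cfi_cmdset_%4.4x", cmdset_id);
        printf("%#06x -> %s\n", cmdset_id, name);
}

int main(void)
{
        resolve(0x0001);
        resolve(0x0003);
        resolve(0x0200);        /* newly aliased by this change */
        return 0;
}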
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0e6475050da..aed10bd5c3c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -10,14 +10,14 @@
10 * 10 *
11 * 4_by_16 work by Carolyn J. Smith 11 * 4_by_16 work by Carolyn J. Smith
12 * 12 *
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
14 * by Nicolas Pitre) 14 * by Nicolas Pitre)
15 * 15 *
16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17 * 17 *
18 * This code is GPL 18 * This code is GPL
19 * 19 *
20 * $Id: cfi_cmdset_0002.c,v 1.118 2005/07/04 22:34:29 gleixner Exp $ 20 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
21 * 21 *
22 */ 22 */
23 23
@@ -93,7 +93,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
93 }; 93 };
94 94
95 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 95 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
96 printk(" Address sensitive unlock: %s\n", 96 printk(" Address sensitive unlock: %s\n",
97 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 97 (extp->SiliconRevision & 1) ? "Not required" : "Required");
98 98
99 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 99 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
@@ -118,9 +118,9 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
118 else 118 else
119 printk(" Page mode: %d word page\n", extp->PageMode << 2); 119 printk(" Page mode: %d word page\n", extp->PageMode << 2);
120 120
121 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 121 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
122 extp->VppMin >> 4, extp->VppMin & 0xf); 122 extp->VppMin >> 4, extp->VppMin & 0xf);
123 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 123 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
124 extp->VppMax >> 4, extp->VppMax & 0xf); 124 extp->VppMax >> 4, extp->VppMax & 0xf);
125 125
126 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 126 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
@@ -177,7 +177,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
177 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 177 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
178 mtd->erase = cfi_amdstd_erase_chip; 178 mtd->erase = cfi_amdstd_erase_chip;
179 } 179 }
180 180
181} 181}
182 182
183static struct cfi_fixup cfi_fixup_table[] = { 183static struct cfi_fixup cfi_fixup_table[] = {
@@ -239,7 +239,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
239 239
240 if (cfi->cfi_mode==CFI_MODE_CFI){ 240 if (cfi->cfi_mode==CFI_MODE_CFI){
241 unsigned char bootloc; 241 unsigned char bootloc;
242 /* 242 /*
243 * It's a real CFI chip, not one for which the probe 243 * It's a real CFI chip, not one for which the probe
244 * routine faked a CFI structure. So we read the feature 244 * routine faked a CFI structure. So we read the feature
245 * table from it. 245 * table from it.
@@ -253,8 +253,18 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
253 return NULL; 253 return NULL;
254 } 254 }
255 255
256 if (extp->MajorVersion != '1' ||
257 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
258 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
259 "version %c.%c.\n", extp->MajorVersion,
260 extp->MinorVersion);
261 kfree(extp);
262 kfree(mtd);
263 return NULL;
264 }
265
256 /* Install our own private info structure */ 266 /* Install our own private info structure */
257 cfi->cmdset_priv = extp; 267 cfi->cmdset_priv = extp;
258 268
259 /* Apply cfi device specific fixups */ 269 /* Apply cfi device specific fixups */
260 cfi_fixup(mtd, cfi_fixup_table); 270 cfi_fixup(mtd, cfi_fixup_table);
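The hunk above makes cfi_cmdset_0002() bail out cleanly on an Amd/Fujitsu extended query table it does not understand: the major version must be '1' and the minor '0' through '4', and both fields are ASCII characters taken straight from the query table, not integers. (A gate of the same kind, with a '0'..'3' range, appears later in this diff for cfi_cmdset_0020.) A stand-alone restatement of the check; amdstd_extquery_ok() is an illustrative name.

#include <assert.h>
#include <stdbool.h>

static bool amdstd_extquery_ok(char major, char minor)
{
        return major == '1' && minor >= '0' && minor <= '4';
}

int main(void)
{
        assert(amdstd_extquery_ok('1', '3'));
        assert(!amdstd_extquery_ok('1', '5'));
        assert(!amdstd_extquery_ok('2', '0'));
        return 0;
}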
@@ -262,7 +272,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
262#ifdef DEBUG_CFI_FEATURES 272#ifdef DEBUG_CFI_FEATURES
263 /* Tell the user about it in lots of lovely detail */ 273 /* Tell the user about it in lots of lovely detail */
264 cfi_tell_features(extp); 274 cfi_tell_features(extp);
265#endif 275#endif
266 276
267 bootloc = extp->TopBottom; 277 bootloc = extp->TopBottom;
268 if ((bootloc != 2) && (bootloc != 3)) { 278 if ((bootloc != 2) && (bootloc != 3)) {
@@ -273,11 +283,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
273 283
274 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 284 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
275 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); 285 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
276 286
277 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 287 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
278 int j = (cfi->cfiq->NumEraseRegions-1)-i; 288 int j = (cfi->cfiq->NumEraseRegions-1)-i;
279 __u32 swap; 289 __u32 swap;
280 290
281 swap = cfi->cfiq->EraseRegionInfo[i]; 291 swap = cfi->cfiq->EraseRegionInfo[i];
282 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 292 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
283 cfi->cfiq->EraseRegionInfo[j] = swap; 293 cfi->cfiq->EraseRegionInfo[j] = swap;
@@ -288,11 +298,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
288 cfi->addr_unlock2 = 0x2aa; 298 cfi->addr_unlock2 = 0x2aa;
289 /* Modify the unlock address if we are in compatibility mode */ 299 /* Modify the unlock address if we are in compatibility mode */
290 if ( /* x16 in x8 mode */ 300 if ( /* x16 in x8 mode */
291 ((cfi->device_type == CFI_DEVICETYPE_X8) && 301 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
292 (cfi->cfiq->InterfaceDesc == 2)) || 302 (cfi->cfiq->InterfaceDesc == 2)) ||
293 /* x32 in x16 mode */ 303 /* x32 in x16 mode */
294 ((cfi->device_type == CFI_DEVICETYPE_X16) && 304 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
295 (cfi->cfiq->InterfaceDesc == 4))) 305 (cfi->cfiq->InterfaceDesc == 4)))
296 { 306 {
297 cfi->addr_unlock1 = 0xaaa; 307 cfi->addr_unlock1 = 0xaaa;
298 cfi->addr_unlock2 = 0x555; 308 cfi->addr_unlock2 = 0x555;
@@ -310,10 +320,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
310 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 320 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
311 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 321 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
312 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 322 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
313 } 323 }
314 324
315 map->fldrv = &cfi_amdstd_chipdrv; 325 map->fldrv = &cfi_amdstd_chipdrv;
316 326
317 return cfi_amdstd_setup(mtd); 327 return cfi_amdstd_setup(mtd);
318} 328}
319 329
@@ -326,24 +336,24 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
326 unsigned long offset = 0; 336 unsigned long offset = 0;
327 int i,j; 337 int i,j;
328 338
329 printk(KERN_NOTICE "number of %s chips: %d\n", 339 printk(KERN_NOTICE "number of %s chips: %d\n",
330 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 340 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
331 /* Select the correct geometry setup */ 341 /* Select the correct geometry setup */
332 mtd->size = devsize * cfi->numchips; 342 mtd->size = devsize * cfi->numchips;
333 343
334 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 344 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
335 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 345 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
336 * mtd->numeraseregions, GFP_KERNEL); 346 * mtd->numeraseregions, GFP_KERNEL);
337 if (!mtd->eraseregions) { 347 if (!mtd->eraseregions) {
338 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); 348 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
339 goto setup_err; 349 goto setup_err;
340 } 350 }
341 351
342 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 352 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
343 unsigned long ernum, ersize; 353 unsigned long ernum, ersize;
344 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 354 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
345 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 355 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
346 356
347 if (mtd->erasesize < ersize) { 357 if (mtd->erasesize < ersize) {
348 mtd->erasesize = ersize; 358 mtd->erasesize = ersize;
349 } 359 }
@@ -429,7 +439,7 @@ static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word
429 oldd = map_read(map, addr); 439 oldd = map_read(map, addr);
430 curd = map_read(map, addr); 440 curd = map_read(map, addr);
431 441
432 return map_word_equal(map, oldd, curd) && 442 return map_word_equal(map, oldd, curd) &&
433 map_word_equal(map, curd, expected); 443 map_word_equal(map, curd, expected);
434} 444}
435 445
@@ -461,7 +471,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
461 /* Someone else might have been playing with it. */ 471 /* Someone else might have been playing with it. */
462 goto retry; 472 goto retry;
463 } 473 }
464 474
465 case FL_READY: 475 case FL_READY:
466 case FL_CFI_QUERY: 476 case FL_CFI_QUERY:
467 case FL_JEDEC_QUERY: 477 case FL_JEDEC_QUERY:
@@ -504,7 +514,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
504 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 514 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
505 return -EIO; 515 return -EIO;
506 } 516 }
507 517
508 spin_unlock(chip->mutex); 518 spin_unlock(chip->mutex);
509 cfi_udelay(1); 519 cfi_udelay(1);
510 spin_lock(chip->mutex); 520 spin_lock(chip->mutex);
@@ -607,7 +617,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
607 * When a delay is required for the flash operation to complete, the 617 * When a delay is required for the flash operation to complete, the
608 * xip_udelay() function is polling for both the given timeout and pending 618 * xip_udelay() function is polling for both the given timeout and pending
609 * (but still masked) hardware interrupts. Whenever there is an interrupt 619 * (but still masked) hardware interrupts. Whenever there is an interrupt
610 * pending then the flash erase operation is suspended, array mode restored 620 * pending then the flash erase operation is suspended, array mode restored
611 * and interrupts unmasked. Task scheduling might also happen at that 621 * and interrupts unmasked. Task scheduling might also happen at that
612 * point. The CPU eventually returns from the interrupt or the call to 622 * point. The CPU eventually returns from the interrupt or the call to
613 * schedule() and the suspended flash operation is resumed for the remaining 623 * schedule() and the suspended flash operation is resumed for the remaining
@@ -631,9 +641,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
631 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 641 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
632 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 642 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
633 /* 643 /*
634 * Let's suspend the erase operation when supported. 644 * Let's suspend the erase operation when supported.
635 * Note that we currently don't try to suspend 645 * Note that we currently don't try to suspend
636 * interleaved chips if there is already another 646 * interleaved chips if there is already another
637 * operation suspended (imagine what happens 647 * operation suspended (imagine what happens
638 * when one chip was already done with the current 648 * when one chip was already done with the current
639 * operation while another chip suspended it, then 649 * operation while another chip suspended it, then
@@ -769,8 +779,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
769 779
770 adr += chip->start; 780 adr += chip->start;
771 781
772 /* Ensure cmd read/writes are aligned. */ 782 /* Ensure cmd read/writes are aligned. */
773 cmd_addr = adr & ~(map_bankwidth(map)-1); 783 cmd_addr = adr & ~(map_bankwidth(map)-1);
774 784
775 spin_lock(chip->mutex); 785 spin_lock(chip->mutex);
776 ret = get_chip(map, chip, cmd_addr, FL_READY); 786 ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -850,7 +860,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
850#endif 860#endif
851 set_current_state(TASK_UNINTERRUPTIBLE); 861 set_current_state(TASK_UNINTERRUPTIBLE);
852 add_wait_queue(&chip->wq, &wait); 862 add_wait_queue(&chip->wq, &wait);
853 863
854 spin_unlock(chip->mutex); 864 spin_unlock(chip->mutex);
855 865
856 schedule(); 866 schedule();
@@ -862,7 +872,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
862 timeo = jiffies + HZ; 872 timeo = jiffies + HZ;
863 873
864 goto retry; 874 goto retry;
865 } 875 }
866 876
867 adr += chip->start; 877 adr += chip->start;
868 878
@@ -871,14 +881,14 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
871 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 881 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
872 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 882 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
873 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 883 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
874 884
875 map_copy_from(map, buf, adr, len); 885 map_copy_from(map, buf, adr, len);
876 886
877 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 887 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
878 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 888 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
879 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 889 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
880 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 890 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
881 891
882 wake_up(&chip->wq); 892 wake_up(&chip->wq);
883 spin_unlock(chip->mutex); 893 spin_unlock(chip->mutex);
884 894
@@ -987,7 +997,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
987 chip->word_write_time); 997 chip->word_write_time);
988 998
989 /* See comment above for timeout value. */ 999 /* See comment above for timeout value. */
990 timeo = jiffies + uWriteTimeout; 1000 timeo = jiffies + uWriteTimeout;
991 for (;;) { 1001 for (;;) {
992 if (chip->state != FL_WRITING) { 1002 if (chip->state != FL_WRITING) {
993 /* Someone's suspended the write. Sleep */ 1003 /* Someone's suspended the write. Sleep */
@@ -1003,16 +1013,16 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1003 continue; 1013 continue;
1004 } 1014 }
1005 1015
1006 if (chip_ready(map, adr)) 1016 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1007 break;
1008
1009 if (time_after(jiffies, timeo)) {
1010 xip_enable(map, chip, adr); 1017 xip_enable(map, chip, adr);
1011 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1018 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1012 xip_disable(map, chip, adr); 1019 xip_disable(map, chip, adr);
1013 break; 1020 break;
1014 } 1021 }
1015 1022
1023 if (chip_ready(map, adr))
1024 break;
1025
1016 /* Latency issues. Drop the lock, wait a while and retry */ 1026 /* Latency issues. Drop the lock, wait a while and retry */
1017 UDELAY(map, chip, adr, 1); 1027 UDELAY(map, chip, adr, 1);
1018 } 1028 }
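The poll loop in do_write_oneword() above is rearranged so that the deadline test alone never produces an error: a software timeout is reported only when the chip is also confirmed not ready, and the separate readiness check still breaks out successfully even after the deadline has passed, which matters when the polling thread was scheduled away for longer than the timeout. The generic pattern, stripped of the XIP and locking details, is sketched below; wait_ready(), dummy_ready() and the one-second timeout are illustrative only.

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static bool wait_ready(bool (*ready)(void *), void *dev, unsigned int timeout_s)
{
        time_t deadline = time(NULL) + timeout_s;

        for (;;) {
                /* report a timeout only if the device is *still* not ready */
                if (time(NULL) > deadline && !ready(dev))
                        return false;
                if (ready(dev))
                        return true;    /* done, even if past the deadline */
                usleep(1000);           /* latency: back off, then poll again */
        }
}

struct dummy { int polls; };

static bool dummy_ready(void *p)
{
        struct dummy *d = p;

        return ++d->polls > 5;          /* "ready" after a few polls */
}

int main(void)
{
        struct dummy d = { 0 };

        return wait_ready(dummy_ready, &d, 1) ? 0 : 1;
}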
@@ -1022,7 +1032,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1022 map_write( map, CMD(0xF0), chip->start ); 1032 map_write( map, CMD(0xF0), chip->start );
1023 /* FIXME - should have reset delay before continuing */ 1033 /* FIXME - should have reset delay before continuing */
1024 1034
1025 if (++retry_cnt <= MAX_WORD_RETRIES) 1035 if (++retry_cnt <= MAX_WORD_RETRIES)
1026 goto retry; 1036 goto retry;
1027 1037
1028 ret = -EIO; 1038 ret = -EIO;
@@ -1090,27 +1100,27 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1090 1100
1091 /* Number of bytes to copy from buffer */ 1101 /* Number of bytes to copy from buffer */
1092 n = min_t(int, len, map_bankwidth(map)-i); 1102 n = min_t(int, len, map_bankwidth(map)-i);
1093 1103
1094 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1104 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1095 1105
1096 ret = do_write_oneword(map, &cfi->chips[chipnum], 1106 ret = do_write_oneword(map, &cfi->chips[chipnum],
1097 bus_ofs, tmp_buf); 1107 bus_ofs, tmp_buf);
1098 if (ret) 1108 if (ret)
1099 return ret; 1109 return ret;
1100 1110
1101 ofs += n; 1111 ofs += n;
1102 buf += n; 1112 buf += n;
1103 (*retlen) += n; 1113 (*retlen) += n;
1104 len -= n; 1114 len -= n;
1105 1115
1106 if (ofs >> cfi->chipshift) { 1116 if (ofs >> cfi->chipshift) {
1107 chipnum ++; 1117 chipnum ++;
1108 ofs = 0; 1118 ofs = 0;
1109 if (chipnum == cfi->numchips) 1119 if (chipnum == cfi->numchips)
1110 return 0; 1120 return 0;
1111 } 1121 }
1112 } 1122 }
1113 1123
1114 /* We are now aligned, write as much as possible */ 1124 /* We are now aligned, write as much as possible */
1115 while(len >= map_bankwidth(map)) { 1125 while(len >= map_bankwidth(map)) {
1116 map_word datum; 1126 map_word datum;
@@ -1128,7 +1138,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1128 len -= map_bankwidth(map); 1138 len -= map_bankwidth(map);
1129 1139
1130 if (ofs >> cfi->chipshift) { 1140 if (ofs >> cfi->chipshift) {
1131 chipnum ++; 1141 chipnum ++;
1132 ofs = 0; 1142 ofs = 0;
1133 if (chipnum == cfi->numchips) 1143 if (chipnum == cfi->numchips)
1134 return 0; 1144 return 0;
@@ -1166,12 +1176,12 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1166 spin_unlock(cfi->chips[chipnum].mutex); 1176 spin_unlock(cfi->chips[chipnum].mutex);
1167 1177
1168 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1178 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1169 1179
1170 ret = do_write_oneword(map, &cfi->chips[chipnum], 1180 ret = do_write_oneword(map, &cfi->chips[chipnum],
1171 ofs, tmp_buf); 1181 ofs, tmp_buf);
1172 if (ret) 1182 if (ret)
1173 return ret; 1183 return ret;
1174 1184
1175 (*retlen) += len; 1185 (*retlen) += len;
1176 } 1186 }
1177 1187
@@ -1183,7 +1193,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1183 * FIXME: interleaved mode not tested, and probably not supported! 1193 * FIXME: interleaved mode not tested, and probably not supported!
1184 */ 1194 */
1185static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1195static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1186 unsigned long adr, const u_char *buf, 1196 unsigned long adr, const u_char *buf,
1187 int len) 1197 int len)
1188{ 1198{
1189 struct cfi_private *cfi = map->fldrv_priv; 1199 struct cfi_private *cfi = map->fldrv_priv;
@@ -1213,7 +1223,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1213 XIP_INVAL_CACHED_RANGE(map, adr, len); 1223 XIP_INVAL_CACHED_RANGE(map, adr, len);
1214 ENABLE_VPP(map); 1224 ENABLE_VPP(map);
1215 xip_disable(map, chip, cmd_adr); 1225 xip_disable(map, chip, cmd_adr);
1216 1226
1217 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1227 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1218 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1228 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1219 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1229 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -1247,8 +1257,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1247 adr, map_bankwidth(map), 1257 adr, map_bankwidth(map),
1248 chip->word_write_time); 1258 chip->word_write_time);
1249 1259
1250 timeo = jiffies + uWriteTimeout; 1260 timeo = jiffies + uWriteTimeout;
1251 1261
1252 for (;;) { 1262 for (;;) {
1253 if (chip->state != FL_WRITING) { 1263 if (chip->state != FL_WRITING) {
1254 /* Someone's suspended the write. Sleep */ 1264 /* Someone's suspended the write. Sleep */
@@ -1264,13 +1274,13 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1264 continue; 1274 continue;
1265 } 1275 }
1266 1276
1277 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1278 break;
1279
1267 if (chip_ready(map, adr)) { 1280 if (chip_ready(map, adr)) {
1268 xip_enable(map, chip, adr); 1281 xip_enable(map, chip, adr);
1269 goto op_done; 1282 goto op_done;
1270 } 1283 }
1271
1272 if( time_after(jiffies, timeo))
1273 break;
1274 1284
1275 /* Latency issues. Drop the lock, wait a while and retry */ 1285 /* Latency issues. Drop the lock, wait a while and retry */
1276 UDELAY(map, chip, adr, 1); 1286 UDELAY(map, chip, adr, 1);
@@ -1342,7 +1352,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1342 if (size % map_bankwidth(map)) 1352 if (size % map_bankwidth(map))
1343 size -= size % map_bankwidth(map); 1353 size -= size % map_bankwidth(map);
1344 1354
1345 ret = do_write_buffer(map, &cfi->chips[chipnum], 1355 ret = do_write_buffer(map, &cfi->chips[chipnum],
1346 ofs, buf, size); 1356 ofs, buf, size);
1347 if (ret) 1357 if (ret)
1348 return ret; 1358 return ret;
@@ -1353,7 +1363,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1353 len -= size; 1363 len -= size;
1354 1364
1355 if (ofs >> cfi->chipshift) { 1365 if (ofs >> cfi->chipshift) {
1356 chipnum ++; 1366 chipnum ++;
1357 ofs = 0; 1367 ofs = 0;
1358 if (chipnum == cfi->numchips) 1368 if (chipnum == cfi->numchips)
1359 return 0; 1369 return 0;
@@ -1570,7 +1580,7 @@ int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1570 1580
1571 instr->state = MTD_ERASE_DONE; 1581 instr->state = MTD_ERASE_DONE;
1572 mtd_erase_callback(instr); 1582 mtd_erase_callback(instr);
1573 1583
1574 return 0; 1584 return 0;
1575} 1585}
1576 1586
@@ -1593,7 +1603,7 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1593 1603
1594 instr->state = MTD_ERASE_DONE; 1604 instr->state = MTD_ERASE_DONE;
1595 mtd_erase_callback(instr); 1605 mtd_erase_callback(instr);
1596 1606
1597 return 0; 1607 return 0;
1598} 1608}
1599 1609
@@ -1620,7 +1630,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1620 case FL_JEDEC_QUERY: 1630 case FL_JEDEC_QUERY:
1621 chip->oldstate = chip->state; 1631 chip->oldstate = chip->state;
1622 chip->state = FL_SYNCING; 1632 chip->state = FL_SYNCING;
1623 /* No need to wake_up() on this state change - 1633 /* No need to wake_up() on this state change -
1624 * as the whole point is that nobody can do anything 1634 * as the whole point is that nobody can do anything
1625 * with the chip now anyway. 1635 * with the chip now anyway.
1626 */ 1636 */
@@ -1631,13 +1641,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1631 default: 1641 default:
1632 /* Not an idle state */ 1642 /* Not an idle state */
1633 add_wait_queue(&chip->wq, &wait); 1643 add_wait_queue(&chip->wq, &wait);
1634 1644
1635 spin_unlock(chip->mutex); 1645 spin_unlock(chip->mutex);
1636 1646
1637 schedule(); 1647 schedule();
1638 1648
1639 remove_wait_queue(&chip->wq, &wait); 1649 remove_wait_queue(&chip->wq, &wait);
1640 1650
1641 goto retry; 1651 goto retry;
1642 } 1652 }
1643 } 1653 }
@@ -1648,7 +1658,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
1648 chip = &cfi->chips[i]; 1658 chip = &cfi->chips[i];
1649 1659
1650 spin_lock(chip->mutex); 1660 spin_lock(chip->mutex);
1651 1661
1652 if (chip->state == FL_SYNCING) { 1662 if (chip->state == FL_SYNCING) {
1653 chip->state = chip->oldstate; 1663 chip->state = chip->oldstate;
1654 wake_up(&chip->wq); 1664 wake_up(&chip->wq);
@@ -1678,7 +1688,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1678 case FL_JEDEC_QUERY: 1688 case FL_JEDEC_QUERY:
1679 chip->oldstate = chip->state; 1689 chip->oldstate = chip->state;
1680 chip->state = FL_PM_SUSPENDED; 1690 chip->state = FL_PM_SUSPENDED;
1681 /* No need to wake_up() on this state change - 1691 /* No need to wake_up() on this state change -
1682 * as the whole point is that nobody can do anything 1692 * as the whole point is that nobody can do anything
1683 * with the chip now anyway. 1693 * with the chip now anyway.
1684 */ 1694 */
@@ -1699,7 +1709,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1699 chip = &cfi->chips[i]; 1709 chip = &cfi->chips[i];
1700 1710
1701 spin_lock(chip->mutex); 1711 spin_lock(chip->mutex);
1702 1712
1703 if (chip->state == FL_PM_SUSPENDED) { 1713 if (chip->state == FL_PM_SUSPENDED) {
1704 chip->state = chip->oldstate; 1714 chip->state = chip->oldstate;
1705 wake_up(&chip->wq); 1715 wake_up(&chip->wq);
@@ -1707,7 +1717,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
1707 spin_unlock(chip->mutex); 1717 spin_unlock(chip->mutex);
1708 } 1718 }
1709 } 1719 }
1710 1720
1711 return ret; 1721 return ret;
1712} 1722}
1713 1723
@@ -1720,11 +1730,11 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
1720 struct flchip *chip; 1730 struct flchip *chip;
1721 1731
1722 for (i=0; i<cfi->numchips; i++) { 1732 for (i=0; i<cfi->numchips; i++) {
1723 1733
1724 chip = &cfi->chips[i]; 1734 chip = &cfi->chips[i];
1725 1735
1726 spin_lock(chip->mutex); 1736 spin_lock(chip->mutex);
1727 1737
1728 if (chip->state == FL_PM_SUSPENDED) { 1738 if (chip->state == FL_PM_SUSPENDED) {
1729 chip->state = FL_READY; 1739 chip->state = FL_READY;
1730 map_write(map, CMD(0xF0), chip->start); 1740 map_write(map, CMD(0xF0), chip->start);
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index c894f880157..c4a19d2dc67 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -4,8 +4,8 @@
4 * 4 *
5 * (C) 2000 Red Hat. GPL'd 5 * (C) 2000 Red Hat. GPL'd
6 * 6 *
7 * $Id: cfi_cmdset_0020.c,v 1.19 2005/07/13 15:52:45 dwmw2 Exp $ 7 * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
8 * 8 *
9 * 10/10/2000 Nicolas Pitre <nico@cam.org> 9 * 10/10/2000 Nicolas Pitre <nico@cam.org>
10 * - completely revamped method functions so they are aware and 10 * - completely revamped method functions so they are aware and
11 * independent of the flash geometry (buswidth, interleave, etc.) 11 * independent of the flash geometry (buswidth, interleave, etc.)
@@ -81,17 +81,17 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
81 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported"); 81 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
82 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported"); 82 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
83 for (i=9; i<32; i++) { 83 for (i=9; i<32; i++) {
84 if (extp->FeatureSupport & (1<<i)) 84 if (extp->FeatureSupport & (1<<i))
85 printk(" - Unknown Bit %X: supported\n", i); 85 printk(" - Unknown Bit %X: supported\n", i);
86 } 86 }
87 87
88 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport); 88 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
89 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported"); 89 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
90 for (i=1; i<8; i++) { 90 for (i=1; i<8; i++) {
91 if (extp->SuspendCmdSupport & (1<<i)) 91 if (extp->SuspendCmdSupport & (1<<i))
92 printk(" - Unknown Bit %X: supported\n", i); 92 printk(" - Unknown Bit %X: supported\n", i);
93 } 93 }
94 94
95 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask); 95 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
96 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no"); 96 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
97 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); 97 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
@@ -99,11 +99,11 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
99 if (extp->BlkStatusRegMask & (1<<i)) 99 if (extp->BlkStatusRegMask & (1<<i))
100 printk(" - Unknown Bit %X Active: yes\n",i); 100 printk(" - Unknown Bit %X Active: yes\n",i);
101 } 101 }
102 102
103 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 103 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
104 extp->VccOptimal >> 8, extp->VccOptimal & 0xf); 104 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
105 if (extp->VppOptimal) 105 if (extp->VppOptimal)
106 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 106 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
107 extp->VppOptimal >> 8, extp->VppOptimal & 0xf); 107 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
108} 108}
109#endif 109#endif
@@ -121,7 +121,7 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
121 int i; 121 int i;
122 122
123 if (cfi->cfi_mode) { 123 if (cfi->cfi_mode) {
124 /* 124 /*
125 * It's a real CFI chip, not one for which the probe 125 * It's a real CFI chip, not one for which the probe
126 * routine faked a CFI structure. So we read the feature 126 * routine faked a CFI structure. So we read the feature
127 * table from it. 127 * table from it.
@@ -133,24 +133,33 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
133 if (!extp) 133 if (!extp)
134 return NULL; 134 return NULL;
135 135
136 if (extp->MajorVersion != '1' ||
137 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
138 printk(KERN_ERR " Unknown ST Microelectronics"
139 " Extended Query version %c.%c.\n",
140 extp->MajorVersion, extp->MinorVersion);
141 kfree(extp);
142 return NULL;
143 }
144
136 /* Do some byteswapping if necessary */ 145 /* Do some byteswapping if necessary */
137 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport); 146 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
138 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask); 147 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
139 148
140#ifdef DEBUG_CFI_FEATURES 149#ifdef DEBUG_CFI_FEATURES
141 /* Tell the user about it in lots of lovely detail */ 150 /* Tell the user about it in lots of lovely detail */
142 cfi_tell_features(extp); 151 cfi_tell_features(extp);
143#endif 152#endif
144 153
145 /* Install our own private info structure */ 154 /* Install our own private info structure */
146 cfi->cmdset_priv = extp; 155 cfi->cmdset_priv = extp;
147 } 156 }
148 157
149 for (i=0; i< cfi->numchips; i++) { 158 for (i=0; i< cfi->numchips; i++) {
150 cfi->chips[i].word_write_time = 128; 159 cfi->chips[i].word_write_time = 128;
151 cfi->chips[i].buffer_write_time = 128; 160 cfi->chips[i].buffer_write_time = 128;
152 cfi->chips[i].erase_time = 1024; 161 cfi->chips[i].erase_time = 1024;
153 } 162 }
154 163
155 return cfi_staa_setup(map); 164 return cfi_staa_setup(map);
156} 165}
@@ -178,15 +187,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
178 mtd->size = devsize * cfi->numchips; 187 mtd->size = devsize * cfi->numchips;
179 188
180 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 189 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
181 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 190 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
182 * mtd->numeraseregions, GFP_KERNEL); 191 * mtd->numeraseregions, GFP_KERNEL);
183 if (!mtd->eraseregions) { 192 if (!mtd->eraseregions) {
184 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); 193 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
185 kfree(cfi->cmdset_priv); 194 kfree(cfi->cmdset_priv);
186 kfree(mtd); 195 kfree(mtd);
187 return NULL; 196 return NULL;
188 } 197 }
189 198
190 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 199 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
191 unsigned long ernum, ersize; 200 unsigned long ernum, ersize;
192 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 201 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -219,7 +228,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
219 mtd->eraseregions[i].numblocks); 228 mtd->eraseregions[i].numblocks);
220 } 229 }
221 230
222 /* Also select the correct geometry setup too */ 231 /* Also select the correct geometry setup too */
223 mtd->erase = cfi_staa_erase_varsize; 232 mtd->erase = cfi_staa_erase_varsize;
224 mtd->read = cfi_staa_read; 233 mtd->read = cfi_staa_read;
225 mtd->write = cfi_staa_write_buffers; 234 mtd->write = cfi_staa_write_buffers;
@@ -250,8 +259,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
250 259
251 adr += chip->start; 260 adr += chip->start;
252 261
253 /* Ensure cmd read/writes are aligned. */ 262 /* Ensure cmd read/writes are aligned. */
254 cmd_addr = adr & ~(map_bankwidth(map)-1); 263 cmd_addr = adr & ~(map_bankwidth(map)-1);
255 264
256 /* Let's determine this according to the interleave only once */ 265 /* Let's determine this according to the interleave only once */
257 status_OK = CMD(0x80); 266 status_OK = CMD(0x80);
@@ -267,7 +276,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
267 case FL_ERASING: 276 case FL_ERASING:
268 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)) 277 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
269 goto sleep; /* We don't support erase suspend */ 278 goto sleep; /* We don't support erase suspend */
270 279
271 map_write (map, CMD(0xb0), cmd_addr); 280 map_write (map, CMD(0xb0), cmd_addr);
272 /* If the flash has finished erasing, then 'erase suspend' 281 /* If the flash has finished erasing, then 'erase suspend'
273 * appears to make some (28F320) flash devices switch to 282 * appears to make some (28F320) flash devices switch to
@@ -282,7 +291,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
282 status = map_read(map, cmd_addr); 291 status = map_read(map, cmd_addr);
283 if (map_word_andequal(map, status, status_OK, status_OK)) 292 if (map_word_andequal(map, status, status_OK, status_OK))
284 break; 293 break;
285 294
286 if (time_after(jiffies, timeo)) { 295 if (time_after(jiffies, timeo)) {
287 /* Urgh */ 296 /* Urgh */
288 map_write(map, CMD(0xd0), cmd_addr); 297 map_write(map, CMD(0xd0), cmd_addr);
@@ -294,17 +303,17 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
294 "suspended: status = 0x%lx\n", status.x[0]); 303 "suspended: status = 0x%lx\n", status.x[0]);
295 return -EIO; 304 return -EIO;
296 } 305 }
297 306
298 spin_unlock_bh(chip->mutex); 307 spin_unlock_bh(chip->mutex);
299 cfi_udelay(1); 308 cfi_udelay(1);
300 spin_lock_bh(chip->mutex); 309 spin_lock_bh(chip->mutex);
301 } 310 }
302 311
303 suspended = 1; 312 suspended = 1;
304 map_write(map, CMD(0xff), cmd_addr); 313 map_write(map, CMD(0xff), cmd_addr);
305 chip->state = FL_READY; 314 chip->state = FL_READY;
306 break; 315 break;
307 316
308#if 0 317#if 0
309 case FL_WRITING: 318 case FL_WRITING:
310 /* Not quite yet */ 319 /* Not quite yet */
@@ -325,7 +334,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
325 chip->state = FL_READY; 334 chip->state = FL_READY;
326 break; 335 break;
327 } 336 }
328 337
329 /* Urgh. Chip not yet ready to talk to us. */ 338 /* Urgh. Chip not yet ready to talk to us. */
330 if (time_after(jiffies, timeo)) { 339 if (time_after(jiffies, timeo)) {
331 spin_unlock_bh(chip->mutex); 340 spin_unlock_bh(chip->mutex);
@@ -355,17 +364,17 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
355 364
356 if (suspended) { 365 if (suspended) {
357 chip->state = chip->oldstate; 366 chip->state = chip->oldstate;
358 /* What if one interleaved chip has finished and the 367 /* What if one interleaved chip has finished and the
359 other hasn't? The old code would leave the finished 368 other hasn't? The old code would leave the finished
360 one in READY mode. That's bad, and caused -EROFS 369 one in READY mode. That's bad, and caused -EROFS
361 errors to be returned from do_erase_oneblock because 370 errors to be returned from do_erase_oneblock because
362 that's the only bit it checked for at the time. 371 that's the only bit it checked for at the time.
363 As the state machine appears to explicitly allow 372 As the state machine appears to explicitly allow
364 sending the 0x70 (Read Status) command to an erasing 373 sending the 0x70 (Read Status) command to an erasing
365 chip and expecting it to be ignored, that's what we 374 chip and expecting it to be ignored, that's what we
366 do. */ 375 do. */
367 map_write(map, CMD(0xd0), cmd_addr); 376 map_write(map, CMD(0xd0), cmd_addr);
368 map_write(map, CMD(0x70), cmd_addr); 377 map_write(map, CMD(0x70), cmd_addr);
369 } 378 }
370 379
371 wake_up(&chip->wq); 380 wake_up(&chip->wq);
@@ -405,14 +414,14 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
405 *retlen += thislen; 414 *retlen += thislen;
406 len -= thislen; 415 len -= thislen;
407 buf += thislen; 416 buf += thislen;
408 417
409 ofs = 0; 418 ofs = 0;
410 chipnum++; 419 chipnum++;
411 } 420 }
412 return ret; 421 return ret;
413} 422}
414 423
415static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 424static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
416 unsigned long adr, const u_char *buf, int len) 425 unsigned long adr, const u_char *buf, int len)
417{ 426{
418 struct cfi_private *cfi = map->fldrv_priv; 427 struct cfi_private *cfi = map->fldrv_priv;
@@ -420,7 +429,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
420 unsigned long cmd_adr, timeo; 429 unsigned long cmd_adr, timeo;
421 DECLARE_WAITQUEUE(wait, current); 430 DECLARE_WAITQUEUE(wait, current);
422 int wbufsize, z; 431 int wbufsize, z;
423 432
424 /* M58LW064A requires bus alignment for buffer writes -- saw */ 433 /* M58LW064A requires bus alignment for buffer writes -- saw */
425 if (adr & (map_bankwidth(map)-1)) 434 if (adr & (map_bankwidth(map)-1))
426 return -EINVAL; 435 return -EINVAL;
@@ -428,10 +437,10 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
428 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 437 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
429 adr += chip->start; 438 adr += chip->start;
430 cmd_adr = adr & ~(wbufsize-1); 439 cmd_adr = adr & ~(wbufsize-1);
431 440
432 /* Let's determine this according to the interleave only once */ 441 /* Let's determine this according to the interleave only once */
433 status_OK = CMD(0x80); 442 status_OK = CMD(0x80);
434 443
435 timeo = jiffies + HZ; 444 timeo = jiffies + HZ;
436 retry: 445 retry:
437 446
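The hunk above derives the write-buffer size from the CFI MaxBufWriteSize field (a power-of-two exponent, scaled by the interleave) and aligns the target address down to a buffer boundary before issuing the buffer-write commands. A small sketch of the same arithmetic, with an interleave of 1 and MaxBufWriteSize = 5 (32-byte buffer) assumed purely for illustration:

#include <stdio.h>

int main(void)
{
    unsigned interleave = 1;         /* one device on the bus (assumed) */
    unsigned max_buf_write_size = 5; /* CFI field: buffer is 1 << 5 = 32 bytes */
    unsigned long adr = 0x1234;      /* offset the caller wants to write to */

    unsigned long wbufsize = (unsigned long)interleave << max_buf_write_size;
    unsigned long cmd_adr  = adr & ~(wbufsize - 1); /* align down to buffer */

    printf("wbufsize = %lu, cmd_adr = 0x%lx\n", wbufsize, cmd_adr);
    return 0;
}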
@@ -439,7 +448,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
439 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state); 448 printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
440#endif 449#endif
441 spin_lock_bh(chip->mutex); 450 spin_lock_bh(chip->mutex);
442 451
443 /* Check that the chip's ready to talk to us. 452 /* Check that the chip's ready to talk to us.
444 * Later, we can actually think about interrupting it 453 * Later, we can actually think about interrupting it
445 * if it's in FL_ERASING state. 454 * if it's in FL_ERASING state.
@@ -448,7 +457,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
448 switch (chip->state) { 457 switch (chip->state) {
449 case FL_READY: 458 case FL_READY:
450 break; 459 break;
451 460
452 case FL_CFI_QUERY: 461 case FL_CFI_QUERY:
453 case FL_JEDEC_QUERY: 462 case FL_JEDEC_QUERY:
454 map_write(map, CMD(0x70), cmd_adr); 463 map_write(map, CMD(0x70), cmd_adr);
@@ -513,7 +522,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
513 522
514 /* Write length of data to come */ 523 /* Write length of data to come */
515 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr ); 524 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
516 525
517 /* Write data */ 526 /* Write data */
518 for (z = 0; z < len; 527 for (z = 0; z < len;
519 z += map_bankwidth(map), buf += map_bankwidth(map)) { 528 z += map_bankwidth(map), buf += map_bankwidth(map)) {
@@ -560,7 +569,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
560 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); 569 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
561 return -EIO; 570 return -EIO;
562 } 571 }
563 572
564 /* Latency issues. Drop the lock, wait a while and retry */ 573 /* Latency issues. Drop the lock, wait a while and retry */
565 spin_unlock_bh(chip->mutex); 574 spin_unlock_bh(chip->mutex);
566 cfi_udelay(1); 575 cfi_udelay(1);
@@ -572,9 +581,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
572 if (!chip->buffer_write_time) 581 if (!chip->buffer_write_time)
573 chip->buffer_write_time++; 582 chip->buffer_write_time++;
574 } 583 }
575 if (z > 1) 584 if (z > 1)
576 chip->buffer_write_time++; 585 chip->buffer_write_time++;
577 586
578 /* Done and happy. */ 587 /* Done and happy. */
579 DISABLE_VPP(map); 588 DISABLE_VPP(map);
580 chip->state = FL_STATUS; 589 chip->state = FL_STATUS;
@@ -598,7 +607,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
598 return 0; 607 return 0;
599} 608}
600 609
601static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, 610static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
602 size_t len, size_t *retlen, const u_char *buf) 611 size_t len, size_t *retlen, const u_char *buf)
603{ 612{
604 struct map_info *map = mtd->priv; 613 struct map_info *map = mtd->priv;
@@ -620,7 +629,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
620 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize); 629 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
621 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len); 630 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
622#endif 631#endif
623 632
624 /* Write buffer is worth it only if more than one word to write... */ 633 /* Write buffer is worth it only if more than one word to write... */
625 while (len > 0) { 634 while (len > 0) {
626 /* We must not cross write block boundaries */ 635 /* We must not cross write block boundaries */
@@ -629,7 +638,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
629 if (size > len) 638 if (size > len)
630 size = len; 639 size = len;
631 640
632 ret = do_write_buffer(map, &cfi->chips[chipnum], 641 ret = do_write_buffer(map, &cfi->chips[chipnum],
633 ofs, buf, size); 642 ofs, buf, size);
634 if (ret) 643 if (ret)
635 return ret; 644 return ret;
@@ -640,13 +649,13 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
640 len -= size; 649 len -= size;
641 650
642 if (ofs >> cfi->chipshift) { 651 if (ofs >> cfi->chipshift) {
643 chipnum ++; 652 chipnum ++;
644 ofs = 0; 653 ofs = 0;
645 if (chipnum == cfi->numchips) 654 if (chipnum == cfi->numchips)
646 return 0; 655 return 0;
647 } 656 }
648 } 657 }
649 658
650 return 0; 659 return 0;
651} 660}
652 661
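cfi_staa_write_buffers slices the caller's buffer so that no single do_write_buffer() call crosses a write-buffer boundary, and bumps chipnum when the offset runs off the end of the current chip (the ofs >> cfi->chipshift test). A stand-alone sketch of the slicing, with deliberately tiny, invented sizes so the chip advance is visible:

#include <stdio.h>

int main(void)
{
    unsigned long wbufsize = 32;     /* write-buffer size (assumed) */
    unsigned long chipsize = 0x2000; /* unrealistically small, to show the chip advance */
    unsigned long ofs = 0x1ff0;      /* starting offset within chip 0 */
    unsigned long len = 100;         /* bytes to write */
    int chipnum = 0;

    while (len > 0) {
        /* Largest chunk that stays inside the current write buffer. */
        unsigned long size = wbufsize - (ofs & (wbufsize - 1));
        if (size > len)
            size = len;

        printf("chip %d: write %3lu bytes at 0x%lx\n", chipnum, size, ofs);

        ofs += size;
        len -= size;
        if (ofs >= chipsize) {       /* the driver tests ofs >> chipshift */
            chipnum++;
            ofs = 0;
        }
    }
    return 0;
}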
@@ -756,7 +765,7 @@ retry:
756 status = map_read(map, adr); 765 status = map_read(map, adr);
757 if (map_word_andequal(map, status, status_OK, status_OK)) 766 if (map_word_andequal(map, status, status_OK, status_OK))
758 break; 767 break;
759 768
760 /* Urgh. Chip not yet ready to talk to us. */ 769 /* Urgh. Chip not yet ready to talk to us. */
761 if (time_after(jiffies, timeo)) { 770 if (time_after(jiffies, timeo)) {
762 spin_unlock_bh(chip->mutex); 771 spin_unlock_bh(chip->mutex);
@@ -789,7 +798,7 @@ retry:
789 map_write(map, CMD(0x20), adr); 798 map_write(map, CMD(0x20), adr);
790 map_write(map, CMD(0xD0), adr); 799 map_write(map, CMD(0xD0), adr);
791 chip->state = FL_ERASING; 800 chip->state = FL_ERASING;
792 801
793 spin_unlock_bh(chip->mutex); 802 spin_unlock_bh(chip->mutex);
794 msleep(1000); 803 msleep(1000);
795 spin_lock_bh(chip->mutex); 804 spin_lock_bh(chip->mutex);
@@ -814,7 +823,7 @@ retry:
814 status = map_read(map, adr); 823 status = map_read(map, adr);
815 if (map_word_andequal(map, status, status_OK, status_OK)) 824 if (map_word_andequal(map, status, status_OK, status_OK))
816 break; 825 break;
817 826
818 /* OK Still waiting */ 827 /* OK Still waiting */
819 if (time_after(jiffies, timeo)) { 828 if (time_after(jiffies, timeo)) {
820 map_write(map, CMD(0x70), adr); 829 map_write(map, CMD(0x70), adr);
@@ -824,13 +833,13 @@ retry:
824 spin_unlock_bh(chip->mutex); 833 spin_unlock_bh(chip->mutex);
825 return -EIO; 834 return -EIO;
826 } 835 }
827 836
828 /* Latency issues. Drop the lock, wait a while and retry */ 837 /* Latency issues. Drop the lock, wait a while and retry */
829 spin_unlock_bh(chip->mutex); 838 spin_unlock_bh(chip->mutex);
830 cfi_udelay(1); 839 cfi_udelay(1);
831 spin_lock_bh(chip->mutex); 840 spin_lock_bh(chip->mutex);
832 } 841 }
833 842
834 DISABLE_VPP(map); 843 DISABLE_VPP(map);
835 ret = 0; 844 ret = 0;
836 845
@@ -855,7 +864,7 @@ retry:
855 /* Reset the error bits */ 864 /* Reset the error bits */
856 map_write(map, CMD(0x50), adr); 865 map_write(map, CMD(0x50), adr);
857 map_write(map, CMD(0x70), adr); 866 map_write(map, CMD(0x70), adr);
858 867
859 if ((chipstatus & 0x30) == 0x30) { 868 if ((chipstatus & 0x30) == 0x30) {
860 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus); 869 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
861 ret = -EIO; 870 ret = -EIO;
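The error path above reads the per-chip status after an erase, clears it with the 0x50 (Clear Status) command, and then decodes the failure bits; bits 0x20 and 0x10 set together is reported as an improper command sequence. A hedged sketch of that decoding — only the 0x30 combination is visible in this hunk, so the other bit meanings below follow the usual Intel/ST status-register conventions and are assumptions:

#include <stdio.h>

static void decode_status(unsigned chipstatus)
{
    if ((chipstatus & 0x30) == 0x30)
        printf("improper command sequence: status 0x%x\n", chipstatus);
    else if (chipstatus & 0x20)          /* assumed: erase failure bit */
        printf("erase failed: status 0x%x\n", chipstatus);
    else if (chipstatus & 0x10)          /* assumed: program failure bit */
        printf("program failed: status 0x%x\n", chipstatus);
    else
        printf("status 0x%x: no error bits set\n", chipstatus);
}

int main(void)
{
    decode_status(0xb0);   /* both bits set -> command sequence error */
    decode_status(0xa0);   /* only 0x20 set */
    return 0;
}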
@@ -904,17 +913,17 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
904 913
905 i = 0; 914 i = 0;
906 915
907 /* Skip all erase regions which are ended before the start of 916 /* Skip all erase regions which are ended before the start of
908 the requested erase. Actually, to save on the calculations, 917 the requested erase. Actually, to save on the calculations,
909 we skip to the first erase region which starts after the 918 we skip to the first erase region which starts after the
910 start of the requested erase, and then go back one. 919 start of the requested erase, and then go back one.
911 */ 920 */
912 921
913 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset) 922 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
914 i++; 923 i++;
915 i--; 924 i--;
916 925
917 /* OK, now i is pointing at the erase region in which this 926 /* OK, now i is pointing at the erase region in which this
918 erase request starts. Check the start of the requested 927 erase request starts. Check the start of the requested
919 erase range is aligned with the erase size which is in 928 erase range is aligned with the erase size which is in
920 effect here. 929 effect here.
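The region search above walks the erase-region table to the first region that starts beyond the requested address and then steps back one, leaving i at the region the address falls in; alignment is then checked against that region's erase size. A self-contained sketch of the same lookup, using an invented two-region layout:

#include <stdio.h>

struct region { unsigned long offset; unsigned long erasesize; };

int main(void)
{
    /* Example layout (assumed): small boot blocks, then 64 KiB main blocks. */
    struct region regions[] = {
        { 0x00000, 0x02000 },
        { 0x10000, 0x10000 },
    };
    int numregions = 2;
    unsigned long addr = 0x30000;   /* start of the requested erase */

    int i = 0;
    while (i < numregions && addr >= regions[i].offset)
        i++;
    i--;                            /* region containing addr */

    if (addr & (regions[i].erasesize - 1))
        printf("0x%lx is not aligned to the 0x%lx erase size\n",
               addr, regions[i].erasesize);
    else
        printf("0x%lx is aligned within region %d (erasesize 0x%lx)\n",
               addr, i, regions[i].erasesize);
    return 0;
}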
@@ -937,7 +946,7 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
937 the address actually falls 946 the address actually falls
938 */ 947 */
939 i--; 948 i--;
940 949
941 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) 950 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
942 return -EINVAL; 951 return -EINVAL;
943 952
@@ -949,7 +958,7 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
949 958
950 while(len) { 959 while(len) {
951 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr); 960 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
952 961
953 if (ret) 962 if (ret)
954 return ret; 963 return ret;
955 964
@@ -962,15 +971,15 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
962 if (adr >> cfi->chipshift) { 971 if (adr >> cfi->chipshift) {
963 adr = 0; 972 adr = 0;
964 chipnum++; 973 chipnum++;
965 974
966 if (chipnum >= cfi->numchips) 975 if (chipnum >= cfi->numchips)
967 break; 976 break;
968 } 977 }
969 } 978 }
970 979
971 instr->state = MTD_ERASE_DONE; 980 instr->state = MTD_ERASE_DONE;
972 mtd_erase_callback(instr); 981 mtd_erase_callback(instr);
973 982
974 return 0; 983 return 0;
975} 984}
976 985
@@ -996,7 +1005,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
996 case FL_JEDEC_QUERY: 1005 case FL_JEDEC_QUERY:
997 chip->oldstate = chip->state; 1006 chip->oldstate = chip->state;
998 chip->state = FL_SYNCING; 1007 chip->state = FL_SYNCING;
999 /* No need to wake_up() on this state change - 1008 /* No need to wake_up() on this state change -
1000 * as the whole point is that nobody can do anything 1009 * as the whole point is that nobody can do anything
1001 * with the chip now anyway. 1010 * with the chip now anyway.
1002 */ 1011 */
@@ -1007,11 +1016,11 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1007 default: 1016 default:
1008 /* Not an idle state */ 1017 /* Not an idle state */
1009 add_wait_queue(&chip->wq, &wait); 1018 add_wait_queue(&chip->wq, &wait);
1010 1019
1011 spin_unlock_bh(chip->mutex); 1020 spin_unlock_bh(chip->mutex);
1012 schedule(); 1021 schedule();
1013 remove_wait_queue(&chip->wq, &wait); 1022 remove_wait_queue(&chip->wq, &wait);
1014 1023
1015 goto retry; 1024 goto retry;
1016 } 1025 }
1017 } 1026 }
@@ -1022,7 +1031,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
1022 chip = &cfi->chips[i]; 1031 chip = &cfi->chips[i];
1023 1032
1024 spin_lock_bh(chip->mutex); 1033 spin_lock_bh(chip->mutex);
1025 1034
1026 if (chip->state == FL_SYNCING) { 1035 if (chip->state == FL_SYNCING) {
1027 chip->state = chip->oldstate; 1036 chip->state = chip->oldstate;
1028 wake_up(&chip->wq); 1037 wake_up(&chip->wq);
@@ -1057,9 +1066,9 @@ retry:
1057 1066
1058 case FL_STATUS: 1067 case FL_STATUS:
1059 status = map_read(map, adr); 1068 status = map_read(map, adr);
1060 if (map_word_andequal(map, status, status_OK, status_OK)) 1069 if (map_word_andequal(map, status, status_OK, status_OK))
1061 break; 1070 break;
1062 1071
1063 /* Urgh. Chip not yet ready to talk to us. */ 1072 /* Urgh. Chip not yet ready to talk to us. */
1064 if (time_after(jiffies, timeo)) { 1073 if (time_after(jiffies, timeo)) {
1065 spin_unlock_bh(chip->mutex); 1074 spin_unlock_bh(chip->mutex);
@@ -1088,7 +1097,7 @@ retry:
1088 map_write(map, CMD(0x60), adr); 1097 map_write(map, CMD(0x60), adr);
1089 map_write(map, CMD(0x01), adr); 1098 map_write(map, CMD(0x01), adr);
1090 chip->state = FL_LOCKING; 1099 chip->state = FL_LOCKING;
1091 1100
1092 spin_unlock_bh(chip->mutex); 1101 spin_unlock_bh(chip->mutex);
1093 msleep(1000); 1102 msleep(1000);
1094 spin_lock_bh(chip->mutex); 1103 spin_lock_bh(chip->mutex);
@@ -1102,7 +1111,7 @@ retry:
1102 status = map_read(map, adr); 1111 status = map_read(map, adr);
1103 if (map_word_andequal(map, status, status_OK, status_OK)) 1112 if (map_word_andequal(map, status, status_OK, status_OK))
1104 break; 1113 break;
1105 1114
1106 /* OK Still waiting */ 1115 /* OK Still waiting */
1107 if (time_after(jiffies, timeo)) { 1116 if (time_after(jiffies, timeo)) {
1108 map_write(map, CMD(0x70), adr); 1117 map_write(map, CMD(0x70), adr);
@@ -1112,13 +1121,13 @@ retry:
1112 spin_unlock_bh(chip->mutex); 1121 spin_unlock_bh(chip->mutex);
1113 return -EIO; 1122 return -EIO;
1114 } 1123 }
1115 1124
1116 /* Latency issues. Drop the lock, wait a while and retry */ 1125 /* Latency issues. Drop the lock, wait a while and retry */
1117 spin_unlock_bh(chip->mutex); 1126 spin_unlock_bh(chip->mutex);
1118 cfi_udelay(1); 1127 cfi_udelay(1);
1119 spin_lock_bh(chip->mutex); 1128 spin_lock_bh(chip->mutex);
1120 } 1129 }
1121 1130
1122 /* Done and happy. */ 1131 /* Done and happy. */
1123 chip->state = FL_STATUS; 1132 chip->state = FL_STATUS;
1124 DISABLE_VPP(map); 1133 DISABLE_VPP(map);
@@ -1162,8 +1171,8 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1162 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); 1171 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1163 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); 1172 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1164 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); 1173 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1165#endif 1174#endif
1166 1175
1167 if (ret) 1176 if (ret)
1168 return ret; 1177 return ret;
1169 1178
@@ -1173,7 +1182,7 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1173 if (adr >> cfi->chipshift) { 1182 if (adr >> cfi->chipshift) {
1174 adr = 0; 1183 adr = 0;
1175 chipnum++; 1184 chipnum++;
1176 1185
1177 if (chipnum >= cfi->numchips) 1186 if (chipnum >= cfi->numchips)
1178 break; 1187 break;
1179 } 1188 }
@@ -1208,7 +1217,7 @@ retry:
1208 status = map_read(map, adr); 1217 status = map_read(map, adr);
1209 if (map_word_andequal(map, status, status_OK, status_OK)) 1218 if (map_word_andequal(map, status, status_OK, status_OK))
1210 break; 1219 break;
1211 1220
1212 /* Urgh. Chip not yet ready to talk to us. */ 1221 /* Urgh. Chip not yet ready to talk to us. */
1213 if (time_after(jiffies, timeo)) { 1222 if (time_after(jiffies, timeo)) {
1214 spin_unlock_bh(chip->mutex); 1223 spin_unlock_bh(chip->mutex);
@@ -1237,7 +1246,7 @@ retry:
1237 map_write(map, CMD(0x60), adr); 1246 map_write(map, CMD(0x60), adr);
1238 map_write(map, CMD(0xD0), adr); 1247 map_write(map, CMD(0xD0), adr);
1239 chip->state = FL_UNLOCKING; 1248 chip->state = FL_UNLOCKING;
1240 1249
1241 spin_unlock_bh(chip->mutex); 1250 spin_unlock_bh(chip->mutex);
1242 msleep(1000); 1251 msleep(1000);
1243 spin_lock_bh(chip->mutex); 1252 spin_lock_bh(chip->mutex);
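Locking and unlocking share the same Set Block Lock Bits setup command (0x60) followed by a confirm byte: 0x01 locks the block (the earlier cfi_staa_lock path) and 0xD0 clears the lock (here). A tiny sketch of that pairing, with a hypothetical helper standing in for the two map_write() calls:

#include <stdio.h>

enum lock_op { BLOCK_LOCK, BLOCK_UNLOCK };

/* Return the confirm command that follows the 0x60 setup write. */
static unsigned char confirm_cmd(enum lock_op op)
{
    return op == BLOCK_LOCK ? 0x01 : 0xd0;
}

int main(void)
{
    printf("lock:   0x60 then 0x%02x\n", confirm_cmd(BLOCK_LOCK));
    printf("unlock: 0x60 then 0x%02x\n", confirm_cmd(BLOCK_UNLOCK));
    return 0;
}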
@@ -1251,7 +1260,7 @@ retry:
1251 status = map_read(map, adr); 1260 status = map_read(map, adr);
1252 if (map_word_andequal(map, status, status_OK, status_OK)) 1261 if (map_word_andequal(map, status, status_OK, status_OK))
1253 break; 1262 break;
1254 1263
1255 /* OK Still waiting */ 1264 /* OK Still waiting */
1256 if (time_after(jiffies, timeo)) { 1265 if (time_after(jiffies, timeo)) {
1257 map_write(map, CMD(0x70), adr); 1266 map_write(map, CMD(0x70), adr);
@@ -1261,13 +1270,13 @@ retry:
1261 spin_unlock_bh(chip->mutex); 1270 spin_unlock_bh(chip->mutex);
1262 return -EIO; 1271 return -EIO;
1263 } 1272 }
1264 1273
1265 /* Latency issues. Drop the lock, wait a while and retry */ 1274 /* Latency issues. Drop the lock, wait a while and retry */
1266 spin_unlock_bh(chip->mutex); 1275 spin_unlock_bh(chip->mutex);
1267 cfi_udelay(1); 1276 cfi_udelay(1);
1268 spin_lock_bh(chip->mutex); 1277 spin_lock_bh(chip->mutex);
1269 } 1278 }
1270 1279
1271 /* Done and happy. */ 1280 /* Done and happy. */
1272 chip->state = FL_STATUS; 1281 chip->state = FL_STATUS;
1273 DISABLE_VPP(map); 1282 DISABLE_VPP(map);
@@ -1292,7 +1301,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1292 { 1301 {
1293 unsigned long temp_adr = adr; 1302 unsigned long temp_adr = adr;
1294 unsigned long temp_len = len; 1303 unsigned long temp_len = len;
1295 1304
1296 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); 1305 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1297 while (temp_len) { 1306 while (temp_len) {
1298 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor))); 1307 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
@@ -1310,7 +1319,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1310 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); 1319 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1311 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); 1320 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1312#endif 1321#endif
1313 1322
1314 return ret; 1323 return ret;
1315} 1324}
1316 1325
@@ -1334,7 +1343,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1334 case FL_JEDEC_QUERY: 1343 case FL_JEDEC_QUERY:
1335 chip->oldstate = chip->state; 1344 chip->oldstate = chip->state;
1336 chip->state = FL_PM_SUSPENDED; 1345 chip->state = FL_PM_SUSPENDED;
1337 /* No need to wake_up() on this state change - 1346 /* No need to wake_up() on this state change -
1338 * as the whole point is that nobody can do anything 1347 * as the whole point is that nobody can do anything
1339 * with the chip now anyway. 1348 * with the chip now anyway.
1340 */ 1349 */
@@ -1353,9 +1362,9 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1353 if (ret) { 1362 if (ret) {
1354 for (i--; i >=0; i--) { 1363 for (i--; i >=0; i--) {
1355 chip = &cfi->chips[i]; 1364 chip = &cfi->chips[i];
1356 1365
1357 spin_lock_bh(chip->mutex); 1366 spin_lock_bh(chip->mutex);
1358 1367
1359 if (chip->state == FL_PM_SUSPENDED) { 1368 if (chip->state == FL_PM_SUSPENDED) {
1360 /* No need to force it into a known state here, 1369 /* No need to force it into a known state here,
1361 because we're returning failure, and it didn't 1370 because we're returning failure, and it didn't
@@ -1365,8 +1374,8 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
1365 } 1374 }
1366 spin_unlock_bh(chip->mutex); 1375 spin_unlock_bh(chip->mutex);
1367 } 1376 }
1368 } 1377 }
1369 1378
1370 return ret; 1379 return ret;
1371} 1380}
1372 1381
@@ -1378,11 +1387,11 @@ static void cfi_staa_resume(struct mtd_info *mtd)
1378 struct flchip *chip; 1387 struct flchip *chip;
1379 1388
1380 for (i=0; i<cfi->numchips; i++) { 1389 for (i=0; i<cfi->numchips; i++) {
1381 1390
1382 chip = &cfi->chips[i]; 1391 chip = &cfi->chips[i];
1383 1392
1384 spin_lock_bh(chip->mutex); 1393 spin_lock_bh(chip->mutex);
1385 1394
1386 /* Go to known state. Chip may have been power cycled */ 1395 /* Go to known state. Chip may have been power cycled */
1387 if (chip->state == FL_PM_SUSPENDED) { 1396 if (chip->state == FL_PM_SUSPENDED) {
1388 map_write(map, CMD(0xFF), 0); 1397 map_write(map, CMD(0xFF), 0);
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index cf750038ce6..90eb30e06b7 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Common Flash Interface probe code. 2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd. 3 (C) 2000 Red Hat. GPL'd.
4 $Id: cfi_probe.c,v 1.83 2004/11/16 18:19:02 nico Exp $ 4 $Id: cfi_probe.c,v 1.84 2005/11/07 11:14:23 gleixner Exp $
5*/ 5*/
6 6
7#include <linux/config.h> 7#include <linux/config.h>
@@ -20,7 +20,7 @@
20#include <linux/mtd/cfi.h> 20#include <linux/mtd/cfi.h>
21#include <linux/mtd/gen_probe.h> 21#include <linux/mtd/gen_probe.h>
22 22
23//#define DEBUG_CFI 23//#define DEBUG_CFI
24 24
25#ifdef DEBUG_CFI 25#ifdef DEBUG_CFI
26static void print_cfi_ident(struct cfi_ident *); 26static void print_cfi_ident(struct cfi_ident *);
@@ -103,7 +103,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
103 unsigned long *chip_map, struct cfi_private *cfi) 103 unsigned long *chip_map, struct cfi_private *cfi)
104{ 104{
105 int i; 105 int i;
106 106
107 if ((base + 0) >= map->size) { 107 if ((base + 0) >= map->size) {
108 printk(KERN_NOTICE 108 printk(KERN_NOTICE
109 "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n", 109 "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
@@ -128,7 +128,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
128 } 128 }
129 129
130 if (!cfi->numchips) { 130 if (!cfi->numchips) {
131 /* This is the first time we're called. Set up the CFI 131 /* This is the first time we're called. Set up the CFI
132 stuff accordingly and return */ 132 stuff accordingly and return */
133 return cfi_chip_setup(map, cfi); 133 return cfi_chip_setup(map, cfi);
134 } 134 }
@@ -138,13 +138,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
138 unsigned long start; 138 unsigned long start;
139 if(!test_bit(i, chip_map)) { 139 if(!test_bit(i, chip_map)) {
140 /* Skip location; no valid chip at this address */ 140 /* Skip location; no valid chip at this address */
141 continue; 141 continue;
142 } 142 }
143 start = i << cfi->chipshift; 143 start = i << cfi->chipshift;
144 /* This chip should be in read mode if it's one 144 /* This chip should be in read mode if it's one
145 we've already touched. */ 145 we've already touched. */
146 if (qry_present(map, start, cfi)) { 146 if (qry_present(map, start, cfi)) {
147 /* Eep. This chip also had the QRY marker. 147 /* Eep. This chip also had the QRY marker.
148 * Is it an alias for the new one? */ 148 * Is it an alias for the new one? */
149 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL); 149 cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
150 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); 150 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
@@ -156,13 +156,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
156 map->name, base, start); 156 map->name, base, start);
157 return 0; 157 return 0;
158 } 158 }
159 /* Yes, it's actually got QRY for data. Most 159 /* Yes, it's actually got QRY for data. Most
160 * unfortunate. Stick the new chip in read mode 160 * unfortunate. Stick the new chip in read mode
161 * too and if it's the same, assume it's an alias. */ 161 * too and if it's the same, assume it's an alias. */
162 /* FIXME: Use other modes to do a proper check */ 162 /* FIXME: Use other modes to do a proper check */
163 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 163 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
164 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); 164 cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
165 165
166 if (qry_present(map, base, cfi)) { 166 if (qry_present(map, base, cfi)) {
167 xip_allowed(base, map); 167 xip_allowed(base, map);
168 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", 168 printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
@@ -171,12 +171,12 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
171 } 171 }
172 } 172 }
173 } 173 }
174 174
175 /* OK, if we got to here, then none of the previous chips appear to 175 /* OK, if we got to here, then none of the previous chips appear to
176 be aliases for the current one. */ 176 be aliases for the current one. */
177 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ 177 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
178 cfi->numchips++; 178 cfi->numchips++;
179 179
180 /* Put it back into Read Mode */ 180 /* Put it back into Read Mode */
181 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 181 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
182 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); 182 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
@@ -185,11 +185,11 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
185 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", 185 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
186 map->name, cfi->interleave, cfi->device_type*8, base, 186 map->name, cfi->interleave, cfi->device_type*8, base,
187 map->bankwidth*8); 187 map->bankwidth*8);
188 188
189 return 1; 189 return 1;
190} 190}
191 191
192static int __xipram cfi_chip_setup(struct map_info *map, 192static int __xipram cfi_chip_setup(struct map_info *map,
193 struct cfi_private *cfi) 193 struct cfi_private *cfi)
194{ 194{
195 int ofs_factor = cfi->interleave*cfi->device_type; 195 int ofs_factor = cfi->interleave*cfi->device_type;
@@ -209,11 +209,11 @@ static int __xipram cfi_chip_setup(struct map_info *map,
209 printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 209 printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
210 return 0; 210 return 0;
211 } 211 }
212 212
213 memset(cfi->cfiq,0,sizeof(struct cfi_ident)); 213 memset(cfi->cfiq,0,sizeof(struct cfi_ident));
214 214
215 cfi->cfi_mode = CFI_MODE_CFI; 215 cfi->cfi_mode = CFI_MODE_CFI;
216 216
217 /* Read the CFI info structure */ 217 /* Read the CFI info structure */
218 xip_disable_qry(base, map, cfi); 218 xip_disable_qry(base, map, cfi);
219 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) 219 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
@@ -231,7 +231,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
231 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); 231 cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
232 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); 232 cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
233 cfi->mfr = cfi_read_query(map, base); 233 cfi->mfr = cfi_read_query(map, base);
234 cfi->id = cfi_read_query(map, base + ofs_factor); 234 cfi->id = cfi_read_query(map, base + ofs_factor);
235 235
236 /* Put it back into Read Mode */ 236 /* Put it back into Read Mode */
237 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); 237 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
@@ -255,10 +255,10 @@ static int __xipram cfi_chip_setup(struct map_info *map,
255 255
256 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 256 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
257 cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]); 257 cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
258 258
259#ifdef DEBUG_CFI 259#ifdef DEBUG_CFI
260 printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n", 260 printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
261 i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff, 261 i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
262 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1); 262 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
263#endif 263#endif
264 } 264 }
@@ -271,33 +271,33 @@ static int __xipram cfi_chip_setup(struct map_info *map,
271} 271}
272 272
273#ifdef DEBUG_CFI 273#ifdef DEBUG_CFI
274static char *vendorname(__u16 vendor) 274static char *vendorname(__u16 vendor)
275{ 275{
276 switch (vendor) { 276 switch (vendor) {
277 case P_ID_NONE: 277 case P_ID_NONE:
278 return "None"; 278 return "None";
279 279
280 case P_ID_INTEL_EXT: 280 case P_ID_INTEL_EXT:
281 return "Intel/Sharp Extended"; 281 return "Intel/Sharp Extended";
282 282
283 case P_ID_AMD_STD: 283 case P_ID_AMD_STD:
284 return "AMD/Fujitsu Standard"; 284 return "AMD/Fujitsu Standard";
285 285
286 case P_ID_INTEL_STD: 286 case P_ID_INTEL_STD:
287 return "Intel/Sharp Standard"; 287 return "Intel/Sharp Standard";
288 288
289 case P_ID_AMD_EXT: 289 case P_ID_AMD_EXT:
290 return "AMD/Fujitsu Extended"; 290 return "AMD/Fujitsu Extended";
291 291
292 case P_ID_WINBOND: 292 case P_ID_WINBOND:
293 return "Winbond Standard"; 293 return "Winbond Standard";
294 294
295 case P_ID_ST_ADV: 295 case P_ID_ST_ADV:
296 return "ST Advanced"; 296 return "ST Advanced";
297 297
298 case P_ID_MITSUBISHI_STD: 298 case P_ID_MITSUBISHI_STD:
299 return "Mitsubishi Standard"; 299 return "Mitsubishi Standard";
300 300
301 case P_ID_MITSUBISHI_EXT: 301 case P_ID_MITSUBISHI_EXT:
302 return "Mitsubishi Extended"; 302 return "Mitsubishi Extended";
303 303
@@ -306,13 +306,13 @@ static char *vendorname(__u16 vendor)
306 306
307 case P_ID_INTEL_PERFORMANCE: 307 case P_ID_INTEL_PERFORMANCE:
308 return "Intel Performance Code"; 308 return "Intel Performance Code";
309 309
310 case P_ID_INTEL_DATA: 310 case P_ID_INTEL_DATA:
311 return "Intel Data"; 311 return "Intel Data";
312 312
313 case P_ID_RESERVED: 313 case P_ID_RESERVED:
314 return "Not Allowed / Reserved for Future Use"; 314 return "Not Allowed / Reserved for Future Use";
315 315
316 default: 316 default:
317 return "Unknown"; 317 return "Unknown";
318 } 318 }
@@ -325,21 +325,21 @@ static void print_cfi_ident(struct cfi_ident *cfip)
325 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') { 325 if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
326 printk("Invalid CFI ident structure.\n"); 326 printk("Invalid CFI ident structure.\n");
327 return; 327 return;
328 } 328 }
329#endif 329#endif
330 printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID)); 330 printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
331 if (cfip->P_ADR) 331 if (cfip->P_ADR)
332 printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR); 332 printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
333 else 333 else
334 printk("No Primary Algorithm Table\n"); 334 printk("No Primary Algorithm Table\n");
335 335
336 printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID)); 336 printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
337 if (cfip->A_ADR) 337 if (cfip->A_ADR)
338 printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR); 338 printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
339 else 339 else
340 printk("No Alternate Algorithm Table\n"); 340 printk("No Alternate Algorithm Table\n");
341 341
342 342
343 printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf); 343 printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
344 printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf); 344 printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
345 if (cfip->VppMin) { 345 if (cfip->VppMin) {
@@ -348,61 +348,61 @@ static void print_cfi_ident(struct cfi_ident *cfip)
348 } 348 }
349 else 349 else
350 printk("No Vpp line\n"); 350 printk("No Vpp line\n");
351 351
352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); 352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); 353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
354 354
355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { 355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); 356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); 357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
358 } 358 }
359 else 359 else
360 printk("Full buffer write not supported\n"); 360 printk("Full buffer write not supported\n");
361 361
362 printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp); 362 printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
363 printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp)); 363 printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
364 if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) { 364 if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
365 printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp); 365 printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
366 printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp)); 366 printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
367 } 367 }
368 else 368 else
369 printk("Chip erase not supported\n"); 369 printk("Chip erase not supported\n");
370 370
371 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); 371 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
372 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); 372 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
373 switch(cfip->InterfaceDesc) { 373 switch(cfip->InterfaceDesc) {
374 case 0: 374 case 0:
375 printk(" - x8-only asynchronous interface\n"); 375 printk(" - x8-only asynchronous interface\n");
376 break; 376 break;
377 377
378 case 1: 378 case 1:
379 printk(" - x16-only asynchronous interface\n"); 379 printk(" - x16-only asynchronous interface\n");
380 break; 380 break;
381 381
382 case 2: 382 case 2:
383 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); 383 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
384 break; 384 break;
385 385
386 case 3: 386 case 3:
387 printk(" - x32-only asynchronous interface\n"); 387 printk(" - x32-only asynchronous interface\n");
388 break; 388 break;
389 389
390 case 4: 390 case 4:
391 printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); 391 printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
392 break; 392 break;
393 393
394 case 65535: 394 case 65535:
395 printk(" - Not Allowed / Reserved\n"); 395 printk(" - Not Allowed / Reserved\n");
396 break; 396 break;
397 397
398 default: 398 default:
399 printk(" - Unknown\n"); 399 printk(" - Unknown\n");
400 break; 400 break;
401 } 401 }
402 402
403 printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize); 403 printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
404 printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions); 404 printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
405 405
406} 406}
407#endif /* DEBUG_CFI */ 407#endif /* DEBUG_CFI */
408 408
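The write and erase timeout lines in print_cfi_ident all follow the same CFI rule: the "typical" field is a power-of-two exponent, and the "maximum" field is a further power-of-two multiplier applied to the typical value. A short sketch of the arithmetic, with example exponents that are assumed rather than taken from a real chip:

#include <stdio.h>

int main(void)
{
    unsigned word_write_typ = 4;   /* typical word write: 1 << 4 = 16 us */
    unsigned word_write_max = 2;   /* max = typ * (1 << 2) = 64 us */
    unsigned blk_erase_typ  = 10;  /* typical block erase: 1 << 10 = 1024 ms */
    unsigned blk_erase_max  = 3;   /* max = typ * (1 << 3) = 8192 ms */

    printf("word write: typ %u us, max %u us\n",
           1u << word_write_typ,
           (1u << word_write_max) * (1u << word_write_typ));
    printf("block erase: typ %u ms, max %u ms\n",
           1u << blk_erase_typ,
           (1u << blk_erase_max) * (1u << blk_erase_typ));
    return 0;
}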
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 2b2ede2bfcc..d8e7a026ba5 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * This code is covered by the GPL. 8 * This code is covered by the GPL.
9 * 9 *
10 * $Id: cfi_util.c,v 1.8 2004/12/14 19:55:56 nico Exp $ 10 * $Id: cfi_util.c,v 1.10 2005/11/07 11:14:23 gleixner Exp $
11 * 11 *
12 */ 12 */
13 13
@@ -56,7 +56,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
56 56
57 /* Read in the Extended Query Table */ 57 /* Read in the Extended Query Table */
58 for (i=0; i<size; i++) { 58 for (i=0; i<size; i++) {
59 ((unsigned char *)extp)[i] = 59 ((unsigned char *)extp)[i] =
60 cfi_read_query(map, base+((adr+i)*ofs_factor)); 60 cfi_read_query(map, base+((adr+i)*ofs_factor));
61 } 61 }
62 62
@@ -70,15 +70,6 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
70 local_irq_enable(); 70 local_irq_enable();
71#endif 71#endif
72 72
73 if (extp->MajorVersion != '1' ||
74 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
75 printk(KERN_WARNING " Unknown %s Extended Query "
76 "version %c.%c.\n", name, extp->MajorVersion,
77 extp->MinorVersion);
78 kfree(extp);
79 extp = NULL;
80 }
81
82 out: return extp; 73 out: return extp;
83} 74}
84 75
@@ -122,17 +113,17 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
122 113
123 i = 0; 114 i = 0;
124 115
125 /* Skip all erase regions which are ended before the start of 116 /* Skip all erase regions which are ended before the start of
126 the requested erase. Actually, to save on the calculations, 117 the requested erase. Actually, to save on the calculations,
127 we skip to the first erase region which starts after the 118 we skip to the first erase region which starts after the
128 start of the requested erase, and then go back one. 119 start of the requested erase, and then go back one.
129 */ 120 */
130 121
131 while (i < mtd->numeraseregions && ofs >= regions[i].offset) 122 while (i < mtd->numeraseregions && ofs >= regions[i].offset)
132 i++; 123 i++;
133 i--; 124 i--;
134 125
135 /* OK, now i is pointing at the erase region in which this 126 /* OK, now i is pointing at the erase region in which this
136 erase request starts. Check the start of the requested 127 erase request starts. Check the start of the requested
137 erase range is aligned with the erase size which is in 128 erase range is aligned with the erase size which is in
138 effect here. 129 effect here.
@@ -155,7 +146,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
155 the address actually falls 146 the address actually falls
156 */ 147 */
157 i--; 148 i--;
158 149
159 if ((ofs + len) & (regions[i].erasesize-1)) 150 if ((ofs + len) & (regions[i].erasesize-1))
160 return -EINVAL; 151 return -EINVAL;
161 152
@@ -168,7 +159,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
168 int size = regions[i].erasesize; 159 int size = regions[i].erasesize;
169 160
170 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk); 161 ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
171 162
172 if (ret) 163 if (ret)
173 return ret; 164 return ret;
174 165
@@ -182,7 +173,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
182 if (adr >> cfi->chipshift) { 173 if (adr >> cfi->chipshift) {
183 adr = 0; 174 adr = 0;
184 chipnum++; 175 chipnum++;
185 176
186 if (chipnum >= cfi->numchips) 177 if (chipnum >= cfi->numchips)
187 break; 178 break;
188 } 179 }
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index d7d739a108a..c2127840a18 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -41,7 +41,7 @@ static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
41 41
42 list_for_each(pos, &chip_drvs_list) { 42 list_for_each(pos, &chip_drvs_list) {
43 this = list_entry(pos, typeof(*this), list); 43 this = list_entry(pos, typeof(*this), list);
44 44
45 if (!strcmp(this->name, name)) { 45 if (!strcmp(this->name, name)) {
46 ret = this; 46 ret = this;
47 break; 47 break;
@@ -73,7 +73,7 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map)
73 73
74 ret = drv->probe(map); 74 ret = drv->probe(map);
75 75
76 /* We decrease the use count here. It may have been a 76 /* We decrease the use count here. It may have been a
77 probe-only module, which is no longer required from this 77 probe-only module, which is no longer required from this
78 point, having given us a handle on (and increased the use 78 point, having given us a handle on (and increased the use
79 count of) the actual driver code. 79 count of) the actual driver code.
@@ -82,7 +82,7 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map)
82 82
83 if (ret) 83 if (ret)
84 return ret; 84 return ret;
85 85
86 return NULL; 86 return NULL;
87} 87}
88/* 88/*
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index e1a5b76596c..77303ce5dcf 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -25,7 +25,7 @@ struct fwh_xxlock_thunk {
25 * so this code has not been tested with interleaved chips, 25 * so this code has not been tested with interleaved chips,
26 * and will likely fail in that context. 26 * and will likely fail in that context.
27 */ 27 */
28static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, 28static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
29 unsigned long adr, int len, void *thunk) 29 unsigned long adr, int len, void *thunk)
30{ 30{
31 struct cfi_private *cfi = map->fldrv_priv; 31 struct cfi_private *cfi = map->fldrv_priv;
@@ -44,7 +44,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
44 * - on 64k boundaries and 44 * - on 64k boundaries and
45 * - bit 1 set high 45 * - bit 1 set high
46 * - block lock registers are 4MiB lower - overflow subtract (danger) 46 * - block lock registers are 4MiB lower - overflow subtract (danger)
47 * 47 *
48 * The address manipulation is first done on the logical address 48 * The address manipulation is first done on the logical address
49 * which is 0 at the start of the chip, and then the offset of 49 * which is 0 at the start of the chip, and then the offset of
50 * the individual chip is added to it. Any other order a weird 50 * the individual chip is added to it. Any other order a weird
@@ -93,7 +93,7 @@ static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
93 93
94 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len, 94 ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
95 (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK); 95 (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
96 96
97 return ret; 97 return ret;
98} 98}
99 99
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index dc065b22f79..41bd59d20d8 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -2,7 +2,7 @@
2 * Routines common to all CFI-type probes. 2 * Routines common to all CFI-type probes.
3 * (C) 2001-2003 Red Hat, Inc. 3 * (C) 2001-2003 Red Hat, Inc.
4 * GPL'd 4 * GPL'd
5 * $Id: gen_probe.c,v 1.22 2005/01/24 23:49:50 rmk Exp $ 5 * $Id: gen_probe.c,v 1.24 2005/11/07 11:14:23 gleixner Exp $
6 */ 6 */
7 7
8#include <linux/kernel.h> 8#include <linux/kernel.h>
@@ -26,7 +26,7 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
26 26
27 /* First probe the map to see if we have CFI stuff there. */ 27 /* First probe the map to see if we have CFI stuff there. */
28 cfi = genprobe_ident_chips(map, cp); 28 cfi = genprobe_ident_chips(map, cp);
29 29
30 if (!cfi) 30 if (!cfi)
31 return NULL; 31 return NULL;
32 32
@@ -36,12 +36,12 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
36 mtd = check_cmd_set(map, 1); /* First the primary cmdset */ 36 mtd = check_cmd_set(map, 1); /* First the primary cmdset */
37 if (!mtd) 37 if (!mtd)
38 mtd = check_cmd_set(map, 0); /* Then the secondary */ 38 mtd = check_cmd_set(map, 0); /* Then the secondary */
39 39
40 if (mtd) 40 if (mtd)
41 return mtd; 41 return mtd;
42 42
43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n"); 43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
44 44
45 kfree(cfi->cfiq); 45 kfree(cfi->cfiq);
46 kfree(cfi); 46 kfree(cfi);
47 map->fldrv_priv = NULL; 47 map->fldrv_priv = NULL;
@@ -60,14 +60,14 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
60 60
61 memset(&cfi, 0, sizeof(cfi)); 61 memset(&cfi, 0, sizeof(cfi));
62 62
63 /* Call the probetype-specific code with all permutations of 63 /* Call the probetype-specific code with all permutations of
64 interleave and device type, etc. */ 64 interleave and device type, etc. */
65 if (!genprobe_new_chip(map, cp, &cfi)) { 65 if (!genprobe_new_chip(map, cp, &cfi)) {
66 /* The probe didn't like it */ 66 /* The probe didn't like it */
67 printk(KERN_DEBUG "%s: Found no %s device at location zero\n", 67 printk(KERN_DEBUG "%s: Found no %s device at location zero\n",
68 cp->name, map->name); 68 cp->name, map->name);
69 return NULL; 69 return NULL;
70 } 70 }
71 71
72#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD 72#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
73 probe routines won't ever return a broken CFI structure anyway, 73 probe routines won't ever return a broken CFI structure anyway,
@@ -92,13 +92,13 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
92 } else { 92 } else {
93 BUG(); 93 BUG();
94 } 94 }
95 95
96 cfi.numchips = 1; 96 cfi.numchips = 1;
97 97
98 /* 98 /*
99 * Allocate memory for bitmap of valid chips. 99 * Allocate memory for bitmap of valid chips.
100 * Align bitmap storage size to full byte. 100 * Align bitmap storage size to full byte.
101 */ 101 */
102 max_chips = map->size >> cfi.chipshift; 102 max_chips = map->size >> cfi.chipshift;
103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0); 103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);
104 chip_map = kmalloc(mapsize, GFP_KERNEL); 104 chip_map = kmalloc(mapsize, GFP_KERNEL);
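The bitmap allocated above needs one bit per possible chip position in the map, rounded up to whole bytes; the (max_chips / 8) + ((max_chips % 8) ? 1 : 0) expression is a hand-rolled round-up division. A sketch with assumed sizes:

#include <stdio.h>

int main(void)
{
    unsigned long map_size = 8UL << 20;  /* 8 MiB mapping window (assumed) */
    unsigned chipshift     = 21;         /* 2 MiB per chip (assumed) */

    unsigned long max_chips = map_size >> chipshift;             /* 4 chips */
    unsigned long mapsize   = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);

    printf("max_chips = %lu, bitmap bytes = %lu\n", max_chips, mapsize);
    return 0;
}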
@@ -122,7 +122,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
122 } 122 }
123 123
124 /* 124 /*
125 * Now allocate the space for the structures we need to return to 125 * Now allocate the space for the structures we need to return to
126 * our caller, and copy the appropriate data into them. 126 * our caller, and copy the appropriate data into them.
127 */ 127 */
128 128
@@ -154,7 +154,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
154 return retcfi; 154 return retcfi;
155} 155}
156 156
157 157
158static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp, 158static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
159 struct cfi_private *cfi) 159 struct cfi_private *cfi)
160{ 160{
@@ -189,7 +189,7 @@ extern cfi_cmdset_fn_t cfi_cmdset_0001;
189extern cfi_cmdset_fn_t cfi_cmdset_0002; 189extern cfi_cmdset_fn_t cfi_cmdset_0002;
190extern cfi_cmdset_fn_t cfi_cmdset_0020; 190extern cfi_cmdset_fn_t cfi_cmdset_0020;
191 191
192static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map, 192static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
193 int primary) 193 int primary)
194{ 194{
195 struct cfi_private *cfi = map->fldrv_priv; 195 struct cfi_private *cfi = map->fldrv_priv;
@@ -199,7 +199,7 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
199 cfi_cmdset_fn_t *probe_function; 199 cfi_cmdset_fn_t *probe_function;
200 200
201 sprintf(probename, "cfi_cmdset_%4.4X", type); 201 sprintf(probename, "cfi_cmdset_%4.4X", type);
202 202
203 probe_function = inter_module_get_request(probename, probename); 203 probe_function = inter_module_get_request(probename, probename);
204 204
205 if (probe_function) { 205 if (probe_function) {
@@ -221,7 +221,7 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
221{ 221{
222 struct cfi_private *cfi = map->fldrv_priv; 222 struct cfi_private *cfi = map->fldrv_priv;
223 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; 223 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
224 224
225 if (type == P_ID_NONE || type == P_ID_RESERVED) 225 if (type == P_ID_NONE || type == P_ID_RESERVED)
226 return NULL; 226 return NULL;
227 227
@@ -235,6 +235,7 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
235#ifdef CONFIG_MTD_CFI_INTELEXT 235#ifdef CONFIG_MTD_CFI_INTELEXT
236 case 0x0001: 236 case 0x0001:
237 case 0x0003: 237 case 0x0003:
238 case 0x0200:
238 return cfi_cmdset_0001(map, primary); 239 return cfi_cmdset_0001(map, primary);
239#endif 240#endif
240#ifdef CONFIG_MTD_CFI_AMDSTD 241#ifdef CONFIG_MTD_CFI_AMDSTD
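The one functional change in gen_probe.c is the new case 0x0200, which routes that vendor command-set ID to the existing Intel/Sharp handler alongside 0x0001 and 0x0003. A stripped-down sketch of the dispatch shape — the placeholder handlers stand in for the real cfi_cmdset_XXXX entry points, and the 0x0002/0x0020 pairings are the usual ones implied by the externs above rather than shown in this hunk:

#include <stdio.h>

static const char *cmdset_handler(unsigned type)
{
    switch (type) {
    case 0x0001:
    case 0x0003:
    case 0x0200:                 /* newly routed to the same handler */
        return "cfi_cmdset_0001 (Intel/Sharp)";
    case 0x0002:
        return "cfi_cmdset_0002 (AMD/Fujitsu)";
    case 0x0020:
        return "cfi_cmdset_0020 (ST Advanced)";
    default:
        return "unknown command set";
    }
}

int main(void)
{
    printf("0x0200 -> %s\n", cmdset_handler(0x0200));
    return 0;
}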
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
index 4f6778f3ee3..c40b48dabed 100644
--- a/drivers/mtd/chips/jedec.c
+++ b/drivers/mtd/chips/jedec.c
@@ -1,6 +1,6 @@
1 1
2/* JEDEC Flash Interface. 2/* JEDEC Flash Interface.
3 * This is an older type of interface for self programming flash. It is 3 * This is an older type of interface for self programming flash. It is
4 * commonly used in older AMD chips and is obsolete compared with CFI. 4 * commonly used in older AMD chips and is obsolete compared with CFI.
5 * It is called JEDEC because the JEDEC association distributes the ID codes 5 * It is called JEDEC because the JEDEC association distributes the ID codes
6 * for the chips. 6 * for the chips.
@@ -88,9 +88,9 @@ static const struct JEDECTable JEDEC_table[] = {
88 88
89static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id); 89static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
90static void jedec_sync(struct mtd_info *mtd) {}; 90static void jedec_sync(struct mtd_info *mtd) {};
91static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len, 91static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
92 size_t *retlen, u_char *buf); 92 size_t *retlen, u_char *buf);
93static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len, 93static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
94 size_t *retlen, u_char *buf); 94 size_t *retlen, u_char *buf);
95 95
96static struct mtd_info *jedec_probe(struct map_info *map); 96static struct mtd_info *jedec_probe(struct map_info *map);
@@ -122,7 +122,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
122 122
123 memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private)); 123 memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
124 priv = (struct jedec_private *)&MTD[1]; 124 priv = (struct jedec_private *)&MTD[1];
125 125
126 my_bank_size = map->size; 126 my_bank_size = map->size;
127 127
128 if (map->size/my_bank_size > MAX_JEDEC_CHIPS) 128 if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
@@ -131,13 +131,13 @@ static struct mtd_info *jedec_probe(struct map_info *map)
131 kfree(MTD); 131 kfree(MTD);
132 return NULL; 132 return NULL;
133 } 133 }
134 134
135 for (Base = 0; Base < map->size; Base += my_bank_size) 135 for (Base = 0; Base < map->size; Base += my_bank_size)
136 { 136 {
137 // Perhaps zero could designate all tests? 137 // Perhaps zero could designate all tests?
138 if (map->buswidth == 0) 138 if (map->buswidth == 0)
139 map->buswidth = 1; 139 map->buswidth = 1;
140 140
141 if (map->buswidth == 1){ 141 if (map->buswidth == 1){
142 if (jedec_probe8(map,Base,priv) == 0) { 142 if (jedec_probe8(map,Base,priv) == 0) {
143 printk("did recognize jedec chip\n"); 143 printk("did recognize jedec chip\n");
@@ -150,7 +150,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
150 if (map->buswidth == 4) 150 if (map->buswidth == 4)
151 jedec_probe32(map,Base,priv); 151 jedec_probe32(map,Base,priv);
152 } 152 }
153 153
154 // Get the biggest sector size 154 // Get the biggest sector size
155 SectorSize = 0; 155 SectorSize = 0;
156 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 156 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
@@ -160,7 +160,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
160 if (priv->chips[I].sectorsize > SectorSize) 160 if (priv->chips[I].sectorsize > SectorSize)
161 SectorSize = priv->chips[I].sectorsize; 161 SectorSize = priv->chips[I].sectorsize;
162 } 162 }
163 163
164 // Quickly ensure that the other sector sizes are factors of the largest 164 // Quickly ensure that the other sector sizes are factors of the largest
165 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 165 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
166 { 166 {
@@ -169,9 +169,9 @@ static struct mtd_info *jedec_probe(struct map_info *map)
169 printk("mtd: Failed. Device has incompatible mixed sector sizes\n"); 169 printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
170 kfree(MTD); 170 kfree(MTD);
171 return NULL; 171 return NULL;
172 } 172 }
173 } 173 }
174 174
175 /* Generate a part name that includes the number of different chips and 175 /* Generate a part name that includes the number of different chips and
176 other configuration information */ 176 other configuration information */
177 count = 1; 177 count = 1;
@@ -181,13 +181,13 @@ static struct mtd_info *jedec_probe(struct map_info *map)
181 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 181 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
182 { 182 {
183 const struct JEDECTable *JEDEC; 183 const struct JEDECTable *JEDEC;
184 184
185 if (priv->chips[I+1].jedec == priv->chips[I].jedec) 185 if (priv->chips[I+1].jedec == priv->chips[I].jedec)
186 { 186 {
187 count++; 187 count++;
188 continue; 188 continue;
189 } 189 }
190 190
191 // Locate the chip in the jedec table 191 // Locate the chip in the jedec table
192 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec); 192 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
193 if (JEDEC == 0) 193 if (JEDEC == 0)
@@ -196,11 +196,11 @@ static struct mtd_info *jedec_probe(struct map_info *map)
196 kfree(MTD); 196 kfree(MTD);
197 return NULL; 197 return NULL;
198 } 198 }
199 199
200 if (Uniq != 0) 200 if (Uniq != 0)
201 strcat(Part,","); 201 strcat(Part,",");
202 Uniq++; 202 Uniq++;
203 203
204 if (count != 1) 204 if (count != 1)
205 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name); 205 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
206 else 206 else
@@ -208,7 +208,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
208 if (strlen(Part) > sizeof(Part)*2/3) 208 if (strlen(Part) > sizeof(Part)*2/3)
209 break; 209 break;
210 count = 1; 210 count = 1;
211 } 211 }
212 212
213 /* Determine if the chips are organized in a linear fashion, or if there 213 /* Determine if the chips are organized in a linear fashion, or if there
214 are empty banks. Note, the last bank does not count here, only the 214 are empty banks. Note, the last bank does not count here, only the
@@ -233,7 +233,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
233 { 233 {
234 if (priv->bank_fill[I] != my_bank_size) 234 if (priv->bank_fill[I] != my_bank_size)
235 priv->is_banked = 1; 235 priv->is_banked = 1;
236 236
237 /* This even could be eliminated, but new de-optimized read/write 237 /* This even could be eliminated, but new de-optimized read/write
238 functions have to be written */ 238 functions have to be written */
239 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]); 239 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
@@ -242,7 +242,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
242 printk("mtd: Failed. Cannot handle unsymmetric banking\n"); 242 printk("mtd: Failed. Cannot handle unsymmetric banking\n");
243 kfree(MTD); 243 kfree(MTD);
244 return NULL; 244 return NULL;
245 } 245 }
246 } 246 }
247 } 247 }
248 } 248 }
@@ -250,7 +250,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
250 strcat(Part,", banked"); 250 strcat(Part,", banked");
251 251
252 // printk("Part: '%s'\n",Part); 252 // printk("Part: '%s'\n",Part);
253 253
254 memset(MTD,0,sizeof(*MTD)); 254 memset(MTD,0,sizeof(*MTD));
255 // strlcpy(MTD->name,Part,sizeof(MTD->name)); 255 // strlcpy(MTD->name,Part,sizeof(MTD->name));
256 MTD->name = map->name; 256 MTD->name = map->name;
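As a side note on the name-building loop above, here is a standalone sketch (plain C, made-up chip name) of what the generated Part string ends up looking like for a run of identical chips in a banked layout. The single-chip branch of the original loop falls outside the hunks shown, so its format is assumed here.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char part[200] = "";
        int count = 4;                   /* four identical chips in a row     */
        const char *name = "SomeChip";   /* hypothetical JEDEC table name     */

        if (count != 1)
                sprintf(part + strlen(part), "%x*[%s]", count, name);
        else
                sprintf(part + strlen(part), "%s", name);  /* assumed format  */
        strcat(part, ", banked");        /* appended when is_banked is set    */

        printf("%s\n", part);            /* prints: 4*[SomeChip], banked      */
        return 0;
}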
@@ -291,7 +291,7 @@ static int checkparity(u_char C)
291 291
292/* Take an array of JEDEC numbers that represent interleaved flash chips 292/* Take an array of JEDEC numbers that represent interleaved flash chips
293 and process them. Check to make sure they are good JEDEC numbers, look 293 and process them. Check to make sure they are good JEDEC numbers, look
294 them up and then add them to the chip list */ 294 them up and then add them to the chip list */
295static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count, 295static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
296 unsigned long base,struct jedec_private *priv) 296 unsigned long base,struct jedec_private *priv)
297{ 297{
@@ -306,16 +306,16 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
306 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0) 306 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
307 return 0; 307 return 0;
308 } 308 }
309 309
310 // Finally, just make sure all the chip sizes are the same 310 // Finally, just make sure all the chip sizes are the same
311 JEDEC = jedec_idtoinf(Mfg[0],Id[0]); 311 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
312 312
313 if (JEDEC == 0) 313 if (JEDEC == 0)
314 { 314 {
 315 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Id[0]); 315 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Id[0]);
316 return 0; 316 return 0;
317 } 317 }
318 318
319 Size = JEDEC->size; 319 Size = JEDEC->size;
320 SectorSize = JEDEC->sectorsize; 320 SectorSize = JEDEC->sectorsize;
321 for (I = 0; I != Count; I++) 321 for (I = 0; I != Count; I++)
@@ -331,7 +331,7 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
331 { 331 {
 332 printk("mtd: Failed. Interleaved flash does not have matching characteristics\n"); 332 printk("mtd: Failed. Interleaved flash does not have matching characteristics\n");
333 return 0; 333 return 0;
334 } 334 }
335 } 335 }
336 336
337 // Load the Chips 337 // Load the Chips
@@ -345,13 +345,13 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
345 { 345 {
346 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n"); 346 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
347 return 0; 347 return 0;
348 } 348 }
349 349
350 // Add them to the table 350 // Add them to the table
351 for (J = 0; J != Count; J++) 351 for (J = 0; J != Count; J++)
352 { 352 {
353 unsigned long Bank; 353 unsigned long Bank;
354 354
355 JEDEC = jedec_idtoinf(Mfg[J],Id[J]); 355 JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
356 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J]; 356 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
357 priv->chips[I].size = JEDEC->size; 357 priv->chips[I].size = JEDEC->size;
@@ -364,17 +364,17 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
364 // log2 n :| 364 // log2 n :|
365 priv->chips[I].addrshift = 0; 365 priv->chips[I].addrshift = 0;
366 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++); 366 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
367 367
368 // Determine how filled this bank is. 368 // Determine how filled this bank is.
369 Bank = base & (~(my_bank_size-1)); 369 Bank = base & (~(my_bank_size-1));
370 if (priv->bank_fill[Bank/my_bank_size] < base + 370 if (priv->bank_fill[Bank/my_bank_size] < base +
371 (JEDEC->size << priv->chips[I].addrshift) - Bank) 371 (JEDEC->size << priv->chips[I].addrshift) - Bank)
372 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank; 372 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
373 I++; 373 I++;
374 } 374 }
375 375
376 priv->size += priv->chips[I-1].size*Count; 376 priv->size += priv->chips[I-1].size*Count;
377 377
378 return priv->chips[I-1].size; 378 return priv->chips[I-1].size;
379} 379}
380 380
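Two small helpers, written as a standalone sketch rather than driver code, may make the checks above easier to follow: classic JEDEC manufacturer IDs carry odd parity, which is presumably what checkparity() relies on to reject garbage reads (the driver applies the test to both the manufacturer and the device byte), and addrshift is simply log2 of the interleave count, computed by repeated shifting just like the "log2 n" loop in handle_jedecs().

#include <stdio.h>

/* Plausibility test for a JEDEC ID byte: odd number of set bits. */
static int odd_parity(unsigned char c)
{
        int bits = 0;
        for (; c; c >>= 1)
                bits += c & 1;
        return bits & 1;                /* 1 means the byte is plausible */
}

/* addrshift is log2 of the interleave count, found by shifting. */
static int addrshift_for(unsigned long count)
{
        int shift = 0;
        for (; count != 1; count >>= 1)
                shift++;
        return shift;
}

int main(void)
{
        printf("parity(0x01)=%d parity(0x03)=%d\n", odd_parity(0x01), odd_parity(0x03));
        printf("addrshift for 4-way interleave: %d\n", addrshift_for(4));
        return 0;
}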
@@ -392,7 +392,7 @@ static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
392// Look for flash using an 8 bit bus interface 392// Look for flash using an 8 bit bus interface
393static int jedec_probe8(struct map_info *map,unsigned long base, 393static int jedec_probe8(struct map_info *map,unsigned long base,
394 struct jedec_private *priv) 394 struct jedec_private *priv)
395{ 395{
396 #define flread(x) map_read8(map,base+x) 396 #define flread(x) map_read8(map,base+x)
397 #define flwrite(v,x) map_write8(map,v,base+x) 397 #define flwrite(v,x) map_write8(map,v,base+x)
398 398
@@ -410,20 +410,20 @@ static int jedec_probe8(struct map_info *map,unsigned long base,
410 OldVal = flread(base); 410 OldVal = flread(base);
411 for (I = 0; OldVal != flread(base) && I < 10000; I++) 411 for (I = 0; OldVal != flread(base) && I < 10000; I++)
412 OldVal = flread(base); 412 OldVal = flread(base);
413 413
414 // Reset the chip 414 // Reset the chip
415 flwrite(Reset,0x555); 415 flwrite(Reset,0x555);
416 416
417 // Send the sequence 417 // Send the sequence
418 flwrite(AutoSel1,0x555); 418 flwrite(AutoSel1,0x555);
419 flwrite(AutoSel2,0x2AA); 419 flwrite(AutoSel2,0x2AA);
420 flwrite(AutoSel3,0x555); 420 flwrite(AutoSel3,0x555);
421 421
422 // Get the JEDEC numbers 422 // Get the JEDEC numbers
423 Mfg[0] = flread(0); 423 Mfg[0] = flread(0);
424 Id[0] = flread(1); 424 Id[0] = flread(1);
425 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]); 425 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
426 426
427 Size = handle_jedecs(map,Mfg,Id,1,base,priv); 427 Size = handle_jedecs(map,Mfg,Id,1,base,priv);
428 // printk("handle_jedecs Size is %x\n",(unsigned int)Size); 428 // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
429 if (Size == 0) 429 if (Size == 0)
@@ -431,13 +431,13 @@ static int jedec_probe8(struct map_info *map,unsigned long base,
431 flwrite(Reset,0x555); 431 flwrite(Reset,0x555);
432 return 0; 432 return 0;
433 } 433 }
434 434
435 435
436 // Reset. 436 // Reset.
437 flwrite(Reset,0x555); 437 flwrite(Reset,0x555);
438 438
439 return 1; 439 return 1;
440 440
441 #undef flread 441 #undef flread
442 #undef flwrite 442 #undef flwrite
443} 443}
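For reference, a standalone model of the 8-bit autoselect probe that jedec_probe8() performs above. The AutoSel1/AutoSel2/AutoSel3 constants are defined outside the hunks shown, so the conventional AMD values (0xAA, 0x55, 0x90) are assumed; the reset value 0xF0 matches the writes visible elsewhere in this file. read8/write8 and the fake bus stand in for map_read8/map_write8 so the sketch compiles on its own.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t (*read8_fn)(unsigned long addr);
typedef void (*write8_fn)(uint8_t val, unsigned long addr);

static void jedec_autoselect_id(read8_fn read8, write8_fn write8,
                                unsigned long base, uint8_t *mfg, uint8_t *id)
{
        write8(0xF0, base + 0x555);     /* reset to read-array mode       */
        write8(0xAA, base + 0x555);     /* unlock cycle 1                 */
        write8(0x55, base + 0x2AA);     /* unlock cycle 2                 */
        write8(0x90, base + 0x555);     /* autoselect (assumed AutoSel3)  */
        *mfg = read8(base + 0);         /* manufacturer code              */
        *id  = read8(base + 1);         /* device code                    */
        write8(0xF0, base + 0x555);     /* back to read-array mode        */
}

static uint8_t fake_bus[0x1000];        /* tiny in-memory stand-in        */
static uint8_t fake_read8(unsigned long a) { return fake_bus[a & 0xFFF]; }
static void fake_write8(uint8_t v, unsigned long a) { fake_bus[a & 0xFFF] = v; }

int main(void)
{
        uint8_t mfg, id;

        fake_bus[0] = 0x01;             /* made-up IDs, already "latched" */
        fake_bus[1] = 0xA4;
        jedec_autoselect_id(fake_read8, fake_write8, 0, &mfg, &id);
        printf("mfg=%02x id=%02x\n", mfg, id);
        return 0;
}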
@@ -470,17 +470,17 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
470 OldVal = flread(base); 470 OldVal = flread(base);
471 for (I = 0; OldVal != flread(base) && I < 10000; I++) 471 for (I = 0; OldVal != flread(base) && I < 10000; I++)
472 OldVal = flread(base); 472 OldVal = flread(base);
473 473
474 // Reset the chip 474 // Reset the chip
475 flwrite(Reset,0x555); 475 flwrite(Reset,0x555);
476 476
477 // Send the sequence 477 // Send the sequence
478 flwrite(AutoSel1,0x555); 478 flwrite(AutoSel1,0x555);
479 flwrite(AutoSel2,0x2AA); 479 flwrite(AutoSel2,0x2AA);
480 flwrite(AutoSel3,0x555); 480 flwrite(AutoSel3,0x555);
481 481
482 // Test #1, JEDEC numbers are readable from 0x??00/0x??01 482 // Test #1, JEDEC numbers are readable from 0x??00/0x??01
483 if (flread(0) != flread(0x100) || 483 if (flread(0) != flread(0x100) ||
484 flread(1) != flread(0x101)) 484 flread(1) != flread(0x101))
485 { 485 {
486 flwrite(Reset,0x555); 486 flwrite(Reset,0x555);
@@ -494,14 +494,14 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
494 OldVal = flread(1); 494 OldVal = flread(1);
495 for (I = 0; I != 4; I++) 495 for (I = 0; I != 4; I++)
496 Id[I] = (OldVal >> (I*8)); 496 Id[I] = (OldVal >> (I*8));
497 497
498 Size = handle_jedecs(map,Mfg,Id,4,base,priv); 498 Size = handle_jedecs(map,Mfg,Id,4,base,priv);
499 if (Size == 0) 499 if (Size == 0)
500 { 500 {
501 flwrite(Reset,0x555); 501 flwrite(Reset,0x555);
502 return 0; 502 return 0;
503 } 503 }
504 504
505 /* Check if there is address wrap around within a single bank, if this 505 /* Check if there is address wrap around within a single bank, if this
506 returns JEDEC numbers then we assume that it is wrap around. Notice 506 returns JEDEC numbers then we assume that it is wrap around. Notice
507 we call this routine with the JEDEC return still enabled, if two or 507 we call this routine with the JEDEC return still enabled, if two or
@@ -519,27 +519,27 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
519 519
520 // Reset. 520 // Reset.
521 flwrite(0xF0F0F0F0,0x555); 521 flwrite(0xF0F0F0F0,0x555);
522 522
523 return 1; 523 return 1;
524 524
525 #undef flread 525 #undef flread
526 #undef flwrite 526 #undef flwrite
527} 527}
528 528
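The 32-bit probe above reads one word per query address and relies on the fact that, with four interleaved x8 chips, each byte lane belongs to a different chip. A standalone model of that byte split, with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mfg_word = 0x01010101;         /* four identical vendors    */
        uint32_t id_word  = 0xA4A4A4A4;         /* four identical device IDs */
        uint8_t mfg[4], id[4];

        /* Peel the byte lanes apart, one per interleaved chip. */
        for (int i = 0; i != 4; i++) {
                mfg[i] = (uint8_t)(mfg_word >> (i * 8));
                id[i]  = (uint8_t)(id_word  >> (i * 8));
        }
        for (int i = 0; i != 4; i++)
                printf("chip %d: mfg=%02x id=%02x\n", i, mfg[i], id[i]);
        return 0;
}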
529/* Linear read. */ 529/* Linear read. */
530static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len, 530static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
531 size_t *retlen, u_char *buf) 531 size_t *retlen, u_char *buf)
532{ 532{
533 struct map_info *map = mtd->priv; 533 struct map_info *map = mtd->priv;
534 534
535 map_copy_from(map, buf, from, len); 535 map_copy_from(map, buf, from, len);
536 *retlen = len; 536 *retlen = len;
537 return 0; 537 return 0;
538} 538}
539 539
540/* Banked read. Take special care to jump past the holes in the bank 540/* Banked read. Take special care to jump past the holes in the bank
 541 mapping. This version assumes symmetry in the holes. */ 541 mapping. This version assumes symmetry in the holes. */
542static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len, 542static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
543 size_t *retlen, u_char *buf) 543 size_t *retlen, u_char *buf)
544{ 544{
545 struct map_info *map = mtd->priv; 545 struct map_info *map = mtd->priv;
@@ -555,17 +555,17 @@ static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
555 if (priv->bank_fill[0] - offset < len) 555 if (priv->bank_fill[0] - offset < len)
556 get = priv->bank_fill[0] - offset; 556 get = priv->bank_fill[0] - offset;
557 557
558 bank /= priv->bank_fill[0]; 558 bank /= priv->bank_fill[0];
559 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get); 559 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
560 560
561 len -= get; 561 len -= get;
562 *retlen += get; 562 *retlen += get;
563 from += get; 563 from += get;
564 } 564 }
565 return 0; 565 return 0;
566} 566}
567 567
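The banked read above has to translate a linear logical offset into a device offset that skips the unpopulated tail of each bank. A standalone model of that translation; the bank stride and fill value below are made-up illustrative numbers, while the driver takes them from its own bank constant and priv->bank_fill[0].

#include <stdio.h>

#define MY_BANK_SIZE 0x2000000UL        /* assumed bank stride */

static unsigned long banked_to_device(unsigned long from, unsigned long fill)
{
        unsigned long bank   = from / fill;     /* which populated bank     */
        unsigned long offset = from % fill;     /* offset inside that bank  */
        return bank * MY_BANK_SIZE + offset;    /* skip the unpopulated gap */
}

int main(void)
{
        unsigned long fill = 0x400000;          /* 4 MiB of flash per bank  */
        printf("0x3fffff -> %#lx\n", banked_to_device(0x3FFFFF, fill));
        printf("0x400000 -> %#lx\n", banked_to_device(0x400000, fill));
        return 0;
}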
568/* Pass the flags value that the flash returned before it re-entered read 568/* Pass the flags value that the flash returned before it re-entered read
569 mode. */ 569 mode. */
570static void jedec_flash_failed(unsigned char code) 570static void jedec_flash_failed(unsigned char code)
571{ 571{
@@ -579,17 +579,17 @@ static void jedec_flash_failed(unsigned char code)
579 printk("mtd: Programming didn't take\n"); 579 printk("mtd: Programming didn't take\n");
580} 580}
581 581
582/* This uses the erasure function described in the AMD Flash Handbook; 582/* This uses the erasure function described in the AMD Flash Handbook;
 583 it will work for flashes with a fixed sector size only. Flashes with 583 it will work for flashes with a fixed sector size only. Flashes with
 584 a selection of sector sizes (i.e. the AMD Am29F800B) will need a different 584 a selection of sector sizes (i.e. the AMD Am29F800B) will need a different
 585 routine. This routine tries to parallelize erasing multiple chips/sectors 585 routine. This routine tries to parallelize erasing multiple chips/sectors
586 where possible */ 586 where possible */
587static int flash_erase(struct mtd_info *mtd, struct erase_info *instr) 587static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
588{ 588{
589 // Does IO to the currently selected chip 589 // Does IO to the currently selected chip
590 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift)) 590 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
591 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift)) 591 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
592 592
593 unsigned long Time = 0; 593 unsigned long Time = 0;
594 unsigned long NoTime = 0; 594 unsigned long NoTime = 0;
595 unsigned long start = instr->addr, len = instr->len; 595 unsigned long start = instr->addr, len = instr->len;
@@ -603,7 +603,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
603 (len % mtd->erasesize) != 0 || 603 (len % mtd->erasesize) != 0 ||
604 (len/mtd->erasesize) == 0) 604 (len/mtd->erasesize) == 0)
605 return -EINVAL; 605 return -EINVAL;
606 606
607 jedec_flash_chip_scan(priv,start,len); 607 jedec_flash_chip_scan(priv,start,len);
608 608
609 // Start the erase sequence on each chip 609 // Start the erase sequence on each chip
@@ -611,16 +611,16 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
611 { 611 {
612 unsigned long off; 612 unsigned long off;
613 struct jedec_flash_chip *chip = priv->chips + I; 613 struct jedec_flash_chip *chip = priv->chips + I;
614 614
615 if (chip->length == 0) 615 if (chip->length == 0)
616 continue; 616 continue;
617 617
618 if (chip->start + chip->length > chip->size) 618 if (chip->start + chip->length > chip->size)
619 { 619 {
620 printk("DIE\n"); 620 printk("DIE\n");
621 return -EIO; 621 return -EIO;
622 } 622 }
623 623
624 flwrite(0xF0,chip->start + 0x555); 624 flwrite(0xF0,chip->start + 0x555);
625 flwrite(0xAA,chip->start + 0x555); 625 flwrite(0xAA,chip->start + 0x555);
626 flwrite(0x55,chip->start + 0x2AA); 626 flwrite(0x55,chip->start + 0x2AA);
@@ -628,8 +628,8 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
628 flwrite(0xAA,chip->start + 0x555); 628 flwrite(0xAA,chip->start + 0x555);
629 flwrite(0x55,chip->start + 0x2AA); 629 flwrite(0x55,chip->start + 0x2AA);
630 630
 631 /* Once we start selecting the erase sectors, the delay between each 631 /* Once we start selecting the erase sectors, the delay between each
632 command must not exceed 50us or it will immediately start erasing 632 command must not exceed 50us or it will immediately start erasing
633 and ignore the other sectors */ 633 and ignore the other sectors */
634 for (off = 0; off < len; off += chip->sectorsize) 634 for (off = 0; off < len; off += chip->sectorsize)
635 { 635 {
@@ -641,19 +641,19 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
641 { 641 {
642 printk("mtd: Ack! We timed out the erase timer!\n"); 642 printk("mtd: Ack! We timed out the erase timer!\n");
643 return -EIO; 643 return -EIO;
644 } 644 }
645 } 645 }
646 } 646 }
647 647
648 /* We could split this into a timer routine and return early, performing 648 /* We could split this into a timer routine and return early, performing
 649 background erasure. Maybe later if the need warrants */ 649 background erasure. Maybe later if the need warrants */
650 650
 651 /* Poll the flash for erasure completion; specs say this can take as long 651 /* Poll the flash for erasure completion; specs say this can take as long
 652 as 480 seconds to do all the sectors (for a 2 meg flash). 652 as 480 seconds to do all the sectors (for a 2 meg flash).
 653 Erasure time is dependent on chip age, temperature and wear. */ 653 Erasure time is dependent on chip age, temperature and wear. */
654 654
 655 /* This being a generic routine assumes a 32-bit bus. It does read32s 655 /* This being a generic routine assumes a 32-bit bus. It does read32s
 656 and bundles interleaved chips into the same grouping. This will work 656 and bundles interleaved chips into the same grouping. This will work
657 for all bus widths */ 657 for all bus widths */
658 Time = 0; 658 Time = 0;
659 NoTime = 0; 659 NoTime = 0;
@@ -664,20 +664,20 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
664 unsigned todo[4] = {0,0,0,0}; 664 unsigned todo[4] = {0,0,0,0};
665 unsigned todo_left = 0; 665 unsigned todo_left = 0;
666 unsigned J; 666 unsigned J;
667 667
668 if (chip->length == 0) 668 if (chip->length == 0)
669 continue; 669 continue;
670 670
 671 /* Find all chips in this data line; realistically this is all 671 /* Find all chips in this data line; realistically this is all
 672 or nothing up to the interleave count */ 672 or nothing up to the interleave count */
673 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++) 673 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
674 { 674 {
675 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) == 675 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
676 (chip->base & (~((1<<chip->addrshift)-1)))) 676 (chip->base & (~((1<<chip->addrshift)-1))))
677 { 677 {
678 todo_left++; 678 todo_left++;
679 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1; 679 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
680 } 680 }
681 } 681 }
682 682
683 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1], 683 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
@@ -687,7 +687,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
687 { 687 {
688 __u32 Last[4]; 688 __u32 Last[4];
689 unsigned long Count = 0; 689 unsigned long Count = 0;
690 690
 691 /* During erase, bit 7 is held low and bit 6 toggles; we watch this: 691 /* During erase, bit 7 is held low and bit 6 toggles; we watch this:
 692 should it stop toggling or go high, then the erase is completed, 692 should it stop toggling or go high, then the erase is completed,
693 or this is not really flash ;> */ 693 or this is not really flash ;> */
@@ -718,23 +718,23 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
718 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF; 718 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
719 if (todo[J] == 0) 719 if (todo[J] == 0)
720 continue; 720 continue;
721 721
722 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2) 722 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
723 { 723 {
724// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2); 724// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
725 continue; 725 continue;
726 } 726 }
727 727
728 if (Byte1 == Byte2) 728 if (Byte1 == Byte2)
729 { 729 {
730 jedec_flash_failed(Byte3); 730 jedec_flash_failed(Byte3);
731 return -EIO; 731 return -EIO;
732 } 732 }
733 733
734 todo[J] = 0; 734 todo[J] = 0;
735 todo_left--; 735 todo_left--;
736 } 736 }
737 737
738/* if (NoTime == 0) 738/* if (NoTime == 0)
739 Time += HZ/10 - schedule_timeout(HZ/10);*/ 739 Time += HZ/10 - schedule_timeout(HZ/10);*/
740 NoTime = 0; 740 NoTime = 0;
@@ -751,7 +751,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
751 break; 751 break;
752 } 752 }
753 Count++; 753 Count++;
754 754
755/* // Count time, max of 15s per sector (according to AMD) 755/* // Count time, max of 15s per sector (according to AMD)
756 if (Time > 15*len/mtd->erasesize*HZ) 756 if (Time > 15*len/mtd->erasesize*HZ)
757 { 757 {
@@ -759,38 +759,38 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
759 return -EIO; 759 return -EIO;
760 } */ 760 } */
761 } 761 }
762 762
763 // Skip to the next chip if we used chip erase 763 // Skip to the next chip if we used chip erase
764 if (chip->length == chip->size) 764 if (chip->length == chip->size)
765 off = chip->size; 765 off = chip->size;
766 else 766 else
767 off += chip->sectorsize; 767 off += chip->sectorsize;
768 768
769 if (off >= chip->length) 769 if (off >= chip->length)
770 break; 770 break;
771 NoTime = 1; 771 NoTime = 1;
772 } 772 }
773 773
774 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++) 774 for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
775 { 775 {
776 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) == 776 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
777 (chip->base & (~((1<<chip->addrshift)-1)))) 777 (chip->base & (~((1<<chip->addrshift)-1))))
778 priv->chips[J].length = 0; 778 priv->chips[J].length = 0;
779 } 779 }
780 } 780 }
781 781
782 //printk("done\n"); 782 //printk("done\n");
783 instr->state = MTD_ERASE_DONE; 783 instr->state = MTD_ERASE_DONE;
784 mtd_erase_callback(instr); 784 mtd_erase_callback(instr);
785 return 0; 785 return 0;
786 786
787 #undef flread 787 #undef flread
788 #undef flwrite 788 #undef flwrite
789} 789}
790 790
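The erase path above follows the standard AMD handshake: the 0xAA/0x55 unlock writes are visible in the hunks, while the erase-setup and per-sector select commands (conventionally 0x80 and 0x30) fall just outside them and are assumptions here. Per the comments, the driver then watches DQ7 and DQ6 of the status reads: while the erase runs, DQ7 reads low and DQ6 toggles between consecutive reads. A standalone model of that status rule (the principle only, not the driver's exact branch ordering):

#include <stdint.h>
#include <stdio.h>

enum erase_state { ERASE_BUSY, ERASE_DONE };

static enum erase_state amd_status(uint8_t byte1, uint8_t byte2)
{
        if ((byte1 & 0x80) == 0 && byte1 != byte2)
                return ERASE_BUSY;      /* DQ7 low and DQ6 still toggling */
        return ERASE_DONE;              /* stopped toggling, or DQ7 high  */
}

int main(void)
{
        /* Reads that differ only in DQ6, with DQ7 low: still erasing.   */
        printf("%d\n", amd_status(0x00, 0x40));        /* 0 = ERASE_BUSY */
        /* Identical reads of erased array data: the erase has finished. */
        printf("%d\n", amd_status(0xFF, 0xFF));        /* 1 = ERASE_DONE */
        return 0;
}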
791/* This is the simple flash writing function. It writes to every byte, in 791/* This is the simple flash writing function. It writes to every byte, in
792 sequence. It takes care of how to properly address the flash if 792 sequence. It takes care of how to properly address the flash if
 793 the flash is interleaved. It can only be used if all the chips in the 793 the flash is interleaved. It can only be used if all the chips in the
794 array are identical!*/ 794 array are identical!*/
795static int flash_write(struct mtd_info *mtd, loff_t start, size_t len, 795static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
796 size_t *retlen, const u_char *buf) 796 size_t *retlen, const u_char *buf)
@@ -800,25 +800,25 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
800 of addrshift (interleave index) and then adds the control register index. */ 800 of addrshift (interleave index) and then adds the control register index. */
801 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift)) 801 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
802 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift)) 802 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
803 803
804 struct map_info *map = mtd->priv; 804 struct map_info *map = mtd->priv;
805 struct jedec_private *priv = map->fldrv_priv; 805 struct jedec_private *priv = map->fldrv_priv;
806 unsigned long base; 806 unsigned long base;
807 unsigned long off; 807 unsigned long off;
808 size_t save_len = len; 808 size_t save_len = len;
809 809
810 if (start + len > mtd->size) 810 if (start + len > mtd->size)
811 return -EIO; 811 return -EIO;
812 812
813 //printk("Here"); 813 //printk("Here");
814 814
815 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len); 815 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
816 while (len != 0) 816 while (len != 0)
817 { 817 {
818 struct jedec_flash_chip *chip = priv->chips; 818 struct jedec_flash_chip *chip = priv->chips;
819 unsigned long bank; 819 unsigned long bank;
820 unsigned long boffset; 820 unsigned long boffset;
821 821
822 // Compute the base of the flash. 822 // Compute the base of the flash.
823 off = ((unsigned long)start) % (chip->size << chip->addrshift); 823 off = ((unsigned long)start) % (chip->size << chip->addrshift);
824 base = start - off; 824 base = start - off;
@@ -828,10 +828,10 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
828 boffset = base & (priv->bank_fill[0]-1); 828 boffset = base & (priv->bank_fill[0]-1);
829 bank = (bank/priv->bank_fill[0])*my_bank_size; 829 bank = (bank/priv->bank_fill[0])*my_bank_size;
830 base = bank + boffset; 830 base = bank + boffset;
831 831
 832 // printk("Flashing %X %X %X\n",base,chip->size,len); 832 // printk("Flashing %X %X %X\n",base,chip->size,len);
833 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift); 833 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
834 834
835 // Loop over this page 835 // Loop over this page
836 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++) 836 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
837 { 837 {
@@ -845,7 +845,7 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
845 } 845 }
846 if (((~oldbyte) & *buf) != 0) 846 if (((~oldbyte) & *buf) != 0)
847 printk("mtd: warn: Trying to set a 0 to a 1\n"); 847 printk("mtd: warn: Trying to set a 0 to a 1\n");
848 848
849 // Write 849 // Write
850 flwrite(0xAA,0x555); 850 flwrite(0xAA,0x555);
851 flwrite(0x55,0x2AA); 851 flwrite(0x55,0x2AA);
@@ -854,10 +854,10 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
854 Last[0] = map_read8(map,base + off); 854 Last[0] = map_read8(map,base + off);
855 Last[1] = map_read8(map,base + off); 855 Last[1] = map_read8(map,base + off);
856 Last[2] = map_read8(map,base + off); 856 Last[2] = map_read8(map,base + off);
857 857
858 /* Wait for the flash to finish the operation. We store the last 4 858 /* Wait for the flash to finish the operation. We store the last 4
859 status bytes that have been retrieved so we can determine why 859 status bytes that have been retrieved so we can determine why
860 it failed. The toggle bits keep toggling when there is a 860 it failed. The toggle bits keep toggling when there is a
861 failure */ 861 failure */
862 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && 862 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
863 Count < 10000; Count++) 863 Count < 10000; Count++)
@@ -866,7 +866,7 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
866 { 866 {
867 jedec_flash_failed(Last[(Count - 3) % 4]); 867 jedec_flash_failed(Last[(Count - 3) % 4]);
868 return -EIO; 868 return -EIO;
869 } 869 }
870 } 870 }
871 } 871 }
872 *retlen = save_len; 872 *retlen = save_len;
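flash_write() above uses the AMD byte-program handshake: the 0xAA/0x55 unlock writes are visible in the hunk, while the program command that follows (conventionally 0xA0 and then the data byte written to the target address) is outside it and is an assumption here. The one rule worth remembering is that NOR programming can only clear bits, which is exactly what the "Trying to set a 0 to a 1" warning above checks. A standalone sketch of that check:

#include <stdint.h>
#include <stdio.h>

/* True if `want` can be programmed over `old` without an erase. */
static int can_program(uint8_t old, uint8_t want)
{
        return ((~old) & want) == 0;    /* no bit would need to go 0 -> 1 */
}

int main(void)
{
        printf("0xff -> 0xa5: %s\n", can_program(0xFF, 0xA5) ? "ok" : "needs erase");
        printf("0xa5 -> 0xff: %s\n", can_program(0xA5, 0xFF) ? "ok" : "needs erase");
        return 0;
}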
@@ -885,24 +885,24 @@ static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start
885 // Zero the records 885 // Zero the records
886 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 886 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
887 priv->chips[I].start = priv->chips[I].length = 0; 887 priv->chips[I].start = priv->chips[I].length = 0;
888 888
889 // Intersect the region with each chip 889 // Intersect the region with each chip
890 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) 890 for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
891 { 891 {
892 struct jedec_flash_chip *chip = priv->chips + I; 892 struct jedec_flash_chip *chip = priv->chips + I;
893 unsigned long ByteStart; 893 unsigned long ByteStart;
894 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift); 894 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
895 895
896 // End is before this chip or the start is after it 896 // End is before this chip or the start is after it
897 if (start+len < chip->offset || 897 if (start+len < chip->offset ||
898 ChipEndByte - (1 << chip->addrshift) < start) 898 ChipEndByte - (1 << chip->addrshift) < start)
899 continue; 899 continue;
900 900
901 if (start < chip->offset) 901 if (start < chip->offset)
902 { 902 {
903 ByteStart = chip->offset; 903 ByteStart = chip->offset;
904 chip->start = 0; 904 chip->start = 0;
905 } 905 }
906 else 906 else
907 { 907 {
908 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift; 908 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
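jedec_flash_chip_scan() above clips the requested [start, start+len) byte range against each chip's window and stores the result in chip-local units. A standalone model of that clipping, with simplified field names and without the rounding the driver applies to chip->start:

#include <stdio.h>

struct chip_win {
        unsigned long offset;           /* byte offset of the chip in the map */
        unsigned long size;             /* chip size in chip-local bytes      */
        int addrshift;                  /* log2 of the interleave             */
};

static int clip_to_chip(const struct chip_win *c, unsigned long start,
                        unsigned long len, unsigned long *cstart,
                        unsigned long *clen)
{
        unsigned long end  = start + len;
        unsigned long cend = c->offset + (c->size << c->addrshift);
        unsigned long lo, hi;

        if (end <= c->offset || start >= cend)
                return 0;                       /* no overlap with this chip */

        lo = start > c->offset ? start : c->offset;
        hi = end < cend ? end : cend;
        *cstart = (lo - c->offset) >> c->addrshift;     /* chip-local units  */
        *clen   = (hi - lo) >> c->addrshift;
        return 1;
}

int main(void)
{
        struct chip_win c = { 0x80000, 0x80000, 1 };    /* made-up example   */
        unsigned long s, l;

        if (clip_to_chip(&c, 0x40000, 0x100000, &s, &l))
                printf("chip-local start=%#lx len=%#lx\n", s, l);
        return 0;
}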
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 30da428eb7b..edb306c03c0 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Common Flash Interface probe code. 2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd. 3 (C) 2000 Red Hat. GPL'd.
4 $Id: jedec_probe.c,v 1.63 2005/02/14 16:30:32 bjd Exp $ 4 $Id: jedec_probe.c,v 1.66 2005/11/07 11:14:23 gleixner Exp $
5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5) 5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
6 for the standard this probe goes back to. 6 for the standard this probe goes back to.
7 7
@@ -1719,7 +1719,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
1719 1719
1720static struct mtd_info *jedec_probe(struct map_info *map); 1720static struct mtd_info *jedec_probe(struct map_info *map);
1721 1721
1722static inline u32 jedec_read_mfr(struct map_info *map, __u32 base, 1722static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1723 struct cfi_private *cfi) 1723 struct cfi_private *cfi)
1724{ 1724{
1725 map_word result; 1725 map_word result;
@@ -1730,7 +1730,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
1730 return result.x[0] & mask; 1730 return result.x[0] & mask;
1731} 1731}
1732 1732
1733static inline u32 jedec_read_id(struct map_info *map, __u32 base, 1733static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1734 struct cfi_private *cfi) 1734 struct cfi_private *cfi)
1735{ 1735{
1736 map_word result; 1736 map_word result;
@@ -1741,7 +1741,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
1741 return result.x[0] & mask; 1741 return result.x[0] & mask;
1742} 1742}
1743 1743
1744static inline void jedec_reset(u32 base, struct map_info *map, 1744static inline void jedec_reset(u32 base, struct map_info *map,
1745 struct cfi_private *cfi) 1745 struct cfi_private *cfi)
1746{ 1746{
1747 /* Reset */ 1747 /* Reset */
@@ -1765,7 +1765,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
1765 * so ensure we're in read mode. Send both the Intel and the AMD command 1765 * so ensure we're in read mode. Send both the Intel and the AMD command
1766 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1766 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
1767 * this should be safe. 1767 * this should be safe.
1768 */ 1768 */
1769 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); 1769 cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
1770 /* FIXME - should have reset delay before continuing */ 1770 /* FIXME - should have reset delay before continuing */
1771} 1771}
@@ -1807,14 +1807,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1807 printk("Found: %s\n",jedec_table[index].name); 1807 printk("Found: %s\n",jedec_table[index].name);
1808 1808
1809 num_erase_regions = jedec_table[index].NumEraseRegions; 1809 num_erase_regions = jedec_table[index].NumEraseRegions;
1810 1810
1811 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1811 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1812 if (!p_cfi->cfiq) { 1812 if (!p_cfi->cfiq) {
1813 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 1813 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
1814 return 0; 1814 return 0;
1815 } 1815 }
1816 1816
1817 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1817 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
1818 1818
1819 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; 1819 p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
1820 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; 1820 p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
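The num_erase_regions * 4 in the kmalloc() above corresponds to the CFI erase-region descriptors that follow struct cfi_ident: each region packs into one 32-bit word, with the low 16 bits holding the number of erase blocks minus one and the high 16 bits the block size divided by 256. A small standalone encoder/decoder illustrating the packing (this is the generic CFI query layout, not something specific to this hunk):

#include <stdint.h>
#include <stdio.h>

/* One CFI erase-region descriptor: low 16 bits = blocks - 1,
 * high 16 bits = block size / 256. */
static uint32_t cfi_region(unsigned blocks, unsigned block_size)
{
        return (uint32_t)(blocks - 1) | ((uint32_t)(block_size / 256) << 16);
}

int main(void)
{
        uint32_t r = cfi_region(8, 64 * 1024);   /* e.g. 8 blocks of 64 KiB */
        unsigned blocks = (r & 0xFFFF) + 1;
        unsigned bsize  = (r >> 16) * 256;

        printf("descriptor=%#x -> %u blocks of %u bytes (%u total)\n",
               (unsigned)r, blocks, bsize, blocks * bsize);
        return 0;
}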
@@ -1969,7 +1969,7 @@ static inline int jedec_match( __u32 base,
1969 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1969 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1970 /* FIXME - should have a delay before continuing */ 1970 /* FIXME - should have a delay before continuing */
1971 1971
1972 match_done: 1972 match_done:
1973 return rc; 1973 return rc;
1974} 1974}
1975 1975
@@ -1998,23 +1998,23 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
1998 "Probe at base(0x%08x) past the end of the map(0x%08lx)\n", 1998 "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
1999 base, map->size -1); 1999 base, map->size -1);
2000 return 0; 2000 return 0;
2001 2001
2002 } 2002 }
2003 /* Ensure the unlock addresses we try stay inside the map */ 2003 /* Ensure the unlock addresses we try stay inside the map */
2004 probe_offset1 = cfi_build_cmd_addr( 2004 probe_offset1 = cfi_build_cmd_addr(
2005 cfi->addr_unlock1, 2005 cfi->addr_unlock1,
2006 cfi_interleave(cfi), 2006 cfi_interleave(cfi),
2007 cfi->device_type); 2007 cfi->device_type);
2008 probe_offset2 = cfi_build_cmd_addr( 2008 probe_offset2 = cfi_build_cmd_addr(
 2009 cfi->addr_unlock2, 2009 cfi->addr_unlock2,
2010 cfi_interleave(cfi), 2010 cfi_interleave(cfi),
2011 cfi->device_type); 2011 cfi->device_type);
2012 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 2012 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
2013 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 2013 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
2014 { 2014 {
2015 goto retry; 2015 goto retry;
2016 } 2016 }
2017 2017
2018 /* Reset */ 2018 /* Reset */
2019 jedec_reset(base, map, cfi); 2019 jedec_reset(base, map, cfi);
2020 2020
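The two cfi_build_cmd_addr() calls above turn chip-relative unlock offsets into byte offsets on the bus by scaling with the device width and the interleave; the macro itself is outside this diff, so the formula below is an assumption based on how it is used. The classic result is that 0x555 on a single x16 device in word mode lands at byte offset 0xAAA.

#include <stdio.h>

/* Assumed scaling: command offset times interleave times device width in bytes. */
static unsigned long cmd_addr(unsigned long cmd_ofs, int interleave, int type_bytes)
{
        return cmd_ofs * interleave * type_bytes;
}

int main(void)
{
        printf("x8,  no interleave : %#lx\n", cmd_addr(0x555, 1, 1));   /* 0x555  */
        printf("x16, no interleave : %#lx\n", cmd_addr(0x555, 1, 2));   /* 0xaaa  */
        printf("x16, 2-way         : %#lx\n", cmd_addr(0x555, 2, 2));   /* 0x1554 */
        return 0;
}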
@@ -2027,13 +2027,13 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2027 /* FIXME - should have a delay before continuing */ 2027 /* FIXME - should have a delay before continuing */
2028 2028
2029 if (!cfi->numchips) { 2029 if (!cfi->numchips) {
2030 /* This is the first time we're called. Set up the CFI 2030 /* This is the first time we're called. Set up the CFI
2031 stuff accordingly and return */ 2031 stuff accordingly and return */
2032 2032
2033 cfi->mfr = jedec_read_mfr(map, base, cfi); 2033 cfi->mfr = jedec_read_mfr(map, base, cfi);
2034 cfi->id = jedec_read_id(map, base, cfi); 2034 cfi->id = jedec_read_id(map, base, cfi);
2035 DEBUG(MTD_DEBUG_LEVEL3, 2035 DEBUG(MTD_DEBUG_LEVEL3,
2036 "Search for id:(%02x %02x) interleave(%d) type(%d)\n", 2036 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2037 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); 2037 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2038 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) { 2038 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
2039 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { 2039 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
@@ -2062,7 +2062,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2062 return 0; 2062 return 0;
2063 } 2063 }
2064 } 2064 }
2065 2065
 2066 /* Check each previous chip location to see if it's an alias */ 2066 /* Check each previous chip location to see if it's an alias */
2067 for (i=0; i < (base >> cfi->chipshift); i++) { 2067 for (i=0; i < (base >> cfi->chipshift); i++) {
2068 unsigned long start; 2068 unsigned long start;
@@ -2083,7 +2083,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2083 map->name, base, start); 2083 map->name, base, start);
2084 return 0; 2084 return 0;
2085 } 2085 }
2086 2086
2087 /* Yes, it's actually got the device IDs as data. Most 2087 /* Yes, it's actually got the device IDs as data. Most
2088 * unfortunate. Stick the new chip in read mode 2088 * unfortunate. Stick the new chip in read mode
2089 * too and if it's the same, assume it's an alias. */ 2089 * too and if it's the same, assume it's an alias. */
@@ -2097,20 +2097,20 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2097 } 2097 }
2098 } 2098 }
2099 } 2099 }
2100 2100
2101 /* OK, if we got to here, then none of the previous chips appear to 2101 /* OK, if we got to here, then none of the previous chips appear to
2102 be aliases for the current one. */ 2102 be aliases for the current one. */
2103 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ 2103 set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
2104 cfi->numchips++; 2104 cfi->numchips++;
2105 2105
2106ok_out: 2106ok_out:
2107 /* Put it back into Read Mode */ 2107 /* Put it back into Read Mode */
2108 jedec_reset(base, map, cfi); 2108 jedec_reset(base, map, cfi);
2109 2109
2110 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", 2110 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
2111 map->name, cfi_interleave(cfi), cfi->device_type*8, base, 2111 map->name, cfi_interleave(cfi), cfi->device_type*8, base,
2112 map->bankwidth*8); 2112 map->bankwidth*8);
2113 2113
2114 return 1; 2114 return 1;
2115} 2115}
2116 2116
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index c6c83833cc3..a611de9b151 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * Common code to handle absent "placeholder" devices 2 * Common code to handle absent "placeholder" devices
3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com> 3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
4 * $Id: map_absent.c,v 1.5 2004/11/16 18:29:00 dwmw2 Exp $ 4 * $Id: map_absent.c,v 1.6 2005/11/07 11:14:23 gleixner Exp $
5 * 5 *
6 * This map driver is used to allocate "placeholder" MTD 6 * This map driver is used to allocate "placeholder" MTD
7 * devices on systems that have socketed/removable media. 7 * devices on systems that have socketed/removable media.
8 * Use of this driver as a fallback preserves the expected 8 * Use of this driver as a fallback preserves the expected
9 * registration of MTD device nodes regardless of probe outcome. 9 * registration of MTD device nodes regardless of probe outcome.
10 * A usage example is as follows: 10 * A usage example is as follows:
11 * 11 *
@@ -80,7 +80,7 @@ static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t
80static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 80static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
81{ 81{
82 *retlen = 0; 82 *retlen = 0;
83 return -ENODEV; 83 return -ENODEV;
84} 84}
85 85
86static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr) 86static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
index c3cf0f63bc9..2d26bdef82d 100644
--- a/drivers/mtd/chips/sharp.c
+++ b/drivers/mtd/chips/sharp.c
@@ -4,7 +4,7 @@
4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org> 4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
5 * 2000,2001 Lineo, Inc. 5 * 2000,2001 Lineo, Inc.
6 * 6 *
7 * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $ 7 * $Id: sharp.c,v 1.16 2005/11/07 11:14:23 gleixner Exp $
8 * 8 *
9 * Devices supported: 9 * Devices supported:
10 * LH28F016SCT Symmetrical block flash memory, 2Mx8 10 * LH28F016SCT Symmetrical block flash memory, 2Mx8
@@ -31,6 +31,7 @@
31#include <linux/mtd/cfi.h> 31#include <linux/mtd/cfi.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/slab.h>
34 35
35#define CMD_RESET 0xffffffff 36#define CMD_RESET 0xffffffff
36#define CMD_READ_ID 0x90909090 37#define CMD_READ_ID 0x90909090
@@ -214,7 +215,7 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
214/* This function returns with the chip->mutex lock held. */ 215/* This function returns with the chip->mutex lock held. */
215static int sharp_wait(struct map_info *map, struct flchip *chip) 216static int sharp_wait(struct map_info *map, struct flchip *chip)
216{ 217{
217 __u16 status; 218 int status, i;
218 unsigned long timeo = jiffies + HZ; 219 unsigned long timeo = jiffies + HZ;
219 DECLARE_WAITQUEUE(wait, current); 220 DECLARE_WAITQUEUE(wait, current);
220 int adr = 0; 221 int adr = 0;
@@ -227,13 +228,11 @@ retry:
227 map_write32(map,CMD_READ_STATUS,adr); 228 map_write32(map,CMD_READ_STATUS,adr);
228 chip->state = FL_STATUS; 229 chip->state = FL_STATUS;
229 case FL_STATUS: 230 case FL_STATUS:
230 status = map_read32(map,adr); 231 for(i=0;i<100;i++){
231//printk("status=%08x\n",status); 232 status = map_read32(map,adr);
232 233 if((status & SR_READY)==SR_READY)
233 udelay(100); 234 break;
234 if((status & SR_READY)!=SR_READY){ 235 udelay(1);
235//printk(".status=%08x\n",status);
236 udelay(100);
237 } 236 }
238 break; 237 break;
239 default: 238 default:
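The sharp_wait() change above swaps two blind udelay(100) calls for a bounded poll: re-read the status register up to 100 times, one microsecond apart, and stop as soon as SR_READY is set. A standalone model of that pattern (the ready-bit value and the helpers are stand-ins, not the driver's SR_READY, map_read32() or udelay()):

#include <stdio.h>

#define SR_READY_BIT 0x80               /* assumed ready bit for illustration */

static int poll_ready(int (*read_status)(void), int tries)
{
        for (int i = 0; i < tries; i++) {
                if ((read_status() & SR_READY_BIT) == SR_READY_BIT)
                        return 1;               /* ready                       */
                /* udelay(1) would go here in the real driver                 */
        }
        return 0;                               /* still busy after all tries */
}

static int calls;
static int fake_status(void) { return ++calls < 5 ? 0x00 : 0x80; }

int main(void)
{
        int ok = poll_ready(fake_status, 100);
        printf("ready=%d after %d reads\n", ok, calls);
        return 0;
}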
@@ -460,12 +459,12 @@ static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
460 remove_wait_queue(&chip->wq, &wait); 459 remove_wait_queue(&chip->wq, &wait);
461 460
462 //spin_lock_bh(chip->mutex); 461 //spin_lock_bh(chip->mutex);
463 462
464 if (signal_pending(current)){ 463 if (signal_pending(current)){
465 ret = -EINTR; 464 ret = -EINTR;
466 goto out; 465 goto out;
467 } 466 }
468 467
469 } 468 }
470 ret = -ETIME; 469 ret = -ETIME;
471out: 470out:
@@ -564,7 +563,7 @@ static int sharp_suspend(struct mtd_info *mtd)
564static void sharp_resume(struct mtd_info *mtd) 563static void sharp_resume(struct mtd_info *mtd)
565{ 564{
566 printk("sharp_resume()\n"); 565 printk("sharp_resume()\n");
567 566
568} 567}
569 568
570static void sharp_destroy(struct mtd_info *mtd) 569static void sharp_destroy(struct mtd_info *mtd)