Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0002.c')
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c   156
1 file changed, 83 insertions, 73 deletions
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 0e6475050da9..aed10bd5c3c3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -10,14 +10,14 @@
  *
  * 4_by_16 work by Carolyn J. Smith
  *
  * XIP support hooks by Vitaly Wool (based on code for Intel flash
  * by Nicolas Pitre)
  *
  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
  *
  * This code is GPL
  *
- * $Id: cfi_cmdset_0002.c,v 1.118 2005/07/04 22:34:29 gleixner Exp $
+ * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
  *
  */
 
@@ -93,7 +93,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
 	};
 
 	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
 	printk("  Address sensitive unlock: %s\n",
 	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
 
 	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
@@ -118,9 +118,9 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
 	else
 		printk("  Page mode: %d word page\n", extp->PageMode << 2);
 
 	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
 	       extp->VppMin >> 4, extp->VppMin & 0xf);
 	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
 	       extp->VppMax >> 4, extp->VppMax & 0xf);
 
 	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
@@ -177,7 +177,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
 	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
 		mtd->erase = cfi_amdstd_erase_chip;
 	}
 
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
@@ -239,7 +239,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 
 	if (cfi->cfi_mode==CFI_MODE_CFI){
 		unsigned char bootloc;
 		/*
 		 * It's a real CFI chip, not one for which the probe
 		 * routine faked a CFI structure. So we read the feature
 		 * table from it.
@@ -253,8 +253,18 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 			return NULL;
 		}
 
+		if (extp->MajorVersion != '1' ||
+		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+			printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+			       "version %c.%c.\n", extp->MajorVersion,
+			       extp->MinorVersion);
+			kfree(extp);
+			kfree(mtd);
+			return NULL;
+		}
+
 		/* Install our own private info structure */
 		cfi->cmdset_priv = extp;
 
 		/* Apply cfi device specific fixups */
 		cfi_fixup(mtd, cfi_fixup_table);
@@ -262,7 +272,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 #ifdef DEBUG_CFI_FEATURES
 		/* Tell the user about it in lots of lovely detail */
 		cfi_tell_features(extp);
 #endif
 
 		bootloc = extp->TopBottom;
 		if ((bootloc != 2) && (bootloc != 3)) {
@@ -273,11 +283,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 
 		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
 			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
 
 			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
 				int j = (cfi->cfiq->NumEraseRegions-1)-i;
 				__u32 swap;
 
 				swap = cfi->cfiq->EraseRegionInfo[i];
 				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
 				cfi->cfiq->EraseRegionInfo[j] = swap;
@@ -288,11 +298,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 		cfi->addr_unlock2 = 0x2aa;
 		/* Modify the unlock address if we are in compatibility mode */
 		if ( /* x16 in x8 mode */
 		    ((cfi->device_type == CFI_DEVICETYPE_X8) &&
 		     (cfi->cfiq->InterfaceDesc == 2)) ||
 		    /* x32 in x16 mode */
 		    ((cfi->device_type == CFI_DEVICETYPE_X16) &&
 		     (cfi->cfiq->InterfaceDesc == 4)))
 		{
 			cfi->addr_unlock1 = 0xaaa;
 			cfi->addr_unlock2 = 0x555;
@@ -310,10 +320,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
 		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
 	}
 
 	map->fldrv = &cfi_amdstd_chipdrv;
 
 	return cfi_amdstd_setup(mtd);
 }
 
@@ -326,24 +336,24 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
 	unsigned long offset = 0;
 	int i,j;
 
 	printk(KERN_NOTICE "number of %s chips: %d\n",
 	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
 	/* Select the correct geometry setup */
 	mtd->size = devsize * cfi->numchips;
 
 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
 				    * mtd->numeraseregions, GFP_KERNEL);
 	if (!mtd->eraseregions) {
 		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
 		goto setup_err;
 	}
 
 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
 		unsigned long ernum, ersize;
 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
 
 		if (mtd->erasesize < ersize) {
 			mtd->erasesize = ersize;
 		}
@@ -429,7 +439,7 @@ static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word
 	oldd = map_read(map, addr);
 	curd = map_read(map, addr);
 
 	return	map_word_equal(map, oldd, curd) &&
 		map_word_equal(map, curd, expected);
 }
 
@@ -461,7 +471,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			/* Someone else might have been playing with it. */
 			goto retry;
 		}
 
 	case FL_READY:
 	case FL_CFI_QUERY:
 	case FL_JEDEC_QUERY:
@@ -504,7 +514,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
 			return -EIO;
 		}
 
 		spin_unlock(chip->mutex);
 		cfi_udelay(1);
 		spin_lock(chip->mutex);
@@ -607,7 +617,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
  * When a delay is required for the flash operation to complete, the
  * xip_udelay() function is polling for both the given timeout and pending
  * (but still masked) hardware interrupts. Whenever there is an interrupt
  * pending then the flash erase operation is suspended, array mode restored
  * and interrupts unmasked. Task scheduling might also happen at that
  * point. The CPU eventually returns from the interrupt or the call to
  * schedule() and the suspended flash operation is resumed for the remaining
@@ -631,9 +641,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 	    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
 	    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
 		/*
 		 * Let's suspend the erase operation when supported.
 		 * Note that we currently don't try to suspend
 		 * interleaved chips if there is already another
 		 * operation suspended (imagine what happens
 		 * when one chip was already done with the current
 		 * operation while another chip suspended it, then
@@ -769,8 +779,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 
 	adr += chip->start;
 
 	/* Ensure cmd read/writes are aligned. */
 	cmd_addr = adr & ~(map_bankwidth(map)-1);
 
 	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, cmd_addr, FL_READY);
@@ -850,7 +860,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 #endif
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
 
 		spin_unlock(chip->mutex);
 
 		schedule();
@@ -862,7 +872,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 		timeo = jiffies + HZ;
 
 		goto retry;
 	}
 
 	adr += chip->start;
 
@@ -871,14 +881,14 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 
 	map_copy_from(map, buf, adr, len);
 
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 
 	wake_up(&chip->wq);
 	spin_unlock(chip->mutex);
 
@@ -987,7 +997,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 				chip->word_write_time);
 
 	/* See comment above for timeout value. */
 	timeo = jiffies + uWriteTimeout;
 	for (;;) {
 		if (chip->state != FL_WRITING) {
 			/* Someone's suspended the write. Sleep */
@@ -1003,16 +1013,16 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 			continue;
 		}
 
-		if (chip_ready(map, adr))
-			break;
-
-		if (time_after(jiffies, timeo)) {
+		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
 			xip_enable(map, chip, adr);
 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
 			xip_disable(map, chip, adr);
 			break;
 		}
 
+		if (chip_ready(map, adr))
+			break;
+
 		/* Latency issues. Drop the lock, wait a while and retry */
 		UDELAY(map, chip, adr, 1);
 	}
@@ -1022,7 +1032,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 		map_write( map, CMD(0xF0), chip->start );
 		/* FIXME - should have reset delay before continuing */
 
 		if (++retry_cnt <= MAX_WORD_RETRIES)
 			goto retry;
 
 		ret = -EIO;
@@ -1090,27 +1100,27 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 
 		/* Number of bytes to copy from buffer */
 		n = min_t(int, len, map_bankwidth(map)-i);
 
 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
 
 		ret = do_write_oneword(map, &cfi->chips[chipnum],
 				       bus_ofs, tmp_buf);
 		if (ret)
 			return ret;
 
 		ofs += n;
 		buf += n;
 		(*retlen) += n;
 		len -= n;
 
 		if (ofs >> cfi->chipshift) {
 			chipnum ++;
 			ofs = 0;
 			if (chipnum == cfi->numchips)
 				return 0;
 		}
 	}
 
 	/* We are now aligned, write as much as possible */
 	while(len >= map_bankwidth(map)) {
 		map_word datum;
@@ -1128,7 +1138,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		len -= map_bankwidth(map);
 
 		if (ofs >> cfi->chipshift) {
 			chipnum ++;
 			ofs = 0;
 			if (chipnum == cfi->numchips)
 				return 0;
@@ -1166,12 +1176,12 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		spin_unlock(cfi->chips[chipnum].mutex);
 
 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
 
 		ret = do_write_oneword(map, &cfi->chips[chipnum],
 				       ofs, tmp_buf);
 		if (ret)
 			return ret;
 
 		(*retlen) += len;
 	}
 
@@ -1183,7 +1193,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
  * FIXME: interleaved mode not tested, and probably not supported!
  */
 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 				    unsigned long adr, const u_char *buf,
 				    int len)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -1213,7 +1223,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	XIP_INVAL_CACHED_RANGE(map, adr, len);
 	ENABLE_VPP(map);
 	xip_disable(map, chip, cmd_adr);
 
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -1247,8 +1257,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 				adr, map_bankwidth(map),
 				chip->word_write_time);
 
 	timeo = jiffies + uWriteTimeout;
 
 	for (;;) {
 		if (chip->state != FL_WRITING) {
 			/* Someone's suspended the write. Sleep */
@@ -1264,13 +1274,13 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 			continue;
 		}
 
+		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+			break;
+
 		if (chip_ready(map, adr)) {
 			xip_enable(map, chip, adr);
 			goto op_done;
 		}
-
-		if( time_after(jiffies, timeo))
-			break;
 
 		/* Latency issues. Drop the lock, wait a while and retry */
 		UDELAY(map, chip, adr, 1);
@@ -1342,7 +1352,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
 		if (size % map_bankwidth(map))
 			size -= size % map_bankwidth(map);
 
 		ret = do_write_buffer(map, &cfi->chips[chipnum],
 				      ofs, buf, size);
 		if (ret)
 			return ret;
@@ -1353,7 +1363,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
 		len -= size;
 
 		if (ofs >> cfi->chipshift) {
 			chipnum ++;
 			ofs = 0;
 			if (chipnum == cfi->numchips)
 				return 0;
@@ -1570,7 +1580,7 @@ int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
 
 	instr->state = MTD_ERASE_DONE;
 	mtd_erase_callback(instr);
 
 	return 0;
 }
 
@@ -1593,7 +1603,7 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
 
 	instr->state = MTD_ERASE_DONE;
 	mtd_erase_callback(instr);
 
 	return 0;
 }
 
@@ -1620,7 +1630,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 		case FL_JEDEC_QUERY:
 			chip->oldstate = chip->state;
 			chip->state = FL_SYNCING;
 			/* No need to wake_up() on this state change -
 			 * as the whole point is that nobody can do anything
 			 * with the chip now anyway.
 			 */
@@ -1631,13 +1641,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 		default:
 			/* Not an idle state */
 			add_wait_queue(&chip->wq, &wait);
 
 			spin_unlock(chip->mutex);
 
 			schedule();
 
 			remove_wait_queue(&chip->wq, &wait);
 
 			goto retry;
 		}
 	}
@@ -1648,7 +1658,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 		chip = &cfi->chips[i];
 
 		spin_lock(chip->mutex);
 
 		if (chip->state == FL_SYNCING) {
 			chip->state = chip->oldstate;
 			wake_up(&chip->wq);
@@ -1678,7 +1688,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 		case FL_JEDEC_QUERY:
 			chip->oldstate = chip->state;
 			chip->state = FL_PM_SUSPENDED;
 			/* No need to wake_up() on this state change -
 			 * as the whole point is that nobody can do anything
 			 * with the chip now anyway.
 			 */
@@ -1699,7 +1709,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 			chip = &cfi->chips[i];
 
 			spin_lock(chip->mutex);
 
 			if (chip->state == FL_PM_SUSPENDED) {
 				chip->state = chip->oldstate;
 				wake_up(&chip->wq);
@@ -1707,7 +1717,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 				spin_unlock(chip->mutex);
 			}
 		}
 
 	return ret;
 }
 
@@ -1720,11 +1730,11 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
 	struct flchip *chip;
 
 	for (i=0; i<cfi->numchips; i++) {
 
 		chip = &cfi->chips[i];
 
 		spin_lock(chip->mutex);
 
 		if (chip->state == FL_PM_SUSPENDED) {
 			chip->state = FL_READY;
 			map_write(map, CMD(0xF0), chip->start);