Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0002.c')
-rw-r--r--	drivers/mtd/chips/cfi_cmdset_0002.c	| 344
1 file changed, 226 insertions(+), 118 deletions(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f3600e8d5382..d81079ef91a5 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/reboot.h>
 #include <linux/mtd/compatmac.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
@@ -43,10 +44,6 @@

 #define MAX_WORD_RETRIES 3

-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001F
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_SST 0x00BF
 #define SST49LF004B 0x0060
 #define SST49LF040B 0x0050
 #define SST49LF008A 0x005a
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_amdstd_sync (struct mtd_info *);
 static int cfi_amdstd_suspend (struct mtd_info *);
 static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

 static void cfi_amdstd_destroy(struct mtd_info *);
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
 		 * This reduces the risk of false detection due to
 		 * the 8-bit device ID.
 		 */
-		(cfi->mfr == MANUFACTURER_MACRONIX)) {
+		(cfi->mfr == CFI_MFR_MACRONIX)) {
 		DEBUG(MTD_DEBUG_LEVEL1,
 			"%s: Macronix MX29LV400C with bottom boot block"
 			" detected\n", map->name);
@@ -260,6 +258,42 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
 	mtd->flags |= MTD_POWERUP_LOCK;
 }

+static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+
+	/*
+	 * These flashes report two separate eraseblock regions based on the
+	 * sector_erase-size and block_erase-size, although they both operate on the
+	 * same memory. This is not allowed according to CFI, so we just pick the
+	 * sector_erase-size.
+	 */
+	cfi->cfiq->NumEraseRegions = 1;
+}
+
+static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+
+	fixup_old_sst_eraseregion(mtd);
+
+	cfi->addr_unlock1 = 0x5555;
+	cfi->addr_unlock2 = 0x2AAA;
+}
+
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+
+	fixup_old_sst_eraseregion(mtd);
+
+	cfi->addr_unlock1 = 0x555;
+	cfi->addr_unlock2 = 0x2AA;
+}
+
 static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
 {
 	struct map_info *map = mtd->priv;
@@ -282,11 +316,24 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
 	}
 }

+/* Used to fix CFI-Tables of chips without Extended Query Tables */
+static struct cfi_fixup cfi_nopri_fixup_table[] = {
+	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
+	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
+	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
+	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
+	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
+	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
+	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
+	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
+	{ 0, 0, NULL, NULL }
+};
+
 static struct cfi_fixup cfi_fixup_table[] = {
 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef AMD_BOOTLOC_BUG
 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
-	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
 #endif
 	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
 	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@@ -304,9 +351,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
 	{ 0, 0, NULL, NULL }
 };
 static struct cfi_fixup jedec_fixup_table[] = {
-	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
-	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
-	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
+	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
+	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
+	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
 	{ 0, 0, NULL, NULL }
 };

@@ -355,67 +402,72 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 	mtd->name    = map->name;
 	mtd->writesize = 1;

+	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
 	if (cfi->cfi_mode==CFI_MODE_CFI){
 		unsigned char bootloc;
-		/*
-		 * It's a real CFI chip, not one for which the probe
-		 * routine faked a CFI structure. So we read the feature
-		 * table from it.
-		 */
 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
 		struct cfi_pri_amdstd *extp;

 		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
-		if (!extp) {
-			kfree(mtd);
-			return NULL;
-		}
-
+		if (extp) {
+			/*
+			 * It's a real CFI chip, not one for which the probe
+			 * routine faked a CFI structure.
+			 */
 			cfi_fixup_major_minor(cfi, extp);

 			if (extp->MajorVersion != '1' ||
 			    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
 				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
 				       "version %c.%c.\n", extp->MajorVersion,
 				       extp->MinorVersion);
 				kfree(extp);
 				kfree(mtd);
 				return NULL;
 			}

 			/* Install our own private info structure */
 			cfi->cmdset_priv = extp;

 			/* Apply cfi device specific fixups */
 			cfi_fixup(mtd, cfi_fixup_table);

 #ifdef DEBUG_CFI_FEATURES
 			/* Tell the user about it in lots of lovely detail */
 			cfi_tell_features(extp);
 #endif

 			bootloc = extp->TopBottom;
-		if ((bootloc != 2) && (bootloc != 3)) {
-			printk(KERN_WARNING "%s: CFI does not contain boot "
-			       "bank location. Assuming top.\n", map->name);
-			bootloc = 2;
-		}
+			if ((bootloc < 2) || (bootloc > 5)) {
+				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
+				       "bank location (%d). Assuming bottom.\n",
+				       map->name, bootloc);
+				bootloc = 2;
+			}

 			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
-			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
+				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

 				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
 					int j = (cfi->cfiq->NumEraseRegions-1)-i;
 					__u32 swap;

 					swap = cfi->cfiq->EraseRegionInfo[i];
 					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
 					cfi->cfiq->EraseRegionInfo[j] = swap;
+				}
 			}
+			/* Set the default CFI lock/unlock addresses */
+			cfi->addr_unlock1 = 0x555;
+			cfi->addr_unlock2 = 0x2aa;
+		}
+		cfi_fixup(mtd, cfi_nopri_fixup_table);
+
+		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
+			kfree(mtd);
+			return NULL;
 		}
-		/* Set the default CFI lock/unlock addresses */
-		cfi->addr_unlock1 = 0x555;
-		cfi->addr_unlock2 = 0x2aa;

 	} /* CFI mode */
 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
@@ -437,7 +489,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)

 	return cfi_amdstd_setup(mtd);
 }
+struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
 {
@@ -491,13 +547,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
 #endif

 	__module_get(THIS_MODULE);
+	register_reboot_notifier(&mtd->reboot_notifier);
 	return mtd;

  setup_err:
-	if(mtd) {
-		kfree(mtd->eraseregions);
-		kfree(mtd);
-	}
+	kfree(mtd->eraseregions);
+	kfree(mtd);
 	kfree(cfi->cmdset_priv);
 	kfree(cfi->cfiq);
 	return NULL;
@@ -571,9 +626,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
 			return -EIO;
 		}
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		/* Someone else might have been playing with it. */
 		goto retry;
 	}
@@ -617,9 +672,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 				return -EIO;
 			}

-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			cfi_udelay(1);
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 			   So we can just loop here. */

@@ -634,6 +689,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 		chip->state = FL_READY;
 		return 0;

+	case FL_SHUTDOWN:
+		/* The machine is rebooting */
+		return -EIO;
+
 	case FL_POINT:
 		/* Only if there's no operation suspended... */
 		if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -643,10 +702,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 	sleep:
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		goto resettime;
 	}
 }
@@ -778,7 +837,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 		(void) map_read(map, adr);
 		xip_iprefetch();
 		local_irq_enable();
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		xip_iprefetch();
 		cond_resched();

@@ -788,15 +847,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 		 * a suspended erase state. If so let's wait
 		 * until it's done.
 		 */
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		while (chip->state != FL_XIP_WHILE_ERASING) {
 			DECLARE_WAITQUEUE(wait, current);
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 		}
 		/* Disallow XIP again */
 		local_irq_disable();
@@ -858,17 +917,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,

 #define UDELAY(map, chip, adr, usec) \
 do { \
-	spin_unlock(chip->mutex); \
+	mutex_unlock(&chip->mutex); \
 	cfi_udelay(usec); \
-	spin_lock(chip->mutex); \
+	mutex_lock(&chip->mutex); \
 } while (0)

 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
 do { \
-	spin_unlock(chip->mutex); \
+	mutex_unlock(&chip->mutex); \
 	INVALIDATE_CACHED_RANGE(map, adr, len); \
 	cfi_udelay(usec); \
-	spin_lock(chip->mutex); \
+	mutex_lock(&chip->mutex); \
 } while (0)

 #endif
@@ -884,10 +943,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 	/* Ensure cmd read/writes are aligned. */
 	cmd_addr = adr & ~(map_bankwidth(map)-1);

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, cmd_addr, FL_READY);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -900,7 +959,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof

 	put_chip(map, chip, cmd_addr);

-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return 0;
 }

@@ -954,7 +1013,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	struct cfi_private *cfi = map->fldrv_priv;

 retry:
-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);

 	if (chip->state != FL_READY){
 #if 0
@@ -963,7 +1022,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);

-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);

 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
@@ -992,7 +1051,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

 	wake_up(&chip->wq);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return 0;
 }
@@ -1061,10 +1120,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 	adr += chip->start;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1107,11 +1166,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + (HZ / 2); /* FIXME */
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		continue;
 	}

@@ -1143,7 +1202,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 op_done:
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return ret;
 }
@@ -1175,7 +1234,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		map_word tmp_buf;

 retry:
-		spin_lock(cfi->chips[chipnum].mutex);
+		mutex_lock(&cfi->chips[chipnum].mutex);

 		if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -1184,7 +1243,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

-			spin_unlock(cfi->chips[chipnum].mutex);
+			mutex_unlock(&cfi->chips[chipnum].mutex);

 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1198,7 +1257,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		/* Load 'tmp_buf' with old contents of flash */
 		tmp_buf = map_read(map, bus_ofs+chipstart);

-		spin_unlock(cfi->chips[chipnum].mutex);
+		mutex_unlock(&cfi->chips[chipnum].mutex);

 		/* Number of bytes to copy from buffer */
 		n = min_t(int, len, map_bankwidth(map)-i);
@@ -1253,7 +1312,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		map_word tmp_buf;

 retry1:
-		spin_lock(cfi->chips[chipnum].mutex);
+		mutex_lock(&cfi->chips[chipnum].mutex);

 		if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -1262,7 +1321,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

-			spin_unlock(cfi->chips[chipnum].mutex);
+			mutex_unlock(&cfi->chips[chipnum].mutex);

 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1275,7 +1334,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,

 		tmp_buf = map_read(map, ofs + chipstart);

-		spin_unlock(cfi->chips[chipnum].mutex);
+		mutex_unlock(&cfi->chips[chipnum].mutex);

 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

@@ -1310,10 +1369,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	adr += chip->start;
 	cmd_adr = adr;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1368,11 +1427,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,

 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + (HZ / 2); /* FIXME */
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		continue;
 	}

@@ -1400,7 +1459,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 op_done:
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return ret;
 }
@@ -1500,10 +1559,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)

 	adr = cfi->addr_unlock1;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1536,10 +1595,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}
 		if (chip->erase_suspended) {
@@ -1573,7 +1632,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 	chip->state = FL_READY;
 	xip_enable(map, chip, adr);
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return ret;
 }
@@ -1588,10 +1647,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 	adr += chip->start;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_ERASING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1624,10 +1683,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}
 		if (chip->erase_suspended) {
@@ -1663,7 +1722,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }

@@ -1715,7 +1774,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
 	struct cfi_private *cfi = map->fldrv_priv;
 	int ret;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
 	if (ret)
 		goto out_unlock;
@@ -1741,7 +1800,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
 	ret = 0;

 out_unlock:
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }

@@ -1751,7 +1810,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
 	struct cfi_private *cfi = map->fldrv_priv;
 	int ret;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
 	if (ret)
 		goto out_unlock;
@@ -1769,7 +1828,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
 	ret = 0;

 out_unlock:
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }

@@ -1797,7 +1856,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 		chip = &cfi->chips[i];

 	retry:
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		switch(chip->state) {
 		case FL_READY:
@@ -1811,7 +1870,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 			 * with the chip now anyway.
 			 */
 		case FL_SYNCING:
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			break;

 		default:
@@ -1819,7 +1878,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);

-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);

 			schedule();

@@ -1834,13 +1893,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 	for (i--; i >=0; i--) {
 		chip = &cfi->chips[i];

-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		if (chip->state == FL_SYNCING) {
 			chip->state = chip->oldstate;
 			wake_up(&chip->wq);
 		}
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 }

@@ -1856,7 +1915,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 	for (i=0; !ret && i<cfi->numchips; i++) {
 		chip = &cfi->chips[i];

-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		switch(chip->state) {
 		case FL_READY:
@@ -1876,7 +1935,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 			ret = -EAGAIN;
 			break;
 		}
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}

 	/* Unlock the chips again */
@@ -1885,13 +1944,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 		for (i--; i >=0; i--) {
 			chip = &cfi->chips[i];

-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);

 			if (chip->state == FL_PM_SUSPENDED) {
 				chip->state = chip->oldstate;
 				wake_up(&chip->wq);
 			}
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 		}
 	}

@@ -1910,7 +1969,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)

 		chip = &cfi->chips[i];

-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		if (chip->state == FL_PM_SUSPENDED) {
 			chip->state = FL_READY;
@@ -1920,15 +1979,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
 		else
 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 }

+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	int i, ret;
+	struct flchip *chip;
+
+	for (i = 0; i < cfi->numchips; i++) {
+
+		chip = &cfi->chips[i];
+
+		mutex_lock(&chip->mutex);
+
+		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+		if (!ret) {
+			map_write(map, CMD(0xF0), chip->start);
+			chip->state = FL_SHUTDOWN;
+			put_chip(map, chip, chip->start);
+		}
+
+		mutex_unlock(&chip->mutex);
+	}
+
+	return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+			     void *v)
+{
+	struct mtd_info *mtd;
+
+	mtd = container_of(nb, struct mtd_info, reboot_notifier);
+	cfi_amdstd_reset(mtd);
+	return NOTIFY_DONE;
+}
+
+
 static void cfi_amdstd_destroy(struct mtd_info *mtd)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;

+	cfi_amdstd_reset(mtd);
+	unregister_reboot_notifier(&mtd->reboot_notifier);
 	kfree(cfi->cmdset_priv);
 	kfree(cfi->cfiq);
 	kfree(cfi);
@@ -1938,3 +2044,5 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+MODULE_ALIAS("cfi_cmdset_0006");
+MODULE_ALIAS("cfi_cmdset_0701");