author		Todd Poynor <tpoynor@mvista.com>	2005-06-06 19:04:39 -0400
committer	Thomas Gleixner <tglx@mtd.linutronix.de>	2005-06-29 08:18:40 -0400
commit		02b15e343aeefb49f8cac949be599d78250a568f (patch)
tree		c9316c3d91fd79d67b2e6b7eadea5c92723355d9 /drivers/mtd/chips
parent		0dfc62465ef92c7ddcb1ba223bf062453566fd0f (diff)
[MTD] XIP for AMD CFI flash.
Author: Vitaly Wool <vwool@ru.mvista.com>

Signed-off-by: Todd Poynor <tpoynor@mvista.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers/mtd/chips')
-rw-r--r--	drivers/mtd/chips/Kconfig		  4
-rw-r--r--	drivers/mtd/chips/cfi_cmdset_0002.c	402
-rw-r--r--	drivers/mtd/chips/fwh_lock.h		  6
3 files changed, 317 insertions, 95 deletions
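
Every program and erase path touched below follows the same XIP discipline: flush cached ranges and mask interrupts before the chip leaves array mode, and unmask only once it is back. The following is a condensed, illustrative sketch of that shape, assembled from the patch's own helpers and macros (xip_safe_op_sketch itself is hypothetical, not a function added by the patch):

    static int xip_safe_op_sketch(struct map_info *map, struct flchip *chip,
                                  unsigned long adr)
    {
            int ret;

            spin_lock(chip->mutex);
            ret = get_chip(map, chip, adr, FL_WRITING);
            if (ret) {
                    spin_unlock(chip->mutex);
                    return ret;
            }

            XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
            ENABLE_VPP(map);
            xip_disable(map, chip, adr);    /* flash leaves array mode: no IRQs */

            /* ...unlock cycles, command, data... then poll for completion;  */
            /* UDELAY() may suspend the operation to service pending IRQs.   */
            UDELAY(map, chip, adr, 1);

            xip_enable(map, chip, adr);     /* back in array mode: XIP is safe */
            put_chip(map, chip, adr);
            spin_unlock(chip->mutex);
            return ret;
    }
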
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index f4eda1e40d51..b5dc59389bb3 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,5 +1,5 @@
 # drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.14 2005/02/08 17:11:15 nico Exp $
+# $Id: Kconfig,v 1.15 2005/06/06 23:04:35 tpoynor Exp $
 
 menu "RAM/ROM/Flash chip drivers"
 	depends on MTD!=n
@@ -300,7 +300,7 @@ config MTD_JEDEC
 
 config MTD_XIP
 	bool "XIP aware MTD support"
-	depends on !SMP && MTD_CFI_INTELEXT && EXPERIMENTAL
+	depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL
 	default y if XIP_KERNEL
 	help
 	  This allows MTD support to work with flash memory which is also
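
The widened dependency matters because every function marked __xipram in the next file must stay executable while the flash is out of array mode. A sketch of the idea behind the marker follows; the real definition lives in include/linux/mtd/xip.h and is architecture-specific, so the exact attribute shown here is an assumption:

    /* Assumed form: keep the function out of line and out of the
     * flash-backed text section, so it can run while the chip is busy. */
    #define __xipram noinline __attribute__ ((__section__ (".data")))
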
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 49cd81207137..e42eefbda0e1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -4,16 +4,20 @@
  *
  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
+ * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
  *
  * 2_by_8 routines added by Simon Munton
  *
  * 4_by_16 work by Carolyn J. Smith
  *
+ * XIP support hooks by Vitaly Wool (based on code for Intel flash
+ * by Nicolas Pitre)
+ *
  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
  *
  * This code is GPL
  *
- * $Id: cfi_cmdset_0002.c,v 1.116 2005/05/24 13:29:42 gleixner Exp $
+ * $Id: cfi_cmdset_0002.c,v 1.117 2005/06/06 23:04:35 tpoynor Exp $
  *
  */
 
@@ -34,6 +38,7 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/cfi.h>
+#include <linux/mtd/xip.h>
 
 #define AMD_BOOTLOC_BUG
 #define FORCE_WORD_WRITE 0
@@ -393,7 +398,7 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
  * correctly and is therefore not done (particulary with interleaved chips
  * as each chip must be checked independantly of the others).
  */
-static int chip_ready(struct map_info *map, unsigned long addr)
+static int __xipram chip_ready(struct map_info *map, unsigned long addr)
 {
 	map_word d, t;
 
@@ -418,7 +423,7 @@ static int chip_ready(struct map_info *map, unsigned long addr)
  * as each chip must be checked independantly of the others).
  *
  */
-static int chip_good(struct map_info *map, unsigned long addr, map_word expected)
+static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
 {
 	map_word oldd, curd;
 
@@ -448,12 +453,12 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 
 			if (time_after(jiffies, timeo)) {
 				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
-				cfi_spin_unlock(chip->mutex);
+				spin_unlock(chip->mutex);
 				return -EIO;
 			}
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			cfi_udelay(1);
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 			/* Someone else might have been playing with it. */
 			goto retry;
 		}
@@ -501,15 +506,23 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 				return -EIO;
 			}
 
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			cfi_udelay(1);
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 			   So we can just loop here. */
 		}
 		chip->state = FL_READY;
 		return 0;
 
+	case FL_XIP_WHILE_ERASING:
+		if (mode != FL_READY && mode != FL_POINT &&
+		    (!cfip || !(cfip->EraseSuspend&2)))
+			goto sleep;
+		chip->oldstate = chip->state;
+		chip->state = FL_READY;
+		return 0;
+
 	case FL_POINT:
 		/* Only if there's no operation suspended... */
 		if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -519,10 +532,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 	sleep:
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
-		cfi_spin_lock(chip->mutex);
+		spin_lock(chip->mutex);
 		goto resettime;
 	}
 }
@@ -540,6 +553,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 		chip->state = FL_ERASING;
 		break;
 
+	case FL_XIP_WHILE_ERASING:
+		chip->state = chip->oldstate;
+		chip->oldstate = FL_READY;
+		break;
+
 	case FL_READY:
 	case FL_STATUS:
 		/* We should really make set_vpp() count, rather than doing this */
@@ -551,6 +569,198 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 	wake_up(&chip->wq);
 }
 
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt what so ever can be serviced while the flash isn't in array
+ * mode. This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * And within a XIP disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good thing to inspect generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions). Also configuring MTD CFI
+ * support to a single buswidth and a single interleave is also recommended.
+ */
+#include <asm/hardware.h>
+static void xip_disable(struct map_info *map, struct flchip *chip,
+			unsigned long adr)
+{
+	/* TODO: chips with no XIP use should ignore and return */
+	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
+	local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+				unsigned long adr)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+
+	if (chip->state != FL_POINT && chip->state != FL_READY) {
+		map_write(map, CMD(0xf0), adr);
+		chip->state = FL_READY;
+	}
+	(void) map_read(map, adr);
+	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
+	local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_udelay() function is polling for both the given timeout and pending
+ * (but still masked) hardware interrupts. Whenever there is an interrupt
+ * pending then the flash erase operation is suspended, array mode restored
+ * and interrupts unmasked. Task scheduling might also happen at that
+ * point. The CPU eventually returns from the interrupt or the call to
+ * schedule() and the suspended flash operation is resumed for the remaining
+ * of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+				unsigned long adr, int usec)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+	map_word status, OK = CMD(0x80);
+	unsigned long suspended, start = xip_currtime();
+	flstate_t oldstate;
+
+	do {
+		cpu_relax();
+		if (xip_irqpending() && extp &&
+		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
+		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+			/*
+			 * Let's suspend the erase operation when supported.
+			 * Note that we currently don't try to suspend
+			 * interleaved chips if there is already another
+			 * operation suspended (imagine what happens
+			 * when one chip was already done with the current
+			 * operation while another chip suspended it, then
+			 * we resume the whole thing at once). Yes, it
+			 * can happen!
+			 */
+			map_write(map, CMD(0xb0), adr);
+			usec -= xip_elapsed_since(start);
+			suspended = xip_currtime();
+			do {
+				if (xip_elapsed_since(suspended) > 100000) {
+					/*
+					 * The chip doesn't want to suspend
+					 * after waiting for 100 msecs.
+					 * This is a critical error but there
+					 * is not much we can do here.
+					 */
+					return;
+				}
+				status = map_read(map, adr);
+			} while (!map_word_andequal(map, status, OK, OK));
+
+			/* Suspend succeeded */
+			oldstate = chip->state;
+			if (!map_word_bitsset(map, status, CMD(0x40)))
+				break;
+			chip->state = FL_XIP_WHILE_ERASING;
+			chip->erase_suspended = 1;
+			map_write(map, CMD(0xf0), adr);
+			(void) map_read(map, adr);
+			asm volatile (".rep 8; nop; .endr");
+			local_irq_enable();
+			spin_unlock(chip->mutex);
+			asm volatile (".rep 8; nop; .endr");
+			cond_resched();
+
+			/*
+			 * We're back. However someone else might have
+			 * decided to go write to the chip if we are in
+			 * a suspended erase state. If so let's wait
+			 * until it's done.
+			 */
+			spin_lock(chip->mutex);
+			while (chip->state != FL_XIP_WHILE_ERASING) {
+				DECLARE_WAITQUEUE(wait, current);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				add_wait_queue(&chip->wq, &wait);
+				spin_unlock(chip->mutex);
+				schedule();
+				remove_wait_queue(&chip->wq, &wait);
+				spin_lock(chip->mutex);
+			}
+			/* Disallow XIP again */
+			local_irq_disable();
+
+			/* Resume the write or erase operation */
+			map_write(map, CMD(0x30), adr);
+			chip->state = oldstate;
+			start = xip_currtime();
+		} else if (usec >= 1000000/HZ) {
+			/*
+			 * Try to save on CPU power when waiting delay
+			 * is at least a system timer tick period.
+			 * No need to be extremely accurate here.
+			 */
+			xip_cpu_idle();
+		}
+		status = map_read(map, adr);
+	} while (!map_word_andequal(map, status, OK, OK)
+		 && xip_elapsed_since(start) < usec);
+}
+
+#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVALIDATE_CACHE_UDELAY.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
+	INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
+	UDELAY(map, chip, adr, usec)
+
+/*
+ * Extra notes:
+ *
+ * Activating this XIP support changes the way the code works a bit. For
+ * example the code to suspend the current process when concurrent access
+ * happens is never executed because xip_udelay() will always return with the
+ * same chip state as it was entered with. This is why there is no care for
+ * the presence of add_wait_queue() or schedule() calls from within a couple
+ * xip_disable()'d areas of code, like in do_erase_oneblock for example.
+ * The queueing and scheduling are always happening within xip_udelay().
+ *
+ * Similarly, get_chip() and put_chip() just happen to always be executed
+ * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
+ * is in array mode, therefore never executing many cases therein and not
+ * causing any problem with XIP.
+ */
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+
+#define UDELAY(map, chip, adr, usec)  \
+do {  \
+	spin_unlock(chip->mutex);  \
+	cfi_udelay(usec);  \
+	spin_lock(chip->mutex);  \
+} while (0)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
+do {  \
+	spin_unlock(chip->mutex);  \
+	INVALIDATE_CACHED_RANGE(map, adr, len);  \
+	cfi_udelay(usec);  \
+	spin_lock(chip->mutex);  \
+} while (0)
+
+#endif
 
 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
 {
@@ -563,10 +773,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 	/* Ensure cmd read/writes are aligned. */
 	cmd_addr = adr & ~(map_bankwidth(map)-1);
 
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, cmd_addr, FL_READY);
 	if (ret) {
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		return ret;
 	}
 
@@ -579,7 +789,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 
 	put_chip(map, chip, cmd_addr);
 
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 	return 0;
 }
 
@@ -633,7 +843,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	struct cfi_private *cfi = map->fldrv_priv;
 
  retry:
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 
 	if (chip->state != FL_READY){
 #if 0
@@ -642,7 +852,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
 
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
@@ -671,7 +881,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 
 	wake_up(&chip->wq);
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 
 	return 0;
 }
@@ -720,7 +930,7 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
 }
 
 
-static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long timeo = jiffies + HZ;
@@ -740,10 +950,10 @@ static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned
 
 	adr += chip->start;
 
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		return ret;
 	}
 
@@ -763,7 +973,9 @@ static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned
 		goto op_done;
 	}
 
+	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
 	ENABLE_VPP(map);
+	xip_disable(map, chip, adr);
  retry:
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
@@ -771,9 +983,9 @@ static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned
 	map_write(map, datum, adr);
 	chip->state = FL_WRITING;
 
-	cfi_spin_unlock(chip->mutex);
-	cfi_udelay(chip->word_write_time);
-	cfi_spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				adr, map_bankwidth(map),
+				chip->word_write_time);
 
 	/* See comment above for timeout value. */
 	timeo = jiffies + uWriteTimeout;
@@ -784,11 +996,11 @@ static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 			timeo = jiffies + (HZ / 2); /* FIXME */
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 			continue;
 		}
 
@@ -796,14 +1008,14 @@ static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned
 			break;
 
 		if (time_after(jiffies, timeo)) {
+			xip_enable(map, chip, adr);
 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
+			xip_disable(map, chip, adr);
 			break;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		cfi_spin_unlock(chip->mutex);
-		cfi_udelay(1);
-		cfi_spin_lock(chip->mutex);
+		UDELAY(map, chip, adr, 1);
 	}
 	/* Did we succeed? */
 	if (!chip_good(map, adr, datum)) {
@@ -816,10 +1028,11 @@ static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned
 
 		ret = -EIO;
 	}
+	xip_enable(map, chip, adr);
  op_done:
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 
 	return ret;
 }
@@ -851,7 +1064,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		map_word tmp_buf;
 
  retry:
-		cfi_spin_lock(cfi->chips[chipnum].mutex);
+		spin_lock(cfi->chips[chipnum].mutex);
 
 		if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -860,7 +1073,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
 
-			cfi_spin_unlock(cfi->chips[chipnum].mutex);
+			spin_unlock(cfi->chips[chipnum].mutex);
 
 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -874,7 +1087,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		/* Load 'tmp_buf' with old contents of flash */
 		tmp_buf = map_read(map, bus_ofs+chipstart);
 
-		cfi_spin_unlock(cfi->chips[chipnum].mutex);
+		spin_unlock(cfi->chips[chipnum].mutex);
 
 		/* Number of bytes to copy from buffer */
 		n = min_t(int, len, map_bankwidth(map)-i);
@@ -929,7 +1142,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		map_word tmp_buf;
 
  retry1:
-		cfi_spin_lock(cfi->chips[chipnum].mutex);
+		spin_lock(cfi->chips[chipnum].mutex);
 
 		if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -938,7 +1151,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
 
-			cfi_spin_unlock(cfi->chips[chipnum].mutex);
+			spin_unlock(cfi->chips[chipnum].mutex);
 
 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -951,7 +1164,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 
 		tmp_buf = map_read(map, ofs + chipstart);
 
-		cfi_spin_unlock(cfi->chips[chipnum].mutex);
+		spin_unlock(cfi->chips[chipnum].mutex);
 
 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
 
@@ -970,8 +1183,9 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 /*
  * FIXME: interleaved mode not tested, and probably not supported!
  */
-static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
-				  unsigned long adr, const u_char *buf, int len)
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+				    unsigned long adr, const u_char *buf,
+				    int len)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long timeo = jiffies + HZ;
@@ -985,10 +1199,10 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 	adr += chip->start;
 	cmd_adr = adr;
 
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		return ret;
 	}
 
@@ -997,7 +1211,10 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
 	       __func__, adr, datum.x[0] );
 
+	XIP_INVAL_CACHED_RANGE(map, adr, len);
 	ENABLE_VPP(map);
+	xip_disable(map, chip, cmd_adr);
+
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -1027,9 +1244,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 	map_write(map, CMD(0x29), cmd_adr);
 	chip->state = FL_WRITING;
 
-	cfi_spin_unlock(chip->mutex);
-	cfi_udelay(chip->buffer_write_time);
-	cfi_spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				adr, map_bankwidth(map),
+				chip->word_write_time);
 
 	timeo = jiffies + uWriteTimeout;
 
@@ -1040,38 +1257,39 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 			timeo = jiffies + (HZ / 2); /* FIXME */
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 			continue;
 		}
 
-		if (chip_ready(map, adr))
+		if (chip_ready(map, adr)) {
+			xip_enable(map, chip, adr);
 			goto op_done;
+		}
 
 		if( time_after(jiffies, timeo))
 			break;
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		cfi_spin_unlock(chip->mutex);
-		cfi_udelay(1);
-		cfi_spin_lock(chip->mutex);
+		UDELAY(map, chip, adr, 1);
 	}
 
-	printk(KERN_WARNING "MTD %s(): software timeout\n",
-	       __func__ );
-
 	/* reset on all failures. */
 	map_write( map, CMD(0xF0), chip->start );
+	xip_enable(map, chip, adr);
 	/* FIXME - should have reset delay before continuing */
 
+	printk(KERN_WARNING "MTD %s(): software timeout\n",
+	       __func__ );
+
 	ret = -EIO;
  op_done:
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 
 	return ret;
 }
@@ -1161,7 +1379,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
  * Handle devices with one erase region, that only implement
  * the chip erase command.
  */
-static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
+static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long timeo = jiffies + HZ;
@@ -1171,17 +1389,20 @@ static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
 
 	adr = cfi->addr_unlock1;
 
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		return ret;
 	}
 
 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
 	       __func__, chip->start );
 
+	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
 	ENABLE_VPP(map);
+	xip_disable(map, chip, adr);
+
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -1193,9 +1414,9 @@ static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
 	chip->erase_suspended = 0;
 	chip->in_progress_block_addr = adr;
 
-	cfi_spin_unlock(chip->mutex);
-	msleep(chip->erase_time/2);
-	cfi_spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				adr, map->size,
+				chip->erase_time*500);
 
 	timeo = jiffies + (HZ*20);
 
@@ -1204,10 +1425,10 @@ static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 			continue;
 		}
 		if (chip->erase_suspended) {
@@ -1227,10 +1448,7 @@ static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		cfi_spin_unlock(chip->mutex);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
-		cfi_spin_lock(chip->mutex);
+		UDELAY(map, chip, adr, 1000000/HZ);
 	}
 	/* Did we succeed? */
 	if (!chip_good(map, adr, map_word_ff(map))) {
@@ -1242,14 +1460,15 @@ static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
 	}
 
 	chip->state = FL_READY;
+	xip_enable(map, chip, adr);
 	put_chip(map, chip, adr);
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 
 	return ret;
 }
 
 
-static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	unsigned long timeo = jiffies + HZ;
@@ -1258,17 +1477,20 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
 
 	adr += chip->start;
 
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_ERASING);
 	if (ret) {
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		return ret;
 	}
 
 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
 	       __func__, adr );
 
+	XIP_INVAL_CACHED_RANGE(map, adr, len);
 	ENABLE_VPP(map);
+	xip_disable(map, chip, adr);
+
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -1279,10 +1501,10 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 	chip->in_progress_block_addr = adr;
 
-	cfi_spin_unlock(chip->mutex);
-	msleep(chip->erase_time/2);
-	cfi_spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				adr, len,
+				chip->erase_time*500);
 
 	timeo = jiffies + (HZ*20);
 
@@ -1291,10 +1513,10 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 			continue;
 		}
 		if (chip->erase_suspended) {
@@ -1304,20 +1526,20 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
 			chip->erase_suspended = 0;
 		}
 
-		if (chip_ready(map, adr))
+		if (chip_ready(map, adr)) {
+			xip_enable(map, chip, adr);
 			break;
+		}
 
 		if (time_after(jiffies, timeo)) {
+			xip_enable(map, chip, adr);
 			printk(KERN_WARNING "MTD %s(): software timeout\n",
 			       __func__ );
 			break;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		cfi_spin_unlock(chip->mutex);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
-		cfi_spin_lock(chip->mutex);
+		UDELAY(map, chip, adr, 1000000/HZ);
 	}
 	/* Did we succeed? */
 	if (!chip_good(map, adr, map_word_ff(map))) {
@@ -1330,7 +1552,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
 
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 	return ret;
 }
 
@@ -1390,7 +1612,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 		chip = &cfi->chips[i];
 
 	retry:
-		cfi_spin_lock(chip->mutex);
+		spin_lock(chip->mutex);
 
 		switch(chip->state) {
 		case FL_READY:
@@ -1404,14 +1626,14 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 			 * with the chip now anyway.
 			 */
 		case FL_SYNCING:
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 			break;
 
 		default:
 			/* Not an idle state */
 			add_wait_queue(&chip->wq, &wait);
 
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 
 			schedule();
 
@@ -1426,13 +1648,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 	for (i--; i >=0; i--) {
 		chip = &cfi->chips[i];
 
-		cfi_spin_lock(chip->mutex);
+		spin_lock(chip->mutex);
 
 		if (chip->state == FL_SYNCING) {
 			chip->state = chip->oldstate;
 			wake_up(&chip->wq);
 		}
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 	}
 }
 
@@ -1448,7 +1670,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 	for (i=0; !ret && i<cfi->numchips; i++) {
 		chip = &cfi->chips[i];
 
-		cfi_spin_lock(chip->mutex);
+		spin_lock(chip->mutex);
 
 		switch(chip->state) {
 		case FL_READY:
@@ -1468,7 +1690,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 			ret = -EAGAIN;
 			break;
 		}
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 	}
 
 	/* Unlock the chips again */
@@ -1477,13 +1699,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 		for (i--; i >=0; i--) {
 			chip = &cfi->chips[i];
 
-			cfi_spin_lock(chip->mutex);
+			spin_lock(chip->mutex);
 
 			if (chip->state == FL_PM_SUSPENDED) {
 				chip->state = chip->oldstate;
 				wake_up(&chip->wq);
 			}
-			cfi_spin_unlock(chip->mutex);
+			spin_unlock(chip->mutex);
 		}
 	}
 
@@ -1502,7 +1724,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
 
 		chip = &cfi->chips[i];
 
-		cfi_spin_lock(chip->mutex);
+		spin_lock(chip->mutex);
 
 		if (chip->state == FL_PM_SUSPENDED) {
 			chip->state = FL_READY;
@@ -1512,7 +1734,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
 		else
 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
 
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 	}
 }
 
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index fbf44708a861..e1a5b76596c5 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
 	 * to flash memory - that means that we don't have to check status
 	 * and timeout.
 	 */
-	cfi_spin_lock(chip->mutex);
+	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_LOCKING);
 	if (ret) {
-		cfi_spin_unlock(chip->mutex);
+		spin_unlock(chip->mutex);
 		return ret;
 	}
 
@@ -71,7 +71,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
 	/* Done and happy. */
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	cfi_spin_unlock(chip->mutex);
+	spin_unlock(chip->mutex);
 	return 0;
 }
 
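
One mechanical change runs through all three files: the cfi_spin_lock()/cfi_spin_unlock() wrappers give way to plain spin_lock()/spin_unlock() on chip->mutex. Before this series the wrappers were, as far as the surrounding context suggests (an assumption; check include/linux/mtd/cfi.h in the parent commit), trivial aliases along these lines, so the rename carries no behavior change:

    /* Presumed pre-patch definitions (assumption, not quoted from the tree). */
    #define cfi_spin_lock(x)	spin_lock(x)
    #define cfi_spin_unlock(x)	spin_unlock(x)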