path: root/drivers/mtd/chips/cfi_cmdset_0020.c
author    Stefani Seibold <stefani@seibold.net>    2010-04-18 16:46:44 -0400
committer David Woodhouse <David.Woodhouse@intel.com>    2010-05-10 09:22:30 -0400
commit    c4e773764cead9358fd4b036d1b883fff3968513 (patch)
tree      ceb30e53d7ba33a071653c2bc05c06293d84575f /drivers/mtd/chips/cfi_cmdset_0020.c
parent    67026418f534045525a7c39f506006cd7fbd197f (diff)
mtd: fix a huge latency problem in the MTD CFI and LPDDR flash drivers.
The use of a memcpy() during a spinlock operation will cause very long thread context switch delays if the flash chip bandwidth is low and the data to be copied is large, because a spinlock disables preemption.

For example: a flash with 6.5 MB/s bandwidth will, under UBIFS, which sometimes requests 128 KiB (the flash erase size), cause a preemption delay of 20 milliseconds. High-priority threads will not be served during this time, regardless of whether they access the flash or not. This behaviour breaks real-time operation.

The patch changes all uses of spin_lock operations on xxxx->mutex into mutex operations, which is exactly what the name says and means. I have checked the code of the drivers and there is no use of atomic paths such as interrupts or timers. The mtdoops facility is also not used by these drivers. So it is safe to replace the spin_lock calls with mutex operations. There is no performance regression, since the mutex is normally uncontended.

Changelog:
06.03.2010 First release
26.03.2010 Fix mutex[1] issue and tested it for compile failure

Signed-off-by: Stefani Seibold <stefani@seibold.net>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
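To make the latency argument concrete, here is a minimal sketch (in kernel C) of the pattern before and after the change. It is illustrative only: struct my_chip, its window member and the two helper functions are hypothetical and not taken from the driver; only the spin_lock_bh()/mutex_lock() usage mirrors what the diff below converts.

/* Illustrative sketch only -- not the driver's actual code. */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct my_chip {
	spinlock_t lock;	/* old scheme: spinlock protecting the chip */
	struct mutex mutex;	/* new scheme: mutex protecting the chip */
	void *window;		/* mapped flash window (hypothetical) */
};

/* Before: the whole copy runs with preemption disabled.  With a slow
 * chip and a 128 KiB request this stalls every other task on the CPU
 * for tens of milliseconds. */
static void copy_out_spinlock(struct my_chip *chip, void *buf, size_t len)
{
	spin_lock_bh(&chip->lock);
	memcpy(buf, chip->window, len);	/* long, non-preemptible section */
	spin_unlock_bh(&chip->lock);
}

/* After: the mutex still serialises access to the chip, but the copying
 * task can sleep and be preempted, so high-priority threads keep running. */
static void copy_out_mutex(struct my_chip *chip, void *buf, size_t len)
{
	mutex_lock(&chip->mutex);
	memcpy(buf, chip->window, len);	/* preemptible critical section */
	mutex_unlock(&chip->mutex);
}

The conversion in the diff below is mechanical: every spin_lock_bh(chip->mutex)/spin_unlock_bh(chip->mutex) pair becomes mutex_lock(&chip->mutex)/mutex_unlock(&chip->mutex), with no change to the surrounding wait-queue and retry logic.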
Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0020.c')
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 136
1 files changed, 68 insertions, 68 deletions
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0667a671525d..e54e8c169d76 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 
 	timeo = jiffies + HZ;
  retry:
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* Check that the chip's ready to talk to us.
 	 * If it's in FL_ERASING state, suspend it and make it talk now.
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 				/* make sure we're in 'read status' mode */
 				map_write(map, CMD(0x70), cmd_addr);
 				chip->state = FL_ERASING;
-				spin_unlock_bh(chip->mutex);
+				mutex_unlock(&chip->mutex);
 				printk(KERN_ERR "Chip not ready after erase "
 				       "suspended: status = 0x%lx\n", status.x[0]);
 				return -EIO;
 			}
 
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			cfi_udelay(1);
-			spin_lock_bh(chip->mutex);
+			mutex_lock(&chip->mutex);
 		}
 
 		suspended = 1;
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 
 		/* Urgh. Chip not yet ready to talk to us. */
 		if (time_after(jiffies, timeo)) {
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
 		goto retry;
 
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 		   someone changes the status */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + HZ;
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 	}
 
 	wake_up(&chip->wq);
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return 0;
 }
 
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 #ifdef DEBUG_CFI_FEATURES
 	printk("%s: chip->state[%d]\n", __func__, chip->state);
 #endif
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* Check that the chip's ready to talk to us.
 	 * Later, we can actually think about interrupting it
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 			break;
 		/* Urgh. Chip not yet ready to talk to us. */
 		if (time_after(jiffies, timeo)) {
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
 			       status.x[0], map_read(map, cmd_adr).x[0]);
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
 		goto retry;
 
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 		   someone changes the status */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + HZ;
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 		if (map_word_andequal(map, status, status_OK, status_OK))
 			break;
 
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 
 		if (++z > 100) {
 			/* Argh. Not ready for write to buffer */
 			DISABLE_VPP(map);
 			map_write(map, CMD(0x70), cmd_adr);
 			chip->state = FL_STATUS;
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
 			return -EIO;
 		}
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 	map_write(map, CMD(0xd0), cmd_adr);
 	chip->state = FL_WRITING;
 
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	cfi_udelay(chip->buffer_write_time);
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	timeo = jiffies + (HZ/2);
 	z = 0;
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 			/* Someone's suspended the write. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock_bh(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}
 
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 			map_write(map, CMD(0x70), adr);
 			chip->state = FL_STATUS;
 			DISABLE_VPP(map);
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
 		z++;
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 	}
 	if (!z) {
 		chip->buffer_write_time--;
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
 		/* put back into read status register mode */
 		map_write(map, CMD(0x70), adr);
 		wake_up(&chip->wq);
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
 	}
 	wake_up(&chip->wq);
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 
 	return 0;
 }
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
 
 	timeo = jiffies + HZ;
 retry:
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* Check that the chip's ready to talk to us. */
 	switch (chip->state) {
@@ -766,13 +766,13 @@ retry:
 
 		/* Urgh. Chip not yet ready to talk to us. */
 		if (time_after(jiffies, timeo)) {
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
 		goto retry;
 
@@ -781,7 +781,7 @@ retry:
 		   someone changes the status */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + HZ;
@@ -797,9 +797,9 @@ retry:
 	map_write(map, CMD(0xD0), adr);
 	chip->state = FL_ERASING;
 
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	msleep(1000);
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* FIXME. Use a timer to check this, and return immediately. */
 	/* Once the state machine's known to be working I'll do that */
@@ -810,11 +810,11 @@ retry:
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 			timeo = jiffies + (HZ*20); /* FIXME */
-			spin_lock_bh(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}
 
@@ -828,14 +828,14 @@ retry:
 			chip->state = FL_STATUS;
 			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
 			DISABLE_VPP(map);
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 	}
 
 	DISABLE_VPP(map);
@@ -878,7 +878,7 @@ retry:
 				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
 				timeo = jiffies + HZ;
 				chip->state = FL_STATUS;
-				spin_unlock_bh(chip->mutex);
+				mutex_unlock(&chip->mutex);
 				goto retry;
 			}
 			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@@ -887,7 +887,7 @@ retry:
 	}
 
 	wake_up(&chip->wq);
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }
 
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
 		chip = &cfi->chips[i];
 
 	retry:
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 
 		switch(chip->state) {
 		case FL_READY:
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
 			 * with the chip now anyway.
 			 */
 		case FL_SYNCING:
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			break;
 
 		default:
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
 
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
 	for (i--; i >=0; i--) {
 		chip = &cfi->chips[i];
 
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 
 		if (chip->state == FL_SYNCING) {
 			chip->state = chip->oldstate;
 			wake_up(&chip->wq);
 		}
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 }
 
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
 
 	timeo = jiffies + HZ;
 retry:
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* Check that the chip's ready to talk to us. */
 	switch (chip->state) {
@@ -1071,13 +1071,13 @@ retry:
 
 		/* Urgh. Chip not yet ready to talk to us. */
 		if (time_after(jiffies, timeo)) {
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
 		goto retry;
 
@@ -1086,7 +1086,7 @@ retry:
 		   someone changes the status */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + HZ;
@@ -1098,9 +1098,9 @@ retry:
 	map_write(map, CMD(0x01), adr);
 	chip->state = FL_LOCKING;
 
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	msleep(1000);
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* FIXME. Use a timer to check this, and return immediately. */
 	/* Once the state machine's known to be working I'll do that */
@@ -1118,21 +1118,21 @@ retry:
 			chip->state = FL_STATUS;
 			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
 			DISABLE_VPP(map);
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 	}
 
 	/* Done and happy. */
 	chip->state = FL_STATUS;
 	DISABLE_VPP(map);
 	wake_up(&chip->wq);
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return 0;
 }
 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
 
 	timeo = jiffies + HZ;
 retry:
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* Check that the chip's ready to talk to us. */
 	switch (chip->state) {
@@ -1220,13 +1220,13 @@ retry:
 
 		/* Urgh. Chip not yet ready to talk to us. */
 		if (time_after(jiffies, timeo)) {
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
 		goto retry;
 
@@ -1235,7 +1235,7 @@ retry:
 		   someone changes the status */
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
 		timeo = jiffies + HZ;
@@ -1247,9 +1247,9 @@ retry:
 	map_write(map, CMD(0xD0), adr);
 	chip->state = FL_UNLOCKING;
 
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	msleep(1000);
-	spin_lock_bh(chip->mutex);
+	mutex_lock(&chip->mutex);
 
 	/* FIXME. Use a timer to check this, and return immediately. */
 	/* Once the state machine's known to be working I'll do that */
@@ -1267,21 +1267,21 @@ retry:
 			chip->state = FL_STATUS;
 			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
 			DISABLE_VPP(map);
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			return -EIO;
 		}
 
 		/* Latency issues. Drop the unlock, wait a while and retry */
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 	}
 
 	/* Done and happy. */
 	chip->state = FL_STATUS;
 	DISABLE_VPP(map);
 	wake_up(&chip->wq);
-	spin_unlock_bh(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return 0;
 }
 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
 	for (i=0; !ret && i<cfi->numchips; i++) {
 		chip = &cfi->chips[i];
 
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 
 		switch(chip->state) {
 		case FL_READY:
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
 			ret = -EAGAIN;
 			break;
 		}
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 
 	/* Unlock the chips again */
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
 		for (i--; i >=0; i--) {
 			chip = &cfi->chips[i];
 
-			spin_lock_bh(chip->mutex);
+			mutex_lock(&chip->mutex);
 
 			if (chip->state == FL_PM_SUSPENDED) {
 				/* No need to force it into a known state here,
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
 				chip->state = chip->oldstate;
 				wake_up(&chip->wq);
 			}
-			spin_unlock_bh(chip->mutex);
+			mutex_unlock(&chip->mutex);
 		}
 	}
 
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
 
 		chip = &cfi->chips[i];
 
-		spin_lock_bh(chip->mutex);
+		mutex_lock(&chip->mutex);
 
 		/* Go to known state. Chip may have been power cycled */
 		if (chip->state == FL_PM_SUSPENDED) {
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
 			wake_up(&chip->wq);
 		}
 
-		spin_unlock_bh(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 }
 
1405 1405