author     Stefani Seibold <stefani@seibold.net>           2010-04-18 16:46:44 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>     2010-05-10 09:22:30 -0400
commit     c4e773764cead9358fd4b036d1b883fff3968513 (patch)
tree       ceb30e53d7ba33a071653c2bc05c06293d84575f /drivers/mtd/chips/cfi_cmdset_0002.c
parent     67026418f534045525a7c39f506006cd7fbd197f (diff)
mtd: fix a huge latency problem in the MTD CFI and LPDDR flash drivers.
Using memcpy() while holding a spinlock causes very long thread
context-switch delays if the flash chip bandwidth is low and the data to be
copied is large, because a held spinlock disables preemption.
For example: a flash with 6.5 MB/s bandwidth, used under UBIFS, which
sometimes requests 128 KiB (the flash erase size) in a single transfer,
causes a preemption delay of about 20 milliseconds. High-priority threads
are not served during this time, regardless of whether those threads access
the flash or not. This behaviour breaks real-time operation.
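As a rough sanity check of the 20 ms figure (taking MB as 10^6 bytes and
counting only the copy itself):

    128 KiB / 6.5 MB/s = 131072 B / 6500000 B/s ~= 0.020 s ~= 20 ms

so preemption stays disabled for roughly the duration of the whole copy.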
The patch changes all uses of spin_lock operations on xxxx->mutex into
mutex operations, which is exactly what the field name says and means.
I have checked the driver code and there is no use from atomic paths such
as interrupts or timers. The mtdoops facility is also not used by these
drivers, so it is safe to replace the spinlock with a mutex.
There is no performance regression, since the mutex is normally
uncontended.
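For illustration only (not part of the patch itself): a minimal sketch of
the resulting locking pattern, simplified from do_read_onechip() in the
diff below. The function name is made up and error/state handling is
trimmed; get_chip(), put_chip() and chip->mutex are the driver's own.

static int sketch_read_onechip(struct map_info *map, struct flchip *chip,
                               unsigned long cmd_addr)
{
        int ret;

        /* chip->mutex is now a struct mutex: the holder may sleep in
         * get_chip() and preemption stays enabled while the (possibly
         * large) flash copy runs. */
        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }
        /* ... the actual data copy (map_copy_from()) goes here ... */
        put_chip(map, chip, cmd_addr);
        mutex_unlock(&chip->mutex);
        return 0;
}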
Changelog:
06.03.2010 First release
26.03.2010 Fixed mutex[1] issue and tested for compile failures
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0002.c')
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 122
1 file changed, 61 insertions(+), 61 deletions(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index ea2a7f66ddf9..c93e47d21ce0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -565,9 +565,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
 			return -EIO;
 		}
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		/* Someone else might have been playing with it. */
 		goto retry;
 	}
@@ -611,9 +611,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			return -EIO;
 		}

-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		cfi_udelay(1);
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 		   So we can just loop here. */
 	}
@@ -637,10 +637,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 	sleep:
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);
 		goto resettime;
 	}
 }
@@ -772,7 +772,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 		(void) map_read(map, adr);
 		xip_iprefetch();
 		local_irq_enable();
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		xip_iprefetch();
 		cond_resched();

@@ -782,15 +782,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			 * a suspended erase state. If so let's wait
 			 * until it's done.
 			 */
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			while (chip->state != FL_XIP_WHILE_ERASING) {
 				DECLARE_WAITQUEUE(wait, current);
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				add_wait_queue(&chip->wq, &wait);
-				spin_unlock(chip->mutex);
+				mutex_unlock(&chip->mutex);
 				schedule();
 				remove_wait_queue(&chip->wq, &wait);
-				spin_lock(chip->mutex);
+				mutex_lock(&chip->mutex);
 			}
 			/* Disallow XIP again */
 			local_irq_disable();
@@ -852,17 +852,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,

 #define UDELAY(map, chip, adr, usec) \
 do { \
-	spin_unlock(chip->mutex); \
+	mutex_unlock(&chip->mutex); \
 	cfi_udelay(usec); \
-	spin_lock(chip->mutex); \
+	mutex_lock(&chip->mutex); \
 } while (0)

 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
 do { \
-	spin_unlock(chip->mutex); \
+	mutex_unlock(&chip->mutex); \
 	INVALIDATE_CACHED_RANGE(map, adr, len); \
 	cfi_udelay(usec); \
-	spin_lock(chip->mutex); \
+	mutex_lock(&chip->mutex); \
 } while (0)

 #endif
@@ -878,10 +878,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 	/* Ensure cmd read/writes are aligned. */
 	cmd_addr = adr & ~(map_bankwidth(map)-1);

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, cmd_addr, FL_READY);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -894,7 +894,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof

 	put_chip(map, chip, cmd_addr);

-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return 0;
 }

@@ -948,7 +948,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	struct cfi_private *cfi = map->fldrv_priv;

 retry:
-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);

 	if (chip->state != FL_READY){
 #if 0
@@ -957,7 +957,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&chip->wq, &wait);

-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);

 		schedule();
 		remove_wait_queue(&chip->wq, &wait);
@@ -986,7 +986,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

 	wake_up(&chip->wq);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return 0;
 }
@@ -1055,10 +1055,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 	adr += chip->start;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1101,11 +1101,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}

@@ -1137,7 +1137,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 op_done:
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return ret;
 }
@@ -1169,7 +1169,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		map_word tmp_buf;

 retry:
-		spin_lock(cfi->chips[chipnum].mutex);
+		mutex_lock(&cfi->chips[chipnum].mutex);

 		if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -1178,7 +1178,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

-			spin_unlock(cfi->chips[chipnum].mutex);
+			mutex_unlock(&cfi->chips[chipnum].mutex);

 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1192,7 +1192,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		/* Load 'tmp_buf' with old contents of flash */
 		tmp_buf = map_read(map, bus_ofs+chipstart);

-		spin_unlock(cfi->chips[chipnum].mutex);
+		mutex_unlock(&cfi->chips[chipnum].mutex);

 		/* Number of bytes to copy from buffer */
 		n = min_t(int, len, map_bankwidth(map)-i);
@@ -1247,7 +1247,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 		map_word tmp_buf;

 retry1:
-		spin_lock(cfi->chips[chipnum].mutex);
+		mutex_lock(&cfi->chips[chipnum].mutex);

 		if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -1256,7 +1256,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

-			spin_unlock(cfi->chips[chipnum].mutex);
+			mutex_unlock(&cfi->chips[chipnum].mutex);

 			schedule();
 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1269,7 +1269,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,

 		tmp_buf = map_read(map, ofs + chipstart);

-		spin_unlock(cfi->chips[chipnum].mutex);
+		mutex_unlock(&cfi->chips[chipnum].mutex);

 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

@@ -1304,10 +1304,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	adr += chip->start;
 	cmd_adr = adr;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1362,11 +1362,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,

 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
 			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}

@@ -1394,7 +1394,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 op_done:
 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return ret;
 }
@@ -1494,10 +1494,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)

 	adr = cfi->addr_unlock1;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_WRITING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1530,10 +1530,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}
 		if (chip->erase_suspended) {
@@ -1567,7 +1567,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 	chip->state = FL_READY;
 	xip_enable(map, chip, adr);
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);

 	return ret;
 }
@@ -1582,10 +1582,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 	adr += chip->start;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr, FL_ERASING);
 	if (ret) {
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 		return ret;
 	}

@@ -1618,10 +1618,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 			/* Someone's suspended the erase. Sleep */
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			schedule();
 			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);
 			continue;
 		}
 		if (chip->erase_suspended) {
@@ -1657,7 +1657,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 	chip->state = FL_READY;
 	put_chip(map, chip, adr);
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }

@@ -1709,7 +1709,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
 	struct cfi_private *cfi = map->fldrv_priv;
 	int ret;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
 	if (ret)
 		goto out_unlock;
@@ -1735,7 +1735,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
 	ret = 0;

 out_unlock:
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }

@@ -1745,7 +1745,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
 	struct cfi_private *cfi = map->fldrv_priv;
 	int ret;

-	spin_lock(chip->mutex);
+	mutex_lock(&chip->mutex);
 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
 	if (ret)
 		goto out_unlock;
@@ -1763,7 +1763,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
 	ret = 0;

 out_unlock:
-	spin_unlock(chip->mutex);
+	mutex_unlock(&chip->mutex);
 	return ret;
 }

@@ -1791,7 +1791,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 		chip = &cfi->chips[i];

 	retry:
-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		switch(chip->state) {
 		case FL_READY:
@@ -1805,7 +1805,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 			 * with the chip now anyway.
 			 */
 		case FL_SYNCING:
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 			break;

 		default:
@@ -1813,7 +1813,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			add_wait_queue(&chip->wq, &wait);

-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);

 			schedule();

@@ -1828,13 +1828,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 	for (i--; i >=0; i--) {
 		chip = &cfi->chips[i];

-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		if (chip->state == FL_SYNCING) {
 			chip->state = chip->oldstate;
 			wake_up(&chip->wq);
 		}
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 }

@@ -1850,7 +1850,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 	for (i=0; !ret && i<cfi->numchips; i++) {
 		chip = &cfi->chips[i];

-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		switch(chip->state) {
 		case FL_READY:
@@ -1870,7 +1870,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 			ret = -EAGAIN;
 			break;
 		}
-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}

 	/* Unlock the chips again */
@@ -1879,13 +1879,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 		for (i--; i >=0; i--) {
 			chip = &cfi->chips[i];

-			spin_lock(chip->mutex);
+			mutex_lock(&chip->mutex);

 			if (chip->state == FL_PM_SUSPENDED) {
 				chip->state = chip->oldstate;
 				wake_up(&chip->wq);
 			}
-			spin_unlock(chip->mutex);
+			mutex_unlock(&chip->mutex);
 		}
 	}

@@ -1904,7 +1904,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)

 		chip = &cfi->chips[i];

-		spin_lock(chip->mutex);
+		mutex_lock(&chip->mutex);

 		if (chip->state == FL_PM_SUSPENDED) {
 			chip->state = FL_READY;
@@ -1914,7 +1914,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
 		else
 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

-		spin_unlock(chip->mutex);
+		mutex_unlock(&chip->mutex);
 	}
 }
