-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 131
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 122
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 136
-rw-r--r--  drivers/mtd/chips/fwh_lock.h        |   6
-rw-r--r--  drivers/mtd/chips/gen_probe.c       |   3
-rw-r--r--  drivers/mtd/lpddr/lpddr_cmds.c      |  79
-rw-r--r--  include/linux/mtd/flashchip.h       |   4
7 files changed, 239 insertions(+), 242 deletions(-)
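Every hunk below applies the same mechanical conversion: struct flchip now embeds a struct mutex instead of carrying a pointer to a spinlock, so spin_lock(chip->mutex)/spin_unlock(chip->mutex) (and the spin_lock_bh variants in cfi_cmdset_0020.c) become mutex_lock(&chip->mutex)/mutex_unlock(&chip->mutex), and the init path calls mutex_init() instead of spin_lock_init() plus pointer setup. A minimal C sketch of the pattern follows; the sketch_* names and the trimmed-down stand-in for struct flchip are illustrative only and are not part of the patch.

#include <linux/mutex.h>
#include <linux/wait.h>

/* Trimmed stand-in for struct flchip (see include/linux/mtd/flashchip.h). */
struct sketch_flchip {
        wait_queue_head_t wq;
        struct mutex mutex;     /* was: spinlock_t *mutex pointing at an embedded _spinlock */
};

static void sketch_flchip_init(struct sketch_flchip *chip)
{
        init_waitqueue_head(&chip->wq);
        mutex_init(&chip->mutex);       /* replaces spin_lock_init() + chip->mutex = &chip->_spinlock */
}

static void sketch_flchip_op(struct sketch_flchip *chip)
{
        mutex_lock(&chip->mutex);       /* was: spin_lock(chip->mutex) or spin_lock_bh(chip->mutex) */
        /* ... drive the chip; the holder may now sleep while waiting on chip->wq ... */
        mutex_unlock(&chip->mutex);     /* was: spin_unlock(chip->mutex) or spin_unlock_bh(chip->mutex) */
}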
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 92530433c11c..62f3ea9de848 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -725,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
 /* those should be reset too since
 they create memory references. */
 init_waitqueue_head(&chip->wq);
-spin_lock_init(&chip->_spinlock);
-chip->mutex = &chip->_spinlock;
+mutex_init(&chip->mutex);
 chip++;
 }
 }
@@ -772,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
 break;

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 cfi_udelay(1);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 /* Someone else might have been playing with it. */
 return -EAGAIN;
 }
@@ -821,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
 return -EIO;
 }

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 cfi_udelay(1);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 So we can just loop here. */
 }
@@ -850,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
 sleep:
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 return -EAGAIN;
 }
 }
@@ -899,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 * it'll happily send us to sleep. In any case, when
 * get_chip returns success we're clear to go ahead.
 */
-ret = spin_trylock(contender->mutex);
+ret = mutex_trylock(&contender->mutex);
 spin_unlock(&shared->lock);
 if (!ret)
 goto retry;
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 ret = chip_ready(map, contender, contender->start, mode);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (ret == -EAGAIN) {
-spin_unlock(contender->mutex);
+mutex_unlock(&contender->mutex);
 goto retry;
 }
 if (ret) {
-spin_unlock(contender->mutex);
+mutex_unlock(&contender->mutex);
 return ret;
 }
 spin_lock(&shared->lock);
@@ -921,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 * in FL_SYNCING state. Put contender and retry. */
 if (chip->state == FL_SYNCING) {
 put_chip(map, contender, contender->start);
-spin_unlock(contender->mutex);
+mutex_unlock(&contender->mutex);
 goto retry;
 }
-spin_unlock(contender->mutex);
+mutex_unlock(&contender->mutex);
 }

 /* Check if we already have suspended erase
@@ -934,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 spin_unlock(&shared->lock);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 goto retry;
 }

@@ -967,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 if (shared->writing && shared->writing != chip) {
 /* give back ownership to who we loaned it from */
 struct flchip *loaner = shared->writing;
-spin_lock(loaner->mutex);
+mutex_lock(&loaner->mutex);
 spin_unlock(&shared->lock);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 put_chip(map, loaner, loaner->start);
-spin_lock(chip->mutex);
-spin_unlock(loaner->mutex);
+mutex_lock(&chip->mutex);
+mutex_unlock(&loaner->mutex);
 wake_up(&chip->wq);
 return;
 }
@@ -1142,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
 (void) map_read(map, adr);
 xip_iprefetch();
 local_irq_enable();
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 xip_iprefetch();
 cond_resched();

@@ -1152,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
 * a suspended erase state. If so let's wait
 * until it's done.
 */
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 while (chip->state != newstate) {
 DECLARE_WAITQUEUE(wait, current);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 }
 /* Disallow XIP again */
 local_irq_disable();
@@ -1216,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
 int chip_state = chip->state;
 unsigned int timeo, sleep_time, reset_timeo;

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 if (inval_len)
 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 timeo = chip_op_time_max;
 if (!timeo)
@@ -1239,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
 }

 /* OK Still waiting. Drop the lock, wait a while and retry. */
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 if (sleep_time >= 1000000/HZ) {
 /*
 * Half of the normal delay still remaining
@@ -1254,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
 cond_resched();
 timeo--;
 }
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 while (chip->state != chip_state) {
 /* Someone's suspended the operation: sleep */
 DECLARE_WAITQUEUE(wait, current);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 }
 if (chip->erase_suspended && chip_state == FL_ERASING) {
 /* Erase suspend occured while sleep: reset timeout */
@@ -1300,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
 /* Ensure cmd read/writes are aligned. */
 cmd_addr = adr & ~(map_bankwidth(map)-1);

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 ret = get_chip(map, chip, cmd_addr, FL_POINT);

@@ -1311,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
 chip->state = FL_POINT;
 chip->ref_point_counter++;
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 return ret;
 }
@@ -1396,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 else
 thislen = len;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 if (chip->state == FL_POINT) {
 chip->ref_point_counter--;
 if(chip->ref_point_counter == 0)
@@ -1405,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

 put_chip(map, chip, chip->start);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 len -= thislen;
 ofs = 0;
@@ -1424,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 /* Ensure cmd read/writes are aligned. */
 cmd_addr = adr & ~(map_bankwidth(map)-1);

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, cmd_addr, FL_READY);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1441,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof

 put_chip(map, chip, cmd_addr);

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return 0;
 }

@@ -1504,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 return -EINVAL;
 }

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, mode);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1553,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 xip_enable(map, chip, adr);
 out: put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1662,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 /* Let's determine this according to the interleave only once */
 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1796,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,

 xip_enable(map, chip, cmd_adr);
 out: put_chip(map, chip, cmd_adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1875,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 adr += chip->start;

 retry:
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, FL_ERASING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1934,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 } else if (chipstatus & 0x20 && retries--) {
 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
 put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 goto retry;
 } else {
 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1946,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 xip_enable(map, chip, adr);
 out: put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1979,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
 for (i=0; !ret && i<cfi->numchips; i++) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, chip->start, FL_SYNCING);

 if (!ret) {
@@ -1990,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
 * with the chip now anyway.
 */
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }

 /* Unlock the chips again */
@@ -1998,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
 for (i--; i >=0; i--) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (chip->state == FL_SYNCING) {
 chip->state = chip->oldstate;
 chip->oldstate = FL_READY;
 wake_up(&chip->wq);
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }
 }

@@ -2051,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip

 adr += chip->start;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, FL_LOCKING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -2088,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip

 xip_enable(map, chip, adr);
 out: put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -2153,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
 struct cfi_private *cfi = map->fldrv_priv;
 int ret;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -2175,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

 put_chip(map, chip, chip->start);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return 0;
 }

@@ -2450,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
 for (i=0; !ret && i<cfi->numchips; i++) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 switch (chip->state) {
 case FL_READY:
@@ -2482,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
 case FL_PM_SUSPENDED:
 break;
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }

 /* Unlock the chips again */
@@ -2491,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
 for (i--; i >=0; i--) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (chip->state == FL_PM_SUSPENDED) {
 /* No need to force it into a known state here,
@@ -2501,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
 chip->oldstate = FL_READY;
 wake_up(&chip->wq);
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }
 }

@@ -2542,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)

 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 /* Go to known state. Chip may have been power cycled */
 if (chip->state == FL_PM_SUSPENDED) {
@@ -2551,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
 wake_up(&chip->wq);
 }

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);


 if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2571,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
 /* force the completion of any ongoing operation
 and switch to array mode so any bootloader in
 flash is accessible for soft reboot. */
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
 if (!ret) {
 map_write(map, CMD(0xff), chip->start);
 chip->state = FL_SHUTDOWN;
 put_chip(map, chip, chip->start);
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }

 return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index ea2a7f66ddf9..c93e47d21ce0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -565,9 +565,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
 return -EIO;
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 cfi_udelay(1);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 /* Someone else might have been playing with it. */
 goto retry;
 }
@@ -611,9 +611,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 return -EIO;
 }

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 cfi_udelay(1);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
 So we can just loop here. */
 }
@@ -637,10 +637,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 sleep:
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 goto resettime;
 }
 }
@@ -772,7 +772,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 (void) map_read(map, adr);
 xip_iprefetch();
 local_irq_enable();
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 xip_iprefetch();
 cond_resched();

@@ -782,15 +782,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 * a suspended erase state. If so let's wait
 * until it's done.
 */
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 while (chip->state != FL_XIP_WHILE_ERASING) {
 DECLARE_WAITQUEUE(wait, current);
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 }
 /* Disallow XIP again */
 local_irq_disable();
@@ -852,17 +852,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,

 #define UDELAY(map, chip, adr, usec) \
 do { \
-spin_unlock(chip->mutex); \
+mutex_unlock(&chip->mutex); \
 cfi_udelay(usec); \
-spin_lock(chip->mutex); \
+mutex_lock(&chip->mutex); \
 } while (0)

 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
 do { \
-spin_unlock(chip->mutex); \
+mutex_unlock(&chip->mutex); \
 INVALIDATE_CACHED_RANGE(map, adr, len); \
 cfi_udelay(usec); \
-spin_lock(chip->mutex); \
+mutex_lock(&chip->mutex); \
 } while (0)

 #endif
@@ -878,10 +878,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
 /* Ensure cmd read/writes are aligned. */
 cmd_addr = adr & ~(map_bankwidth(map)-1);

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, cmd_addr, FL_READY);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -894,7 +894,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof

 put_chip(map, chip, cmd_addr);

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return 0;
 }

@@ -948,7 +948,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 struct cfi_private *cfi = map->fldrv_priv;

 retry:
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (chip->state != FL_READY){
 #if 0
@@ -957,7 +957,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 schedule();
 remove_wait_queue(&chip->wq, &wait);
@@ -986,7 +986,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

 wake_up(&chip->wq);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 return 0;
 }
@@ -1055,10 +1055,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 adr += chip->start;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, FL_WRITING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1101,11 +1101,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,

 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
 timeo = jiffies + (HZ / 2); /* FIXME */
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 continue;
 }

@@ -1137,7 +1137,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 op_done:
 chip->state = FL_READY;
 put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 return ret;
 }
@@ -1169,7 +1169,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 map_word tmp_buf;

 retry:
-spin_lock(cfi->chips[chipnum].mutex);
+mutex_lock(&cfi->chips[chipnum].mutex);

 if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -1178,7 +1178,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&cfi->chips[chipnum].wq, &wait);

-spin_unlock(cfi->chips[chipnum].mutex);
+mutex_unlock(&cfi->chips[chipnum].mutex);

 schedule();
 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1192,7 +1192,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 /* Load 'tmp_buf' with old contents of flash */
 tmp_buf = map_read(map, bus_ofs+chipstart);

-spin_unlock(cfi->chips[chipnum].mutex);
+mutex_unlock(&cfi->chips[chipnum].mutex);

 /* Number of bytes to copy from buffer */
 n = min_t(int, len, map_bankwidth(map)-i);
@@ -1247,7 +1247,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 map_word tmp_buf;

 retry1:
-spin_lock(cfi->chips[chipnum].mutex);
+mutex_lock(&cfi->chips[chipnum].mutex);

 if (cfi->chips[chipnum].state != FL_READY) {
 #if 0
@@ -1256,7 +1256,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&cfi->chips[chipnum].wq, &wait);

-spin_unlock(cfi->chips[chipnum].mutex);
+mutex_unlock(&cfi->chips[chipnum].mutex);

 schedule();
 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1269,7 +1269,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,

 tmp_buf = map_read(map, ofs + chipstart);

-spin_unlock(cfi->chips[chipnum].mutex);
+mutex_unlock(&cfi->chips[chipnum].mutex);

 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

@@ -1304,10 +1304,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 adr += chip->start;
 cmd_adr = adr;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, FL_WRITING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1362,11 +1362,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,

 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
 timeo = jiffies + (HZ / 2); /* FIXME */
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 continue;
 }

@@ -1394,7 +1394,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 op_done:
 chip->state = FL_READY;
 put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 return ret;
 }
@@ -1494,10 +1494,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)

 adr = cfi->addr_unlock1;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, FL_WRITING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1530,10 +1530,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 /* Someone's suspended the erase. Sleep */
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 continue;
 }
 if (chip->erase_suspended) {
@@ -1567,7 +1567,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 chip->state = FL_READY;
 xip_enable(map, chip, adr);
 put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 return ret;
 }
@@ -1582,10 +1582,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 adr += chip->start;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr, FL_ERASING);
 if (ret) {
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1618,10 +1618,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 /* Someone's suspended the erase. Sleep */
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 schedule();
 remove_wait_queue(&chip->wq, &wait);
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 continue;
 }
 if (chip->erase_suspended) {
@@ -1657,7 +1657,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,

 chip->state = FL_READY;
 put_chip(map, chip, adr);
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1709,7 +1709,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
 struct cfi_private *cfi = map->fldrv_priv;
 int ret;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
 if (ret)
 goto out_unlock;
@@ -1735,7 +1735,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
 ret = 0;

 out_unlock:
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1745,7 +1745,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
 struct cfi_private *cfi = map->fldrv_priv;
 int ret;

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);
 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
 if (ret)
 goto out_unlock;
@@ -1763,7 +1763,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
 ret = 0;

 out_unlock:
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 return ret;
 }

@@ -1791,7 +1791,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 chip = &cfi->chips[i];

 retry:
-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 switch(chip->state) {
 case FL_READY:
@@ -1805,7 +1805,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 * with the chip now anyway.
 */
 case FL_SYNCING:
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 break;

 default:
@@ -1813,7 +1813,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 set_current_state(TASK_UNINTERRUPTIBLE);
 add_wait_queue(&chip->wq, &wait);

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);

 schedule();

@@ -1828,13 +1828,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
 for (i--; i >=0; i--) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (chip->state == FL_SYNCING) {
 chip->state = chip->oldstate;
 wake_up(&chip->wq);
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }
 }

@@ -1850,7 +1850,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 for (i=0; !ret && i<cfi->numchips; i++) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 switch(chip->state) {
 case FL_READY:
@@ -1870,7 +1870,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 ret = -EAGAIN;
 break;
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }

 /* Unlock the chips again */
@@ -1879,13 +1879,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
 for (i--; i >=0; i--) {
 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (chip->state == FL_PM_SUSPENDED) {
 chip->state = chip->oldstate;
 wake_up(&chip->wq);
 }
-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }
 }

@@ -1904,7 +1904,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)

 chip = &cfi->chips[i];

-spin_lock(chip->mutex);
+mutex_lock(&chip->mutex);

 if (chip->state == FL_PM_SUSPENDED) {
 chip->state = FL_READY;
@@ -1914,7 +1914,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
 else
 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

-spin_unlock(chip->mutex);
+mutex_unlock(&chip->mutex);
 }
 }

diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 0667a671525d..e54e8c169d76 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
265 | 265 | ||
266 | timeo = jiffies + HZ; | 266 | timeo = jiffies + HZ; |
267 | retry: | 267 | retry: |
268 | spin_lock_bh(chip->mutex); | 268 | mutex_lock(&chip->mutex); |
269 | 269 | ||
270 | /* Check that the chip's ready to talk to us. | 270 | /* Check that the chip's ready to talk to us. |
271 | * If it's in FL_ERASING state, suspend it and make it talk now. | 271 | * If it's in FL_ERASING state, suspend it and make it talk now. |
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
296 | /* make sure we're in 'read status' mode */ | 296 | /* make sure we're in 'read status' mode */ |
297 | map_write(map, CMD(0x70), cmd_addr); | 297 | map_write(map, CMD(0x70), cmd_addr); |
298 | chip->state = FL_ERASING; | 298 | chip->state = FL_ERASING; |
299 | spin_unlock_bh(chip->mutex); | 299 | mutex_unlock(&chip->mutex); |
300 | printk(KERN_ERR "Chip not ready after erase " | 300 | printk(KERN_ERR "Chip not ready after erase " |
301 | "suspended: status = 0x%lx\n", status.x[0]); | 301 | "suspended: status = 0x%lx\n", status.x[0]); |
302 | return -EIO; | 302 | return -EIO; |
303 | } | 303 | } |
304 | 304 | ||
305 | spin_unlock_bh(chip->mutex); | 305 | mutex_unlock(&chip->mutex); |
306 | cfi_udelay(1); | 306 | cfi_udelay(1); |
307 | spin_lock_bh(chip->mutex); | 307 | mutex_lock(&chip->mutex); |
308 | } | 308 | } |
309 | 309 | ||
310 | suspended = 1; | 310 | suspended = 1; |
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
335 | 335 | ||
336 | /* Urgh. Chip not yet ready to talk to us. */ | 336 | /* Urgh. Chip not yet ready to talk to us. */ |
337 | if (time_after(jiffies, timeo)) { | 337 | if (time_after(jiffies, timeo)) { |
338 | spin_unlock_bh(chip->mutex); | 338 | mutex_unlock(&chip->mutex); |
339 | printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); | 339 | printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); |
340 | return -EIO; | 340 | return -EIO; |
341 | } | 341 | } |
342 | 342 | ||
343 | /* Latency issues. Drop the lock, wait a while and retry */ | 343 | /* Latency issues. Drop the lock, wait a while and retry */ |
344 | spin_unlock_bh(chip->mutex); | 344 | mutex_unlock(&chip->mutex); |
345 | cfi_udelay(1); | 345 | cfi_udelay(1); |
346 | goto retry; | 346 | goto retry; |
347 | 347 | ||
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
351 | someone changes the status */ | 351 | someone changes the status */ |
352 | set_current_state(TASK_UNINTERRUPTIBLE); | 352 | set_current_state(TASK_UNINTERRUPTIBLE); |
353 | add_wait_queue(&chip->wq, &wait); | 353 | add_wait_queue(&chip->wq, &wait); |
354 | spin_unlock_bh(chip->mutex); | 354 | mutex_unlock(&chip->mutex); |
355 | schedule(); | 355 | schedule(); |
356 | remove_wait_queue(&chip->wq, &wait); | 356 | remove_wait_queue(&chip->wq, &wait); |
357 | timeo = jiffies + HZ; | 357 | timeo = jiffies + HZ; |
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof | |||
376 | } | 376 | } |
377 | 377 | ||
378 | wake_up(&chip->wq); | 378 | wake_up(&chip->wq); |
379 | spin_unlock_bh(chip->mutex); | 379 | mutex_unlock(&chip->mutex); |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
445 | #ifdef DEBUG_CFI_FEATURES | 445 | #ifdef DEBUG_CFI_FEATURES |
446 | printk("%s: chip->state[%d]\n", __func__, chip->state); | 446 | printk("%s: chip->state[%d]\n", __func__, chip->state); |
447 | #endif | 447 | #endif |
448 | spin_lock_bh(chip->mutex); | 448 | mutex_lock(&chip->mutex); |
449 | 449 | ||
450 | /* Check that the chip's ready to talk to us. | 450 | /* Check that the chip's ready to talk to us. |
451 | * Later, we can actually think about interrupting it | 451 | * Later, we can actually think about interrupting it |
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
470 | break; | 470 | break; |
471 | /* Urgh. Chip not yet ready to talk to us. */ | 471 | /* Urgh. Chip not yet ready to talk to us. */ |
472 | if (time_after(jiffies, timeo)) { | 472 | if (time_after(jiffies, timeo)) { |
473 | spin_unlock_bh(chip->mutex); | 473 | mutex_unlock(&chip->mutex); |
474 | printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", | 474 | printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", |
475 | status.x[0], map_read(map, cmd_adr).x[0]); | 475 | status.x[0], map_read(map, cmd_adr).x[0]); |
476 | return -EIO; | 476 | return -EIO; |
477 | } | 477 | } |
478 | 478 | ||
479 | /* Latency issues. Drop the lock, wait a while and retry */ | 479 | /* Latency issues. Drop the lock, wait a while and retry */ |
480 | spin_unlock_bh(chip->mutex); | 480 | mutex_unlock(&chip->mutex); |
481 | cfi_udelay(1); | 481 | cfi_udelay(1); |
482 | goto retry; | 482 | goto retry; |
483 | 483 | ||
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
486 | someone changes the status */ | 486 | someone changes the status */ |
487 | set_current_state(TASK_UNINTERRUPTIBLE); | 487 | set_current_state(TASK_UNINTERRUPTIBLE); |
488 | add_wait_queue(&chip->wq, &wait); | 488 | add_wait_queue(&chip->wq, &wait); |
489 | spin_unlock_bh(chip->mutex); | 489 | mutex_unlock(&chip->mutex); |
490 | schedule(); | 490 | schedule(); |
491 | remove_wait_queue(&chip->wq, &wait); | 491 | remove_wait_queue(&chip->wq, &wait); |
492 | timeo = jiffies + HZ; | 492 | timeo = jiffies + HZ; |
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
503 | if (map_word_andequal(map, status, status_OK, status_OK)) | 503 | if (map_word_andequal(map, status, status_OK, status_OK)) |
504 | break; | 504 | break; |
505 | 505 | ||
506 | spin_unlock_bh(chip->mutex); | 506 | mutex_unlock(&chip->mutex); |
507 | cfi_udelay(1); | 507 | cfi_udelay(1); |
508 | spin_lock_bh(chip->mutex); | 508 | mutex_lock(&chip->mutex); |
509 | 509 | ||
510 | if (++z > 100) { | 510 | if (++z > 100) { |
511 | /* Argh. Not ready for write to buffer */ | 511 | /* Argh. Not ready for write to buffer */ |
512 | DISABLE_VPP(map); | 512 | DISABLE_VPP(map); |
513 | map_write(map, CMD(0x70), cmd_adr); | 513 | map_write(map, CMD(0x70), cmd_adr); |
514 | chip->state = FL_STATUS; | 514 | chip->state = FL_STATUS; |
515 | spin_unlock_bh(chip->mutex); | 515 | mutex_unlock(&chip->mutex); |
516 | printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); | 516 | printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); |
517 | return -EIO; | 517 | return -EIO; |
518 | } | 518 | } |
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
532 | map_write(map, CMD(0xd0), cmd_adr); | 532 | map_write(map, CMD(0xd0), cmd_adr); |
533 | chip->state = FL_WRITING; | 533 | chip->state = FL_WRITING; |
534 | 534 | ||
535 | spin_unlock_bh(chip->mutex); | 535 | mutex_unlock(&chip->mutex); |
536 | cfi_udelay(chip->buffer_write_time); | 536 | cfi_udelay(chip->buffer_write_time); |
537 | spin_lock_bh(chip->mutex); | 537 | mutex_lock(&chip->mutex); |
538 | 538 | ||
539 | timeo = jiffies + (HZ/2); | 539 | timeo = jiffies + (HZ/2); |
540 | z = 0; | 540 | z = 0; |
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
543 | /* Someone's suspended the write. Sleep */ | 543 | /* Someone's suspended the write. Sleep */ |
544 | set_current_state(TASK_UNINTERRUPTIBLE); | 544 | set_current_state(TASK_UNINTERRUPTIBLE); |
545 | add_wait_queue(&chip->wq, &wait); | 545 | add_wait_queue(&chip->wq, &wait); |
546 | spin_unlock_bh(chip->mutex); | 546 | mutex_unlock(&chip->mutex); |
547 | schedule(); | 547 | schedule(); |
548 | remove_wait_queue(&chip->wq, &wait); | 548 | remove_wait_queue(&chip->wq, &wait); |
549 | timeo = jiffies + (HZ / 2); /* FIXME */ | 549 | timeo = jiffies + (HZ / 2); /* FIXME */ |
550 | spin_lock_bh(chip->mutex); | 550 | mutex_lock(&chip->mutex); |
551 | continue; | 551 | continue; |
552 | } | 552 | } |
553 | 553 | ||
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
563 | map_write(map, CMD(0x70), adr); | 563 | map_write(map, CMD(0x70), adr); |
564 | chip->state = FL_STATUS; | 564 | chip->state = FL_STATUS; |
565 | DISABLE_VPP(map); | 565 | DISABLE_VPP(map); |
566 | spin_unlock_bh(chip->mutex); | 566 | mutex_unlock(&chip->mutex); |
567 | printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); | 567 | printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); |
568 | return -EIO; | 568 | return -EIO; |
569 | } | 569 | } |
570 | 570 | ||
571 | /* Latency issues. Drop the lock, wait a while and retry */ | 571 | /* Latency issues. Drop the lock, wait a while and retry */ |
572 | spin_unlock_bh(chip->mutex); | 572 | mutex_unlock(&chip->mutex); |
573 | cfi_udelay(1); | 573 | cfi_udelay(1); |
574 | z++; | 574 | z++; |
575 | spin_lock_bh(chip->mutex); | 575 | mutex_lock(&chip->mutex); |
576 | } | 576 | } |
577 | if (!z) { | 577 | if (!z) { |
578 | chip->buffer_write_time--; | 578 | chip->buffer_write_time--; |
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
596 | /* put back into read status register mode */ | 596 | /* put back into read status register mode */ |
597 | map_write(map, CMD(0x70), adr); | 597 | map_write(map, CMD(0x70), adr); |
598 | wake_up(&chip->wq); | 598 | wake_up(&chip->wq); |
599 | spin_unlock_bh(chip->mutex); | 599 | mutex_unlock(&chip->mutex); |
600 | return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO; | 600 | return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO; |
601 | } | 601 | } |
602 | wake_up(&chip->wq); | 602 | wake_up(&chip->wq); |
603 | spin_unlock_bh(chip->mutex); | 603 | mutex_unlock(&chip->mutex); |
604 | 604 | ||
605 | return 0; | 605 | return 0; |
606 | } | 606 | } |
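
Every hunk above in do_write_buffer() repeats one polling shape: read the status word and, if the part is not ready yet, drop the chip lock, delay a microsecond, re-take the lock and try again until a timeout expires. A condensed sketch of that loop with the converted lock follows; the helper name chip_poll_ready() and the bare -EIO return are illustrative, not code from the driver.

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/mtd/map.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/cfi.h>

/* Hypothetical helper: poll the status word until the chip reports ready,
 * releasing the chip mutex around each short delay so other users of the
 * chip are not shut out while we busy-wait. Called with chip->mutex held,
 * returns with it held. */
static int chip_poll_ready(struct map_info *map, struct flchip *chip,
                           unsigned long cmd_adr, map_word status_OK)
{
        unsigned long timeo = jiffies + HZ;
        map_word status;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        return 0;
                if (time_after(jiffies, timeo))
                        return -EIO;    /* caller prints the diagnostic */

                /* Latency issues: drop the lock, wait a while, retry. */
                mutex_unlock(&chip->mutex);
                cfi_udelay(1);
                mutex_lock(&chip->mutex);
        }
}

The invariant every converted function keeps is visible here: chip->mutex is only dropped for the duration of a delay or a sleep, and is always re-taken before chip->state is read or written.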
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u | |||
749 | 749 | ||
750 | timeo = jiffies + HZ; | 750 | timeo = jiffies + HZ; |
751 | retry: | 751 | retry: |
752 | spin_lock_bh(chip->mutex); | 752 | mutex_lock(&chip->mutex); |
753 | 753 | ||
754 | /* Check that the chip's ready to talk to us. */ | 754 | /* Check that the chip's ready to talk to us. */ |
755 | switch (chip->state) { | 755 | switch (chip->state) { |
@@ -766,13 +766,13 @@ retry: | |||
766 | 766 | ||
767 | /* Urgh. Chip not yet ready to talk to us. */ | 767 | /* Urgh. Chip not yet ready to talk to us. */ |
768 | if (time_after(jiffies, timeo)) { | 768 | if (time_after(jiffies, timeo)) { |
769 | spin_unlock_bh(chip->mutex); | 769 | mutex_unlock(&chip->mutex); |
770 | printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); | 770 | printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); |
771 | return -EIO; | 771 | return -EIO; |
772 | } | 772 | } |
773 | 773 | ||
774 | /* Latency issues. Drop the lock, wait a while and retry */ | 774 | /* Latency issues. Drop the lock, wait a while and retry */ |
775 | spin_unlock_bh(chip->mutex); | 775 | mutex_unlock(&chip->mutex); |
776 | cfi_udelay(1); | 776 | cfi_udelay(1); |
777 | goto retry; | 777 | goto retry; |
778 | 778 | ||
@@ -781,7 +781,7 @@ retry: | |||
781 | someone changes the status */ | 781 | someone changes the status */ |
782 | set_current_state(TASK_UNINTERRUPTIBLE); | 782 | set_current_state(TASK_UNINTERRUPTIBLE); |
783 | add_wait_queue(&chip->wq, &wait); | 783 | add_wait_queue(&chip->wq, &wait); |
784 | spin_unlock_bh(chip->mutex); | 784 | mutex_unlock(&chip->mutex); |
785 | schedule(); | 785 | schedule(); |
786 | remove_wait_queue(&chip->wq, &wait); | 786 | remove_wait_queue(&chip->wq, &wait); |
787 | timeo = jiffies + HZ; | 787 | timeo = jiffies + HZ; |
@@ -797,9 +797,9 @@ retry: | |||
797 | map_write(map, CMD(0xD0), adr); | 797 | map_write(map, CMD(0xD0), adr); |
798 | chip->state = FL_ERASING; | 798 | chip->state = FL_ERASING; |
799 | 799 | ||
800 | spin_unlock_bh(chip->mutex); | 800 | mutex_unlock(&chip->mutex); |
801 | msleep(1000); | 801 | msleep(1000); |
802 | spin_lock_bh(chip->mutex); | 802 | mutex_lock(&chip->mutex); |
803 | 803 | ||
804 | /* FIXME. Use a timer to check this, and return immediately. */ | 804 | /* FIXME. Use a timer to check this, and return immediately. */ |
805 | /* Once the state machine's known to be working I'll do that */ | 805 | /* Once the state machine's known to be working I'll do that */ |
@@ -810,11 +810,11 @@ retry: | |||
810 | /* Someone's suspended the erase. Sleep */ | 810 | /* Someone's suspended the erase. Sleep */ |
811 | set_current_state(TASK_UNINTERRUPTIBLE); | 811 | set_current_state(TASK_UNINTERRUPTIBLE); |
812 | add_wait_queue(&chip->wq, &wait); | 812 | add_wait_queue(&chip->wq, &wait); |
813 | spin_unlock_bh(chip->mutex); | 813 | mutex_unlock(&chip->mutex); |
814 | schedule(); | 814 | schedule(); |
815 | remove_wait_queue(&chip->wq, &wait); | 815 | remove_wait_queue(&chip->wq, &wait); |
816 | timeo = jiffies + (HZ*20); /* FIXME */ | 816 | timeo = jiffies + (HZ*20); /* FIXME */ |
817 | spin_lock_bh(chip->mutex); | 817 | mutex_lock(&chip->mutex); |
818 | continue; | 818 | continue; |
819 | } | 819 | } |
820 | 820 | ||
@@ -828,14 +828,14 @@ retry: | |||
828 | chip->state = FL_STATUS; | 828 | chip->state = FL_STATUS; |
829 | printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); | 829 | printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); |
830 | DISABLE_VPP(map); | 830 | DISABLE_VPP(map); |
831 | spin_unlock_bh(chip->mutex); | 831 | mutex_unlock(&chip->mutex); |
832 | return -EIO; | 832 | return -EIO; |
833 | } | 833 | } |
834 | 834 | ||
835 | /* Latency issues. Drop the lock, wait a while and retry */ | 835 | /* Latency issues. Drop the lock, wait a while and retry */ |
836 | spin_unlock_bh(chip->mutex); | 836 | mutex_unlock(&chip->mutex); |
837 | cfi_udelay(1); | 837 | cfi_udelay(1); |
838 | spin_lock_bh(chip->mutex); | 838 | mutex_lock(&chip->mutex); |
839 | } | 839 | } |
840 | 840 | ||
841 | DISABLE_VPP(map); | 841 | DISABLE_VPP(map); |
@@ -878,7 +878,7 @@ retry: | |||
878 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus); | 878 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus); |
879 | timeo = jiffies + HZ; | 879 | timeo = jiffies + HZ; |
880 | chip->state = FL_STATUS; | 880 | chip->state = FL_STATUS; |
881 | spin_unlock_bh(chip->mutex); | 881 | mutex_unlock(&chip->mutex); |
882 | goto retry; | 882 | goto retry; |
883 | } | 883 | } |
884 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); | 884 | printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); |
@@ -887,7 +887,7 @@ retry: | |||
887 | } | 887 | } |
888 | 888 | ||
889 | wake_up(&chip->wq); | 889 | wake_up(&chip->wq); |
890 | spin_unlock_bh(chip->mutex); | 890 | mutex_unlock(&chip->mutex); |
891 | return ret; | 891 | return ret; |
892 | } | 892 | } |
893 | 893 | ||
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
995 | chip = &cfi->chips[i]; | 995 | chip = &cfi->chips[i]; |
996 | 996 | ||
997 | retry: | 997 | retry: |
998 | spin_lock_bh(chip->mutex); | 998 | mutex_lock(&chip->mutex); |
999 | 999 | ||
1000 | switch(chip->state) { | 1000 | switch(chip->state) { |
1001 | case FL_READY: | 1001 | case FL_READY: |
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
1009 | * with the chip now anyway. | 1009 | * with the chip now anyway. |
1010 | */ | 1010 | */ |
1011 | case FL_SYNCING: | 1011 | case FL_SYNCING: |
1012 | spin_unlock_bh(chip->mutex); | 1012 | mutex_unlock(&chip->mutex); |
1013 | break; | 1013 | break; |
1014 | 1014 | ||
1015 | default: | 1015 | default: |
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
1017 | set_current_state(TASK_UNINTERRUPTIBLE); | 1017 | set_current_state(TASK_UNINTERRUPTIBLE); |
1018 | add_wait_queue(&chip->wq, &wait); | 1018 | add_wait_queue(&chip->wq, &wait); |
1019 | 1019 | ||
1020 | spin_unlock_bh(chip->mutex); | 1020 | mutex_unlock(&chip->mutex); |
1021 | schedule(); | 1021 | schedule(); |
1022 | remove_wait_queue(&chip->wq, &wait); | 1022 | remove_wait_queue(&chip->wq, &wait); |
1023 | 1023 | ||
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd) | |||
1030 | for (i--; i >=0; i--) { | 1030 | for (i--; i >=0; i--) { |
1031 | chip = &cfi->chips[i]; | 1031 | chip = &cfi->chips[i]; |
1032 | 1032 | ||
1033 | spin_lock_bh(chip->mutex); | 1033 | mutex_lock(&chip->mutex); |
1034 | 1034 | ||
1035 | if (chip->state == FL_SYNCING) { | 1035 | if (chip->state == FL_SYNCING) { |
1036 | chip->state = chip->oldstate; | 1036 | chip->state = chip->oldstate; |
1037 | wake_up(&chip->wq); | 1037 | wake_up(&chip->wq); |
1038 | } | 1038 | } |
1039 | spin_unlock_bh(chip->mutex); | 1039 | mutex_unlock(&chip->mutex); |
1040 | } | 1040 | } |
1041 | } | 1041 | } |
1042 | 1042 | ||
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un | |||
1054 | 1054 | ||
1055 | timeo = jiffies + HZ; | 1055 | timeo = jiffies + HZ; |
1056 | retry: | 1056 | retry: |
1057 | spin_lock_bh(chip->mutex); | 1057 | mutex_lock(&chip->mutex); |
1058 | 1058 | ||
1059 | /* Check that the chip's ready to talk to us. */ | 1059 | /* Check that the chip's ready to talk to us. */ |
1060 | switch (chip->state) { | 1060 | switch (chip->state) { |
@@ -1071,13 +1071,13 @@ retry: | |||
1071 | 1071 | ||
1072 | /* Urgh. Chip not yet ready to talk to us. */ | 1072 | /* Urgh. Chip not yet ready to talk to us. */ |
1073 | if (time_after(jiffies, timeo)) { | 1073 | if (time_after(jiffies, timeo)) { |
1074 | spin_unlock_bh(chip->mutex); | 1074 | mutex_unlock(&chip->mutex); |
1075 | printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); | 1075 | printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); |
1076 | return -EIO; | 1076 | return -EIO; |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | /* Latency issues. Drop the lock, wait a while and retry */ | 1079 | /* Latency issues. Drop the lock, wait a while and retry */ |
1080 | spin_unlock_bh(chip->mutex); | 1080 | mutex_unlock(&chip->mutex); |
1081 | cfi_udelay(1); | 1081 | cfi_udelay(1); |
1082 | goto retry; | 1082 | goto retry; |
1083 | 1083 | ||
@@ -1086,7 +1086,7 @@ retry: | |||
1086 | someone changes the status */ | 1086 | someone changes the status */ |
1087 | set_current_state(TASK_UNINTERRUPTIBLE); | 1087 | set_current_state(TASK_UNINTERRUPTIBLE); |
1088 | add_wait_queue(&chip->wq, &wait); | 1088 | add_wait_queue(&chip->wq, &wait); |
1089 | spin_unlock_bh(chip->mutex); | 1089 | mutex_unlock(&chip->mutex); |
1090 | schedule(); | 1090 | schedule(); |
1091 | remove_wait_queue(&chip->wq, &wait); | 1091 | remove_wait_queue(&chip->wq, &wait); |
1092 | timeo = jiffies + HZ; | 1092 | timeo = jiffies + HZ; |
@@ -1098,9 +1098,9 @@ retry: | |||
1098 | map_write(map, CMD(0x01), adr); | 1098 | map_write(map, CMD(0x01), adr); |
1099 | chip->state = FL_LOCKING; | 1099 | chip->state = FL_LOCKING; |
1100 | 1100 | ||
1101 | spin_unlock_bh(chip->mutex); | 1101 | mutex_unlock(&chip->mutex); |
1102 | msleep(1000); | 1102 | msleep(1000); |
1103 | spin_lock_bh(chip->mutex); | 1103 | mutex_lock(&chip->mutex); |
1104 | 1104 | ||
1105 | /* FIXME. Use a timer to check this, and return immediately. */ | 1105 | /* FIXME. Use a timer to check this, and return immediately. */ |
1106 | /* Once the state machine's known to be working I'll do that */ | 1106 | /* Once the state machine's known to be working I'll do that */ |
@@ -1118,21 +1118,21 @@ retry: | |||
1118 | chip->state = FL_STATUS; | 1118 | chip->state = FL_STATUS; |
1119 | printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); | 1119 | printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); |
1120 | DISABLE_VPP(map); | 1120 | DISABLE_VPP(map); |
1121 | spin_unlock_bh(chip->mutex); | 1121 | mutex_unlock(&chip->mutex); |
1122 | return -EIO; | 1122 | return -EIO; |
1123 | } | 1123 | } |
1124 | 1124 | ||
1125 | /* Latency issues. Drop the lock, wait a while and retry */ | 1125 | /* Latency issues. Drop the lock, wait a while and retry */ |
1126 | spin_unlock_bh(chip->mutex); | 1126 | mutex_unlock(&chip->mutex); |
1127 | cfi_udelay(1); | 1127 | cfi_udelay(1); |
1128 | spin_lock_bh(chip->mutex); | 1128 | mutex_lock(&chip->mutex); |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | /* Done and happy. */ | 1131 | /* Done and happy. */ |
1132 | chip->state = FL_STATUS; | 1132 | chip->state = FL_STATUS; |
1133 | DISABLE_VPP(map); | 1133 | DISABLE_VPP(map); |
1134 | wake_up(&chip->wq); | 1134 | wake_up(&chip->wq); |
1135 | spin_unlock_bh(chip->mutex); | 1135 | mutex_unlock(&chip->mutex); |
1136 | return 0; | 1136 | return 0; |
1137 | } | 1137 | } |
1138 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 1138 | static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, | |||
1203 | 1203 | ||
1204 | timeo = jiffies + HZ; | 1204 | timeo = jiffies + HZ; |
1205 | retry: | 1205 | retry: |
1206 | spin_lock_bh(chip->mutex); | 1206 | mutex_lock(&chip->mutex); |
1207 | 1207 | ||
1208 | /* Check that the chip's ready to talk to us. */ | 1208 | /* Check that the chip's ready to talk to us. */ |
1209 | switch (chip->state) { | 1209 | switch (chip->state) { |
@@ -1220,13 +1220,13 @@ retry: | |||
1220 | 1220 | ||
1221 | /* Urgh. Chip not yet ready to talk to us. */ | 1221 | /* Urgh. Chip not yet ready to talk to us. */ |
1222 | if (time_after(jiffies, timeo)) { | 1222 | if (time_after(jiffies, timeo)) { |
1223 | spin_unlock_bh(chip->mutex); | 1223 | mutex_unlock(&chip->mutex); |
1224 | printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); | 1224 | printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); |
1225 | return -EIO; | 1225 | return -EIO; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | /* Latency issues. Drop the lock, wait a while and retry */ | 1228 | /* Latency issues. Drop the lock, wait a while and retry */ |
1229 | spin_unlock_bh(chip->mutex); | 1229 | mutex_unlock(&chip->mutex); |
1230 | cfi_udelay(1); | 1230 | cfi_udelay(1); |
1231 | goto retry; | 1231 | goto retry; |
1232 | 1232 | ||
@@ -1235,7 +1235,7 @@ retry: | |||
1235 | someone changes the status */ | 1235 | someone changes the status */ |
1236 | set_current_state(TASK_UNINTERRUPTIBLE); | 1236 | set_current_state(TASK_UNINTERRUPTIBLE); |
1237 | add_wait_queue(&chip->wq, &wait); | 1237 | add_wait_queue(&chip->wq, &wait); |
1238 | spin_unlock_bh(chip->mutex); | 1238 | mutex_unlock(&chip->mutex); |
1239 | schedule(); | 1239 | schedule(); |
1240 | remove_wait_queue(&chip->wq, &wait); | 1240 | remove_wait_queue(&chip->wq, &wait); |
1241 | timeo = jiffies + HZ; | 1241 | timeo = jiffies + HZ; |
@@ -1247,9 +1247,9 @@ retry: | |||
1247 | map_write(map, CMD(0xD0), adr); | 1247 | map_write(map, CMD(0xD0), adr); |
1248 | chip->state = FL_UNLOCKING; | 1248 | chip->state = FL_UNLOCKING; |
1249 | 1249 | ||
1250 | spin_unlock_bh(chip->mutex); | 1250 | mutex_unlock(&chip->mutex); |
1251 | msleep(1000); | 1251 | msleep(1000); |
1252 | spin_lock_bh(chip->mutex); | 1252 | mutex_lock(&chip->mutex); |
1253 | 1253 | ||
1254 | /* FIXME. Use a timer to check this, and return immediately. */ | 1254 | /* FIXME. Use a timer to check this, and return immediately. */ |
1255 | /* Once the state machine's known to be working I'll do that */ | 1255 | /* Once the state machine's known to be working I'll do that */ |
@@ -1267,21 +1267,21 @@ retry: | |||
1267 | chip->state = FL_STATUS; | 1267 | chip->state = FL_STATUS; |
1268 | printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); | 1268 | printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); |
1269 | DISABLE_VPP(map); | 1269 | DISABLE_VPP(map); |
1270 | spin_unlock_bh(chip->mutex); | 1270 | mutex_unlock(&chip->mutex); |
1271 | return -EIO; | 1271 | return -EIO; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | /* Latency issues. Drop the unlock, wait a while and retry */ | 1274 | /* Latency issues. Drop the unlock, wait a while and retry */ |
1275 | spin_unlock_bh(chip->mutex); | 1275 | mutex_unlock(&chip->mutex); |
1276 | cfi_udelay(1); | 1276 | cfi_udelay(1); |
1277 | spin_lock_bh(chip->mutex); | 1277 | mutex_lock(&chip->mutex); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | /* Done and happy. */ | 1280 | /* Done and happy. */ |
1281 | chip->state = FL_STATUS; | 1281 | chip->state = FL_STATUS; |
1282 | DISABLE_VPP(map); | 1282 | DISABLE_VPP(map); |
1283 | wake_up(&chip->wq); | 1283 | wake_up(&chip->wq); |
1284 | spin_unlock_bh(chip->mutex); | 1284 | mutex_unlock(&chip->mutex); |
1285 | return 0; | 1285 | return 0; |
1286 | } | 1286 | } |
1287 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 1287 | static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1334 | for (i=0; !ret && i<cfi->numchips; i++) { | 1334 | for (i=0; !ret && i<cfi->numchips; i++) { |
1335 | chip = &cfi->chips[i]; | 1335 | chip = &cfi->chips[i]; |
1336 | 1336 | ||
1337 | spin_lock_bh(chip->mutex); | 1337 | mutex_lock(&chip->mutex); |
1338 | 1338 | ||
1339 | switch(chip->state) { | 1339 | switch(chip->state) { |
1340 | case FL_READY: | 1340 | case FL_READY: |
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1354 | ret = -EAGAIN; | 1354 | ret = -EAGAIN; |
1355 | break; | 1355 | break; |
1356 | } | 1356 | } |
1357 | spin_unlock_bh(chip->mutex); | 1357 | mutex_unlock(&chip->mutex); |
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | /* Unlock the chips again */ | 1360 | /* Unlock the chips again */ |
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1363 | for (i--; i >=0; i--) { | 1363 | for (i--; i >=0; i--) { |
1364 | chip = &cfi->chips[i]; | 1364 | chip = &cfi->chips[i]; |
1365 | 1365 | ||
1366 | spin_lock_bh(chip->mutex); | 1366 | mutex_lock(&chip->mutex); |
1367 | 1367 | ||
1368 | if (chip->state == FL_PM_SUSPENDED) { | 1368 | if (chip->state == FL_PM_SUSPENDED) { |
1369 | /* No need to force it into a known state here, | 1369 | /* No need to force it into a known state here, |
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) | |||
1372 | chip->state = chip->oldstate; | 1372 | chip->state = chip->oldstate; |
1373 | wake_up(&chip->wq); | 1373 | wake_up(&chip->wq); |
1374 | } | 1374 | } |
1375 | spin_unlock_bh(chip->mutex); | 1375 | mutex_unlock(&chip->mutex); |
1376 | } | 1376 | } |
1377 | } | 1377 | } |
1378 | 1378 | ||
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd) | |||
1390 | 1390 | ||
1391 | chip = &cfi->chips[i]; | 1391 | chip = &cfi->chips[i]; |
1392 | 1392 | ||
1393 | spin_lock_bh(chip->mutex); | 1393 | mutex_lock(&chip->mutex); |
1394 | 1394 | ||
1395 | /* Go to known state. Chip may have been power cycled */ | 1395 | /* Go to known state. Chip may have been power cycled */ |
1396 | if (chip->state == FL_PM_SUSPENDED) { | 1396 | if (chip->state == FL_PM_SUSPENDED) { |
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd) | |||
1399 | wake_up(&chip->wq); | 1399 | wake_up(&chip->wq); |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | spin_unlock_bh(chip->mutex); | 1402 | mutex_unlock(&chip->mutex); |
1403 | } | 1403 | } |
1404 | } | 1404 | } |
1405 | 1405 | ||
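
When the chip is busy in a state the caller cannot interrupt, these functions park on chip->wq instead of spinning, and the conversion preserves the rule that the chip lock is never held across schedule(). A minimal sketch of that sleep, assuming a simplified wait_for_chip() wrapper that does not exist in the driver:

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mtd/flashchip.h>

/* Illustrative only: sleep until someone calls wake_up(&chip->wq), with
 * the chip mutex dropped for the whole sleep and re-taken afterwards. */
static void wait_for_chip(struct flchip *chip)
{
        DECLARE_WAITQUEUE(wait, current);

        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&chip->wq, &wait);
        mutex_unlock(&chip->mutex);     /* never sleep holding the chip lock */
        schedule();
        remove_wait_queue(&chip->wq, &wait);
        mutex_lock(&chip->mutex);       /* re-take before touching chip->state */
}

On return the caller re-checks chip->state or jumps back to its retry label, exactly as the hunks above do after being woken.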
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h index 57e0e4e921f9..d18064977192 100644 --- a/drivers/mtd/chips/fwh_lock.h +++ b/drivers/mtd/chips/fwh_lock.h | |||
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, | |||
58 | * to flash memory - that means that we don't have to check status | 58 | * to flash memory - that means that we don't have to check status |
59 | * and timeout. | 59 | * and timeout. |
60 | */ | 60 | */ |
61 | spin_lock(chip->mutex); | 61 | mutex_lock(&chip->mutex); |
62 | ret = get_chip(map, chip, adr, FL_LOCKING); | 62 | ret = get_chip(map, chip, adr, FL_LOCKING); |
63 | if (ret) { | 63 | if (ret) { |
64 | spin_unlock(chip->mutex); | 64 | mutex_unlock(&chip->mutex); |
65 | return ret; | 65 | return ret; |
66 | } | 66 | } |
67 | 67 | ||
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, | |||
72 | /* Done and happy. */ | 72 | /* Done and happy. */ |
73 | chip->state = chip->oldstate; | 73 | chip->state = chip->oldstate; |
74 | put_chip(map, chip, adr); | 74 | put_chip(map, chip, adr); |
75 | spin_unlock(chip->mutex); | 75 | mutex_unlock(&chip->mutex); |
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
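
fwh_xxlock_oneblock() is the simplest consumer of the new lock: take the chip mutex, claim the chip with get_chip(), poke the lock registers, restore the saved state and release. Schematically (a sketch, not the real function; get_chip() and put_chip() are the cmdset driver's own helpers, visible here because fwh_lock.h is included from those drivers, and the register writes are elided):

static int fwh_style_oneblock(struct map_info *map, struct flchip *chip,
                              unsigned long adr)
{
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        /* ... FWH block lock/unlock register writes go here ... */

        chip->state = chip->oldstate;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
        return 0;
}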
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index e2dc96441e05..fcc1bc02c8a2 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c | |||
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi | |||
155 | pchip->start = (i << cfi.chipshift); | 155 | pchip->start = (i << cfi.chipshift); |
156 | pchip->state = FL_READY; | 156 | pchip->state = FL_READY; |
157 | init_waitqueue_head(&pchip->wq); | 157 | init_waitqueue_head(&pchip->wq); |
158 | spin_lock_init(&pchip->_spinlock); | 158 | mutex_init(&pchip->mutex); |
159 | pchip->mutex = &pchip->_spinlock; | ||
160 | } | 159 | } |
161 | } | 160 | } |
162 | 161 | ||
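
With the mutex embedded in struct flchip, per-chip setup collapses to a single mutex_init(); there is no separate spinlock object left for chip->mutex to point at. A sketch of the initialisation this hunk leaves behind (loop shape illustrative, field names as in struct flchip); lpddr_cmdset() below makes the same two-for-one swap:

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/mtd/flashchip.h>

/* Sketch of per-chip init after the conversion. */
static void init_chips(struct flchip *chips, int numchips, int chipshift)
{
        int i;

        for (i = 0; i < numchips; i++) {
                chips[i].start = (unsigned long)i << chipshift;
                chips[i].state = FL_READY;
                init_waitqueue_head(&chips[i].wq);
                mutex_init(&chips[i].mutex);    /* was spin_lock_init() plus a pointer assignment */
        }
}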
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index e22ca49583e7..eb6f437ca9ec 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c | |||
@@ -106,8 +106,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) | |||
106 | /* those should be reset too since | 106 | /* those should be reset too since |
107 | they create memory references. */ | 107 | they create memory references. */ |
108 | init_waitqueue_head(&chip->wq); | 108 | init_waitqueue_head(&chip->wq); |
109 | spin_lock_init(&chip->_spinlock); | 109 | mutex_init(&chip->mutex); |
110 | chip->mutex = &chip->_spinlock; | ||
111 | chip++; | 110 | chip++; |
112 | } | 111 | } |
113 | } | 112 | } |
@@ -143,7 +142,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip, | |||
143 | } | 142 | } |
144 | 143 | ||
145 | /* OK Still waiting. Drop the lock, wait a while and retry. */ | 144 | /* OK Still waiting. Drop the lock, wait a while and retry. */ |
146 | spin_unlock(chip->mutex); | 145 | mutex_unlock(&chip->mutex); |
147 | if (sleep_time >= 1000000/HZ) { | 146 | if (sleep_time >= 1000000/HZ) { |
148 | /* | 147 | /* |
149 | * Half of the normal delay still remaining | 148 | * Half of the normal delay still remaining |
@@ -158,17 +157,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip, | |||
158 | cond_resched(); | 157 | cond_resched(); |
159 | timeo--; | 158 | timeo--; |
160 | } | 159 | } |
161 | spin_lock(chip->mutex); | 160 | mutex_lock(&chip->mutex); |
162 | 161 | ||
163 | while (chip->state != chip_state) { | 162 | while (chip->state != chip_state) { |
164 | /* Someone's suspended the operation: sleep */ | 163 | /* Someone's suspended the operation: sleep */ |
165 | DECLARE_WAITQUEUE(wait, current); | 164 | DECLARE_WAITQUEUE(wait, current); |
166 | set_current_state(TASK_UNINTERRUPTIBLE); | 165 | set_current_state(TASK_UNINTERRUPTIBLE); |
167 | add_wait_queue(&chip->wq, &wait); | 166 | add_wait_queue(&chip->wq, &wait); |
168 | spin_unlock(chip->mutex); | 167 | mutex_unlock(&chip->mutex); |
169 | schedule(); | 168 | schedule(); |
170 | remove_wait_queue(&chip->wq, &wait); | 169 | remove_wait_queue(&chip->wq, &wait); |
171 | spin_lock(chip->mutex); | 170 | mutex_lock(&chip->mutex); |
172 | } | 171 | } |
173 | if (chip->erase_suspended || chip->write_suspended) { | 172 | if (chip->erase_suspended || chip->write_suspended) { |
174 | /* Suspend has occured while sleep: reset timeout */ | 173 | /* Suspend has occured while sleep: reset timeout */ |
@@ -229,20 +228,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) | |||
229 | * it'll happily send us to sleep. In any case, when | 228 | * it'll happily send us to sleep. In any case, when |
230 | * get_chip returns success we're clear to go ahead. | 229 | * get_chip returns success we're clear to go ahead. |
231 | */ | 230 | */ |
232 | ret = spin_trylock(contender->mutex); | 231 | ret = mutex_trylock(&contender->mutex); |
233 | spin_unlock(&shared->lock); | 232 | spin_unlock(&shared->lock); |
234 | if (!ret) | 233 | if (!ret) |
235 | goto retry; | 234 | goto retry; |
236 | spin_unlock(chip->mutex); | 235 | mutex_unlock(&chip->mutex); |
237 | ret = chip_ready(map, contender, mode); | 236 | ret = chip_ready(map, contender, mode); |
238 | spin_lock(chip->mutex); | 237 | mutex_lock(&chip->mutex); |
239 | 238 | ||
240 | if (ret == -EAGAIN) { | 239 | if (ret == -EAGAIN) { |
241 | spin_unlock(contender->mutex); | 240 | mutex_unlock(&contender->mutex); |
242 | goto retry; | 241 | goto retry; |
243 | } | 242 | } |
244 | if (ret) { | 243 | if (ret) { |
245 | spin_unlock(contender->mutex); | 244 | mutex_unlock(&contender->mutex); |
246 | return ret; | 245 | return ret; |
247 | } | 246 | } |
248 | spin_lock(&shared->lock); | 247 | spin_lock(&shared->lock); |
@@ -251,10 +250,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) | |||
251 | * state. Put contender and retry. */ | 250 | * state. Put contender and retry. */ |
252 | if (chip->state == FL_SYNCING) { | 251 | if (chip->state == FL_SYNCING) { |
253 | put_chip(map, contender); | 252 | put_chip(map, contender); |
254 | spin_unlock(contender->mutex); | 253 | mutex_unlock(&contender->mutex); |
255 | goto retry; | 254 | goto retry; |
256 | } | 255 | } |
257 | spin_unlock(contender->mutex); | 256 | mutex_unlock(&contender->mutex); |
258 | } | 257 | } |
259 | 258 | ||
260 | /* Check if we have suspended erase on this chip. | 259 | /* Check if we have suspended erase on this chip. |
@@ -264,10 +263,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) | |||
264 | spin_unlock(&shared->lock); | 263 | spin_unlock(&shared->lock); |
265 | set_current_state(TASK_UNINTERRUPTIBLE); | 264 | set_current_state(TASK_UNINTERRUPTIBLE); |
266 | add_wait_queue(&chip->wq, &wait); | 265 | add_wait_queue(&chip->wq, &wait); |
267 | spin_unlock(chip->mutex); | 266 | mutex_unlock(&chip->mutex); |
268 | schedule(); | 267 | schedule(); |
269 | remove_wait_queue(&chip->wq, &wait); | 268 | remove_wait_queue(&chip->wq, &wait); |
270 | spin_lock(chip->mutex); | 269 | mutex_lock(&chip->mutex); |
271 | goto retry; | 270 | goto retry; |
272 | } | 271 | } |
273 | 272 | ||
@@ -336,10 +335,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode) | |||
336 | sleep: | 335 | sleep: |
337 | set_current_state(TASK_UNINTERRUPTIBLE); | 336 | set_current_state(TASK_UNINTERRUPTIBLE); |
338 | add_wait_queue(&chip->wq, &wait); | 337 | add_wait_queue(&chip->wq, &wait); |
339 | spin_unlock(chip->mutex); | 338 | mutex_unlock(&chip->mutex); |
340 | schedule(); | 339 | schedule(); |
341 | remove_wait_queue(&chip->wq, &wait); | 340 | remove_wait_queue(&chip->wq, &wait); |
342 | spin_lock(chip->mutex); | 341 | mutex_lock(&chip->mutex); |
343 | return -EAGAIN; | 342 | return -EAGAIN; |
344 | } | 343 | } |
345 | } | 344 | } |
@@ -355,12 +354,12 @@ static void put_chip(struct map_info *map, struct flchip *chip) | |||
355 | if (shared->writing && shared->writing != chip) { | 354 | if (shared->writing && shared->writing != chip) { |
356 | /* give back the ownership */ | 355 | /* give back the ownership */ |
357 | struct flchip *loaner = shared->writing; | 356 | struct flchip *loaner = shared->writing; |
358 | spin_lock(loaner->mutex); | 357 | mutex_lock(&loaner->mutex); |
359 | spin_unlock(&shared->lock); | 358 | spin_unlock(&shared->lock); |
360 | spin_unlock(chip->mutex); | 359 | mutex_unlock(&chip->mutex); |
361 | put_chip(map, loaner); | 360 | put_chip(map, loaner); |
362 | spin_lock(chip->mutex); | 361 | mutex_lock(&chip->mutex); |
363 | spin_unlock(loaner->mutex); | 362 | mutex_unlock(&loaner->mutex); |
364 | wake_up(&chip->wq); | 363 | wake_up(&chip->wq); |
365 | return; | 364 | return; |
366 | } | 365 | } |
@@ -413,10 +412,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
413 | 412 | ||
414 | wbufsize = 1 << lpddr->qinfo->BufSizeShift; | 413 | wbufsize = 1 << lpddr->qinfo->BufSizeShift; |
415 | 414 | ||
416 | spin_lock(chip->mutex); | 415 | mutex_lock(&chip->mutex); |
417 | ret = get_chip(map, chip, FL_WRITING); | 416 | ret = get_chip(map, chip, FL_WRITING); |
418 | if (ret) { | 417 | if (ret) { |
419 | spin_unlock(chip->mutex); | 418 | mutex_unlock(&chip->mutex); |
420 | return ret; | 419 | return ret; |
421 | } | 420 | } |
422 | /* Figure out the number of words to write */ | 421 | /* Figure out the number of words to write */ |
@@ -477,7 +476,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip, | |||
477 | } | 476 | } |
478 | 477 | ||
479 | out: put_chip(map, chip); | 478 | out: put_chip(map, chip); |
480 | spin_unlock(chip->mutex); | 479 | mutex_unlock(&chip->mutex); |
481 | return ret; | 480 | return ret; |
482 | } | 481 | } |
483 | 482 | ||
@@ -489,10 +488,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) | |||
489 | struct flchip *chip = &lpddr->chips[chipnum]; | 488 | struct flchip *chip = &lpddr->chips[chipnum]; |
490 | int ret; | 489 | int ret; |
491 | 490 | ||
492 | spin_lock(chip->mutex); | 491 | mutex_lock(&chip->mutex); |
493 | ret = get_chip(map, chip, FL_ERASING); | 492 | ret = get_chip(map, chip, FL_ERASING); |
494 | if (ret) { | 493 | if (ret) { |
495 | spin_unlock(chip->mutex); | 494 | mutex_unlock(&chip->mutex); |
496 | return ret; | 495 | return ret; |
497 | } | 496 | } |
498 | send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); | 497 | send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); |
@@ -504,7 +503,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) | |||
504 | goto out; | 503 | goto out; |
505 | } | 504 | } |
506 | out: put_chip(map, chip); | 505 | out: put_chip(map, chip); |
507 | spin_unlock(chip->mutex); | 506 | mutex_unlock(&chip->mutex); |
508 | return ret; | 507 | return ret; |
509 | } | 508 | } |
510 | 509 | ||
@@ -517,10 +516,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | |||
517 | struct flchip *chip = &lpddr->chips[chipnum]; | 516 | struct flchip *chip = &lpddr->chips[chipnum]; |
518 | int ret = 0; | 517 | int ret = 0; |
519 | 518 | ||
520 | spin_lock(chip->mutex); | 519 | mutex_lock(&chip->mutex); |
521 | ret = get_chip(map, chip, FL_READY); | 520 | ret = get_chip(map, chip, FL_READY); |
522 | if (ret) { | 521 | if (ret) { |
523 | spin_unlock(chip->mutex); | 522 | mutex_unlock(&chip->mutex); |
524 | return ret; | 523 | return ret; |
525 | } | 524 | } |
526 | 525 | ||
@@ -528,7 +527,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, | |||
528 | *retlen = len; | 527 | *retlen = len; |
529 | 528 | ||
530 | put_chip(map, chip); | 529 | put_chip(map, chip); |
531 | spin_unlock(chip->mutex); | 530 | mutex_unlock(&chip->mutex); |
532 | return ret; | 531 | return ret; |
533 | } | 532 | } |
534 | 533 | ||
@@ -568,9 +567,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | |||
568 | else | 567 | else |
569 | thislen = len; | 568 | thislen = len; |
570 | /* get the chip */ | 569 | /* get the chip */ |
571 | spin_lock(chip->mutex); | 570 | mutex_lock(&chip->mutex); |
572 | ret = get_chip(map, chip, FL_POINT); | 571 | ret = get_chip(map, chip, FL_POINT); |
573 | spin_unlock(chip->mutex); | 572 | mutex_unlock(&chip->mutex); |
574 | if (ret) | 573 | if (ret) |
575 | break; | 574 | break; |
576 | 575 | ||
@@ -610,7 +609,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | |||
610 | else | 609 | else |
611 | thislen = len; | 610 | thislen = len; |
612 | 611 | ||
613 | spin_lock(chip->mutex); | 612 | mutex_lock(&chip->mutex); |
614 | if (chip->state == FL_POINT) { | 613 | if (chip->state == FL_POINT) { |
615 | chip->ref_point_counter--; | 614 | chip->ref_point_counter--; |
616 | if (chip->ref_point_counter == 0) | 615 | if (chip->ref_point_counter == 0) |
@@ -620,7 +619,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | |||
620 | "pointed region\n", map->name); | 619 | "pointed region\n", map->name); |
621 | 620 | ||
622 | put_chip(map, chip); | 621 | put_chip(map, chip); |
623 | spin_unlock(chip->mutex); | 622 | mutex_unlock(&chip->mutex); |
624 | 623 | ||
625 | len -= thislen; | 624 | len -= thislen; |
626 | ofs = 0; | 625 | ofs = 0; |
@@ -726,10 +725,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) | |||
726 | int chipnum = adr >> lpddr->chipshift; | 725 | int chipnum = adr >> lpddr->chipshift; |
727 | struct flchip *chip = &lpddr->chips[chipnum]; | 726 | struct flchip *chip = &lpddr->chips[chipnum]; |
728 | 727 | ||
729 | spin_lock(chip->mutex); | 728 | mutex_lock(&chip->mutex); |
730 | ret = get_chip(map, chip, FL_LOCKING); | 729 | ret = get_chip(map, chip, FL_LOCKING); |
731 | if (ret) { | 730 | if (ret) { |
732 | spin_unlock(chip->mutex); | 731 | mutex_unlock(&chip->mutex); |
733 | return ret; | 732 | return ret; |
734 | } | 733 | } |
735 | 734 | ||
@@ -749,7 +748,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) | |||
749 | goto out; | 748 | goto out; |
750 | } | 749 | } |
751 | out: put_chip(map, chip); | 750 | out: put_chip(map, chip); |
752 | spin_unlock(chip->mutex); | 751 | mutex_unlock(&chip->mutex); |
753 | return ret; | 752 | return ret; |
754 | } | 753 | } |
755 | 754 | ||
@@ -770,10 +769,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval) | |||
770 | int chipnum = adr >> lpddr->chipshift; | 769 | int chipnum = adr >> lpddr->chipshift; |
771 | struct flchip *chip = &lpddr->chips[chipnum]; | 770 | struct flchip *chip = &lpddr->chips[chipnum]; |
772 | 771 | ||
773 | spin_lock(chip->mutex); | 772 | mutex_lock(&chip->mutex); |
774 | ret = get_chip(map, chip, FL_WRITING); | 773 | ret = get_chip(map, chip, FL_WRITING); |
775 | if (ret) { | 774 | if (ret) { |
776 | spin_unlock(chip->mutex); | 775 | mutex_unlock(&chip->mutex); |
777 | return ret; | 776 | return ret; |
778 | } | 777 | } |
779 | 778 | ||
@@ -787,7 +786,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval) | |||
787 | } | 786 | } |
788 | 787 | ||
789 | out: put_chip(map, chip); | 788 | out: put_chip(map, chip); |
790 | spin_unlock(chip->mutex); | 789 | mutex_unlock(&chip->mutex); |
791 | return ret; | 790 | return ret; |
792 | } | 791 | } |
793 | 792 | ||
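
The subtlest hunks are in lpddr's get_chip(): when two struct flchip instances share one die, the caller already holds its own chip->mutex, may only mutex_trylock() the contender's (taking it outright could deadlock against a thread doing the same dance in the other direction), and has to drop its own lock while chip_ready() possibly sleeps on the contender. A reduced sketch of that ordering; error handling and the FL_SYNCING re-check are trimmed, shared->lock remains a spinlock in this patch, and chip_ready() stands in for the driver's own helper.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mtd/map.h>
#include <linux/mtd/flashchip.h>

static int chip_ready(struct map_info *map, struct flchip *chip, int mode);

/* 'chip' is ours and its mutex is held on entry; 'contender' currently owns
 * the die; shared->lock is held on entry and released here. Returns with
 * chip->mutex still held; -EAGAIN tells the caller to retry from the top. */
static int claim_from_contender(struct map_info *map, struct flchip *chip,
                                struct flchip *contender,
                                struct flchip_shared *shared, int mode)
{
        int ret;

        ret = mutex_trylock(&contender->mutex); /* trylock avoids ABBA deadlock */
        spin_unlock(&shared->lock);
        if (!ret)
                return -EAGAIN;                 /* caller re-takes shared->lock and retries */

        mutex_unlock(&chip->mutex);             /* chip_ready() may sleep */
        ret = chip_ready(map, contender, mode);
        mutex_lock(&chip->mutex);

        /* The driver re-acquires shared->lock and re-checks FL_SYNCING
         * before this point; that step is omitted from the sketch. */
        mutex_unlock(&contender->mutex);
        return ret;                             /* -EAGAIN still means retry */
}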
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index d0bf422ae374..f43e9b49b751 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h | |||
@@ -15,6 +15,7 @@ | |||
15 | * has asm/spinlock.h, or 2.4, which has linux/spinlock.h | 15 | * has asm/spinlock.h, or 2.4, which has linux/spinlock.h |
16 | */ | 16 | */ |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/mutex.h> | ||
18 | 19 | ||
19 | typedef enum { | 20 | typedef enum { |
20 | FL_READY, | 21 | FL_READY, |
@@ -74,8 +75,7 @@ struct flchip { | |||
74 | unsigned int erase_suspended:1; | 75 | unsigned int erase_suspended:1; |
75 | unsigned long in_progress_block_addr; | 76 | unsigned long in_progress_block_addr; |
76 | 77 | ||
77 | spinlock_t *mutex; | 78 | struct mutex mutex; |
78 | spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */ | ||
79 | wait_queue_head_t wq; /* Wait on here when we're waiting for the chip | 79 | wait_queue_head_t wq; /* Wait on here when we're waiting for the chip |
80 | to be ready */ | 80 | to be ready */ |
81 | int word_write_time; | 81 | int word_write_time; |
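
The header hunk is what forces the &chip->mutex spelling everywhere else: chip->mutex is no longer a spinlock_t pointer aimed at the chip's own _spinlock (or, per the deleted comment, at a shared one), but a struct mutex embedded in the flchip itself. A tiny illustrative caller, not a function from the tree:

#include <linux/mutex.h>
#include <linux/mtd/flashchip.h>

static void touch_chip(struct flchip *chip)
{
        mutex_lock(&chip->mutex);       /* was: spin_lock(chip->mutex) through the old pointer */
        /* ... program/erase/status work on the chip ... */
        mutex_unlock(&chip->mutex);     /* was: spin_unlock(chip->mutex) */
}

Since a task may legitimately sleep while holding a mutex, dropping the chip lock around every msleep() and schedule() call, which the old spinlock made mandatory, becomes a latency and fairness choice rather than a correctness requirement, though the converted drivers above keep doing it.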