author	Nicolas Pitre <nico@cam.org>	2006-03-30 09:52:41 -0500
committer	David Woodhouse <dwmw2@infradead.org>	2006-05-16 20:25:37 -0400
commit	c172471b78255a5cf6d05383d9ebbf0c6683167a (patch)
tree	ecb18d8ef5686eeb11f14a8b9d6e12df03ba7520 /drivers/mtd
parent	6e7a6809c555aeb7cb98544df4d446fbd6f123ec (diff)
cfi_cmdset_0001: factorize code to wait for flash status
This allows for much better abstraction and separation of the XIP and
non-XIP cases with their own specific implementations. This fixes the
case where a timeout was tripped on in the XIP case by the code that
was meant for the non-XIP case only. This also makes for a nice code
reduction.

Signed-off-by: Nicolas Pitre <nico@cam.org>
CC: "Alexey, Korolev" <alexey.korolev@intel.com>
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/chips/cfi_cmdset_0001.c | 421
1 file changed, 157 insertions(+), 264 deletions(-)
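
The shape of the refactoring: every flash operation used to open-code the
same status poll, sleep-on-suspend, and timeout logic in its own hand-rolled
loop. The patch folds those loops into one helper per configuration (XIP and
non-XIP) behind a common INVAL_CACHE_AND_WAIT macro. As a rough sketch of the
change in calling convention (illustrative only, not the literal patch code;
the error label is a placeholder):

	/* Before: each caller polled by hand after kicking the operation. */
	INVALIDATE_CACHE_UDELAY(map, chip, adr, adr, len, chip->word_write_time);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		if (time_after(jiffies, timeo))
			goto error;	/* timeout handling duplicated per caller */
		UDELAY(map, chip, adr, 1);
	}

	/* After: one call polls, sleeps and times out internally; the typical
	 * operation time is passed by address so the helper can adapt it
	 * (see the non-XIP implementation in the patch below).
	 */
	ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len,
				   &chip->word_write_time);
	if (ret)
		goto error;		/* -ETIME on status timeout */
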
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ab3888f5e464..d0d5e521b564 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -399,7 +399,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 	for (i=0; i< cfi->numchips; i++) {
 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
-		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
+		cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
 		cfi->chips[i].ref_point_counter = 0;
 		init_waitqueue_head(&(cfi->chips[i].wq));
 	}
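
The 1000 factor in this first hunk is a unit fix rather than a tuning change:
in the CFI query structure the typical word and buffer write times are
encoded as 2^n microseconds, but the typical block erase time as 2^n
milliseconds. Scaling the erase figure by 1000 puts all three chip->*_time
fields in microseconds, which the shared wait code introduced below assumes.
A worked example, assuming those CFI field semantics:

	/* Assuming CFI semantics (BlockEraseTimeoutTyp = n means a typical
	 * erase time of 2^n ms), for n == 10:
	 *   old: erase_time = 1    << 10 =    1024   (really 1024 ms)
	 *   new: erase_time = 1000 << 10 = 1024000   (same time, now in us)
	 * matching word_write_time and buffer_write_time, which are in us.
	 */
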
@@ -894,26 +894,33 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
 
 /*
  * When a delay is required for the flash operation to complete, the
- * xip_udelay() function is polling for both the given timeout and pending
- * (but still masked) hardware interrupts.  Whenever there is an interrupt
- * pending then the flash erase or write operation is suspended, array mode
- * restored and interrupts unmasked.  Task scheduling might also happen at that
- * point.  The CPU eventually returns from the interrupt or the call to
- * schedule() and the suspended flash operation is resumed for the remaining
- * of the delay period.
+ * xip_wait_for_operation() function is polling for both the given timeout
+ * and pending (but still masked) hardware interrupts.  Whenever there is an
+ * interrupt pending then the flash erase or write operation is suspended,
+ * array mode restored and interrupts unmasked.  Task scheduling might also
+ * happen at that point.  The CPU eventually returns from the interrupt or
+ * the call to schedule() and the suspended flash operation is resumed for
+ * the remaining of the delay period.
  *
  * Warning: this function _will_ fool interrupt latency tracing tools.
  */
 
-static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
-				unsigned long adr, int usec)
+static int __xipram xip_wait_for_operation(
+		struct map_info *map, struct flchip *chip,
+		unsigned long adr, int *chip_op_time )
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 	map_word status, OK = CMD(0x80);
-	unsigned long suspended, start = xip_currtime();
+	unsigned long usec, suspended, start, done;
 	flstate_t oldstate, newstate;
 
+	start = xip_currtime();
+	usec = *chip_op_time * 8;
+	if (usec == 0)
+		usec = 500000;
+	done = 0;
+
 	do {
 		cpu_relax();
 		if (xip_irqpending() && cfip &&
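
The timeout budget computed above is derived from the stored typical time
rather than hardcoded: eight times the typical operation time, falling back
to 500 ms when no typical time is available (the non-XIP variant below
applies the same 8x rule, with an HZ/2 jiffies floor instead). A minimal
sketch of the rule, with a hypothetical helper name:

	/* Sketch of the budget rule shared by both implementations:
	 * wait up to 8x the typical time, or 500 ms if none is known.
	 */
	static unsigned long wait_budget_us(int typical_us)
	{
		unsigned long usec = (unsigned long)typical_us * 8;
		return usec ? usec : 500000;
	}
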
@@ -930,9 +937,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			 * we resume the whole thing at once).  Yes, it
 			 * can happen!
 			 */
+			usec -= done;
 			map_write(map, CMD(0xb0), adr);
 			map_write(map, CMD(0x70), adr);
-			usec -= xip_elapsed_since(start);
 			suspended = xip_currtime();
 			do {
 				if (xip_elapsed_since(suspended) > 100000) {
@@ -942,7 +949,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 					 * This is a critical error but there
 					 * is not much we can do here.
 					 */
-					return;
+					return -EIO;
 				}
 				status = map_read(map, adr);
 			} while (!map_word_andequal(map, status, OK, OK));
@@ -1002,65 +1009,107 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			xip_cpu_idle();
 		}
 		status = map_read(map, adr);
+		done = xip_elapsed_since(start);
 	} while (!map_word_andequal(map, status, OK, OK)
-		 && xip_elapsed_since(start) < usec);
-}
+		 && done < usec);
 
-#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
+	return (done >= usec) ? -ETIME : 0;
+}
 
 /*
  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
  * the flash is actively programming or erasing since we have to poll for
  * the operation to complete anyway.  We can't do that in a generic way with
  * a XIP setup so do it before the actual flash operation in this case
- * and stub it out from INVALIDATE_CACHE_UDELAY.
+ * and stub it out from INVAL_CACHE_AND_WAIT.
  */
 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
 	INVALIDATE_CACHED_RANGE(map, from, size)
 
-#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
-	UDELAY(map, chip, cmd_adr, usec)
-
-/*
- * Extra notes:
- *
- * Activating this XIP support changes the way the code works a bit.  For
- * example the code to suspend the current process when concurrent access
- * happens is never executed because xip_udelay() will always return with the
- * same chip state as it was entered with.  This is why there is no care for
- * the presence of add_wait_queue() or schedule() calls from within a couple
- * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
- * The queueing and scheduling are always happening within xip_udelay().
- *
- * Similarly, get_chip() and put_chip() just happen to always be executed
- * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
- * is in array mode, therefore never executing many cases therein and not
- * causing any problem with XIP.
- */
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
+	xip_wait_for_operation(map, chip, cmd_adr, p_usec)
 
 #else
 
 #define xip_disable(map, chip, adr)
 #define xip_enable(map, chip, adr)
 #define XIP_INVAL_CACHED_RANGE(x...)
+#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
+
+static int inval_cache_and_wait_for_operation(
+		struct map_info *map, struct flchip *chip,
+		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+		int *chip_op_time )
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	map_word status, status_OK = CMD(0x80);
+	int z, chip_state = chip->state;
+	unsigned long timeo;
+
+	spin_unlock(chip->mutex);
+	if (inval_len)
+		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+	if (*chip_op_time)
+		cfi_udelay(*chip_op_time);
+	spin_lock(chip->mutex);
+
+	timeo = *chip_op_time * 8 * HZ / 1000000;
+	if (timeo < HZ/2)
+		timeo = HZ/2;
+	timeo += jiffies;
+
+	z = 0;
+	for (;;) {
+		if (chip->state != chip_state) {
+			/* Someone's suspended the operation: sleep */
+			DECLARE_WAITQUEUE(wait, current);
+
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			spin_unlock(chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			timeo = jiffies + (HZ / 2); /* FIXME */
+			spin_lock(chip->mutex);
+			continue;
+		}
+
+		status = map_read(map, cmd_adr);
+		if (map_word_andequal(map, status, status_OK, status_OK))
+			break;
 
-#define UDELAY(map, chip, adr, usec)  \
-do {  \
-	spin_unlock(chip->mutex);  \
-	cfi_udelay(usec);  \
-	spin_lock(chip->mutex);  \
-} while (0)
+		/* OK Still waiting */
+		if (time_after(jiffies, timeo)) {
+			map_write(map, CMD(0x70), cmd_adr);
+			chip->state = FL_STATUS;
+			return -ETIME;
+		}
 
-#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
-do {  \
-	spin_unlock(chip->mutex);  \
-	INVALIDATE_CACHED_RANGE(map, adr, len);  \
-	cfi_udelay(usec);  \
-	spin_lock(chip->mutex);  \
-} while (0)
+		/* Latency issues. Drop the lock, wait a while and retry */
+		z++;
+		spin_unlock(chip->mutex);
+		cfi_udelay(1);
+		spin_lock(chip->mutex);
+	}
+
+	if (!z) {
+		if (!--(*chip_op_time))
+			*chip_op_time = 1;
+	} else if (z > 1)
+		++(*chip_op_time);
+
+	/* Done and happy. */
+	chip->state = FL_STATUS;
+	return 0;
+}
 
 #endif
 
+#define WAIT_TIMEOUT(map, chip, adr, udelay) \
+	({ int __udelay = (udelay); \
+	   INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
+
+
 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
 {
 	unsigned long cmd_addr;
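
The WAIT_TIMEOUT wrapper defined above covers callers that only need the
wait, with nothing to invalidate. The GCC statement expression gives the
macro a local int whose address can be passed down, so a constant delay
works too; any adaptive adjustment the helper makes lands in the scratch
copy and is discarded. The buffer-write readiness check later in the patch,
for instance, expands roughly to this (a sketch of the expansion, not
literal preprocessor output):

	/* ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0); is roughly: */
	ret = ({
		int __udelay = 0;	/* scratch copy, adjustments discarded */
		INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, 0, 0, &__udelay);
	});
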
@@ -1250,14 +1299,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 			     unsigned long adr, map_word datum, int mode)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	map_word status, status_OK, write_cmd;
-	unsigned long timeo;
-	int z, ret=0;
+	map_word status, write_cmd;
+	int ret=0;
 
 	adr += chip->start;
 
-	/* Let's determine those according to the interleave only once */
-	status_OK = CMD(0x80);
 	switch (mode) {
 	case FL_WRITING:
 		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
@@ -1283,57 +1329,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 	map_write(map, datum, adr);
 	chip->state = mode;
 
-	INVALIDATE_CACHE_UDELAY(map, chip, adr,
-				adr, map_bankwidth(map),
-				chip->word_write_time);
-
-	timeo = jiffies + (HZ/2);
-	z = 0;
-	for (;;) {
-		if (chip->state != mode) {
-			/* Someone's suspended the write. Sleep */
-			DECLARE_WAITQUEUE(wait, current);
-
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock(chip->mutex);
-			continue;
-		}
-
-		status = map_read(map, adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, adr);
-			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
-			ret = -EIO;
-			goto out;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		z++;
-		UDELAY(map, chip, adr, 1);
-	}
-	if (!z) {
-		chip->word_write_time--;
-		if (!chip->word_write_time)
-			chip->word_write_time = 1;
-	}
-	if (z > 1)
-		chip->word_write_time++;
-
-	/* Done and happy. */
-	chip->state = FL_STATUS;
+	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+				   adr, map_bankwidth(map),
+				   &chip->word_write_time);
+	if (ret) {
+		xip_enable(map, chip, adr);
+		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+		goto out;
+	}
 
 	/* check for errors */
+	status = map_read(map, adr);
 	if (map_word_bitsset(map, status, CMD(0x1a))) {
 		unsigned long chipstatus = MERGESTATUS(status);
 
@@ -1450,9 +1456,9 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 				    unsigned long *pvec_seek, int len)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	map_word status, status_OK, write_cmd, datum;
-	unsigned long cmd_adr, timeo;
-	int wbufsize, z, ret=0, word_gap, words;
+	map_word status, write_cmd, datum;
+	unsigned long cmd_adr;
+	int ret, wbufsize, word_gap, words;
 	const struct kvec *vec;
 	unsigned long vec_seek;
 
@@ -1461,7 +1467,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	cmd_adr = adr & ~(wbufsize-1);
 
 	/* Let's determine this according to the interleave only once */
-	status_OK = CMD(0x80);
 	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
 
 	spin_lock(chip->mutex);
@@ -1493,32 +1498,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	}
 
 	chip->state = FL_WRITING_TO_BUFFER;
-
-	z = 0;
-	for (;;) {
-		map_write(map, write_cmd, cmd_adr);
-
+	map_write(map, write_cmd, cmd_adr);
+	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
+	if (ret) {
+		/* Argh. Not ready for write to buffer */
+		map_word Xstatus = map_read(map, cmd_adr);
+		map_write(map, CMD(0x70), cmd_adr);
+		chip->state = FL_STATUS;
 		status = map_read(map, cmd_adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		UDELAY(map, chip, cmd_adr, 1);
-
-		if (++z > 20) {
-			/* Argh. Not ready for write to buffer */
-			map_word Xstatus;
-			map_write(map, CMD(0x70), cmd_adr);
-			chip->state = FL_STATUS;
-			Xstatus = map_read(map, cmd_adr);
-			/* Odd. Clear status bits */
-			map_write(map, CMD(0x50), cmd_adr);
-			map_write(map, CMD(0x70), cmd_adr);
-			xip_enable(map, chip, cmd_adr);
-			printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
-			       map->name, status.x[0], Xstatus.x[0]);
-			ret = -EIO;
-			goto out;
-		}
+		map_write(map, CMD(0x50), cmd_adr);
+		map_write(map, CMD(0x70), cmd_adr);
+		xip_enable(map, chip, cmd_adr);
+		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
+		       map->name, Xstatus.x[0], status.x[0]);
+		goto out;
 	}
 
 	/* Figure out the number of words to write */
@@ -1573,56 +1566,19 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	map_write(map, CMD(0xd0), cmd_adr);
 	chip->state = FL_WRITING;
 
-	INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
-				adr, len,
-				chip->buffer_write_time);
-
-	timeo = jiffies + (HZ/2);
-	z = 0;
-	for (;;) {
-		if (chip->state != FL_WRITING) {
-			/* Someone's suspended the write. Sleep */
-			DECLARE_WAITQUEUE(wait, current);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock(chip->mutex);
-			continue;
-		}
-
-		status = map_read(map, cmd_adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), cmd_adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, cmd_adr);
-			printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
-			ret = -EIO;
-			goto out;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		z++;
-		UDELAY(map, chip, cmd_adr, 1);
-	}
-	if (!z) {
-		chip->buffer_write_time--;
-		if (!chip->buffer_write_time)
-			chip->buffer_write_time = 1;
-	}
-	if (z > 1)
-		chip->buffer_write_time++;
-
-	/* Done and happy. */
-	chip->state = FL_STATUS;
+	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+				   adr, len,
+				   &chip->buffer_write_time);
+	if (ret) {
+		map_write(map, CMD(0x70), cmd_adr);
+		chip->state = FL_STATUS;
+		xip_enable(map, chip, cmd_adr);
+		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
+		goto out;
+	}
 
 	/* check for errors */
+	status = map_read(map, cmd_adr);
 	if (map_word_bitsset(map, status, CMD(0x1a))) {
 		unsigned long chipstatus = MERGESTATUS(status);
 
@@ -1718,17 +1674,12 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 				      unsigned long adr, int len, void *thunk)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	map_word status, status_OK;
-	unsigned long timeo;
+	map_word status;
 	int retries = 3;
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
+	int ret;
 
 	adr += chip->start;
 
-	/* Let's determine this according to the interleave only once */
-	status_OK = CMD(0x80);
-
  retry:
 	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_ERASING);
@@ -1750,48 +1701,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 
-	INVALIDATE_CACHE_UDELAY(map, chip, adr,
-				adr, len,
-				chip->erase_time*1000/2);
-
-	/* FIXME. Use a timer to check this, and return immediately. */
-	/* Once the state machine's known to be working I'll do that */
-
-	timeo = jiffies + (HZ*20);
-	for (;;) {
-		if (chip->state != FL_ERASING) {
-			/* Someone's suspended the erase. Sleep */
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
-			continue;
-		}
-		if (chip->erase_suspended) {
-			/* This erase was suspended and resumed.
-			   Adjust the timeout */
-			timeo = jiffies + (HZ*20); /* FIXME */
-			chip->erase_suspended = 0;
-		}
-
-		status = map_read(map, adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, adr);
-			printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
-			ret = -EIO;
-			goto out;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		UDELAY(map, chip, adr, 1000000/HZ);
+	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+				   adr, len,
+				   &chip->erase_time);
+	if (ret) {
+		map_write(map, CMD(0x70), adr);
+		chip->state = FL_STATUS;
+		xip_enable(map, chip, adr);
+		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
+		goto out;
 	}
 
 	/* We've broken this before. It doesn't hurt to be safe */
@@ -1820,7 +1738,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 		ret = -EIO;
 	} else if (chipstatus & 0x20 && retries--) {
 		printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
-		timeo = jiffies + HZ;
 		put_chip(map, chip, adr);
 		spin_unlock(chip->mutex);
 		goto retry;
@@ -1926,15 +1843,11 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
-	map_word status, status_OK;
-	unsigned long timeo = jiffies + HZ;
+	int udelay;
 	int ret;
 
 	adr += chip->start;
 
-	/* Let's determine this according to the interleave only once */
-	status_OK = CMD(0x80);
-
 	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_LOCKING);
 	if (ret) {
@@ -1959,41 +1872,21 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
 	 * If Instant Individual Block Locking supported then no need
 	 * to delay.
 	 */
+	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
 
-	if (!extp || !(extp->FeatureSupport & (1 << 5)))
-		UDELAY(map, chip, adr, 1000000/HZ);
-
-	/* FIXME. Use a timer to check this, and return immediately. */
-	/* Once the state machine's known to be working I'll do that */
-
-	timeo = jiffies + (HZ*20);
-	for (;;) {
-
-		status = map_read(map, adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, adr);
-			printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
-			put_chip(map, chip, adr);
-			spin_unlock(chip->mutex);
-			return -EIO;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		UDELAY(map, chip, adr, 1);
+	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
+	if (ret) {
+		map_write(map, CMD(0x70), adr);
+		chip->state = FL_STATUS;
+		xip_enable(map, chip, adr);
+		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
+		goto out;
 	}
 
-	/* Done and happy. */
-	chip->state = FL_STATUS;
 	xip_enable(map, chip, adr);
-	put_chip(map, chip, adr);
+out:	put_chip(map, chip, adr);
 	spin_unlock(chip->mutex);
-	return 0;
+	return ret;
 }
 
 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
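
One last point on the adaptive timing: because callers now pass
&chip->word_write_time, &chip->buffer_write_time or &chip->erase_time, the
non-XIP helper's end-of-loop adjustment (decrement when the first poll
already saw completion, increment when more than one extra poll was needed)
tunes the stored typical times in place. This is also what lets the erase
path drop the ad hoc unit conversion it used to do at the call site:

	/* Old erase call site converted ms to a us delay by hand: */
	INVALIDATE_CACHE_UDELAY(map, chip, adr, adr, len,
				chip->erase_time*1000/2);

	/* New call site: erase_time is already in us (the 1000<< change in
	 * the first hunk) and is passed by address so the helper can keep
	 * tuning it toward the device's actual speed.
	 */
	ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len,
				   &chip->erase_time);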