Diffstat (limited to 'drivers/mtd/chips/cfi_cmdset_0001.c')
 drivers/mtd/chips/cfi_cmdset_0001.c | 432 ++++++++++-----------
 1 file changed, 166 insertions(+), 266 deletions(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index fe00af3f9195..d0d5e521b564 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -399,7 +399,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 	for (i=0; i< cfi->numchips; i++) {
 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
-		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
+		cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
 		cfi->chips[i].ref_point_counter = 0;
 		init_waitqueue_head(&(cfi->chips[i].wq));
 	}
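Note on the hunk above: per the CFI specification, WordWriteTimeoutTyp and BufWriteTimeoutTyp encode typical times as 2^n microseconds, while BlockEraseTimeoutTyp encodes 2^n milliseconds. Scaling the erase figure by 1000 puts all three per-chip times in the same unit (microseconds), which the shared wait helpers introduced below expect. A minimal sketch of the decoding, with illustrative helper names that are not part of the driver:

	/* Sketch only: decode CFI "typical" operation times into microseconds.
	 * Per the CFI spec, write timeouts are 2^n us and erase timeouts 2^n ms. */
	static unsigned long cfi_typ_write_us(unsigned char timeout_typ)
	{
		return 1UL << timeout_typ;		/* 2^n us */
	}

	static unsigned long cfi_typ_erase_us(unsigned char timeout_typ)
	{
		return 1000UL << timeout_typ;		/* 2^n ms, scaled to us */
	}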
@@ -894,26 +894,33 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
 
 /*
  * When a delay is required for the flash operation to complete, the
- * xip_udelay() function is polling for both the given timeout and pending
- * (but still masked) hardware interrupts.  Whenever there is an interrupt
- * pending then the flash erase or write operation is suspended, array mode
- * restored and interrupts unmasked.  Task scheduling might also happen at that
- * point.  The CPU eventually returns from the interrupt or the call to
- * schedule() and the suspended flash operation is resumed for the remaining
- * of the delay period.
+ * xip_wait_for_operation() function is polling for both the given timeout
+ * and pending (but still masked) hardware interrupts.  Whenever there is an
+ * interrupt pending then the flash erase or write operation is suspended,
+ * array mode restored and interrupts unmasked.  Task scheduling might also
+ * happen at that point.  The CPU eventually returns from the interrupt or
+ * the call to schedule() and the suspended flash operation is resumed for
+ * the remaining of the delay period.
  *
  * Warning: this function _will_ fool interrupt latency tracing tools.
  */
 
-static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
-				unsigned long adr, int usec)
+static int __xipram xip_wait_for_operation(
+		struct map_info *map, struct flchip *chip,
+		unsigned long adr, int *chip_op_time )
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
 	map_word status, OK = CMD(0x80);
-	unsigned long suspended, start = xip_currtime();
+	unsigned long usec, suspended, start, done;
 	flstate_t oldstate, newstate;
 
+	start = xip_currtime();
+	usec = *chip_op_time * 8;
+	if (usec == 0)
+		usec = 500000;
+	done = 0;
+
 	do {
 		cpu_relax();
 		if (xip_irqpending() && cfip &&
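The helper's contract changes here: xip_udelay() took a fixed delay and returned nothing, while xip_wait_for_operation() takes a pointer to the chip's typical operation time in microseconds and reports the outcome. The polling budget is eight times the typical time, falling back to 500 ms when no typical time is known. A hypothetical call site, sketched for illustration only (the real callers go through the INVAL_CACHE_AND_WAIT macro defined further down):

	/* Sketch only: chip->word_write_time holds the typical time in us,
	 * as initialized in the first hunk.  With a typical time of 128 us
	 * the helper polls for up to 8 * 128 = 1024 us before -ETIME. */
	ret = xip_wait_for_operation(map, chip, adr, &chip->word_write_time);
	if (ret)
		/* -EIO: chip refused to suspend; -ETIME: operation timed out */
		printk(KERN_ERR "%s: flash operation failed (%d)\n",
		       map->name, ret);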
@@ -930,9 +937,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			 * we resume the whole thing at once).  Yes, it
 			 * can happen!
 			 */
+			usec -= done;
 			map_write(map, CMD(0xb0), adr);
 			map_write(map, CMD(0x70), adr);
-			usec -= xip_elapsed_since(start);
 			suspended = xip_currtime();
 			do {
 				if (xip_elapsed_since(suspended) > 100000) {
@@ -942,7 +949,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 					 * This is a critical error but there
 					 * is not much we can do here.
 					 */
-					return;
+					return -EIO;
 				}
 				status = map_read(map, adr);
 			} while (!map_word_andequal(map, status, OK, OK));
@@ -1002,65 +1009,107 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			xip_cpu_idle();
 		}
 		status = map_read(map, adr);
+		done = xip_elapsed_since(start);
 	} while (!map_word_andequal(map, status, OK, OK)
-		 && xip_elapsed_since(start) < usec);
-}
+		 && done < usec);
 
-#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
+	return (done >= usec) ? -ETIME : 0;
+}
 
 /*
  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
  * the flash is actively programming or erasing since we have to poll for
  * the operation to complete anyway.  We can't do that in a generic way with
  * a XIP setup so do it before the actual flash operation in this case
- * and stub it out from INVALIDATE_CACHE_UDELAY.
+ * and stub it out from INVAL_CACHE_AND_WAIT.
  */
 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
 	INVALIDATE_CACHED_RANGE(map, from, size)
 
-#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
-	UDELAY(map, chip, cmd_adr, usec)
-
-/*
- * Extra notes:
- *
- * Activating this XIP support changes the way the code works a bit. For
- * example the code to suspend the current process when concurrent access
- * happens is never executed because xip_udelay() will always return with the
- * same chip state as it was entered with.  This is why there is no care for
- * the presence of add_wait_queue() or schedule() calls from within a couple
- * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
- * The queueing and scheduling are always happening within xip_udelay().
- *
- * Similarly, get_chip() and put_chip() just happen to always be executed
- * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
- * is in array mode, therefore never executing many cases therein and not
- * causing any problem with XIP.
- */
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
+	xip_wait_for_operation(map, chip, cmd_adr, p_usec)
 
 #else
 
 #define xip_disable(map, chip, adr)
 #define xip_enable(map, chip, adr)
 #define XIP_INVAL_CACHED_RANGE(x...)
+#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
+
+static int inval_cache_and_wait_for_operation(
+		struct map_info *map, struct flchip *chip,
+		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+		int *chip_op_time )
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	map_word status, status_OK = CMD(0x80);
+	int z, chip_state = chip->state;
+	unsigned long timeo;
+
+	spin_unlock(chip->mutex);
+	if (inval_len)
+		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+	if (*chip_op_time)
+		cfi_udelay(*chip_op_time);
+	spin_lock(chip->mutex);
 
-#define UDELAY(map, chip, adr, usec)  \
-do {  \
-	spin_unlock(chip->mutex);  \
-	cfi_udelay(usec);  \
-	spin_lock(chip->mutex);  \
-} while (0)
-
-#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
-do {  \
-	spin_unlock(chip->mutex);  \
-	INVALIDATE_CACHED_RANGE(map, adr, len);  \
-	cfi_udelay(usec);  \
-	spin_lock(chip->mutex);  \
-} while (0)
+	timeo = *chip_op_time * 8 * HZ / 1000000;
+	if (timeo < HZ/2)
+		timeo = HZ/2;
+	timeo += jiffies;
+
+	z = 0;
+	for (;;) {
+		if (chip->state != chip_state) {
+			/* Someone's suspended the operation: sleep */
+			DECLARE_WAITQUEUE(wait, current);
+
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			spin_unlock(chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			timeo = jiffies + (HZ / 2); /* FIXME */
+			spin_lock(chip->mutex);
+			continue;
+		}
+
+		status = map_read(map, cmd_adr);
+		if (map_word_andequal(map, status, status_OK, status_OK))
+			break;
+
+		/* OK Still waiting */
+		if (time_after(jiffies, timeo)) {
+			map_write(map, CMD(0x70), cmd_adr);
+			chip->state = FL_STATUS;
+			return -ETIME;
+		}
+
+		/* Latency issues. Drop the lock, wait a while and retry */
+		z++;
+		spin_unlock(chip->mutex);
+		cfi_udelay(1);
+		spin_lock(chip->mutex);
+	}
+
+	if (!z) {
+		if (!--(*chip_op_time))
+			*chip_op_time = 1;
+	} else if (z > 1)
+		++(*chip_op_time);
+
+	/* Done and happy. */
+	chip->state = FL_STATUS;
+	return 0;
+}
 
 #endif
 
+#define WAIT_TIMEOUT(map, chip, adr, udelay) \
+	({ int __udelay = (udelay); \
+	   INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
+
+
 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
 {
 	unsigned long cmd_addr;
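Both variants now tune the stored typical time in place, which is why they take int *chip_op_time rather than a plain value: completing within the initial wait (z == 0) decrements the stored time, floored at 1 us, while needing more than one extra 1 us retry poll (z > 1) increments it, so the up-front cfi_udelay() converges on the device's actual speed. The WAIT_TIMEOUT() wrapper exists because of that write-back: a constant delay has to be copied into the __udelay stack variable before its address can be passed. A standalone sketch of the same feedback rule, with illustrative names that are not from the driver:

	/* Sketch only: the adaptive-delay rule used by both wait helpers.
	 * op_time_us is the stored typical duration the next call will
	 * wait before polling; polls counts the extra retry polls needed. */
	static void tune_op_time(int *op_time_us, int polls)
	{
		if (polls == 0) {
			/* Done within the initial wait: shorten it. */
			if (!--(*op_time_us))
				*op_time_us = 1;	/* never drop to zero */
		} else if (polls > 1) {
			/* Several extra polls were needed: lengthen it. */
			++(*op_time_us);
		}
	}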
@@ -1250,14 +1299,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 			    unsigned long adr, map_word datum, int mode)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	map_word status, status_OK, write_cmd;
-	unsigned long timeo;
-	int z, ret=0;
+	map_word status, write_cmd;
+	int ret=0;
 
 	adr += chip->start;
 
-	/* Let's determine those according to the interleave only once */
-	status_OK = CMD(0x80);
 	switch (mode) {
 	case FL_WRITING:
 		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
@@ -1283,57 +1329,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 	map_write(map, datum, adr);
 	chip->state = mode;
 
-	INVALIDATE_CACHE_UDELAY(map, chip, adr,
-				adr, map_bankwidth(map),
-				chip->word_write_time);
-
-	timeo = jiffies + (HZ/2);
-	z = 0;
-	for (;;) {
-		if (chip->state != mode) {
-			/* Someone's suspended the write. Sleep */
-			DECLARE_WAITQUEUE(wait, current);
-
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock(chip->mutex);
-			continue;
-		}
-
-		status = map_read(map, adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, adr);
-			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
-			ret = -EIO;
-			goto out;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		z++;
-		UDELAY(map, chip, adr, 1);
-	}
-	if (!z) {
-		chip->word_write_time--;
-		if (!chip->word_write_time)
-			chip->word_write_time = 1;
+	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+				   adr, map_bankwidth(map),
+				   &chip->word_write_time);
+	if (ret) {
+		xip_enable(map, chip, adr);
+		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+		goto out;
 	}
-	if (z > 1)
-		chip->word_write_time++;
-
-	/* Done and happy. */
-	chip->state = FL_STATUS;
 
 	/* check for errors */
+	status = map_read(map, adr);
 	if (map_word_bitsset(map, status, CMD(0x1a))) {
 		unsigned long chipstatus = MERGESTATUS(status);
 
@@ -1450,9 +1456,9 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 			       unsigned long *pvec_seek, int len)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	map_word status, status_OK, write_cmd, datum;
-	unsigned long cmd_adr, timeo;
-	int wbufsize, z, ret=0, word_gap, words;
+	map_word status, write_cmd, datum;
+	unsigned long cmd_adr;
+	int ret, wbufsize, word_gap, words;
 	const struct kvec *vec;
 	unsigned long vec_seek;
 
@@ -1461,7 +1467,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	cmd_adr = adr & ~(wbufsize-1);
 
 	/* Let's determine this according to the interleave only once */
-	status_OK = CMD(0x80);
 	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
 
 	spin_lock(chip->mutex);
@@ -1475,12 +1480,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	ENABLE_VPP(map);
 	xip_disable(map, chip, cmd_adr);
 
 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
 	   [...], the device will not accept any more Write to Buffer commands".
 	   So we must check here and reset those bits if they're set. Otherwise
 	   we're just pissing in the wind */
-	if (chip->state != FL_STATUS)
+	if (chip->state != FL_STATUS) {
 		map_write(map, CMD(0x70), cmd_adr);
+		chip->state = FL_STATUS;
+	}
 	status = map_read(map, cmd_adr);
 	if (map_word_bitsset(map, status, CMD(0x30))) {
 		xip_enable(map, chip, cmd_adr);
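For reference (this is the standard Intel/Sharp status-register layout, not spelled out in the patch itself): CMD(0x80) tests SR.7, write state machine ready; CMD(0x30) tests SR.5 and SR.4, the erase and program error bits the datasheet quote above refers to; and the CMD(0x1a) check in the write paths tests SR.4, SR.3 (VPP low) and SR.1 (block locked). 0x70 is Read Status Register and 0x50 is Clear Status Register. As a sketch:

	/* Sketch only: Intel status register bits behind the masks used here. */
	#define SR_WSM_READY	0x80	/* SR.7: write state machine ready */
	#define SR_ERASE_ERR	0x20	/* SR.5: erase error               */
	#define SR_PROG_ERR	0x10	/* SR.4: program error             */
	#define SR_VPP_LOW	0x08	/* SR.3: VPP below required level  */
	#define SR_LOCKED	0x02	/* SR.1: block locked during op    */
	/* Hence CMD(0x30) == SR.5|SR.4 and CMD(0x1a) == SR.4|SR.3|SR.1. */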
@@ -1491,32 +1498,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	}
 
 	chip->state = FL_WRITING_TO_BUFFER;
-
-	z = 0;
-	for (;;) {
-		map_write(map, write_cmd, cmd_adr);
-
+	map_write(map, write_cmd, cmd_adr);
+	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
+	if (ret) {
+		/* Argh. Not ready for write to buffer */
+		map_word Xstatus = map_read(map, cmd_adr);
+		map_write(map, CMD(0x70), cmd_adr);
+		chip->state = FL_STATUS;
 		status = map_read(map, cmd_adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		UDELAY(map, chip, cmd_adr, 1);
-
-		if (++z > 20) {
-			/* Argh. Not ready for write to buffer */
-			map_word Xstatus;
-			map_write(map, CMD(0x70), cmd_adr);
-			chip->state = FL_STATUS;
-			Xstatus = map_read(map, cmd_adr);
-			/* Odd. Clear status bits */
-			map_write(map, CMD(0x50), cmd_adr);
-			map_write(map, CMD(0x70), cmd_adr);
-			xip_enable(map, chip, cmd_adr);
-			printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
-			       map->name, status.x[0], Xstatus.x[0]);
-			ret = -EIO;
-			goto out;
-		}
+		map_write(map, CMD(0x50), cmd_adr);
+		map_write(map, CMD(0x70), cmd_adr);
+		xip_enable(map, chip, cmd_adr);
+		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
+		       map->name, Xstatus.x[0], status.x[0]);
+		goto out;
 	}
 
 	/* Figure out the number of words to write */
@@ -1571,56 +1566,19 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	map_write(map, CMD(0xd0), cmd_adr);
 	chip->state = FL_WRITING;
 
-	INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
-				adr, len,
-				chip->buffer_write_time);
-
-	timeo = jiffies + (HZ/2);
-	z = 0;
-	for (;;) {
-		if (chip->state != FL_WRITING) {
-			/* Someone's suspended the write. Sleep */
-			DECLARE_WAITQUEUE(wait, current);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			timeo = jiffies + (HZ / 2); /* FIXME */
-			spin_lock(chip->mutex);
-			continue;
-		}
-
-		status = map_read(map, cmd_adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), cmd_adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, cmd_adr);
-			printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
-			ret = -EIO;
-			goto out;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		z++;
-		UDELAY(map, chip, cmd_adr, 1);
-	}
-	if (!z) {
-		chip->buffer_write_time--;
-		if (!chip->buffer_write_time)
-			chip->buffer_write_time = 1;
+	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+				   adr, len,
+				   &chip->buffer_write_time);
+	if (ret) {
+		map_write(map, CMD(0x70), cmd_adr);
+		chip->state = FL_STATUS;
+		xip_enable(map, chip, cmd_adr);
+		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
+		goto out;
 	}
-	if (z > 1)
-		chip->buffer_write_time++;
-
-	/* Done and happy. */
-	chip->state = FL_STATUS;
 
 	/* check for errors */
+	status = map_read(map, cmd_adr);
 	if (map_word_bitsset(map, status, CMD(0x1a))) {
 		unsigned long chipstatus = MERGESTATUS(status);
 
@@ -1691,6 +1649,11 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
 			if (chipnum == cfi->numchips)
 				return 0;
 		}
+
+		/* Be nice and reschedule with the chip in a usable state for other
+		   processes. */
+		cond_resched();
+
 	} while (len);
 
 	return 0;
@@ -1711,17 +1674,12 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 			       unsigned long adr, int len, void *thunk)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
-	map_word status, status_OK;
-	unsigned long timeo;
+	map_word status;
 	int retries = 3;
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
+	int ret;
 
 	adr += chip->start;
 
-	/* Let's determine this according to the interleave only once */
-	status_OK = CMD(0x80);
-
  retry:
 	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_ERASING);
@@ -1743,48 +1701,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 
-	INVALIDATE_CACHE_UDELAY(map, chip, adr,
-				adr, len,
-				chip->erase_time*1000/2);
-
-	/* FIXME. Use a timer to check this, and return immediately. */
-	/* Once the state machine's known to be working I'll do that */
-
-	timeo = jiffies + (HZ*20);
-	for (;;) {
-		if (chip->state != FL_ERASING) {
-			/* Someone's suspended the erase. Sleep */
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			add_wait_queue(&chip->wq, &wait);
-			spin_unlock(chip->mutex);
-			schedule();
-			remove_wait_queue(&chip->wq, &wait);
-			spin_lock(chip->mutex);
-			continue;
-		}
-		if (chip->erase_suspended) {
-			/* This erase was suspended and resumed.
-			   Adjust the timeout */
-			timeo = jiffies + (HZ*20); /* FIXME */
-			chip->erase_suspended = 0;
-		}
-
-		status = map_read(map, adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, adr);
-			printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
-			ret = -EIO;
-			goto out;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		UDELAY(map, chip, adr, 1000000/HZ);
-	}
+	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+				   adr, len,
+				   &chip->erase_time);
+	if (ret) {
+		map_write(map, CMD(0x70), adr);
+		chip->state = FL_STATUS;
+		xip_enable(map, chip, adr);
+		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
+		goto out;
+	}
 
 	/* We've broken this before. It doesn't hurt to be safe */
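With erase_time now kept in microseconds, the single call above subsumes both the old half-typical-time initial delay (erase_time*1000/2) and the fixed 20-second polling deadline: the helper sleeps the full typical time, then polls against a deadline of eight times that, floored at half a second. Worked numbers under an assumed BlockEraseTimeoutTyp of 10:

	/* Sketch only: timing for an assumed BlockEraseTimeoutTyp = 10.
	 *   old: initial delay = (1 << 10) * 1000 / 2 = 512000 us,
	 *        deadline fixed at HZ * 20, i.e. 20 s
	 *   new: erase_time = 1000 << 10 = 1024000 us initial sleep,
	 *        deadline = 8 * 1024000 us, i.e. about 8.2 s          */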
@@ -1813,7 +1738,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 			ret = -EIO;
 		} else if (chipstatus & 0x20 && retries--) {
 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
-			timeo = jiffies + HZ;
 			put_chip(map, chip, adr);
 			spin_unlock(chip->mutex);
 			goto retry;
@@ -1919,15 +1843,11 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
 {
 	struct cfi_private *cfi = map->fldrv_priv;
 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
-	map_word status, status_OK;
-	unsigned long timeo = jiffies + HZ;
+	int udelay;
 	int ret;
 
 	adr += chip->start;
 
-	/* Let's determine this according to the interleave only once */
-	status_OK = CMD(0x80);
-
 	spin_lock(chip->mutex);
 	ret = get_chip(map, chip, adr, FL_LOCKING);
 	if (ret) {
@@ -1952,41 +1872,21 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
 	 * If Instant Individual Block Locking supported then no need
 	 * to delay.
 	 */
+	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
 
-	if (!extp || !(extp->FeatureSupport & (1 << 5)))
-		UDELAY(map, chip, adr, 1000000/HZ);
-
-	/* FIXME. Use a timer to check this, and return immediately. */
-	/* Once the state machine's known to be working I'll do that */
-
-	timeo = jiffies + (HZ*20);
-	for (;;) {
-
-		status = map_read(map, adr);
-		if (map_word_andequal(map, status, status_OK, status_OK))
-			break;
-
-		/* OK Still waiting */
-		if (time_after(jiffies, timeo)) {
-			map_write(map, CMD(0x70), adr);
-			chip->state = FL_STATUS;
-			xip_enable(map, chip, adr);
-			printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
-			put_chip(map, chip, adr);
-			spin_unlock(chip->mutex);
-			return -EIO;
-		}
-
-		/* Latency issues. Drop the lock, wait a while and retry */
-		UDELAY(map, chip, adr, 1);
+	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
+	if (ret) {
+		map_write(map, CMD(0x70), adr);
+		chip->state = FL_STATUS;
+		xip_enable(map, chip, adr);
+		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
+		goto out;
 	}
 
-	/* Done and happy. */
-	chip->state = FL_STATUS;
 	xip_enable(map, chip, adr);
-	put_chip(map, chip, adr);
+out:	put_chip(map, chip, adr);
 	spin_unlock(chip->mutex);
-	return 0;
+	return ret;
 }
 
 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)