Diffstat (limited to 'drivers/mtd/onenand')
 -rw-r--r--  drivers/mtd/onenand/omap2.c        |  22
 -rw-r--r--  drivers/mtd/onenand/onenand_base.c | 745
 2 files changed, 620 insertions(+), 147 deletions(-)
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 86c4f6dcdc65..75f38b95811e 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 	unsigned long timeout;
 	u32 syscfg;
 
-	if (state == FL_RESETING) {
-		int i;
+	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+	    state == FL_VERIFYING_ERASE) {
+		int i = 21;
+		unsigned int intr_flags = ONENAND_INT_MASTER;
+
+		switch (state) {
+		case FL_RESETING:
+			intr_flags |= ONENAND_INT_RESET;
+			break;
+		case FL_PREPARING_ERASE:
+			intr_flags |= ONENAND_INT_ERASE;
+			break;
+		case FL_VERIFYING_ERASE:
+			i = 101;
+			break;
+		}
 
-		for (i = 0; i < 20; i++) {
+		while (--i) {
 			udelay(1);
 			intr = read_reg(c, ONENAND_REG_INTERRUPT);
 			if (intr & ONENAND_INT_MASTER)
@@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 			wait_err("controller error", state, ctrl, intr);
 			return -EIO;
 		}
-		if (!(intr & ONENAND_INT_RESET)) {
+		if ((intr & intr_flags) != intr_flags) {
 			wait_err("timeout", state, ctrl, intr);
 			return -EIO;
 		}
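
Note (not part of the patch): the omap2.c hunks above replace the reset-only wait with one bounded polling loop. The driver first picks which interrupt bits must be set for the pending operation (reset, erase prepare, or erase verify), then polls the interrupt register until all of them appear or the retry budget runs out. The following is a minimal userspace sketch of that pattern; the bit values, the retry budgets and the stubbed read_interrupt_reg() helper are illustrative stand-ins, not the driver's actual registers or API.

#include <stdio.h>

#define INT_MASTER  0x8000
#define INT_RESET   0x0010
#define INT_ERASE   0x0020

enum op_state { OP_RESET, OP_PREPARE_ERASE, OP_VERIFY_ERASE };

/* Stand-in for reading the OneNAND interrupt register. */
static unsigned int read_interrupt_reg(void)
{
	static int calls;
	/* Pretend the controller finishes after a few polls. */
	return (++calls > 3) ? (INT_MASTER | INT_RESET | INT_ERASE) : 0;
}

static int wait_for_completion(enum op_state state)
{
	unsigned int flags = INT_MASTER;
	int budget = 21;	/* default retry budget, as in the hunk above */
	unsigned int intr = 0;

	switch (state) {
	case OP_RESET:
		flags |= INT_RESET;
		break;
	case OP_PREPARE_ERASE:
		flags |= INT_ERASE;
		break;
	case OP_VERIFY_ERASE:
		budget = 101;	/* erase verify is given a longer budget */
		break;
	}

	while (--budget) {
		intr = read_interrupt_reg();
		if (intr & INT_MASTER)
			break;
	}

	/* Timeout unless every requested bit is set. */
	return ((intr & flags) == flags) ? 0 : -1;
}

int main(void)
{
	printf("reset wait: %d\n", wait_for_completion(OP_RESET));
	return 0;
}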
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ff66e4330aa7..f63b1db3ffb3 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1,17 +1,19 @@
 /*
  * linux/drivers/mtd/onenand/onenand_base.c
  *
- * Copyright (C) 2005-2007 Samsung Electronics
+ * Copyright © 2005-2009 Samsung Electronics
+ * Copyright © 2007 Nokia Corporation
+ *
  * Kyungmin Park <kyungmin.park@samsung.com>
  *
  * Credits:
  *	Adrian Hunter <ext-adrian.hunter@nokia.com>:
  *	auto-placement support, read-while load support, various fixes
- *	Copyright (C) Nokia Corporation, 2007
  *
  *	Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
  *	Flex-OneNAND support
- *	Copyright (C) Samsung Electronics, 2008
+ *	Amul Kumar Saha <amul.saha at samsung.com>
+ *	OTP support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
 
 #include <asm/io.h>
 
+/*
+ * Multiblock erase if number of blocks to erase is 2 or more.
+ * Maximum number of blocks for simultaneous erase is 64.
+ */
+#define MB_ERASE_MIN_BLK_COUNT	2
+#define MB_ERASE_MAX_BLK_COUNT	64
+
 /* Default Flex-OneNAND boundary and lock respectively */
 static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
 
@@ -43,6 +52,18 @@ MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
 	" : 0->Set boundary in unlocked status"
 	" : 1->Set boundary in locked status");
 
+/* Default OneNAND/Flex-OneNAND OTP options*/
+static int otp;
+
+module_param(otp, int, 0400);
+MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
+		"Syntax : otp=LOCK_TYPE"
+		"LOCK_TYPE : Keys issued, for specific OTP Lock type"
+		" : 0 -> Default (No Blocks Locked)"
+		" : 1 -> OTP Block lock"
+		" : 2 -> 1st Block lock"
+		" : 3 -> BOTH OTP Block and 1st Block lock");
+
 /**
  * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
  * For now, we expose only 64 out of 80 ecc bytes
@@ -339,6 +360,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
 		break;
 
 	case ONENAND_CMD_ERASE:
+	case ONENAND_CMD_MULTIBLOCK_ERASE:
+	case ONENAND_CMD_ERASE_VERIFY:
 	case ONENAND_CMD_BUFFERRAM:
 	case ONENAND_CMD_OTP_ACCESS:
 		block = onenand_block(this, addr);
@@ -483,7 +506,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
 		if (interrupt & flags)
 			break;
 
-		if (state != FL_READING)
+		if (state != FL_READING && state != FL_PREPARING_ERASE)
 			cond_resched();
 	}
 	/* To get correct interrupt status in timeout case */
@@ -500,25 +523,40 @@ static int onenand_wait(struct mtd_info *mtd, int state)
 		int ecc = onenand_read_ecc(this);
 		if (ecc) {
 			if (ecc & ONENAND_ECC_2BIT_ALL) {
-				printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
+				printk(KERN_ERR "%s: ECC error = 0x%04x\n",
+					__func__, ecc);
 				mtd->ecc_stats.failed++;
 				return -EBADMSG;
 			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
-				printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+				printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
+					__func__, ecc);
 				mtd->ecc_stats.corrected++;
 			}
 		}
 	} else if (state == FL_READING) {
-		printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+		printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+			__func__, ctrl, interrupt);
+		return -EIO;
+	}
+
+	if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
+		printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
+			__func__, ctrl, interrupt);
+		return -EIO;
+	}
+
+	if (!(interrupt & ONENAND_INT_MASTER)) {
+		printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
+			__func__, ctrl, interrupt);
 		return -EIO;
 	}
 
 	/* If there's controller error, it's a real error */
 	if (ctrl & ONENAND_CTRL_ERROR) {
-		printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n",
-			ctrl);
+		printk(KERN_ERR "%s: controller error = 0x%04x\n",
+			__func__, ctrl);
 		if (ctrl & ONENAND_CTRL_LOCK)
-			printk(KERN_ERR "onenand_wait: it's locked error.\n");
+			printk(KERN_ERR "%s: it's locked error.\n", __func__);
 		return -EIO;
 	}
 
@@ -1015,7 +1053,8 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
 	/* We are attempting to reread, so decrement stats.failed
 	 * which was incremented by onenand_wait due to read failure
 	 */
-	printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+	printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
+		__func__);
 	mtd->ecc_stats.failed--;
 
 	/* Issue the LSB page recovery command */
@@ -1046,7 +1085,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 	int ret = 0;
 	int writesize = this->writesize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+		__func__, (unsigned int) from, (int) len);
 
 	if (ops->mode == MTD_OOB_AUTO)
 		oobsize = this->ecclayout->oobavail;
@@ -1057,7 +1097,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 
 	/* Do not allow reads past end of device */
 	if (from + len > mtd->size) {
-		printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+			__func__);
 		ops->retlen = 0;
 		ops->oobretlen = 0;
 		return -EINVAL;
@@ -1146,7 +1187,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 	int ret = 0, boundary = 0;
 	int writesize = this->writesize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+		__func__, (unsigned int) from, (int) len);
 
 	if (ops->mode == MTD_OOB_AUTO)
 		oobsize = this->ecclayout->oobavail;
@@ -1157,7 +1199,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 
 	/* Do not allow reads past end of device */
 	if ((from + len) > mtd->size) {
-		printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+			__func__);
 		ops->retlen = 0;
 		ops->oobretlen = 0;
 		return -EINVAL;
@@ -1275,7 +1318,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 
 	from += ops->ooboffs;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+		__func__, (unsigned int) from, (int) len);
 
 	/* Initialize return length value */
 	ops->oobretlen = 0;
@@ -1288,7 +1332,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 	column = from & (mtd->oobsize - 1);
 
 	if (unlikely(column >= oobsize)) {
-		printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n");
+		printk(KERN_ERR "%s: Attempted to start read outside oob\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1296,7 +1341,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 	if (unlikely(from >= mtd->size ||
 		     column + len > ((mtd->size >> this->page_shift) -
 				     (from >> this->page_shift)) * oobsize)) {
-		printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1319,7 +1365,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 			ret = onenand_recover_lsb(mtd, from, ret);
 
 		if (ret && ret != -EBADMSG) {
-			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+			printk(KERN_ERR "%s: read failed = 0x%x\n",
+				__func__, ret);
 			break;
 		}
 
@@ -1450,20 +1497,21 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
 	if (interrupt & ONENAND_INT_READ) {
 		int ecc = onenand_read_ecc(this);
 		if (ecc & ONENAND_ECC_2BIT_ALL) {
-			printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
-				", controller error 0x%04x\n", ecc, ctrl);
+			printk(KERN_WARNING "%s: ecc error = 0x%04x, "
+				"controller error 0x%04x\n",
+				__func__, ecc, ctrl);
 			return ONENAND_BBT_READ_ECC_ERROR;
 		}
 	} else {
-		printk(KERN_ERR "onenand_bbt_wait: read timeout!"
-			"ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+		printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+			__func__, ctrl, interrupt);
 		return ONENAND_BBT_READ_FATAL_ERROR;
 	}
 
 	/* Initial bad block case: 0x2400 or 0x0400 */
 	if (ctrl & ONENAND_CTRL_ERROR) {
-		printk(KERN_DEBUG "onenand_bbt_wait: "
-			"controller error = 0x%04x\n", ctrl);
+		printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
+			__func__, ctrl);
 		return ONENAND_BBT_READ_ERROR;
 	}
 
@@ -1487,14 +1535,16 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
 	size_t len = ops->ooblen;
 	u_char *buf = ops->oobbuf;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
+		__func__, (unsigned int) from, len);
 
 	/* Initialize return value */
 	ops->oobretlen = 0;
 
 	/* Do not allow reads past end of device */
 	if (unlikely((from + len) > mtd->size)) {
-		printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+			__func__);
 		return ONENAND_BBT_READ_FATAL_ERROR;
 	}
 
@@ -1661,21 +1711,23 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 	/* Wait for any existing operation to clear */
 	onenand_panic_wait(mtd);
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
-		(unsigned int) to, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+		__func__, (unsigned int) to, (int) len);
 
 	/* Initialize retlen, in case of early exit */
 	*retlen = 0;
 
 	/* Do not allow writes past end of device */
 	if (unlikely((to + len) > mtd->size)) {
-		printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
+		printk(KERN_ERR "%s: Attempt write to past end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	/* Reject writes, which are not page aligned */
 	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
-		printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
+		printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1711,7 +1763,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 		}
 
 		if (ret) {
-			printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
+			printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
 			break;
 		}
 
@@ -1792,7 +1844,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 	u_char *oobbuf;
 	int ret = 0;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+		__func__, (unsigned int) to, (int) len);
 
 	/* Initialize retlen, in case of early exit */
 	ops->retlen = 0;
@@ -1800,13 +1853,15 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 
 	/* Do not allow writes past end of device */
 	if (unlikely((to + len) > mtd->size)) {
-		printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n");
+		printk(KERN_ERR "%s: Attempt write to past end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	/* Reject writes, which are not page aligned */
 	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
-		printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
+		printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1879,7 +1934,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 			onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
 			if (ret) {
 				written -= prevlen;
-				printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
+				printk(KERN_ERR "%s: write failed %d\n",
+					__func__, ret);
 				break;
 			}
 
@@ -1887,7 +1943,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 				/* Only check verify write turn on */
 				ret = onenand_verify(mtd, buf - len, to - len, len);
 				if (ret)
-					printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+					printk(KERN_ERR "%s: verify failed %d\n",
+						__func__, ret);
 				break;
 			}
 
@@ -1905,14 +1962,16 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 		/* In partial page write we don't update bufferram */
 		onenand_update_bufferram(mtd, to, !ret && !subpage);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
+			printk(KERN_ERR "%s: write failed %d\n",
+				__func__, ret);
 			break;
 		}
 
 		/* Only check verify write turn on */
 		ret = onenand_verify(mtd, buf, to, thislen);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+			printk(KERN_ERR "%s: verify failed %d\n",
+				__func__, ret);
 			break;
 		}
 
@@ -1968,7 +2027,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
 	to += ops->ooboffs;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+		__func__, (unsigned int) to, (int) len);
 
 	/* Initialize retlen, in case of early exit */
 	ops->oobretlen = 0;
@@ -1981,14 +2041,15 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 	column = to & (mtd->oobsize - 1);
 
 	if (unlikely(column >= oobsize)) {
-		printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n");
+		printk(KERN_ERR "%s: Attempted to start write outside oob\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	/* For compatibility with NAND: Do not allow write past end of page */
 	if (unlikely(column + len > oobsize)) {
-		printk(KERN_ERR "onenand_write_oob_nolock: "
-			"Attempt to write past end of page\n");
+		printk(KERN_ERR "%s: Attempt to write past end of page\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1996,7 +2057,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 	if (unlikely(to >= mtd->size ||
 		     column + len > ((mtd->size >> this->page_shift) -
 				     (to >> this->page_shift)) * oobsize)) {
-		printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n");
+		printk(KERN_ERR "%s: Attempted to write past end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -2038,13 +2100,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
 		ret = this->wait(mtd, FL_WRITING);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret);
+			printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
 			break;
 		}
 
 		ret = onenand_verify_oob(mtd, oobbuf, to);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret);
+			printk(KERN_ERR "%s: verify failed %d\n",
+				__func__, ret);
 			break;
 		}
 
@@ -2140,78 +2203,186 @@ static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allo
 	return bbm->isbad_bbt(mtd, ofs, allowbbt);
 }
 
+
+static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
+					struct erase_info *instr)
+{
+	struct onenand_chip *this = mtd->priv;
+	loff_t addr = instr->addr;
+	int len = instr->len;
+	unsigned int block_size = (1 << this->erase_shift);
+	int ret = 0;
+
+	while (len) {
+		this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
+		ret = this->wait(mtd, FL_VERIFYING_ERASE);
+		if (ret) {
+			printk(KERN_ERR "%s: Failed verify, block %d\n",
+				__func__, onenand_block(this, addr));
+			instr->state = MTD_ERASE_FAILED;
+			instr->fail_addr = addr;
+			return -1;
+		}
+		len -= block_size;
+		addr += block_size;
+	}
+	return 0;
+}
+
 /**
- * onenand_erase - [MTD Interface] erase block(s)
+ * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
  * @param mtd		MTD device structure
  * @param instr		erase instruction
+ * @param region	erase region
  *
- * Erase one ore more blocks
+ * Erase one or more blocks up to 64 block at a time
  */
-static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+static int onenand_multiblock_erase(struct mtd_info *mtd,
+				struct erase_info *instr,
+				unsigned int block_size)
 {
 	struct onenand_chip *this = mtd->priv;
-	unsigned int block_size;
 	loff_t addr = instr->addr;
-	loff_t len = instr->len;
-	int ret = 0, i;
-	struct mtd_erase_region_info *region = NULL;
-	loff_t region_end = 0;
+	int len = instr->len;
+	int eb_count = 0;
+	int ret = 0;
+	int bdry_block = 0;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
+	instr->state = MTD_ERASING;
 
-	/* Do not allow erase past end of device */
-	if (unlikely((len + addr) > mtd->size)) {
-		printk(KERN_ERR "onenand_erase: Erase past end of device\n");
-		return -EINVAL;
+	if (ONENAND_IS_DDP(this)) {
+		loff_t bdry_addr = this->chipsize >> 1;
+		if (addr < bdry_addr && (addr + len) > bdry_addr)
+			bdry_block = bdry_addr >> this->erase_shift;
 	}
 
-	if (FLEXONENAND(this)) {
-		/* Find the eraseregion of this address */
-		i = flexonenand_region(mtd, addr);
-		region = &mtd->eraseregions[i];
+	/* Pre-check bbs */
+	while (len) {
+		/* Check if we have a bad block, we do not erase bad blocks */
+		if (onenand_block_isbad_nolock(mtd, addr, 0)) {
+			printk(KERN_WARNING "%s: attempt to erase a bad block "
+				"at addr 0x%012llx\n",
+				__func__, (unsigned long long) addr);
+			instr->state = MTD_ERASE_FAILED;
+			return -EIO;
+		}
+		len -= block_size;
+		addr += block_size;
+	}
 
-		block_size = region->erasesize;
-		region_end = region->offset + region->erasesize * region->numblocks;
+	len = instr->len;
+	addr = instr->addr;
 
-		/* Start address within region must align on block boundary.
-		 * Erase region's start offset is always block start address.
-		 */
-		if (unlikely((addr - region->offset) & (block_size - 1))) {
-			printk(KERN_ERR "onenand_erase: Unaligned address\n");
-			return -EINVAL;
+	/* loop over 64 eb batches */
+	while (len) {
+		struct erase_info verify_instr = *instr;
+		int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
+
+		verify_instr.addr = addr;
+		verify_instr.len = 0;
+
+		/* do not cross chip boundary */
+		if (bdry_block) {
+			int this_block = (addr >> this->erase_shift);
+
+			if (this_block < bdry_block) {
+				max_eb_count = min(max_eb_count,
+					(bdry_block - this_block));
+			}
 		}
-	} else {
-		block_size = 1 << this->erase_shift;
 
-		/* Start address must align on block boundary */
-		if (unlikely(addr & (block_size - 1))) {
-			printk(KERN_ERR "onenand_erase: Unaligned address\n");
-			return -EINVAL;
+		eb_count = 0;
+
+		while (len > block_size && eb_count < (max_eb_count - 1)) {
+			this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
+				addr, block_size);
+			onenand_invalidate_bufferram(mtd, addr, block_size);
+
+			ret = this->wait(mtd, FL_PREPARING_ERASE);
+			if (ret) {
+				printk(KERN_ERR "%s: Failed multiblock erase, "
+					"block %d\n", __func__,
+					onenand_block(this, addr));
+				instr->state = MTD_ERASE_FAILED;
+				instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+				return -EIO;
+			}
+
+			len -= block_size;
+			addr += block_size;
+			eb_count++;
+		}
+
+		/* last block of 64-eb series */
+		cond_resched();
+		this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+		onenand_invalidate_bufferram(mtd, addr, block_size);
+
+		ret = this->wait(mtd, FL_ERASING);
+		/* Check if it is write protected */
+		if (ret) {
+			printk(KERN_ERR "%s: Failed erase, block %d\n",
+				__func__, onenand_block(this, addr));
+			instr->state = MTD_ERASE_FAILED;
+			instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+			return -EIO;
+		}
+
+		len -= block_size;
+		addr += block_size;
+		eb_count++;
+
+		/* verify */
+		verify_instr.len = eb_count * block_size;
+		if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
+			instr->state = verify_instr.state;
+			instr->fail_addr = verify_instr.fail_addr;
+			return -EIO;
 		}
-	}
 
-	/* Length must align on block boundary */
-	if (unlikely(len & (block_size - 1))) {
-		printk(KERN_ERR "onenand_erase: Length not block aligned\n");
-		return -EINVAL;
 	}
+	return 0;
+}
 
-	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
-	/* Grab the lock and see if the device is available */
-	onenand_get_device(mtd, FL_ERASING);
+/**
+ * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
+ * @param mtd		MTD device structure
+ * @param instr		erase instruction
+ * @param region	erase region
+ * @param block_size	erase block size
+ *
+ * Erase one or more blocks one block at a time
+ */
+static int onenand_block_by_block_erase(struct mtd_info *mtd,
+				struct erase_info *instr,
+				struct mtd_erase_region_info *region,
+				unsigned int block_size)
+{
+	struct onenand_chip *this = mtd->priv;
+	loff_t addr = instr->addr;
+	int len = instr->len;
+	loff_t region_end = 0;
+	int ret = 0;
+
+	if (region) {
+		/* region is set for Flex-OneNAND */
+		region_end = region->offset + region->erasesize * region->numblocks;
+	}
 
-	/* Loop through the blocks */
 	instr->state = MTD_ERASING;
 
+	/* Loop through the blocks */
 	while (len) {
 		cond_resched();
 
 		/* Check if we have a bad block, we do not erase bad blocks */
 		if (onenand_block_isbad_nolock(mtd, addr, 0)) {
-			printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
+			printk(KERN_WARNING "%s: attempt to erase a bad block "
+				"at addr 0x%012llx\n",
+				__func__, (unsigned long long) addr);
 			instr->state = MTD_ERASE_FAILED;
-			goto erase_exit;
+			return -EIO;
 		}
 
 		this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
@@ -2221,11 +2392,11 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 		ret = this->wait(mtd, FL_ERASING);
 		/* Check, if it is write protected */
 		if (ret) {
-			printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
-				onenand_block(this, addr));
+			printk(KERN_ERR "%s: Failed erase, block %d\n",
+				__func__, onenand_block(this, addr));
 			instr->state = MTD_ERASE_FAILED;
 			instr->fail_addr = addr;
-			goto erase_exit;
+			return -EIO;
 		}
 
 		len -= block_size;
@@ -2241,25 +2412,88 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 			if (len & (block_size - 1)) {
 				/* FIXME: This should be handled at MTD partitioning level. */
-				printk(KERN_ERR "onenand_erase: Unaligned address\n");
-				goto erase_exit;
+				printk(KERN_ERR "%s: Unaligned address\n",
+					__func__);
+				return -EIO;
 			}
 		}
+	}
+	return 0;
+}
+
+/**
+ * onenand_erase - [MTD Interface] erase block(s)
+ * @param mtd		MTD device structure
+ * @param instr		erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct onenand_chip *this = mtd->priv;
+	unsigned int block_size;
+	loff_t addr = instr->addr;
+	loff_t len = instr->len;
+	int ret = 0;
+	struct mtd_erase_region_info *region = NULL;
+	loff_t region_offset = 0;
+
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
+		(unsigned long long) instr->addr, (unsigned long long) instr->len);
+
+	/* Do not allow erase past end of device */
+	if (unlikely((len + addr) > mtd->size)) {
+		printk(KERN_ERR "%s: Erase past end of device\n", __func__);
+		return -EINVAL;
+	}
+
+	if (FLEXONENAND(this)) {
+		/* Find the eraseregion of this address */
+		int i = flexonenand_region(mtd, addr);
+
+		region = &mtd->eraseregions[i];
+		block_size = region->erasesize;
+
+		/* Start address within region must align on block boundary.
+		 * Erase region's start offset is always block start address.
+		 */
+		region_offset = region->offset;
+	} else
+		block_size = 1 << this->erase_shift;
+
+	/* Start address must align on block boundary */
+	if (unlikely((addr - region_offset) & (block_size - 1))) {
+		printk(KERN_ERR "%s: Unaligned address\n", __func__);
+		return -EINVAL;
+	}
 
+	/* Length must align on block boundary */
+	if (unlikely(len & (block_size - 1))) {
+		printk(KERN_ERR "%s: Length not block aligned\n", __func__);
+		return -EINVAL;
 	}
 
-	instr->state = MTD_ERASE_DONE;
+	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
 
-erase_exit:
+	/* Grab the lock and see if the device is available */
+	onenand_get_device(mtd, FL_ERASING);
 
-	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+	if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+		/* region is set for Flex-OneNAND (no mb erase) */
+		ret = onenand_block_by_block_erase(mtd, instr,
+						region, block_size);
+	} else {
+		ret = onenand_multiblock_erase(mtd, instr, block_size);
+	}
 
 	/* Deselect and wake up anyone waiting on the device */
 	onenand_release_device(mtd);
 
 	/* Do call back function */
-	if (!ret)
+	if (!ret) {
+		instr->state = MTD_ERASE_DONE;
 		mtd_erase_callback(instr);
+	}
 
 	return ret;
 }
@@ -2272,7 +2506,7 @@ erase_exit:
  */
 static void onenand_sync(struct mtd_info *mtd)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_sync: called\n");
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
 
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_SYNCING);
@@ -2406,7 +2640,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
 		/* Check lock status */
 		status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
 		if (!(status & wp_status_mask))
-			printk(KERN_ERR "wp status = 0x%x\n", status);
+			printk(KERN_ERR "%s: wp status = 0x%x\n",
+				__func__, status);
 
 		return 0;
 	}
@@ -2435,7 +2670,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
 		/* Check lock status */
 		status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
 		if (!(status & wp_status_mask))
-			printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+			printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+				__func__, block, status);
 	}
 
 	return 0;
@@ -2502,7 +2738,8 @@ static int onenand_check_lock_status(struct onenand_chip *this)
 		/* Check lock status */
 		status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
 		if (!(status & ONENAND_WP_US)) {
-			printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+			printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+				__func__, block, status);
 			return 0;
 		}
 	}
@@ -2557,6 +2794,208 @@ static void onenand_unlock_all(struct mtd_info *mtd)
 
 #ifdef CONFIG_MTD_ONENAND_OTP
 
+/**
+ * onenand_otp_command - Send OTP specific command to OneNAND device
+ * @param mtd	 MTD device structure
+ * @param cmd	 the command to be sent
+ * @param addr	 offset to read from or write to
+ * @param len	 number of bytes to read or write
+ */
+static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
+				size_t len)
+{
+	struct onenand_chip *this = mtd->priv;
+	int value, block, page;
+
+	/* Address translation */
+	switch (cmd) {
+	case ONENAND_CMD_OTP_ACCESS:
+		block = (int) (addr >> this->erase_shift);
+		page = -1;
+		break;
+
+	default:
+		block = (int) (addr >> this->erase_shift);
+		page = (int) (addr >> this->page_shift);
+
+		if (ONENAND_IS_2PLANE(this)) {
+			/* Make the even block number */
+			block &= ~1;
+			/* Is it the odd plane? */
+			if (addr & this->writesize)
+				block++;
+			page >>= 1;
+		}
+		page &= this->page_mask;
+		break;
+	}
+
+	if (block != -1) {
+		/* Write 'DFS, FBA' of Flash */
+		value = onenand_block_address(this, block);
+		this->write_word(value, this->base +
+				ONENAND_REG_START_ADDRESS1);
+	}
+
+	if (page != -1) {
+		/* Now we use page size operation */
+		int sectors = 4, count = 4;
+		int dataram;
+
+		switch (cmd) {
+		default:
+			if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
+				cmd = ONENAND_CMD_2X_PROG;
+			dataram = ONENAND_CURRENT_BUFFERRAM(this);
+			break;
+		}
+
+		/* Write 'FPA, FSA' of Flash */
+		value = onenand_page_address(page, sectors);
+		this->write_word(value, this->base +
+				ONENAND_REG_START_ADDRESS8);
+
+		/* Write 'BSA, BSC' of DataRAM */
+		value = onenand_buffer_address(dataram, sectors, count);
+		this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+	}
+
+	/* Interrupt clear */
+	this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+
+	/* Write command */
+	this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+	return 0;
+}
+
+/**
+ * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
+ * @param mtd		MTD device structure
+ * @param to		offset to write to
+ * @param len		number of bytes to write
+ * @param retlen	pointer to variable to store the number of written bytes
+ * @param buf		the data to write
+ *
+ * OneNAND write out-of-band only for OTP
+ */
+static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+				struct mtd_oob_ops *ops)
+{
+	struct onenand_chip *this = mtd->priv;
+	int column, ret = 0, oobsize;
+	int written = 0;
+	u_char *oobbuf;
+	size_t len = ops->ooblen;
+	const u_char *buf = ops->oobbuf;
+	int block, value, status;
+
+	to += ops->ooboffs;
+
+	/* Initialize retlen, in case of early exit */
+	ops->oobretlen = 0;
+
+	oobsize = mtd->oobsize;
+
+	column = to & (mtd->oobsize - 1);
+
+	oobbuf = this->oob_buf;
+
+	/* Loop until all data write */
+	while (written < len) {
+		int thislen = min_t(int, oobsize, len - written);
+
+		cond_resched();
+
+		block = (int) (to >> this->erase_shift);
+		/*
+		 * Write 'DFS, FBA' of Flash
+		 * Add: F100h DQ=DFS, FBA
+		 */
+
+		value = onenand_block_address(this, block);
+		this->write_word(value, this->base +
+				ONENAND_REG_START_ADDRESS1);
+
+		/*
+		 * Select DataRAM for DDP
+		 * Add: F101h DQ=DBS
+		 */
+
+		value = onenand_bufferram_address(this, block);
+		this->write_word(value, this->base +
+				ONENAND_REG_START_ADDRESS2);
+		ONENAND_SET_NEXT_BUFFERRAM(this);
+
+		/*
+		 * Enter OTP access mode
+		 */
+		this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+		this->wait(mtd, FL_OTPING);
+
+		/* We send data to spare ram with oobsize
+		 * to prevent byte access */
+		memcpy(oobbuf + column, buf, thislen);
+
+		/*
+		 * Write Data into DataRAM
+		 * Add: 8th Word
+		 * in sector0/spare/page0
+		 * DQ=XXFCh
+		 */
+		this->write_bufferram(mtd, ONENAND_SPARERAM,
+					oobbuf, 0, mtd->oobsize);
+
+		onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+		onenand_update_bufferram(mtd, to, 0);
+		if (ONENAND_IS_2PLANE(this)) {
+			ONENAND_SET_BUFFERRAM1(this);
+			onenand_update_bufferram(mtd, to + this->writesize, 0);
+		}
+
+		ret = this->wait(mtd, FL_WRITING);
+		if (ret) {
+			printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
+			break;
+		}
+
+		/* Exit OTP access mode */
+		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+		this->wait(mtd, FL_RESETING);
+
+		status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+		status &= 0x60;
+
+		if (status == 0x60) {
+			printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+			printk(KERN_DEBUG "1st Block\tLOCKED\n");
+			printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+		} else if (status == 0x20) {
+			printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+			printk(KERN_DEBUG "1st Block\tLOCKED\n");
+			printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
+		} else if (status == 0x40) {
+			printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+			printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
+			printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+		} else {
+			printk(KERN_DEBUG "Reboot to check\n");
+		}
+
+		written += thislen;
+		if (written == len)
+			break;
+
+		to += mtd->writesize;
+		buf += thislen;
+		column = 0;
+	}
+
+	ops->oobretlen = written;
+
+	return ret;
+}
+
 /* Internal OTP operation */
 typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len,
 		size_t *retlen, u_char *buf);
@@ -2659,11 +3098,11 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Enter OTP access mode */
-	this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
-	this->wait(mtd, FL_OTPING);
-
 	if (FLEXONENAND(this)) {
+
+		/* Enter OTP access mode */
+		this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+		this->wait(mtd, FL_OTPING);
 		/*
 		 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
 		 * main area of page 49.
@@ -2674,19 +3113,19 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
 		ops.oobbuf = NULL;
 		ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
 		*retlen = ops.retlen;
+
+		/* Exit OTP access mode */
+		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+		this->wait(mtd, FL_RESETING);
 	} else {
 		ops.mode = MTD_OOB_PLACE;
 		ops.ooblen = len;
 		ops.oobbuf = buf;
 		ops.ooboffs = 0;
-		ret = onenand_write_oob_nolock(mtd, from, &ops);
+		ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
 		*retlen = ops.oobretlen;
 	}
 
-	/* Exit OTP access mode */
-	this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-	this->wait(mtd, FL_RESETING);
-
 	return ret;
 }
 
@@ -2717,16 +3156,21 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
 	if (density < ONENAND_DEVICE_DENSITY_512Mb)
 		otp_pages = 20;
 	else
-		otp_pages = 10;
+		otp_pages = 50;
 
 	if (mode == MTD_OTP_FACTORY) {
 		from += mtd->writesize * otp_pages;
-		otp_pages = 64 - otp_pages;
+		otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
 	}
 
 	/* Check User/Factory boundary */
-	if (((mtd->writesize * otp_pages) - (from + len)) < 0)
-		return 0;
+	if (mode == MTD_OTP_USER) {
+		if (mtd->writesize * otp_pages < from + len)
+			return 0;
+	} else {
+		if (mtd->writesize * otp_pages < len)
+			return 0;
+	}
 
 	onenand_get_device(mtd, FL_OTPING);
 	while (len > 0 && otp_pages > 0) {
@@ -2749,13 +3193,12 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
 			*retlen += sizeof(struct otp_info);
 		} else {
 			size_t tmp_retlen;
-			int size = len;
 
 			ret = action(mtd, from, len, &tmp_retlen, buf);
 
-			buf += size;
-			len -= size;
-			*retlen += size;
+			buf += tmp_retlen;
+			len -= tmp_retlen;
+			*retlen += tmp_retlen;
 
 			if (ret)
 				break;
@@ -2868,21 +3311,11 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 	u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
 	size_t retlen;
 	int ret;
+	unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
 
 	memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
 		: mtd->oobsize);
 	/*
-	 * Note: OTP lock operation
-	 *       OTP block : 0xXXFC
-	 *       1st block : 0xXXF3 (If chip support)
-	 *       Both      : 0xXXF0 (If chip support)
-	 */
-	if (FLEXONENAND(this))
-		buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
-	else
-		buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
-
-	/*
 	 * Write lock mark to 8th word of sector0 of page0 of the spare0.
 	 * We write 16 bytes spare area instead of 2 bytes.
 	 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
@@ -2892,10 +3325,30 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 	from = 0;
 	len = FLEXONENAND(this) ? mtd->writesize : 16;
 
+	/*
+	 * Note: OTP lock operation
+	 *       OTP block : 0xXXFC			XX 1111 1100
+	 *       1st block : 0xXXF3 (If chip support)	XX 1111 0011
+	 *       Both      : 0xXXF0 (If chip support)	XX 1111 0000
+	 */
+	if (FLEXONENAND(this))
+		otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
+
+	/* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
+	if (otp == 1)
+		buf[otp_lock_offset] = 0xFC;
+	else if (otp == 2)
+		buf[otp_lock_offset] = 0xF3;
+	else if (otp == 3)
+		buf[otp_lock_offset] = 0xF0;
+	else if (otp != 0)
+		printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
+
 	ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
 
 	return ret ? : retlen;
 }
+
 #endif /* CONFIG_MTD_ONENAND_OTP */
 
 /**
@@ -3172,7 +3625,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
 			break;
 
 		if (i != mtd->oobsize) {
-			printk(KERN_WARNING "Block %d not erased.\n", block);
+			printk(KERN_WARNING "%s: Block %d not erased.\n",
+				__func__, block);
 			return 1;
 		}
 	}
@@ -3204,8 +3658,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
 
 	if (boundary >= blksperdie) {
-		printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
-				"Boundary not changed.\n");
+		printk(KERN_ERR "%s: Invalid boundary value. "
+				"Boundary not changed.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -3214,7 +3668,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	new = boundary + (die * this->density_mask);
 	ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
 	if (ret) {
-		printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+		printk(KERN_ERR "%s: Please erase blocks "
+				"before boundary change\n", __func__);
 		return ret;
 	}
 
@@ -3227,12 +3682,12 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 
 	thisboundary = this->read_word(this->base + ONENAND_DATARAM);
 	if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
-		printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+		printk(KERN_ERR "%s: boundary locked\n", __func__);
 		ret = 1;
 		goto out;
 	}
 
-	printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+	printk(KERN_INFO "Changing die %d boundary: %d%s\n",
 		die, boundary, lock ? "(Locked)" : "(Unlocked)");
 
 	addr = die ? this->diesize[0] : 0;
@@ -3243,7 +3698,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
 	ret = this->wait(mtd, FL_ERASING);
 	if (ret) {
-		printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+		printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
+			__func__, die);
 		goto out;
 	}
 
@@ -3251,7 +3707,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	this->command(mtd, ONENAND_CMD_PROG, addr, 0);
 	ret = this->wait(mtd, FL_WRITING);
 	if (ret) {
-		printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+		printk(KERN_ERR "%s: Failed PI write for Die %d\n",
+			__func__, die);
 		goto out;
 	}
 
@@ -3408,8 +3865,8 @@ static void onenand_resume(struct mtd_info *mtd)
 	if (this->state == FL_PM_SUSPENDED)
 		onenand_release_device(mtd);
 	else
-		printk(KERN_ERR "resume() called for the chip which is not"
-				"in suspended state\n");
+		printk(KERN_ERR "%s: resume() called for the chip which is not "
+				"in suspended state\n", __func__);
 }
 
 /**
@@ -3464,7 +3921,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 	if (!this->page_buf) {
 		this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
 		if (!this->page_buf) {
-			printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");
+			printk(KERN_ERR "%s: Can't allocate page_buf\n",
+				__func__);
 			return -ENOMEM;
 		}
 		this->options |= ONENAND_PAGEBUF_ALLOC;
@@ -3472,7 +3930,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 	if (!this->oob_buf) {
 		this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
 		if (!this->oob_buf) {
-			printk(KERN_ERR "onenand_scan(): Can't allocate oob_buf\n");
+			printk(KERN_ERR "%s: Can't allocate oob_buf\n",
+				__func__);
 			if (this->options & ONENAND_PAGEBUF_ALLOC) {
 				this->options &= ~ONENAND_PAGEBUF_ALLOC;
 				kfree(this->page_buf);
@@ -3505,8 +3964,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 		break;
 
 	default:
-		printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
-			mtd->oobsize);
+		printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
+			__func__, mtd->oobsize);
 		mtd->subpage_sft = 0;
 		/* To prevent kernel oops */
 		this->ecclayout = &onenand_oob_32;
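
Note (not part of the patch): the core of the onenand_base.c change is the batched erase path. onenand_erase now hands anything of two blocks or more to onenand_multiblock_erase, which pre-checks the range for bad blocks and then erases it in batches of up to 64 blocks, never letting a batch cross the die boundary of a DDP chip; each batch ends with a regular erase plus an erase-verify pass. The standalone sketch below only illustrates how such a range is cut into batches; the block size, die size and address range are made-up example numbers, and the printf stands in for the command sequence the driver actually issues.

#include <stdio.h>

#define MB_ERASE_MAX_BLK_COUNT	64

int main(void)
{
	const unsigned long long block_size = 128 * 1024;          /* example */
	const unsigned long long die_size   = 1024 * block_size;   /* example */
	unsigned long long addr = 1000 * block_size;                /* example start */
	unsigned long long len  = 200 * block_size;                 /* example length */

	while (len) {
		unsigned long long this_block = addr / block_size;
		unsigned long long bdry_block = die_size / block_size;
		unsigned long long max_blocks = MB_ERASE_MAX_BLK_COUNT;
		unsigned long long blocks = len / block_size;

		/* do not cross the die boundary */
		if (this_block < bdry_block && bdry_block - this_block < max_blocks)
			max_blocks = bdry_block - this_block;
		if (blocks > max_blocks)
			blocks = max_blocks;

		/*
		 * In the driver: the first (blocks - 1) blocks of a batch are
		 * queued with ONENAND_CMD_MULTIBLOCK_ERASE, the last one is
		 * erased with a regular ONENAND_CMD_ERASE, and the whole batch
		 * is then checked with ONENAND_CMD_ERASE_VERIFY.
		 */
		printf("erase batch: block %llu, %llu block(s)\n",
		       this_block, blocks);

		addr += blocks * block_size;
		len  -= blocks * block_size;
	}
	return 0;
}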