author     Amul Kumar Saha <amul.saha@samsung.com>          2009-10-02 07:29:11 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>      2009-10-05 02:53:39 -0400
commit     297758f8fc4e92b1915d2f5f2f84cedfe8941e5a (patch)
tree       ca08e7961fcbd87a83299f2b34297bea048d4f83 /drivers/mtd/onenand/onenand_base.c
parent     5cd0be8ec946ee3901e7f651a795225c6badff8f (diff)
mtd: Standardising prints in onenand_base.c
This patch standardises all the printk messages in onenand_base.c.
Primarily, it replaces the hard-coded function names in the messages
and makes use of __func__ instead.
Signed-off-by: Amul Kumar Saha <amul.saha@samsung.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
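
For reference, the change follows one pattern throughout the file, shown here with the onenand_wait() controller-error message taken from the first hunk below; __func__ is the compiler-provided name of the enclosing function, so the log prefix can no longer go stale when a function is renamed:

    /* before: function name hard-coded in the format string */
    printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl);

    /* after: the compiler supplies the name via __func__ */
    printk(KERN_ERR "%s: controller error = 0x%04x\n", __func__, ctrl);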
Diffstat (limited to 'drivers/mtd/onenand/onenand_base.c')
-rw-r--r--  drivers/mtd/onenand/onenand_base.c  176
1 file changed, 108 insertions(+), 68 deletions(-)
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ff66e4330aa7..8935e634a87e 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -500,25 +500,28 @@ static int onenand_wait(struct mtd_info *mtd, int state)
 		int ecc = onenand_read_ecc(this);
 		if (ecc) {
 			if (ecc & ONENAND_ECC_2BIT_ALL) {
-				printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
+				printk(KERN_ERR "%s: ECC error = 0x%04x\n",
+					__func__, ecc);
 				mtd->ecc_stats.failed++;
 				return -EBADMSG;
 			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
-				printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+				printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
+					__func__, ecc);
 				mtd->ecc_stats.corrected++;
 			}
 		}
 	} else if (state == FL_READING) {
-		printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+		printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+			__func__, ctrl, interrupt);
 		return -EIO;
 	}
 
 	/* If there's controller error, it's a real error */
 	if (ctrl & ONENAND_CTRL_ERROR) {
-		printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n",
-			ctrl);
+		printk(KERN_ERR "%s: controller error = 0x%04x\n",
+			__func__, ctrl);
 		if (ctrl & ONENAND_CTRL_LOCK)
-			printk(KERN_ERR "onenand_wait: it's locked error.\n");
+			printk(KERN_ERR "%s: it's locked error.\n", __func__);
 		return -EIO;
 	}
 
@@ -1015,7 +1018,8 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
 	/* We are attempting to reread, so decrement stats.failed
 	 * which was incremented by onenand_wait due to read failure
 	 */
-	printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+	printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
+		__func__);
 	mtd->ecc_stats.failed--;
 
 	/* Issue the LSB page recovery command */
@@ -1046,7 +1050,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 	int ret = 0;
 	int writesize = this->writesize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+		(unsigned int) from, (int) len);
 
 	if (ops->mode == MTD_OOB_AUTO)
 		oobsize = this->ecclayout->oobavail;
@@ -1057,7 +1062,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 
 	/* Do not allow reads past end of device */
 	if (from + len > mtd->size) {
-		printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+			__func__);
 		ops->retlen = 0;
 		ops->oobretlen = 0;
 		return -EINVAL;
@@ -1146,7 +1152,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 	int ret = 0, boundary = 0;
 	int writesize = this->writesize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+		__func__, (unsigned int) from, (int) len);
 
 	if (ops->mode == MTD_OOB_AUTO)
 		oobsize = this->ecclayout->oobavail;
@@ -1157,7 +1164,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
 
 	/* Do not allow reads past end of device */
 	if ((from + len) > mtd->size) {
-		printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+			__func__);
 		ops->retlen = 0;
 		ops->oobretlen = 0;
 		return -EINVAL;
@@ -1275,7 +1283,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 
 	from += ops->ooboffs;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+		__func__, (unsigned int) from, (int) len);
 
 	/* Initialize return length value */
 	ops->oobretlen = 0;
@@ -1288,7 +1297,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 	column = from & (mtd->oobsize - 1);
 
 	if (unlikely(column >= oobsize)) {
-		printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n");
+		printk(KERN_ERR "%s: Attempted to start read outside oob\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1296,7 +1306,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 	if (unlikely(from >= mtd->size ||
 		     column + len > ((mtd->size >> this->page_shift) -
		     (from >> this->page_shift)) * oobsize)) {
-		printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1319,7 +1330,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
 			ret = onenand_recover_lsb(mtd, from, ret);
 
 		if (ret && ret != -EBADMSG) {
-			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+			printk(KERN_ERR "%s: read failed = 0x%x\n",
+				__func__, ret);
 			break;
 		}
 
@@ -1450,20 +1462,21 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
 	if (interrupt & ONENAND_INT_READ) {
 		int ecc = onenand_read_ecc(this);
 		if (ecc & ONENAND_ECC_2BIT_ALL) {
-			printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
-				", controller error 0x%04x\n", ecc, ctrl);
+			printk(KERN_WARNING "%s: ecc error = 0x%04x, "
+				"controller error 0x%04x\n",
+				__func__, ecc, ctrl);
 			return ONENAND_BBT_READ_ECC_ERROR;
 		}
 	} else {
-		printk(KERN_ERR "onenand_bbt_wait: read timeout!"
-			"ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+		printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+			__func__, ctrl, interrupt);
 		return ONENAND_BBT_READ_FATAL_ERROR;
 	}
 
 	/* Initial bad block case: 0x2400 or 0x0400 */
 	if (ctrl & ONENAND_CTRL_ERROR) {
-		printk(KERN_DEBUG "onenand_bbt_wait: "
-			"controller error = 0x%04x\n", ctrl);
+		printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
+			__func__, ctrl);
 		return ONENAND_BBT_READ_ERROR;
 	}
 
@@ -1487,14 +1500,16 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
 	size_t len = ops->ooblen;
 	u_char *buf = ops->oobbuf;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
+		__func__, (unsigned int) from, len);
 
 	/* Initialize return value */
 	ops->oobretlen = 0;
 
 	/* Do not allow reads past end of device */
 	if (unlikely((from + len) > mtd->size)) {
-		printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n");
+		printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+			__func__);
 		return ONENAND_BBT_READ_FATAL_ERROR;
 	}
 
@@ -1661,21 +1676,23 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 	/* Wait for any existing operation to clear */
 	onenand_panic_wait(mtd);
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
-		(unsigned int) to, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+		__func__, (unsigned int) to, (int) len);
 
 	/* Initialize retlen, in case of early exit */
 	*retlen = 0;
 
 	/* Do not allow writes past end of device */
 	if (unlikely((to + len) > mtd->size)) {
-		printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
+		printk(KERN_ERR "%s: Attempt write to past end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	/* Reject writes, which are not page aligned */
 	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
-		printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
+		printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1711,7 +1728,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 		}
 
 		if (ret) {
-			printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
+			printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
 			break;
 		}
 
@@ -1792,7 +1809,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 	u_char *oobbuf;
 	int ret = 0;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+		__func__, (unsigned int) to, (int) len);
 
 	/* Initialize retlen, in case of early exit */
 	ops->retlen = 0;
@@ -1800,13 +1818,15 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 
 	/* Do not allow writes past end of device */
 	if (unlikely((to + len) > mtd->size)) {
-		printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n");
+		printk(KERN_ERR "%s: Attempt write to past end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	/* Reject writes, which are not page aligned */
 	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
-		printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
+		printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1879,7 +1899,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 			onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
 			if (ret) {
 				written -= prevlen;
-				printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
+				printk(KERN_ERR "%s: write failed %d\n",
+					__func__, ret);
 				break;
 			}
 
@@ -1887,7 +1908,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 			/* Only check verify write turn on */
 			ret = onenand_verify(mtd, buf - len, to - len, len);
 			if (ret)
-				printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+				printk(KERN_ERR "%s: verify failed %d\n",
+					__func__, ret);
 			break;
 		}
 
@@ -1905,14 +1927,16 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
 		/* In partial page write we don't update bufferram */
 		onenand_update_bufferram(mtd, to, !ret && !subpage);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
+			printk(KERN_ERR "%s: write failed %d\n",
+				__func__, ret);
 			break;
 		}
 
 		/* Only check verify write turn on */
 		ret = onenand_verify(mtd, buf, to, thislen);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+			printk(KERN_ERR "%s: verify failed %d\n",
+				__func__, ret);
 			break;
 		}
 
@@ -1968,7 +1992,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
 	to += ops->ooboffs;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+		__func__, (unsigned int) to, (int) len);
 
 	/* Initialize retlen, in case of early exit */
 	ops->oobretlen = 0;
@@ -1981,14 +2006,15 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 	column = to & (mtd->oobsize - 1);
 
 	if (unlikely(column >= oobsize)) {
-		printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n");
+		printk(KERN_ERR "%s: Attempted to start write outside oob\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	/* For compatibility with NAND: Do not allow write past end of page */
 	if (unlikely(column + len > oobsize)) {
-		printk(KERN_ERR "onenand_write_oob_nolock: "
-			"Attempt to write past end of page\n");
+		printk(KERN_ERR "%s: Attempt to write past end of page\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1996,7 +2022,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 	if (unlikely(to >= mtd->size ||
 		     column + len > ((mtd->size >> this->page_shift) -
		     (to >> this->page_shift)) * oobsize)) {
-		printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n");
+		printk(KERN_ERR "%s: Attempted to write past end of device\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -2038,13 +2065,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
 
 		ret = this->wait(mtd, FL_WRITING);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret);
+			printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
 			break;
 		}
 
 		ret = onenand_verify_oob(mtd, oobbuf, to);
 		if (ret) {
-			printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret);
+			printk(KERN_ERR "%s: verify failed %d\n",
+				__func__, ret);
 			break;
 		}
 
@@ -2161,7 +2189,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 	/* Do not allow erase past end of device */
 	if (unlikely((len + addr) > mtd->size)) {
-		printk(KERN_ERR "onenand_erase: Erase past end of device\n");
+		printk(KERN_ERR "%s: Erase past end of device\n", __func__);
 		return -EINVAL;
 	}
 
@@ -2177,7 +2205,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 		 * Erase region's start offset is always block start address.
 		 */
 		if (unlikely((addr - region->offset) & (block_size - 1))) {
-			printk(KERN_ERR "onenand_erase: Unaligned address\n");
+			printk(KERN_ERR "%s: Unaligned address\n", __func__);
 			return -EINVAL;
 		}
 	} else {
@@ -2185,14 +2213,14 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 		/* Start address must align on block boundary */
 		if (unlikely(addr & (block_size - 1))) {
-			printk(KERN_ERR "onenand_erase: Unaligned address\n");
+			printk(KERN_ERR "%s: Unaligned address\n", __func__);
 			return -EINVAL;
 		}
 	}
 
 	/* Length must align on block boundary */
 	if (unlikely(len & (block_size - 1))) {
-		printk(KERN_ERR "onenand_erase: Length not block aligned\n");
+		printk(KERN_ERR "%s: Length not block aligned\n", __func__);
 		return -EINVAL;
 	}
 
@@ -2209,7 +2237,9 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 		/* Check if we have a bad block, we do not erase bad blocks */
 		if (onenand_block_isbad_nolock(mtd, addr, 0)) {
-			printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
+			printk(KERN_WARNING "%s: attempt to erase a bad block "
+				"at addr 0x%012llx\n",
+				__func__, (unsigned long long) addr);
 			instr->state = MTD_ERASE_FAILED;
 			goto erase_exit;
 		}
@@ -2221,8 +2251,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 		ret = this->wait(mtd, FL_ERASING);
 		/* Check, if it is write protected */
 		if (ret) {
-			printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
-				onenand_block(this, addr));
+			printk(KERN_ERR "%s: Failed erase, block %d\n",
+				__func__, onenand_block(this, addr));
 			instr->state = MTD_ERASE_FAILED;
 			instr->fail_addr = addr;
 			goto erase_exit;
@@ -2241,7 +2271,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 			if (len & (block_size - 1)) {
 				/* FIXME: This should be handled at MTD partitioning level. */
-				printk(KERN_ERR "onenand_erase: Unaligned address\n");
+				printk(KERN_ERR "%s: Unaligned address\n",
+					__func__);
 				goto erase_exit;
 			}
 		}
@@ -2272,7 +2303,7 @@ erase_exit:
  */
 static void onenand_sync(struct mtd_info *mtd)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_sync: called\n");
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
 
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_SYNCING);
@@ -2406,7 +2437,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
 		/* Check lock status */
 		status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
 		if (!(status & wp_status_mask))
-			printk(KERN_ERR "wp status = 0x%x\n", status);
+			printk(KERN_ERR "%s: wp status = 0x%x\n",
+				__func__, status);
 
 		return 0;
 	}
@@ -2435,7 +2467,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
 		/* Check lock status */
 		status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
 		if (!(status & wp_status_mask))
-			printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+			printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+				__func__, block, status);
 	}
 
 	return 0;
@@ -2502,7 +2535,8 @@ static int onenand_check_lock_status(struct onenand_chip *this)
 		/* Check lock status */
 		status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
 		if (!(status & ONENAND_WP_US)) {
-			printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+			printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+				__func__, block, status);
 			return 0;
 		}
 	}
@@ -3172,7 +3206,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
 				break;
 
 		if (i != mtd->oobsize) {
-			printk(KERN_WARNING "Block %d not erased.\n", block);
+			printk(KERN_WARNING "%s: Block %d not erased.\n",
+				__func__, block);
 			return 1;
 		}
 	}
@@ -3204,8 +3239,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
 
 	if (boundary >= blksperdie) {
-		printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
-			"Boundary not changed.\n");
+		printk(KERN_ERR "%s: Invalid boundary value. "
+			"Boundary not changed.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -3214,7 +3249,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	new = boundary + (die * this->density_mask);
 	ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
 	if (ret) {
-		printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+		printk(KERN_ERR "%s: Please erase blocks "
+			"before boundary change\n", __func__);
 		return ret;
 	}
 
@@ -3227,12 +3263,12 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 
 	thisboundary = this->read_word(this->base + ONENAND_DATARAM);
 	if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
-		printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+		printk(KERN_ERR "%s: boundary locked\n", __func__);
 		ret = 1;
 		goto out;
 	}
 
-	printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+	printk(KERN_INFO "Changing die %d boundary: %d%s\n",
 		die, boundary, lock ? "(Locked)" : "(Unlocked)");
 
 	addr = die ? this->diesize[0] : 0;
@@ -3243,7 +3279,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
 	ret = this->wait(mtd, FL_ERASING);
 	if (ret) {
-		printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+		printk(KERN_ERR "%s: flexonenand_set_boundary: "
+			"Failed PI erase for Die %d\n", __func__, die);
 		goto out;
 	}
 
@@ -3251,7 +3288,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 	this->command(mtd, ONENAND_CMD_PROG, addr, 0);
 	ret = this->wait(mtd, FL_WRITING);
 	if (ret) {
-		printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+		printk(KERN_ERR "%s: Failed PI write for Die %d\n",
+			__func__, die);
 		goto out;
 	}
 
@@ -3408,8 +3446,8 @@ static void onenand_resume(struct mtd_info *mtd)
 	if (this->state == FL_PM_SUSPENDED)
 		onenand_release_device(mtd);
 	else
-		printk(KERN_ERR "resume() called for the chip which is not"
-			"in suspended state\n");
+		printk(KERN_ERR "%s: resume() called for the chip which is not "
+			"in suspended state\n", __func__);
 }
 
 /**
@@ -3464,7 +3502,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 	if (!this->page_buf) {
 		this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
 		if (!this->page_buf) {
-			printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");
+			printk(KERN_ERR "%s: Can't allocate page_buf\n",
+				__func__);
 			return -ENOMEM;
 		}
 		this->options |= ONENAND_PAGEBUF_ALLOC;
@@ -3472,7 +3511,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 	if (!this->oob_buf) {
 		this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
 		if (!this->oob_buf) {
-			printk(KERN_ERR "onenand_scan(): Can't allocate oob_buf\n");
+			printk(KERN_ERR "%s: Can't allocate oob_buf\n",
+				__func__);
 			if (this->options & ONENAND_PAGEBUF_ALLOC) {
 				this->options &= ~ONENAND_PAGEBUF_ALLOC;
 				kfree(this->page_buf);
@@ -3505,8 +3545,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
 		break;
 
 	default:
-		printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
-			mtd->oobsize);
+		printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
+			__func__, mtd->oobsize);
 		mtd->subpage_sft = 0;
 		/* To prevent kernel oops */
 		this->ecclayout = &onenand_oob_32;