aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-17 16:15:55 -0500
commit8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
treea8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/mtd
parent406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/Kconfig43
-rw-r--r--drivers/mtd/Makefile4
-rw-r--r--drivers/mtd/afs.c8
-rw-r--r--drivers/mtd/ar7part.c25
-rw-r--r--drivers/mtd/bcm47xxpart.c202
-rw-r--r--drivers/mtd/bcm63xxpart.c237
-rw-r--r--drivers/mtd/chips/Kconfig11
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c103
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c507
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c46
-rw-r--r--drivers/mtd/chips/cfi_util.c6
-rw-r--r--drivers/mtd/chips/chipreg.c5
-rw-r--r--drivers/mtd/chips/fwh_lock.h7
-rw-r--r--drivers/mtd/chips/jedec_probe.c34
-rw-r--r--drivers/mtd/chips/map_absent.c10
-rw-r--r--drivers/mtd/chips/map_ram.c14
-rw-r--r--drivers/mtd/chips/map_rom.c13
-rw-r--r--drivers/mtd/cmdlinepart.c238
-rw-r--r--drivers/mtd/devices/Kconfig49
-rw-r--r--drivers/mtd/devices/Makefile7
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c105
-rw-r--r--drivers/mtd/devices/block2mtd.c38
-rw-r--r--drivers/mtd/devices/doc2000.c51
-rw-r--r--drivers/mtd/devices/doc2001.c41
-rw-r--r--drivers/mtd/devices/doc2001plus.c56
-rw-r--r--drivers/mtd/devices/docecc.c2
-rw-r--r--drivers/mtd/devices/docg3.c2162
-rw-r--r--drivers/mtd/devices/docg3.h370
-rw-r--r--drivers/mtd/devices/docprobe.c14
-rw-r--r--drivers/mtd/devices/lart.c35
-rw-r--r--drivers/mtd/devices/m25p80.c222
-rw-r--r--drivers/mtd/devices/ms02-nv.c12
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c162
-rw-r--r--drivers/mtd/devices/mtdram.c35
-rw-r--r--drivers/mtd/devices/phram.c76
-rw-r--r--drivers/mtd/devices/pmc551.c100
-rw-r--r--drivers/mtd/devices/slram.c44
-rw-r--r--drivers/mtd/devices/spear_smi.c1101
-rw-r--r--drivers/mtd/devices/sst25l.c95
-rw-r--r--drivers/mtd/ftl.c121
-rw-r--r--drivers/mtd/inftlcore.c96
-rw-r--r--drivers/mtd/inftlmount.c135
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c45
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c2
-rw-r--r--drivers/mtd/maps/Kconfig75
-rw-r--r--drivers/mtd/maps/Makefile7
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c153
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c37
-rw-r--r--drivers/mtd/maps/ck804xrom.c6
-rw-r--r--drivers/mtd/maps/dc21285.c9
-rw-r--r--drivers/mtd/maps/esb2rom.c8
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c40
-rw-r--r--drivers/mtd/maps/h720x-flash.c23
-rw-r--r--drivers/mtd/maps/ichxrom.c8
-rw-r--r--drivers/mtd/maps/impa7.c28
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c39
-rw-r--r--drivers/mtd/maps/ixp2000.c23
-rw-r--r--drivers/mtd/maps/ixp4xx.c46
-rw-r--r--drivers/mtd/maps/l440gx.c14
-rw-r--r--drivers/mtd/maps/lantiq-flash.c99
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c41
-rw-r--r--drivers/mtd/maps/pci.c44
-rw-r--r--drivers/mtd/maps/pcmciamtd.c138
-rw-r--r--drivers/mtd/maps/physmap.c64
-rw-r--r--drivers/mtd/maps/physmap_of.c128
-rw-r--r--drivers/mtd/maps/pismo.c31
-rw-r--r--drivers/mtd/maps/plat-ram.c38
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c44
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c42
-rw-r--r--drivers/mtd/maps/sa1100-flash.c175
-rw-r--r--drivers/mtd/maps/scb2_flash.c30
-rw-r--r--drivers/mtd/maps/solutionengine.c30
-rw-r--r--drivers/mtd/maps/sun_uflash.c19
-rw-r--r--drivers/mtd/maps/uclinux.c22
-rw-r--r--drivers/mtd/maps/vmu-flash.c24
-rw-r--r--drivers/mtd/mtd_blkdevs.c54
-rw-r--r--drivers/mtd/mtdblock.c45
-rw-r--r--drivers/mtd/mtdblock_ro.c5
-rw-r--r--drivers/mtd/mtdchar.c479
-rw-r--r--drivers/mtd/mtdconcat.c159
-rw-r--r--drivers/mtd/mtdcore.c535
-rw-r--r--drivers/mtd/mtdcore.h3
-rw-r--r--drivers/mtd/mtdoops.c98
-rw-r--r--drivers/mtd/mtdpart.c298
-rw-r--r--drivers/mtd/mtdsuper.c25
-rw-r--r--drivers/mtd/mtdswap.c60
-rw-r--r--drivers/mtd/nand/Kconfig263
-rw-r--r--drivers/mtd/nand/Makefile14
-rw-r--r--drivers/mtd/nand/alauda.c26
-rw-r--r--drivers/mtd/nand/ams-delta.c130
-rw-r--r--drivers/mtd/nand/atmel_nand.c1160
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h114
-rw-r--r--drivers/mtd/nand/au1550nd.c365
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/Makefile4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h22
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c108
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c413
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c18
-rw-r--r--drivers/mtd/nand/cafe_nand.c86
-rw-r--r--drivers/mtd/nand/cmx270_nand.c37
-rw-r--r--drivers/mtd/nand/cs553x_nand.c15
-rw-r--r--drivers/mtd/nand/davinci_nand.c131
-rw-r--r--drivers/mtd/nand/denali.c219
-rw-r--r--drivers/mtd/nand/denali.h5
-rw-r--r--drivers/mtd/nand/denali_dt.c167
-rw-r--r--drivers/mtd/nand/denali_pci.c144
-rw-r--r--drivers/mtd/nand/diskonchip.c79
-rw-r--r--drivers/mtd/nand/docg4.c1415
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c241
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c1103
-rw-r--r--drivers/mtd/nand/fsl_upm.c40
-rw-r--r--drivers/mtd/nand/fsmc_nand.c993
-rw-r--r--drivers/mtd/nand/gpio.c195
-rw-r--r--drivers/mtd/nand/gpmi-nand/Makefile3
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h106
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c1337
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c1701
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h294
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-regs.h184
-rw-r--r--drivers/mtd/nand/h1910.c22
-rw-r--r--drivers/mtd/nand/jz4740_nand.c270
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c924
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c1039
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c65
-rw-r--r--drivers/mtd/nand/mxc_nand.c849
-rw-r--r--drivers/mtd/nand/nand_base.c2075
-rw-r--r--drivers/mtd/nand/nand_bbt.c838
-rw-r--r--drivers/mtd/nand/nand_bch.c4
-rw-r--r--drivers/mtd/nand/nand_ecc.c10
-rw-r--r--drivers/mtd/nand/nand_ids.c13
-rw-r--r--drivers/mtd/nand/nandsim.c252
-rw-r--r--drivers/mtd/nand/ndfc.c55
-rw-r--r--drivers/mtd/nand/nuc900_nand.c37
-rw-r--r--drivers/mtd/nand/omap2.c812
-rw-r--r--drivers/mtd/nand/orion_nand.c88
-rw-r--r--drivers/mtd/nand/pasemi_nand.c18
-rw-r--r--drivers/mtd/nand/plat_nand.c72
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c45
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c578
-rw-r--r--drivers/mtd/nand/r852.c53
-rw-r--r--drivers/mtd/nand/rtc_from4.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c216
-rw-r--r--drivers/mtd/nand/sh_flctl.c727
-rw-r--r--drivers/mtd/nand/sharpsl.c32
-rw-r--r--drivers/mtd/nand/sm_common.c14
-rw-r--r--drivers/mtd/nand/socrates_nand.c66
-rw-r--r--drivers/mtd/nand/tmio_nand.c44
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c28
-rw-r--r--drivers/mtd/nand/xway_nand.c201
-rw-r--r--drivers/mtd/nftlcore.c67
-rw-r--r--drivers/mtd/nftlmount.c39
-rw-r--r--drivers/mtd/ofpart.c117
-rw-r--r--drivers/mtd/onenand/Kconfig1
-rw-r--r--drivers/mtd/onenand/generic.c36
-rw-r--r--drivers/mtd/onenand/omap2.c103
-rw-r--r--drivers/mtd/onenand/onenand_base.c193
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c9
-rw-r--r--drivers/mtd/onenand/samsung.c34
-rw-r--r--drivers/mtd/redboot.c24
-rw-r--r--drivers/mtd/rfd_ftl.c47
-rw-r--r--drivers/mtd/sm_ftl.c41
-rw-r--r--drivers/mtd/ssfdc.c58
-rw-r--r--drivers/mtd/tests/Makefile1
-rw-r--r--drivers/mtd/tests/mtd_nandbiterrs.c461
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c296
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c228
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c242
-rw-r--r--drivers/mtd/tests/mtd_readtest.c66
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c154
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c105
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c169
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c95
-rw-r--r--drivers/mtd/ubi/Kconfig69
-rw-r--r--drivers/mtd/ubi/Makefile6
-rw-r--r--drivers/mtd/ubi/attach.c1754
-rw-r--r--drivers/mtd/ubi/build.c365
-rw-r--r--drivers/mtd/ubi/cdev.c59
-rw-r--r--drivers/mtd/ubi/debug.c310
-rw-r--r--drivers/mtd/ubi/debug.h162
-rw-r--r--drivers/mtd/ubi/eba.c259
-rw-r--r--drivers/mtd/ubi/fastmap.c1535
-rw-r--r--drivers/mtd/ubi/gluebi.c89
-rw-r--r--drivers/mtd/ubi/io.c356
-rw-r--r--drivers/mtd/ubi/kapi.c67
-rw-r--r--drivers/mtd/ubi/misc.c41
-rw-r--r--drivers/mtd/ubi/ubi-media.h145
-rw-r--r--drivers/mtd/ubi/ubi.h362
-rw-r--r--drivers/mtd/ubi/upd.c22
-rw-r--r--drivers/mtd/ubi/vmt.c87
-rw-r--r--drivers/mtd/ubi/vtbl.c248
-rw-r--r--drivers/mtd/ubi/wl.c892
192 files changed, 9880 insertions, 30061 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 73fcbbeb78d..4be8373d43e 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,6 @@
1menuconfig MTD 1menuconfig MTD
2 tristate "Memory Technology Device (MTD) support" 2 tristate "Memory Technology Device (MTD) support"
3 depends on GENERIC_IO 3 depends on HAS_IOMEM
4 help 4 help
5 Memory Technology Devices are flash, RAM and similar chips, often 5 Memory Technology Devices are flash, RAM and similar chips, often
6 used for solid state file systems on embedded devices. This option 6 used for solid state file systems on embedded devices. This option
@@ -12,17 +12,27 @@ menuconfig MTD
12 12
13if MTD 13if MTD
14 14
15config MTD_DEBUG
16 bool "Debugging"
17 help
18 This turns on low-level debugging for the entire MTD sub-system.
19 Normally, you should say 'N'.
20
21config MTD_DEBUG_VERBOSE
22 int "Debugging verbosity (0 = quiet, 3 = noisy)"
23 depends on MTD_DEBUG
24 default "0"
25 help
26 Determines the verbosity level of the MTD debugging messages.
27
15config MTD_TESTS 28config MTD_TESTS
16 tristate "MTD tests support (DANGEROUS)" 29 tristate "MTD tests support"
17 depends on m 30 depends on m
18 help 31 help
19 This option includes various MTD tests into compilation. The tests 32 This option includes various MTD tests into compilation. The tests
20 should normally be compiled as kernel modules. The modules perform 33 should normally be compiled as kernel modules. The modules perform
21 various checks and verifications when loaded. 34 various checks and verifications when loaded.
22 35
23 WARNING: some of the tests will ERASE entire MTD device which they
24 test. Do not use these tests unless you really know what you do.
25
26config MTD_REDBOOT_PARTS 36config MTD_REDBOOT_PARTS
27 tristate "RedBoot partition table parsing" 37 tristate "RedBoot partition table parsing"
28 ---help--- 38 ---help---
@@ -127,34 +137,18 @@ config MTD_AFS_PARTS
127 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example. 137 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
128 138
129config MTD_OF_PARTS 139config MTD_OF_PARTS
130 tristate "OpenFirmware partitioning information support" 140 def_bool y
131 default y
132 depends on OF 141 depends on OF
133 help 142 help
134 This provides a partition parsing function which derives 143 This provides a partition parsing function which derives
135 the partition map from the children of the flash node, 144 the partition map from the children of the flash node,
136 as described in Documentation/devicetree/booting-without-of.txt. 145 as described in Documentation/powerpc/booting-without-of.txt.
137 146
138config MTD_AR7_PARTS 147config MTD_AR7_PARTS
139 tristate "TI AR7 partitioning support" 148 tristate "TI AR7 partitioning support"
140 ---help--- 149 ---help---
141 TI AR7 partitioning support 150 TI AR7 partitioning support
142 151
143config MTD_BCM63XX_PARTS
144 tristate "BCM63XX CFE partitioning support"
145 depends on BCM63XX
146 select CRC32
147 help
148 This provides partions parsing for BCM63xx devices with CFE
149 bootloaders.
150
151config MTD_BCM47XX_PARTS
152 tristate "BCM47XX partitioning support"
153 depends on BCM47XX
154 help
155 This provides partitions parser for devices based on BCM47xx
156 boards.
157
158comment "User Modules And Translation Layers" 152comment "User Modules And Translation Layers"
159 153
160config MTD_CHAR 154config MTD_CHAR
@@ -311,6 +305,9 @@ config MTD_OOPS
311 buffer in a flash partition where it can be read back at some 305 buffer in a flash partition where it can be read back at some
312 later point. 306 later point.
313 307
308 To use, add console=ttyMTDx to the kernel command line,
309 where x is the MTD device number to use.
310
314config MTD_SWAP 311config MTD_SWAP
315 tristate "Swap on MTD device support" 312 tristate "Swap on MTD device support"
316 depends on MTD && SWAP 313 depends on MTD && SWAP
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 18a38e55b2f..39664c4229f 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -5,14 +5,12 @@
5# Core functionality. 5# Core functionality.
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o 7mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
8mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
8 9
9obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
10obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 10obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
11obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 11obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
12obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 12obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
13obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o 13obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
14obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
15obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
16 14
17# 'Users' - code which presents functionality to userspace. 15# 'Users' - code which presents functionality to userspace.
18obj-$(CONFIG_MTD_CHAR) += mtdchar.o 16obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 5a3942bf109..302372c08b5 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -75,7 +75,7 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
75 size_t sz; 75 size_t sz;
76 int ret; 76 int ret;
77 77
78 ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs); 78 ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
79 if (ret >= 0 && sz != sizeof(fs)) 79 if (ret >= 0 && sz != sizeof(fs))
80 ret = -EINVAL; 80 ret = -EINVAL;
81 81
@@ -132,7 +132,7 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
132 int ret, i; 132 int ret, i;
133 133
134 memset(iis, 0, sizeof(*iis)); 134 memset(iis, 0, sizeof(*iis));
135 ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis); 135 ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
136 if (ret < 0) 136 if (ret < 0)
137 goto failed; 137 goto failed;
138 138
@@ -162,8 +162,8 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
162} 162}
163 163
164static int parse_afs_partitions(struct mtd_info *mtd, 164static int parse_afs_partitions(struct mtd_info *mtd,
165 struct mtd_partition **pparts, 165 struct mtd_partition **pparts,
166 struct mtd_part_parser_data *data) 166 unsigned long origin)
167{ 167{
168 struct mtd_partition *parts; 168 struct mtd_partition *parts;
169 u_int mask, off, idx, sz; 169 u_int mask, off, idx, sz;
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 7c057a05adb..6697a1ec72d 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -26,9 +26,7 @@
26#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
28#include <linux/bootmem.h> 28#include <linux/bootmem.h>
29#include <linux/module.h> 29#include <linux/magic.h>
30
31#include <uapi/linux/magic.h>
32 30
33#define AR7_PARTS 4 31#define AR7_PARTS 4
34#define ROOT_OFFSET 0xe0000 32#define ROOT_OFFSET 0xe0000
@@ -36,6 +34,10 @@
36#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42) 34#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
37#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281) 35#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
38 36
37#ifndef SQUASHFS_MAGIC
38#define SQUASHFS_MAGIC 0x73717368
39#endif
40
39struct ar7_bin_rec { 41struct ar7_bin_rec {
40 unsigned int checksum; 42 unsigned int checksum;
41 unsigned int length; 43 unsigned int length;
@@ -44,7 +46,7 @@ struct ar7_bin_rec {
44 46
45static int create_mtd_partitions(struct mtd_info *master, 47static int create_mtd_partitions(struct mtd_info *master,
46 struct mtd_partition **pparts, 48 struct mtd_partition **pparts,
47 struct mtd_part_parser_data *data) 49 unsigned long origin)
48{ 50{
49 struct ar7_bin_rec header; 51 struct ar7_bin_rec header;
50 unsigned int offset; 52 unsigned int offset;
@@ -70,8 +72,8 @@ static int create_mtd_partitions(struct mtd_info *master,
70 72
71 do { /* Try 10 blocks starting from master->erasesize */ 73 do { /* Try 10 blocks starting from master->erasesize */
72 offset = pre_size; 74 offset = pre_size;
73 mtd_read(master, offset, sizeof(header), &len, 75 master->read(master, offset,
74 (uint8_t *)&header); 76 sizeof(header), &len, (uint8_t *)&header);
75 if (!strncmp((char *)&header, "TIENV0.8", 8)) 77 if (!strncmp((char *)&header, "TIENV0.8", 8))
76 ar7_parts[1].offset = pre_size; 78 ar7_parts[1].offset = pre_size;
77 if (header.checksum == LOADER_MAGIC1) 79 if (header.checksum == LOADER_MAGIC1)
@@ -92,16 +94,16 @@ static int create_mtd_partitions(struct mtd_info *master,
92 case LOADER_MAGIC1: 94 case LOADER_MAGIC1:
93 while (header.length) { 95 while (header.length) {
94 offset += sizeof(header) + header.length; 96 offset += sizeof(header) + header.length;
95 mtd_read(master, offset, sizeof(header), &len, 97 master->read(master, offset, sizeof(header),
96 (uint8_t *)&header); 98 &len, (uint8_t *)&header);
97 } 99 }
98 root_offset = offset + sizeof(header) + 4; 100 root_offset = offset + sizeof(header) + 4;
99 break; 101 break;
100 case LOADER_MAGIC2: 102 case LOADER_MAGIC2:
101 while (header.length) { 103 while (header.length) {
102 offset += sizeof(header) + header.length; 104 offset += sizeof(header) + header.length;
103 mtd_read(master, offset, sizeof(header), &len, 105 master->read(master, offset, sizeof(header),
104 (uint8_t *)&header); 106 &len, (uint8_t *)&header);
105 } 107 }
106 root_offset = offset + sizeof(header) + 4 + 0xff; 108 root_offset = offset + sizeof(header) + 4 + 0xff;
107 root_offset &= ~(uint32_t)0xff; 109 root_offset &= ~(uint32_t)0xff;
@@ -111,7 +113,8 @@ static int create_mtd_partitions(struct mtd_info *master,
111 break; 113 break;
112 } 114 }
113 115
114 mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header); 116 master->read(master, root_offset,
117 sizeof(header), &len, (u8 *)&header);
115 if (header.checksum != SQUASHFS_MAGIC) { 118 if (header.checksum != SQUASHFS_MAGIC) {
116 root_offset += master->erasesize - 1; 119 root_offset += master->erasesize - 1;
117 root_offset &= ~(master->erasesize - 1); 120 root_offset &= ~(master->erasesize - 1);
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
deleted file mode 100644
index e06d782489a..00000000000
--- a/drivers/mtd/bcm47xxpart.c
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * BCM47XX MTD partitioning
3 *
4 * Copyright © 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/mtd/mtd.h>
16#include <linux/mtd/partitions.h>
17#include <asm/mach-bcm47xx/nvram.h>
18
19/* 10 parts were found on sflash on Netgear WNDR4500 */
20#define BCM47XXPART_MAX_PARTS 12
21
22/*
23 * Amount of bytes we read when analyzing each block of flash memory.
24 * Set it big enough to allow detecting partition and reading important data.
25 */
26#define BCM47XXPART_BYTES_TO_READ 0x404
27
28/* Magics */
29#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
30#define POT_MAGIC1 0x54544f50 /* POTT */
31#define POT_MAGIC2 0x504f /* OP */
32#define ML_MAGIC1 0x39685a42
33#define ML_MAGIC2 0x26594131
34#define TRX_MAGIC 0x30524448
35
36struct trx_header {
37 uint32_t magic;
38 uint32_t length;
39 uint32_t crc32;
40 uint16_t flags;
41 uint16_t version;
42 uint32_t offset[3];
43} __packed;
44
45static void bcm47xxpart_add_part(struct mtd_partition *part, char *name,
46 u64 offset, uint32_t mask_flags)
47{
48 part->name = name;
49 part->offset = offset;
50 part->mask_flags = mask_flags;
51}
52
53static int bcm47xxpart_parse(struct mtd_info *master,
54 struct mtd_partition **pparts,
55 struct mtd_part_parser_data *data)
56{
57 struct mtd_partition *parts;
58 uint8_t i, curr_part = 0;
59 uint32_t *buf;
60 size_t bytes_read;
61 uint32_t offset;
62 uint32_t blocksize = 0x10000;
63 struct trx_header *trx;
64
65 /* Alloc */
66 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
67 GFP_KERNEL);
68 buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
69
70 /* Parse block by block looking for magics */
71 for (offset = 0; offset <= master->size - blocksize;
72 offset += blocksize) {
73 /* Nothing more in higher memory */
74 if (offset >= 0x2000000)
75 break;
76
77 if (curr_part > BCM47XXPART_MAX_PARTS) {
78 pr_warn("Reached maximum number of partitions, scanning stopped!\n");
79 break;
80 }
81
82 /* Read beginning of the block */
83 if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
84 &bytes_read, (uint8_t *)buf) < 0) {
85 pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
86 offset);
87 continue;
88 }
89
90 /* CFE has small NVRAM at 0x400 */
91 if (buf[0x400 / 4] == NVRAM_HEADER) {
92 bcm47xxpart_add_part(&parts[curr_part++], "boot",
93 offset, MTD_WRITEABLE);
94 continue;
95 }
96
97 /* Standard NVRAM */
98 if (buf[0x000 / 4] == NVRAM_HEADER) {
99 bcm47xxpart_add_part(&parts[curr_part++], "nvram",
100 offset, 0);
101 continue;
102 }
103
104 /*
105 * board_data starts with board_id which differs across boards,
106 * but we can use 'MPFR' (hopefully) magic at 0x100
107 */
108 if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
109 bcm47xxpart_add_part(&parts[curr_part++], "board_data",
110 offset, MTD_WRITEABLE);
111 continue;
112 }
113
114 /* POT(TOP) */
115 if (buf[0x000 / 4] == POT_MAGIC1 &&
116 (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
117 bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
118 MTD_WRITEABLE);
119 continue;
120 }
121
122 /* ML */
123 if (buf[0x010 / 4] == ML_MAGIC1 &&
124 buf[0x014 / 4] == ML_MAGIC2) {
125 bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
126 MTD_WRITEABLE);
127 continue;
128 }
129
130 /* TRX */
131 if (buf[0x000 / 4] == TRX_MAGIC) {
132 trx = (struct trx_header *)buf;
133
134 i = 0;
135 /* We have LZMA loader if offset[2] points to sth */
136 if (trx->offset[2]) {
137 bcm47xxpart_add_part(&parts[curr_part++],
138 "loader",
139 offset + trx->offset[i],
140 0);
141 i++;
142 }
143
144 bcm47xxpart_add_part(&parts[curr_part++], "linux",
145 offset + trx->offset[i], 0);
146 i++;
147
148 /*
149 * Pure rootfs size is known and can be calculated as:
150 * trx->length - trx->offset[i]. We don't fill it as
151 * we want to have jffs2 (overlay) in the same mtd.
152 */
153 bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
154 offset + trx->offset[i], 0);
155 i++;
156
157 /*
158 * We have whole TRX scanned, skip to the next part. Use
159 * roundown (not roundup), as the loop will increase
160 * offset in next step.
161 */
162 offset = rounddown(offset + trx->length, blocksize);
163 continue;
164 }
165 }
166 kfree(buf);
167
168 /*
169 * Assume that partitions end at the beginning of the one they are
170 * followed by.
171 */
172 for (i = 0; i < curr_part - 1; i++)
173 parts[i].size = parts[i + 1].offset - parts[i].offset;
174 if (curr_part > 0)
175 parts[curr_part - 1].size =
176 master->size - parts[curr_part - 1].offset;
177
178 *pparts = parts;
179 return curr_part;
180};
181
182static struct mtd_part_parser bcm47xxpart_mtd_parser = {
183 .owner = THIS_MODULE,
184 .parse_fn = bcm47xxpart_parse,
185 .name = "bcm47xxpart",
186};
187
188static int __init bcm47xxpart_init(void)
189{
190 return register_mtd_parser(&bcm47xxpart_mtd_parser);
191}
192
193static void __exit bcm47xxpart_exit(void)
194{
195 deregister_mtd_parser(&bcm47xxpart_mtd_parser);
196}
197
198module_init(bcm47xxpart_init);
199module_exit(bcm47xxpart_exit);
200
201MODULE_LICENSE("GPL");
202MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
deleted file mode 100644
index 6eeb84c81bc..00000000000
--- a/drivers/mtd/bcm63xxpart.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * BCM63XX CFE image tag parser
3 *
4 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
5 * Mike Albon <malbon@openwrt.org>
6 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
7 * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <linux/crc32.h>
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/partitions.h>
34
35#include <asm/mach-bcm63xx/bcm963xx_tag.h>
36#include <asm/mach-bcm63xx/board_bcm963xx.h>
37
38#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
39
40#define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */
41
42#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
43
44static int bcm63xx_detect_cfe(struct mtd_info *master)
45{
46 char buf[9];
47 int ret;
48 size_t retlen;
49
50 ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
51 (void *)buf);
52 buf[retlen] = 0;
53
54 if (ret)
55 return ret;
56
57 if (strncmp("cfe-v", buf, 5) == 0)
58 return 0;
59
60 /* very old CFE's do not have the cfe-v string, so check for magic */
61 ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
62 (void *)buf);
63 buf[retlen] = 0;
64
65 return strncmp("CFE1CFE1", buf, 8);
66}
67
68static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
69 struct mtd_partition **pparts,
70 struct mtd_part_parser_data *data)
71{
72 /* CFE, NVRAM and global Linux are always present */
73 int nrparts = 3, curpart = 0;
74 struct bcm_tag *buf;
75 struct mtd_partition *parts;
76 int ret;
77 size_t retlen;
78 unsigned int rootfsaddr, kerneladdr, spareaddr;
79 unsigned int rootfslen, kernellen, sparelen, totallen;
80 unsigned int cfelen, nvramlen;
81 unsigned int cfe_erasesize;
82 int i;
83 u32 computed_crc;
84 bool rootfs_first = false;
85
86 if (bcm63xx_detect_cfe(master))
87 return -EINVAL;
88
89 cfe_erasesize = max_t(uint32_t, master->erasesize,
90 BCM63XX_CFE_BLOCK_SIZE);
91
92 cfelen = cfe_erasesize;
93 nvramlen = cfe_erasesize;
94
95 /* Allocate memory for buffer */
96 buf = vmalloc(sizeof(struct bcm_tag));
97 if (!buf)
98 return -ENOMEM;
99
100 /* Get the tag */
101 ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
102 (void *)buf);
103
104 if (retlen != sizeof(struct bcm_tag)) {
105 vfree(buf);
106 return -EIO;
107 }
108
109 computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
110 offsetof(struct bcm_tag, header_crc));
111 if (computed_crc == buf->header_crc) {
112 char *boardid = &(buf->board_id[0]);
113 char *tagversion = &(buf->tag_version[0]);
114
115 sscanf(buf->flash_image_start, "%u", &rootfsaddr);
116 sscanf(buf->kernel_address, "%u", &kerneladdr);
117 sscanf(buf->kernel_length, "%u", &kernellen);
118 sscanf(buf->total_length, "%u", &totallen);
119
120 pr_info("CFE boot tag found with version %s and board type %s\n",
121 tagversion, boardid);
122
123 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
124 rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
125 spareaddr = roundup(totallen, master->erasesize) + cfelen;
126
127 if (rootfsaddr < kerneladdr) {
128 /* default Broadcom layout */
129 rootfslen = kerneladdr - rootfsaddr;
130 rootfs_first = true;
131 } else {
132 /* OpenWrt layout */
133 rootfsaddr = kerneladdr + kernellen;
134 rootfslen = spareaddr - rootfsaddr;
135 }
136 } else {
137 pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
138 buf->header_crc, computed_crc);
139 kernellen = 0;
140 rootfslen = 0;
141 rootfsaddr = 0;
142 spareaddr = cfelen;
143 }
144 sparelen = master->size - spareaddr - nvramlen;
145
146 /* Determine number of partitions */
147 if (rootfslen > 0)
148 nrparts++;
149
150 if (kernellen > 0)
151 nrparts++;
152
153 /* Ask kernel for more memory */
154 parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
155 if (!parts) {
156 vfree(buf);
157 return -ENOMEM;
158 }
159
160 /* Start building partition list */
161 parts[curpart].name = "CFE";
162 parts[curpart].offset = 0;
163 parts[curpart].size = cfelen;
164 curpart++;
165
166 if (kernellen > 0) {
167 int kernelpart = curpart;
168
169 if (rootfslen > 0 && rootfs_first)
170 kernelpart++;
171 parts[kernelpart].name = "kernel";
172 parts[kernelpart].offset = kerneladdr;
173 parts[kernelpart].size = kernellen;
174 curpart++;
175 }
176
177 if (rootfslen > 0) {
178 int rootfspart = curpart;
179
180 if (kernellen > 0 && rootfs_first)
181 rootfspart--;
182 parts[rootfspart].name = "rootfs";
183 parts[rootfspart].offset = rootfsaddr;
184 parts[rootfspart].size = rootfslen;
185 if (sparelen > 0 && !rootfs_first)
186 parts[rootfspart].size += sparelen;
187 curpart++;
188 }
189
190 parts[curpart].name = "nvram";
191 parts[curpart].offset = master->size - nvramlen;
192 parts[curpart].size = nvramlen;
193 curpart++;
194
195 /* Global partition "linux" to make easy firmware upgrade */
196 parts[curpart].name = "linux";
197 parts[curpart].offset = cfelen;
198 parts[curpart].size = master->size - cfelen - nvramlen;
199
200 for (i = 0; i < nrparts; i++)
201 pr_info("Partition %d is %s offset %llx and length %llx\n", i,
202 parts[i].name, parts[i].offset, parts[i].size);
203
204 pr_info("Spare partition is offset %x and length %x\n", spareaddr,
205 sparelen);
206
207 *pparts = parts;
208 vfree(buf);
209
210 return nrparts;
211};
212
213static struct mtd_part_parser bcm63xx_cfe_parser = {
214 .owner = THIS_MODULE,
215 .parse_fn = bcm63xx_parse_cfe_partitions,
216 .name = "bcm63xxpart",
217};
218
219static int __init bcm63xx_cfe_parser_init(void)
220{
221 return register_mtd_parser(&bcm63xx_cfe_parser);
222}
223
224static void __exit bcm63xx_cfe_parser_exit(void)
225{
226 deregister_mtd_parser(&bcm63xx_cfe_parser);
227}
228
229module_init(bcm63xx_cfe_parser_init);
230module_exit(bcm63xx_cfe_parser_exit);
231
232MODULE_LICENSE("GPL");
233MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
234MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
235MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
236MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
237MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index e469b01d40d..b1e3c26edd6 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -43,6 +43,9 @@ choice
43 prompt "Flash cmd/query data swapping" 43 prompt "Flash cmd/query data swapping"
44 depends on MTD_CFI_ADV_OPTIONS 44 depends on MTD_CFI_ADV_OPTIONS
45 default MTD_CFI_NOSWAP 45 default MTD_CFI_NOSWAP
46
47config MTD_CFI_NOSWAP
48 bool "NO"
46 ---help--- 49 ---help---
47 This option defines the way in which the CPU attempts to arrange 50 This option defines the way in which the CPU attempts to arrange
48 data bits when writing the 'magic' commands to the chips. Saying 51 data bits when writing the 'magic' commands to the chips. Saying
@@ -52,8 +55,12 @@ choice
52 Specific arrangements are possible with the BIG_ENDIAN_BYTE and 55 Specific arrangements are possible with the BIG_ENDIAN_BYTE and
53 LITTLE_ENDIAN_BYTE, if the bytes are reversed. 56 LITTLE_ENDIAN_BYTE, if the bytes are reversed.
54 57
55config MTD_CFI_NOSWAP 58 If you have a LART, on which the data (and address) lines were
56 bool "NO" 59 connected in a fashion which ensured that the nets were as short
60 as possible, resulting in a bit-shuffling which seems utterly
61 random to the untrained eye, you need the LART_ENDIAN_BYTE option.
62
63 Yes, there really exists something sicker than PDP-endian :)
57 64
58config MTD_CFI_BE_BYTE_SWAP 65config MTD_CFI_BE_BYTE_SWAP
59 bool "BIG_ENDIAN_BYTE" 66 bool "BIG_ENDIAN_BYTE"
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 77514430f1f..e1e122f2f92 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -87,7 +87,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **
87 87
88static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, 88static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
89 size_t *retlen, void **virt, resource_size_t *phys); 89 size_t *retlen, void **virt, resource_size_t *phys);
90static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); 90static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
91 91
92static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 92static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 93static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -262,9 +262,9 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
262static void fixup_use_point(struct mtd_info *mtd) 262static void fixup_use_point(struct mtd_info *mtd)
263{ 263{
264 struct map_info *map = mtd->priv; 264 struct map_info *map = mtd->priv;
265 if (!mtd->_point && map_is_linear(map)) { 265 if (!mtd->point && map_is_linear(map)) {
266 mtd->_point = cfi_intelext_point; 266 mtd->point = cfi_intelext_point;
267 mtd->_unpoint = cfi_intelext_unpoint; 267 mtd->unpoint = cfi_intelext_unpoint;
268 } 268 }
269} 269}
270 270
@@ -274,8 +274,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
274 struct cfi_private *cfi = map->fldrv_priv; 274 struct cfi_private *cfi = map->fldrv_priv;
275 if (cfi->cfiq->BufWriteTimeoutTyp) { 275 if (cfi->cfiq->BufWriteTimeoutTyp) {
276 printk(KERN_INFO "Using buffer write method\n" ); 276 printk(KERN_INFO "Using buffer write method\n" );
277 mtd->_write = cfi_intelext_write_buffers; 277 mtd->write = cfi_intelext_write_buffers;
278 mtd->_writev = cfi_intelext_writev; 278 mtd->writev = cfi_intelext_writev;
279 } 279 }
280} 280}
281 281
@@ -443,15 +443,15 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
443 mtd->type = MTD_NORFLASH; 443 mtd->type = MTD_NORFLASH;
444 444
445 /* Fill in the default mtd operations */ 445 /* Fill in the default mtd operations */
446 mtd->_erase = cfi_intelext_erase_varsize; 446 mtd->erase = cfi_intelext_erase_varsize;
447 mtd->_read = cfi_intelext_read; 447 mtd->read = cfi_intelext_read;
448 mtd->_write = cfi_intelext_write_words; 448 mtd->write = cfi_intelext_write_words;
449 mtd->_sync = cfi_intelext_sync; 449 mtd->sync = cfi_intelext_sync;
450 mtd->_lock = cfi_intelext_lock; 450 mtd->lock = cfi_intelext_lock;
451 mtd->_unlock = cfi_intelext_unlock; 451 mtd->unlock = cfi_intelext_unlock;
452 mtd->_is_locked = cfi_intelext_is_locked; 452 mtd->is_locked = cfi_intelext_is_locked;
453 mtd->_suspend = cfi_intelext_suspend; 453 mtd->suspend = cfi_intelext_suspend;
454 mtd->_resume = cfi_intelext_resume; 454 mtd->resume = cfi_intelext_resume;
455 mtd->flags = MTD_CAP_NORFLASH; 455 mtd->flags = MTD_CAP_NORFLASH;
456 mtd->name = map->name; 456 mtd->name = map->name;
457 mtd->writesize = 1; 457 mtd->writesize = 1;
@@ -600,12 +600,12 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
600 } 600 }
601 601
602#ifdef CONFIG_MTD_OTP 602#ifdef CONFIG_MTD_OTP
603 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; 603 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
604 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg; 604 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
605 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg; 605 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
606 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; 606 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
607 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info; 607 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
608 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info; 608 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
609#endif 609#endif
610 610
611 /* This function has the potential to distort the reality 611 /* This function has the potential to distort the reality
@@ -1017,6 +1017,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1017 case FL_READY: 1017 case FL_READY:
1018 case FL_STATUS: 1018 case FL_STATUS:
1019 case FL_JEDEC_QUERY: 1019 case FL_JEDEC_QUERY:
1020 /* We should really make set_vpp() count, rather than doing this */
1021 DISABLE_VPP(map);
1020 break; 1022 break;
1021 default: 1023 default:
1022 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); 1024 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
@@ -1322,7 +1324,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1322 int chipnum; 1324 int chipnum;
1323 int ret = 0; 1325 int ret = 0;
1324 1326
1325 if (!map->virt) 1327 if (!map->virt || (from + len > mtd->size))
1326 return -EINVAL; 1328 return -EINVAL;
1327 1329
1328 /* Now lock the chip(s) to POINT state */ 1330 /* Now lock the chip(s) to POINT state */
@@ -1332,6 +1334,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1332 ofs = from - (chipnum << cfi->chipshift); 1334 ofs = from - (chipnum << cfi->chipshift);
1333 1335
1334 *virt = map->virt + cfi->chips[chipnum].start + ofs; 1336 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1337 *retlen = 0;
1335 if (phys) 1338 if (phys)
1336 *phys = map->phys + cfi->chips[chipnum].start + ofs; 1339 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1337 1340
@@ -1366,12 +1369,12 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1366 return 0; 1369 return 0;
1367} 1370}
1368 1371
1369static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 1372static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1370{ 1373{
1371 struct map_info *map = mtd->priv; 1374 struct map_info *map = mtd->priv;
1372 struct cfi_private *cfi = map->fldrv_priv; 1375 struct cfi_private *cfi = map->fldrv_priv;
1373 unsigned long ofs; 1376 unsigned long ofs;
1374 int chipnum, err = 0; 1377 int chipnum;
1375 1378
1376 /* Now unlock the chip(s) POINT state */ 1379 /* Now unlock the chip(s) POINT state */
1377 1380
@@ -1379,7 +1382,7 @@ static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1379 chipnum = (from >> cfi->chipshift); 1382 chipnum = (from >> cfi->chipshift);
1380 ofs = from - (chipnum << cfi->chipshift); 1383 ofs = from - (chipnum << cfi->chipshift);
1381 1384
1382 while (len && !err) { 1385 while (len) {
1383 unsigned long thislen; 1386 unsigned long thislen;
1384 struct flchip *chip; 1387 struct flchip *chip;
1385 1388
@@ -1397,10 +1400,8 @@ static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1397 chip->ref_point_counter--; 1400 chip->ref_point_counter--;
1398 if(chip->ref_point_counter == 0) 1401 if(chip->ref_point_counter == 0)
1399 chip->state = FL_READY; 1402 chip->state = FL_READY;
1400 } else { 1403 } else
1401 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name); 1404 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1402 err = -EINVAL;
1403 }
1404 1405
1405 put_chip(map, chip, chip->start); 1406 put_chip(map, chip, chip->start);
1406 mutex_unlock(&chip->mutex); 1407 mutex_unlock(&chip->mutex);
@@ -1409,8 +1410,6 @@ static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1409 ofs = 0; 1410 ofs = 0;
1410 chipnum++; 1411 chipnum++;
1411 } 1412 }
1412
1413 return err;
1414} 1413}
1415 1414
1416static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1415static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
@@ -1457,6 +1456,8 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
1457 chipnum = (from >> cfi->chipshift); 1456 chipnum = (from >> cfi->chipshift);
1458 ofs = from - (chipnum << cfi->chipshift); 1457 ofs = from - (chipnum << cfi->chipshift);
1459 1458
1459 *retlen = 0;
1460
1460 while (len) { 1461 while (len) {
1461 unsigned long thislen; 1462 unsigned long thislen;
1462 1463
@@ -1550,8 +1551,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1550 } 1551 }
1551 1552
1552 xip_enable(map, chip, adr); 1553 xip_enable(map, chip, adr);
1553 out: DISABLE_VPP(map); 1554 out: put_chip(map, chip, adr);
1554 put_chip(map, chip, adr);
1555 mutex_unlock(&chip->mutex); 1555 mutex_unlock(&chip->mutex);
1556 return ret; 1556 return ret;
1557} 1557}
@@ -1565,6 +1565,10 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1565 int chipnum; 1565 int chipnum;
1566 unsigned long ofs; 1566 unsigned long ofs;
1567 1567
1568 *retlen = 0;
1569 if (!len)
1570 return 0;
1571
1568 chipnum = to >> cfi->chipshift; 1572 chipnum = to >> cfi->chipshift;
1569 ofs = to - (chipnum << cfi->chipshift); 1573 ofs = to - (chipnum << cfi->chipshift);
1570 1574
@@ -1790,8 +1794,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1790 } 1794 }
1791 1795
1792 xip_enable(map, chip, cmd_adr); 1796 xip_enable(map, chip, cmd_adr);
1793 out: DISABLE_VPP(map); 1797 out: put_chip(map, chip, cmd_adr);
1794 put_chip(map, chip, cmd_adr);
1795 mutex_unlock(&chip->mutex); 1798 mutex_unlock(&chip->mutex);
1796 return ret; 1799 return ret;
1797} 1800}
@@ -1810,6 +1813,7 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1810 for (i = 0; i < count; i++) 1813 for (i = 0; i < count; i++)
1811 len += vecs[i].iov_len; 1814 len += vecs[i].iov_len;
1812 1815
1816 *retlen = 0;
1813 if (!len) 1817 if (!len)
1814 return 0; 1818 return 0;
1815 1819
@@ -1928,7 +1932,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1928 ret = -EIO; 1932 ret = -EIO;
1929 } else if (chipstatus & 0x20 && retries--) { 1933 } else if (chipstatus & 0x20 && retries--) {
1930 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1934 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1931 DISABLE_VPP(map);
1932 put_chip(map, chip, adr); 1935 put_chip(map, chip, adr);
1933 mutex_unlock(&chip->mutex); 1936 mutex_unlock(&chip->mutex);
1934 goto retry; 1937 goto retry;
@@ -1941,8 +1944,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1941 } 1944 }
1942 1945
1943 xip_enable(map, chip, adr); 1946 xip_enable(map, chip, adr);
1944 out: DISABLE_VPP(map); 1947 out: put_chip(map, chip, adr);
1945 put_chip(map, chip, adr);
1946 mutex_unlock(&chip->mutex); 1948 mutex_unlock(&chip->mutex);
1947 return ret; 1949 return ret;
1948} 1950}
@@ -2043,7 +2045,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2043{ 2045{
2044 struct cfi_private *cfi = map->fldrv_priv; 2046 struct cfi_private *cfi = map->fldrv_priv;
2045 struct cfi_pri_intelext *extp = cfi->cmdset_priv; 2047 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2046 int mdelay; 2048 int udelay;
2047 int ret; 2049 int ret;
2048 2050
2049 adr += chip->start; 2051 adr += chip->start;
@@ -2072,17 +2074,9 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2072 * If Instant Individual Block Locking supported then no need 2074 * If Instant Individual Block Locking supported then no need
2073 * to delay. 2075 * to delay.
2074 */ 2076 */
2075 /* 2077 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2076 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2077 * lets use a max of 1.5 seconds (1500ms) as timeout.
2078 *
2079 * See "Clear Block Lock-Bits Time" on page 40 in
2080 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2081 * from February 2003
2082 */
2083 mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2084 2078
2085 ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000); 2079 ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2086 if (ret) { 2080 if (ret) {
2087 map_write(map, CMD(0x70), adr); 2081 map_write(map, CMD(0x70), adr);
2088 chip->state = FL_STATUS; 2082 chip->state = FL_STATUS;
@@ -2092,8 +2086,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2092 } 2086 }
2093 2087
2094 xip_enable(map, chip, adr); 2088 xip_enable(map, chip, adr);
2095 out: DISABLE_VPP(map); 2089out: put_chip(map, chip, adr);
2096 put_chip(map, chip, adr);
2097 mutex_unlock(&chip->mutex); 2090 mutex_unlock(&chip->mutex);
2098 return ret; 2091 return ret;
2099} 2092}
@@ -2490,7 +2483,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2490 allowed to. Or should we return -EAGAIN, because the upper layers 2483 allowed to. Or should we return -EAGAIN, because the upper layers
2491 ought to have already shut down anything which was using the device 2484 ought to have already shut down anything which was using the device
2492 anyway? The latter for now. */ 2485 anyway? The latter for now. */
2493 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state); 2486 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2494 ret = -EAGAIN; 2487 ret = -EAGAIN;
2495 case FL_PM_SUSPENDED: 2488 case FL_PM_SUSPENDED:
2496 break; 2489 break;
@@ -2533,10 +2526,12 @@ static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2533 if (!region->lockmap) 2526 if (!region->lockmap)
2534 continue; 2527 continue;
2535 2528
2536 for_each_clear_bit(block, region->lockmap, region->numblocks) { 2529 for (block = 0; block < region->numblocks; block++) {
2537 len = region->erasesize; 2530 len = region->erasesize;
2538 adr = region->offset + block * len; 2531 adr = region->offset + block * len;
2539 cfi_intelext_unlock(mtd, adr, len); 2532
2533 if (!test_bit(block, region->lockmap))
2534 cfi_intelext_unlock(mtd, adr, len);
2540 } 2535 }
2541 } 2536 }
2542} 2537}
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index b86197286f2..51d79223bc7 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,9 +59,6 @@ static void cfi_amdstd_resume (struct mtd_info *);
59static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 59static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
60static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 60static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61 61
62static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
63 size_t *retlen, const u_char *buf);
64
65static void cfi_amdstd_destroy(struct mtd_info *); 62static void cfi_amdstd_destroy(struct mtd_info *);
66 63
67struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 64struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
@@ -148,7 +145,8 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
148 if (((major << 8) | minor) < 0x3131) { 145 if (((major << 8) | minor) < 0x3131) {
149 /* CFI version 1.0 => don't trust bootloc */ 146 /* CFI version 1.0 => don't trust bootloc */
150 147
151 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 148 DEBUG(MTD_DEBUG_LEVEL1,
149 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
152 map->name, cfi->mfr, cfi->id); 150 map->name, cfi->mfr, cfi->id);
153 151
154 /* AFAICS all 29LV400 with a bottom boot block have a device ID 152 /* AFAICS all 29LV400 with a bottom boot block have a device ID
@@ -168,7 +166,8 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
168 * the 8-bit device ID. 166 * the 8-bit device ID.
169 */ 167 */
170 (cfi->mfr == CFI_MFR_MACRONIX)) { 168 (cfi->mfr == CFI_MFR_MACRONIX)) {
171 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 169 DEBUG(MTD_DEBUG_LEVEL1,
170 "%s: Macronix MX29LV400C with bottom boot block"
172 " detected\n", map->name); 171 " detected\n", map->name);
173 extp->TopBottom = 2; /* bottom boot */ 172 extp->TopBottom = 2; /* bottom boot */
174 } else 173 } else
@@ -179,7 +178,8 @@ static void fixup_amd_bootblock(struct mtd_info *mtd)
179 extp->TopBottom = 2; /* bottom boot */ 178 extp->TopBottom = 2; /* bottom boot */
180 } 179 }
181 180
182 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 181 DEBUG(MTD_DEBUG_LEVEL1,
182 "%s: AMD CFI PRI V%c.%c has no boot block field;"
183 " deduced %s from Device ID\n", map->name, major, minor, 183 " deduced %s from Device ID\n", map->name, major, minor,
184 extp->TopBottom == 2 ? "bottom" : "top"); 184 extp->TopBottom == 2 ? "bottom" : "top");
185 } 185 }
@@ -190,9 +190,17 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
190{ 190{
191 struct map_info *map = mtd->priv; 191 struct map_info *map = mtd->priv;
192 struct cfi_private *cfi = map->fldrv_priv; 192 struct cfi_private *cfi = map->fldrv_priv;
193 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
194
193 if (cfi->cfiq->BufWriteTimeoutTyp) { 195 if (cfi->cfiq->BufWriteTimeoutTyp) {
194 pr_debug("Using buffer write method\n" ); 196 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
195 mtd->_write = cfi_amdstd_write_buffers; 197 mtd->write = cfi_amdstd_write_buffers;
198
199 if (extp->SiliconRevision >= 0x1C) {
200 mtd->writesize = 512;
201 mtd->flags &= ~MTD_BIT_WRITEABLE;
202 printk(KERN_INFO "Enabling Spansion 65nm mode, writesize = 512 bytes\n");
203 }
196 } 204 }
197} 205}
198 206
@@ -231,8 +239,8 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd)
231static void fixup_use_secsi(struct mtd_info *mtd) 239static void fixup_use_secsi(struct mtd_info *mtd)
232{ 240{
233 /* Setup for chips with a secsi area */ 241 /* Setup for chips with a secsi area */
234 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 242 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
235 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 243 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
236} 244}
237 245
238static void fixup_use_erase_chip(struct mtd_info *mtd) 246static void fixup_use_erase_chip(struct mtd_info *mtd)
@@ -241,7 +249,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
241 struct cfi_private *cfi = map->fldrv_priv; 249 struct cfi_private *cfi = map->fldrv_priv;
242 if ((cfi->cfiq->NumEraseRegions == 1) && 250 if ((cfi->cfiq->NumEraseRegions == 1) &&
243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 251 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
244 mtd->_erase = cfi_amdstd_erase_chip; 252 mtd->erase = cfi_amdstd_erase_chip;
245 } 253 }
246 254
247} 255}
@@ -252,8 +260,8 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
252 */ 260 */
253static void fixup_use_atmel_lock(struct mtd_info *mtd) 261static void fixup_use_atmel_lock(struct mtd_info *mtd)
254{ 262{
255 mtd->_lock = cfi_atmel_lock; 263 mtd->lock = cfi_atmel_lock;
256 mtd->_unlock = cfi_atmel_unlock; 264 mtd->unlock = cfi_atmel_unlock;
257 mtd->flags |= MTD_POWERUP_LOCK; 265 mtd->flags |= MTD_POWERUP_LOCK;
258} 266}
259 267
@@ -317,7 +325,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
317 325
318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 326 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 327 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
320 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); 328 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
321 } 329 }
322} 330}
323 331
@@ -328,23 +336,10 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
328 336
329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 337 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 338 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
331 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); 339 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
332 } 340 }
333} 341}
334 342
335static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
336{
337 struct map_info *map = mtd->priv;
338 struct cfi_private *cfi = map->fldrv_priv;
339
340 /*
341 * S29NS512P flash uses more than 8bits to report number of sectors,
342 * which is not permitted by CFI.
343 */
344 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
345 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
346}
347
348/* Used to fix CFI-Tables of chips without Extended Query Tables */ 343/* Used to fix CFI-Tables of chips without Extended Query Tables */
349static struct cfi_fixup cfi_nopri_fixup_table[] = { 344static struct cfi_fixup cfi_nopri_fixup_table[] = {
350 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 345 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -375,7 +370,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
375 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 370 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
376 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 371 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
377 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 372 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
378 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
379 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 373 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
380 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 374 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
381 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 375 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
@@ -431,68 +425,6 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
431 } 425 }
432} 426}
433 427
434static int is_m29ew(struct cfi_private *cfi)
435{
436 if (cfi->mfr == CFI_MFR_INTEL &&
437 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
438 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
439 return 1;
440 return 0;
441}
442
443/*
444 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
445 * Some revisions of the M29EW suffer from erase suspend hang ups. In
446 * particular, it can occur when the sequence
447 * Erase Confirm -> Suspend -> Program -> Resume
448 * causes a lockup due to internal timing issues. The consequence is that the
449 * erase cannot be resumed without inserting a dummy command after programming
450 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
451 * that writes an F0 command code before the RESUME command.
452 */
453static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
454 unsigned long adr)
455{
456 struct cfi_private *cfi = map->fldrv_priv;
457 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
458 if (is_m29ew(cfi))
459 map_write(map, CMD(0xF0), adr);
460}
461
462/*
463 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
464 *
465 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
466 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
467 * command is issued after an ERASE RESUME operation without waiting for a
468 * minimum delay. The result is that once the ERASE seems to be completed
469 * (no bits are toggling), the contents of the Flash memory block on which
470 * the erase was ongoing could be inconsistent with the expected values
471 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
472 * values), causing a consequent failure of the ERASE operation.
473 * The occurrence of this issue could be high, especially when file system
474 * operations on the Flash are intensive. As a result, it is recommended
475 * that a patch be applied. Intensive file system operations can cause many
476 * calls to the garbage routine to free Flash space (also by erasing physical
477 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
478 * commands can occur. The problem disappears when a delay is inserted after
479 * the RESUME command by using the udelay() function available in Linux.
480 * The DELAY value must be tuned based on the customer's platform.
481 * The maximum value that fixes the problem in all cases is 500us.
482 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
483 * in most cases.
484 * We have chosen 500µs because this latency is acceptable.
485 */
486static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
487{
488 /*
489 * Resolving the Delay After Resume Issue see Micron TN-13-07
490 * Worst case delay must be 500µs but 30-50µs should be ok as well
491 */
492 if (is_m29ew(cfi))
493 cfi_udelay(500);
494}
495
496struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 428struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
497{ 429{
498 struct cfi_private *cfi = map->fldrv_priv; 430 struct cfi_private *cfi = map->fldrv_priv;
@@ -508,21 +440,20 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
508 mtd->type = MTD_NORFLASH; 440 mtd->type = MTD_NORFLASH;
509 441
510 /* Fill in the default mtd operations */ 442 /* Fill in the default mtd operations */
511 mtd->_erase = cfi_amdstd_erase_varsize; 443 mtd->erase = cfi_amdstd_erase_varsize;
512 mtd->_write = cfi_amdstd_write_words; 444 mtd->write = cfi_amdstd_write_words;
513 mtd->_read = cfi_amdstd_read; 445 mtd->read = cfi_amdstd_read;
514 mtd->_sync = cfi_amdstd_sync; 446 mtd->sync = cfi_amdstd_sync;
515 mtd->_suspend = cfi_amdstd_suspend; 447 mtd->suspend = cfi_amdstd_suspend;
516 mtd->_resume = cfi_amdstd_resume; 448 mtd->resume = cfi_amdstd_resume;
517 mtd->flags = MTD_CAP_NORFLASH; 449 mtd->flags = MTD_CAP_NORFLASH;
518 mtd->name = map->name; 450 mtd->name = map->name;
519 mtd->writesize = 1; 451 mtd->writesize = 1;
520 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 452 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
521 453
522 pr_debug("MTD %s(): write buffer size %d\n", __func__, 454 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
523 mtd->writebufsize); 455 __func__, mtd->writebufsize);
524 456
525 mtd->_panic_write = cfi_amdstd_panic_write;
526 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 457 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
527 458
528 if (cfi->cfi_mode==CFI_MODE_CFI){ 459 if (cfi->cfi_mode==CFI_MODE_CFI){
@@ -838,10 +769,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
838 769
839 switch(chip->oldstate) { 770 switch(chip->oldstate) {
840 case FL_ERASING: 771 case FL_ERASING:
841 cfi_fixup_m29ew_erase_suspend(map,
842 chip->in_progress_block_addr);
843 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 772 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
844 cfi_fixup_m29ew_delay_after_resume(cfi);
845 chip->oldstate = FL_READY; 773 chip->oldstate = FL_READY;
846 chip->state = FL_ERASING; 774 chip->state = FL_ERASING;
847 break; 775 break;
@@ -853,6 +781,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
853 781
854 case FL_READY: 782 case FL_READY:
855 case FL_STATUS: 783 case FL_STATUS:
784 /* We should really make set_vpp() count, rather than doing this */
785 DISABLE_VPP(map);
856 break; 786 break;
857 default: 787 default:
858 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 788 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
@@ -981,8 +911,6 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
981 /* Disallow XIP again */ 911 /* Disallow XIP again */
982 local_irq_disable(); 912 local_irq_disable();
983 913
984 /* Correct Erase Suspend Hangups for M29EW */
985 cfi_fixup_m29ew_erase_suspend(map, adr);
986 /* Resume the write or erase operation */ 914 /* Resume the write or erase operation */
987 map_write(map, cfi->sector_erase_cmd, adr); 915 map_write(map, cfi->sector_erase_cmd, adr);
988 chip->state = oldstate; 916 chip->state = oldstate;
@@ -1096,9 +1024,13 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
1096 int ret = 0; 1024 int ret = 0;
1097 1025
1098 /* ofs: offset within the first chip that the first read should start */ 1026 /* ofs: offset within the first chip that the first read should start */
1027
1099 chipnum = (from >> cfi->chipshift); 1028 chipnum = (from >> cfi->chipshift);
1100 ofs = from - (chipnum << cfi->chipshift); 1029 ofs = from - (chipnum << cfi->chipshift);
1101 1030
1031
1032 *retlen = 0;
1033
1102 while (len) { 1034 while (len) {
1103 unsigned long thislen; 1035 unsigned long thislen;
1104 1036
@@ -1176,11 +1108,16 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
1176 int chipnum; 1108 int chipnum;
1177 int ret = 0; 1109 int ret = 0;
1178 1110
1111
1179 /* ofs: offset within the first chip that the first read should start */ 1112 /* ofs: offset within the first chip that the first read should start */
1113
1180 /* 8 secsi bytes per chip */ 1114 /* 8 secsi bytes per chip */
1181 chipnum=from>>3; 1115 chipnum=from>>3;
1182 ofs=from & 7; 1116 ofs=from & 7;
1183 1117
1118
1119 *retlen = 0;
1120
1184 while (len) { 1121 while (len) {
1185 unsigned long thislen; 1122 unsigned long thislen;
1186 1123
@@ -1234,7 +1171,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1234 return ret; 1171 return ret;
1235 } 1172 }
1236 1173
1237 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1174 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1238 __func__, adr, datum.x[0] ); 1175 __func__, adr, datum.x[0] );
1239 1176
1240 /* 1177 /*
@@ -1245,7 +1182,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1245 */ 1182 */
1246 oldd = map_read(map, adr); 1183 oldd = map_read(map, adr);
1247 if (map_word_equal(map, oldd, datum)) { 1184 if (map_word_equal(map, oldd, datum)) {
1248 pr_debug("MTD %s(): NOP\n", 1185 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
1249 __func__); 1186 __func__);
1250 goto op_done; 1187 goto op_done;
1251 } 1188 }
@@ -1308,7 +1245,6 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1308 xip_enable(map, chip, adr); 1245 xip_enable(map, chip, adr);
1309 op_done: 1246 op_done:
1310 chip->state = FL_READY; 1247 chip->state = FL_READY;
1311 DISABLE_VPP(map);
1312 put_chip(map, chip, adr); 1248 put_chip(map, chip, adr);
1313 mutex_unlock(&chip->mutex); 1249 mutex_unlock(&chip->mutex);
1314 1250
@@ -1326,6 +1262,10 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1326 unsigned long ofs, chipstart; 1262 unsigned long ofs, chipstart;
1327 DECLARE_WAITQUEUE(wait, current); 1263 DECLARE_WAITQUEUE(wait, current);
1328 1264
1265 *retlen = 0;
1266 if (!len)
1267 return 0;
1268
1329 chipnum = to >> cfi->chipshift; 1269 chipnum = to >> cfi->chipshift;
1330 ofs = to - (chipnum << cfi->chipshift); 1270 ofs = to - (chipnum << cfi->chipshift);
1331 chipstart = cfi->chips[chipnum].start; 1271 chipstart = cfi->chips[chipnum].start;
@@ -1440,21 +1380,18 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1440} 1380}
1441 1381
1442 1382
1443/*
1444 * FIXME: interleaved mode not tested, and probably not supported!
1445 */
1446static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1383static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1447 unsigned long adr, const u_char *buf, 1384 unsigned long adr, const u_char *buf,
1448 int len) 1385 int len)
1449{ 1386{
1450 struct cfi_private *cfi = map->fldrv_priv; 1387 struct cfi_private *cfi = map->fldrv_priv;
1451 unsigned long timeo = jiffies + HZ; 1388 unsigned long timeo = jiffies + HZ;
1452 /* see comments in do_write_oneword() regarding uWriteTimeo. */ 1389 /* see comments in do_write_oneword() regarding uWriteTimeout, 20ms */
1453 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1390 unsigned long uWriteTimeout = (HZ / 50) + 1;
1454 int ret = -EIO; 1391 int ret = -EIO;
1455 unsigned long cmd_adr; 1392 unsigned long cmd_adr;
1456 int z, words; 1393 int z, words, prolog, epilog, buflen = len;
1457 map_word datum; 1394 map_word datum, pdat, edat;
1458 1395
1459 adr += chip->start; 1396 adr += chip->start;
1460 cmd_adr = adr; 1397 cmd_adr = adr;
@@ -1468,13 +1405,28 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1468 1405
1469 datum = map_word_load(map, buf); 1406 datum = map_word_load(map, buf);
1470 1407
1471 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1408 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1472 __func__, adr, datum.x[0] ); 1409 __func__, adr, datum.x[0] );
1473 1410
1474 XIP_INVAL_CACHED_RANGE(map, adr, len); 1411 XIP_INVAL_CACHED_RANGE(map, adr, len);
1475 ENABLE_VPP(map); 1412 ENABLE_VPP(map);
1476 xip_disable(map, chip, cmd_adr); 1413 xip_disable(map, chip, cmd_adr);
1477 1414
1415 /* If start is not bus-aligned, prepend old contents of flash */
1416 prolog = (adr & (map_bankwidth(map)-1));
1417 if (prolog) {
1418 adr -= prolog;
1419 cmd_adr -= prolog;
1420 len += prolog;
1421 pdat = map_read(map, adr);
1422 }
1423 /* If end is not bus-aligned, append old contents of flash */
1424 epilog = ((adr + len) & (map_bankwidth(map)-1));
1425 if (epilog) {
1426 len += map_bankwidth(map)-epilog;
1427 edat = map_read(map, adr + len - map_bankwidth(map));
1428 }
1429
1478 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1430 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1479 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1431 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1480 1432
@@ -1488,8 +1440,21 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1488 map_write(map, CMD(words - 1), cmd_adr); 1440 map_write(map, CMD(words - 1), cmd_adr);
1489 /* Write data */ 1441 /* Write data */
1490 z = 0; 1442 z = 0;
1443 if (prolog) {
1444 datum = map_word_load_partial(map, pdat, buf, prolog,
1445 min_t(int, buflen,
1446 map_bankwidth(map) - prolog));
1447 map_write(map, datum, adr);
1448
1449 z += map_bankwidth(map);
1450 buf += map_bankwidth(map) - prolog;
1451 }
1491 while(z < words * map_bankwidth(map)) { 1452 while(z < words * map_bankwidth(map)) {
1492 datum = map_word_load(map, buf); 1453 if (epilog && z >= (words-1) * map_bankwidth(map))
1454 datum = map_word_load_partial(map, edat,
1455 buf, 0, epilog);
1456 else
1457 datum = map_word_load(map, buf);
1493 map_write(map, datum, adr + z); 1458 map_write(map, datum, adr + z);
1494 1459
1495 z += map_bankwidth(map); 1460 z += map_bankwidth(map);
@@ -1536,22 +1501,15 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1536 UDELAY(map, chip, adr, 1); 1501 UDELAY(map, chip, adr, 1);
1537 } 1502 }
1538 1503
1539 /* 1504 /* reset on all failures. */
1540 * Recovery from write-buffer programming failures requires 1505 map_write( map, CMD(0xF0), chip->start );
1541 * the write-to-buffer-reset sequence. Since the last part 1506 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map,
1542 * of the sequence also works as a normal reset, we can run 1507 cfi, cfi->device_type, NULL);
1543 * the same commands regardless of why we are here. 1508 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map,
1544 * See e.g. 1509 cfi, cfi->device_type, NULL);
1545 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1510 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map,
1546 */ 1511 cfi, cfi->device_type, NULL);
1547 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1548 cfi->device_type, NULL);
1549 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1550 cfi->device_type, NULL);
1551 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1552 cfi->device_type, NULL);
1553 xip_enable(map, chip, adr); 1512 xip_enable(map, chip, adr);
1554 /* FIXME - should have reset delay before continuing */
1555 1513
1556 printk(KERN_WARNING "MTD %s(): software timeout\n", 1514 printk(KERN_WARNING "MTD %s(): software timeout\n",
1557 __func__ ); 1515 __func__ );
@@ -1559,7 +1517,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1559 ret = -EIO; 1517 ret = -EIO;
1560 op_done: 1518 op_done:
1561 chip->state = FL_READY; 1519 chip->state = FL_READY;
1562 DISABLE_VPP(map);
1563 put_chip(map, chip, adr); 1520 put_chip(map, chip, adr);
1564 mutex_unlock(&chip->mutex); 1521 mutex_unlock(&chip->mutex);
1565 1522
@@ -1577,39 +1534,19 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1577 int chipnum; 1534 int chipnum;
1578 unsigned long ofs; 1535 unsigned long ofs;
1579 1536
1537 *retlen = 0;
1538 if (!len)
1539 return 0;
1540
1580 chipnum = to >> cfi->chipshift; 1541 chipnum = to >> cfi->chipshift;
1581 ofs = to - (chipnum << cfi->chipshift); 1542 ofs = to - (chipnum << cfi->chipshift);
1582 1543
1583 /* If it's not bus-aligned, do the first word write */ 1544 while (len) {
1584 if (ofs & (map_bankwidth(map)-1)) {
1585 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1586 if (local_len > len)
1587 local_len = len;
1588 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1589 local_len, retlen, buf);
1590 if (ret)
1591 return ret;
1592 ofs += local_len;
1593 buf += local_len;
1594 len -= local_len;
1595
1596 if (ofs >> cfi->chipshift) {
1597 chipnum ++;
1598 ofs = 0;
1599 if (chipnum == cfi->numchips)
1600 return 0;
1601 }
1602 }
1603
1604 /* Write buffer is worth it only if more than one word to write... */
1605 while (len >= map_bankwidth(map) * 2) {
1606 /* We must not cross write block boundaries */ 1545 /* We must not cross write block boundaries */
1607 int size = wbufsize - (ofs & (wbufsize-1)); 1546 int size = wbufsize - (ofs & (wbufsize-1));
1608 1547
1609 if (size > len) 1548 if (size > len)
1610 size = len; 1549 size = len;
1611 if (size % map_bankwidth(map))
1612 size -= size % map_bankwidth(map);
1613 1550
1614 ret = do_write_buffer(map, &cfi->chips[chipnum], 1551 ret = do_write_buffer(map, &cfi->chips[chipnum],
1615 ofs, buf, size); 1552 ofs, buf, size);
@@ -1629,248 +1566,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1629 } 1566 }
1630 } 1567 }
1631 1568
1632 if (len) {
1633 size_t retlen_dregs = 0;
1634
1635 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1636 len, &retlen_dregs, buf);
1637
1638 *retlen += retlen_dregs;
1639 return ret;
1640 }
1641
1642 return 0;
1643}
1644
1645/*
1646 * Wait for the flash chip to become ready to write data
1647 *
1648 * This is only called during the panic_write() path. When panic_write()
1649 * is called, the kernel is in the process of a panic, and will soon be
1650 * dead. Therefore we don't take any locks, and attempt to get access
1651 * to the chip as soon as possible.
1652 */
1653static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1654 unsigned long adr)
1655{
1656 struct cfi_private *cfi = map->fldrv_priv;
1657 int retries = 10;
1658 int i;
1659
1660 /*
1661 * If the driver thinks the chip is idle, and no toggle bits
1662 * are changing, then the chip is actually idle for sure.
1663 */
1664 if (chip->state == FL_READY && chip_ready(map, adr))
1665 return 0;
1666
1667 /*
1668 * Try several times to reset the chip and then wait for it
1669 * to become idle. The upper limit of a few milliseconds of
1670 * delay isn't a big problem: the kernel is dying anyway. It
1671 * is more important to save the messages.
1672 */
1673 while (retries > 0) {
1674 const unsigned long timeo = (HZ / 1000) + 1;
1675
1676 /* send the reset command */
1677 map_write(map, CMD(0xF0), chip->start);
1678
1679 /* wait for the chip to become ready */
1680 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1681 if (chip_ready(map, adr))
1682 return 0;
1683
1684 udelay(1);
1685 }
1686 }
1687
1688 /* the chip never became ready */
1689 return -EBUSY;
1690}
1691
1692/*
1693 * Write out one word of data to a single flash chip during a kernel panic
1694 *
1695 * This is only called during the panic_write() path. When panic_write()
1696 * is called, the kernel is in the process of a panic, and will soon be
1697 * dead. Therefore we don't take any locks, and attempt to get access
1698 * to the chip as soon as possible.
1699 *
1700 * The implementation of this routine is intentionally similar to
1701 * do_write_oneword(), in order to ease code maintenance.
1702 */
1703static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1704 unsigned long adr, map_word datum)
1705{
1706 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1707 struct cfi_private *cfi = map->fldrv_priv;
1708 int retry_cnt = 0;
1709 map_word oldd;
1710 int ret = 0;
1711 int i;
1712
1713 adr += chip->start;
1714
1715 ret = cfi_amdstd_panic_wait(map, chip, adr);
1716 if (ret)
1717 return ret;
1718
1719 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1720 __func__, adr, datum.x[0]);
1721
1722 /*
1723 * Check for a NOP for the case when the datum to write is already
1724 * present - it saves time and works around buggy chips that corrupt
1725 * data at other locations when 0xff is written to a location that
1726 * already contains 0xff.
1727 */
1728 oldd = map_read(map, adr);
1729 if (map_word_equal(map, oldd, datum)) {
1730 pr_debug("MTD %s(): NOP\n", __func__);
1731 goto op_done;
1732 }
1733
1734 ENABLE_VPP(map);
1735
1736retry:
1737 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1738 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1739 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1740 map_write(map, datum, adr);
1741
1742 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1743 if (chip_ready(map, adr))
1744 break;
1745
1746 udelay(1);
1747 }
1748
1749 if (!chip_good(map, adr, datum)) {
1750 /* reset on all failures. */
1751 map_write(map, CMD(0xF0), chip->start);
1752 /* FIXME - should have reset delay before continuing */
1753
1754 if (++retry_cnt <= MAX_WORD_RETRIES)
1755 goto retry;
1756
1757 ret = -EIO;
1758 }
1759
1760op_done:
1761 DISABLE_VPP(map);
1762 return ret;
1763}
1764
1765/*
1766 * Write out some data during a kernel panic
1767 *
1768 * This is used by the mtdoops driver to save the dying messages from a
1769 * kernel which has panic'd.
1770 *
1771 * This routine ignores all of the locking used throughout the rest of the
1772 * driver, in order to ensure that the data gets written out no matter what
1773 * state this driver (and the flash chip itself) was in when the kernel crashed.
1774 *
1775 * The implementation of this routine is intentionally similar to
1776 * cfi_amdstd_write_words(), in order to ease code maintenance.
1777 */
1778static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1779 size_t *retlen, const u_char *buf)
1780{
1781 struct map_info *map = mtd->priv;
1782 struct cfi_private *cfi = map->fldrv_priv;
1783 unsigned long ofs, chipstart;
1784 int ret = 0;
1785 int chipnum;
1786
1787 chipnum = to >> cfi->chipshift;
1788 ofs = to - (chipnum << cfi->chipshift);
1789 chipstart = cfi->chips[chipnum].start;
1790
1791 /* If it's not bus aligned, do the first byte write */
1792 if (ofs & (map_bankwidth(map) - 1)) {
1793 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1794 int i = ofs - bus_ofs;
1795 int n = 0;
1796 map_word tmp_buf;
1797
1798 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1799 if (ret)
1800 return ret;
1801
1802 /* Load 'tmp_buf' with old contents of flash */
1803 tmp_buf = map_read(map, bus_ofs + chipstart);
1804
1805 /* Number of bytes to copy from buffer */
1806 n = min_t(int, len, map_bankwidth(map) - i);
1807
1808 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1809
1810 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1811 bus_ofs, tmp_buf);
1812 if (ret)
1813 return ret;
1814
1815 ofs += n;
1816 buf += n;
1817 (*retlen) += n;
1818 len -= n;
1819
1820 if (ofs >> cfi->chipshift) {
1821 chipnum++;
1822 ofs = 0;
1823 if (chipnum == cfi->numchips)
1824 return 0;
1825 }
1826 }
1827
1828 /* We are now aligned, write as much as possible */
1829 while (len >= map_bankwidth(map)) {
1830 map_word datum;
1831
1832 datum = map_word_load(map, buf);
1833
1834 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1835 ofs, datum);
1836 if (ret)
1837 return ret;
1838
1839 ofs += map_bankwidth(map);
1840 buf += map_bankwidth(map);
1841 (*retlen) += map_bankwidth(map);
1842 len -= map_bankwidth(map);
1843
1844 if (ofs >> cfi->chipshift) {
1845 chipnum++;
1846 ofs = 0;
1847 if (chipnum == cfi->numchips)
1848 return 0;
1849
1850 chipstart = cfi->chips[chipnum].start;
1851 }
1852 }
1853
1854 /* Write the trailing bytes if any */
1855 if (len & (map_bankwidth(map) - 1)) {
1856 map_word tmp_buf;
1857
1858 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1859 if (ret)
1860 return ret;
1861
1862 tmp_buf = map_read(map, ofs + chipstart);
1863
1864 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1865
1866 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1867 ofs, tmp_buf);
1868 if (ret)
1869 return ret;
1870
1871 (*retlen) += len;
1872 }
1873
1874 return 0; 1569 return 0;
1875} 1570}
1876 1571
@@ -1896,7 +1591,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1896 return ret; 1591 return ret;
1897 } 1592 }
1898 1593
1899 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1594 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1900 __func__, chip->start ); 1595 __func__, chip->start );
1901 1596
1902 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 1597 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
@@ -1961,7 +1656,6 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1961 1656
1962 chip->state = FL_READY; 1657 chip->state = FL_READY;
1963 xip_enable(map, chip, adr); 1658 xip_enable(map, chip, adr);
1964 DISABLE_VPP(map);
1965 put_chip(map, chip, adr); 1659 put_chip(map, chip, adr);
1966 mutex_unlock(&chip->mutex); 1660 mutex_unlock(&chip->mutex);
1967 1661
@@ -1985,7 +1679,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1985 return ret; 1679 return ret;
1986 } 1680 }
1987 1681
1988 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1682 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1989 __func__, adr ); 1683 __func__, adr );
1990 1684
1991 XIP_INVAL_CACHED_RANGE(map, adr, len); 1685 XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -2052,7 +1746,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2052 } 1746 }
2053 1747
2054 chip->state = FL_READY; 1748 chip->state = FL_READY;
2055 DISABLE_VPP(map);
2056 put_chip(map, chip, adr); 1749 put_chip(map, chip, adr);
2057 mutex_unlock(&chip->mutex); 1750 mutex_unlock(&chip->mutex);
2058 return ret; 1751 return ret;
@@ -2112,7 +1805,8 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2112 goto out_unlock; 1805 goto out_unlock;
2113 chip->state = FL_LOCKING; 1806 chip->state = FL_LOCKING;
2114 1807
2115 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 1808 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1809 __func__, adr, len);
2116 1810
2117 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1811 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2118 cfi->device_type, NULL); 1812 cfi->device_type, NULL);
@@ -2147,7 +1841,8 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2147 goto out_unlock; 1841 goto out_unlock;
2148 chip->state = FL_UNLOCKING; 1842 chip->state = FL_UNLOCKING;
2149 1843
2150 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 1844 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1845 __func__, adr, len);
2151 1846
2152 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1847 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2153 cfi->device_type, NULL); 1848 cfi->device_type, NULL);
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 096993f9711..179814a95f3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -139,9 +139,8 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
139 } 139 }
140 140
141 /* Do some byteswapping if necessary */ 141 /* Do some byteswapping if necessary */
142 extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport); 142 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
143 extp->BlkStatusRegMask = cfi32_to_cpu(map, 143 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
144 extp->BlkStatusRegMask);
145 144
146#ifdef DEBUG_CFI_FEATURES 145#ifdef DEBUG_CFI_FEATURES
147 /* Tell the user about it in lots of lovely detail */ 146 /* Tell the user about it in lots of lovely detail */
@@ -228,15 +227,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
228 } 227 }
229 228
230 /* Also select the correct geometry setup too */ 229 /* Also select the correct geometry setup too */
231 mtd->_erase = cfi_staa_erase_varsize; 230 mtd->erase = cfi_staa_erase_varsize;
232 mtd->_read = cfi_staa_read; 231 mtd->read = cfi_staa_read;
233 mtd->_write = cfi_staa_write_buffers; 232 mtd->write = cfi_staa_write_buffers;
234 mtd->_writev = cfi_staa_writev; 233 mtd->writev = cfi_staa_writev;
235 mtd->_sync = cfi_staa_sync; 234 mtd->sync = cfi_staa_sync;
236 mtd->_lock = cfi_staa_lock; 235 mtd->lock = cfi_staa_lock;
237 mtd->_unlock = cfi_staa_unlock; 236 mtd->unlock = cfi_staa_unlock;
238 mtd->_suspend = cfi_staa_suspend; 237 mtd->suspend = cfi_staa_suspend;
239 mtd->_resume = cfi_staa_resume; 238 mtd->resume = cfi_staa_resume;
240 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; 239 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
241 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ 240 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
242 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 241 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
@@ -394,6 +393,8 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
394 chipnum = (from >> cfi->chipshift); 393 chipnum = (from >> cfi->chipshift);
395 ofs = from - (chipnum << cfi->chipshift); 394 ofs = from - (chipnum << cfi->chipshift);
396 395
396 *retlen = 0;
397
397 while (len) { 398 while (len) {
398 unsigned long thislen; 399 unsigned long thislen;
399 400
@@ -615,6 +616,10 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
615 int chipnum; 616 int chipnum;
616 unsigned long ofs; 617 unsigned long ofs;
617 618
619 *retlen = 0;
620 if (!len)
621 return 0;
622
618 chipnum = to >> cfi->chipshift; 623 chipnum = to >> cfi->chipshift;
619 ofs = to - (chipnum << cfi->chipshift); 624 ofs = to - (chipnum << cfi->chipshift);
620 625
@@ -693,8 +698,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
693 continue; 698 continue;
694 } 699 }
695 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen); 700 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
696 ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen, 701 ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
697 buffer);
698 totlen += thislen; 702 totlen += thislen;
699 if (ret || thislen != ECCBUF_SIZE) 703 if (ret || thislen != ECCBUF_SIZE)
700 goto write_error; 704 goto write_error;
@@ -703,8 +707,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
703 to += ECCBUF_SIZE; 707 to += ECCBUF_SIZE;
704 } 708 }
705 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */ 709 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
706 ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len), 710 ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
707 &thislen, elem_base);
708 totlen += thislen; 711 totlen += thislen;
709 if (ret || thislen != ECCBUF_DIV(elem_len)) 712 if (ret || thislen != ECCBUF_DIV(elem_len))
710 goto write_error; 713 goto write_error;
@@ -718,7 +721,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
718 } 721 }
719 if (buflen) { /* flush last page, even if not full */ 722 if (buflen) { /* flush last page, even if not full */
720 /* This is sometimes intended behaviour, really */ 723 /* This is sometimes intended behaviour, really */
721 ret = mtd_write(mtd, to, buflen, &thislen, buffer); 724 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
722 totlen += thislen; 725 totlen += thislen;
723 if (ret || thislen != ECCBUF_SIZE) 726 if (ret || thislen != ECCBUF_SIZE)
724 goto write_error; 727 goto write_error;
@@ -898,6 +901,12 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
898 int i, first; 901 int i, first;
899 struct mtd_erase_region_info *regions = mtd->eraseregions; 902 struct mtd_erase_region_info *regions = mtd->eraseregions;
900 903
904 if (instr->addr > mtd->size)
905 return -EINVAL;
906
907 if ((instr->len + instr->addr) > mtd->size)
908 return -EINVAL;
909
901 /* Check that both start and end of the requested erase are 910 /* Check that both start and end of the requested erase are
902 * aligned with the erasesize at the appropriate addresses. 911 * aligned with the erasesize at the appropriate addresses.
903 */ 912 */
@@ -1143,6 +1152,9 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1143 if (len & (mtd->erasesize -1)) 1152 if (len & (mtd->erasesize -1))
1144 return -EINVAL; 1153 return -EINVAL;
1145 1154
1155 if ((len + ofs) > mtd->size)
1156 return -EINVAL;
1157
1146 chipnum = ofs >> cfi->chipshift; 1158 chipnum = ofs >> cfi->chipshift;
1147 adr = ofs - (chipnum << cfi->chipshift); 1159 adr = ofs - (chipnum << cfi->chipshift);
1148 1160
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index f992418f40a..8e464054a63 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -173,6 +173,12 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
173 int i, first; 173 int i, first;
174 struct mtd_erase_region_info *regions = mtd->eraseregions; 174 struct mtd_erase_region_info *regions = mtd->eraseregions;
175 175
176 if (ofs > mtd->size)
177 return -EINVAL;
178
179 if ((len + ofs) > mtd->size)
180 return -EINVAL;
181
176 /* Check that both start and end of the requested erase are 182 /* Check that both start and end of the requested erase are
177 * aligned with the erasesize at the appropriate addresses. 183 * aligned with the erasesize at the appropriate addresses.
178 */ 184 */
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index 0bbc61ba952..da1f96f385c 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -76,7 +76,10 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map)
76 */ 76 */
77 module_put(drv->module); 77 module_put(drv->module);
78 78
79 return ret; 79 if (ret)
80 return ret;
81
82 return NULL;
80} 83}
81/* 84/*
82 * Destroy an MTD device which was created for a map device. 85 * Destroy an MTD device which was created for a map device.
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 800b0e853e8..5e3cc80128a 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -34,7 +34,8 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
34 34
35 /* Refuse the operation if the we cannot look behind the chip */ 35 /* Refuse the operation if the we cannot look behind the chip */
36 if (chip->start < 0x400000) { 36 if (chip->start < 0x400000) {
37 pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n", 37 DEBUG( MTD_DEBUG_LEVEL3,
38 "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
38 __func__, chip->start ); 39 __func__, chip->start );
39 return -EIO; 40 return -EIO;
40 } 41 }
@@ -101,7 +102,7 @@ static void fixup_use_fwh_lock(struct mtd_info *mtd)
101{ 102{
102 printk(KERN_NOTICE "using fwh lock/unlock method\n"); 103 printk(KERN_NOTICE "using fwh lock/unlock method\n");
103 /* Setup for the chips with the fwh lock method */ 104 /* Setup for the chips with the fwh lock method */
104 mtd->_lock = fwh_lock_varsize; 105 mtd->lock = fwh_lock_varsize;
105 mtd->_unlock = fwh_unlock_varsize; 106 mtd->unlock = fwh_unlock_varsize;
106} 107}
107#endif /* FWH_LOCK_H */ 108#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index c443f527a53..ea832ea0e4a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1914,10 +1914,11 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
1914 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset 1914 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset
1915 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at 1915 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
1916 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1916 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
1917 * as they will ignore the writes and don't care what address 1917 * as they will ignore the writes and dont care what address
1918 * the F0 is written to */ 1918 * the F0 is written to */
1919 if (cfi->addr_unlock1) { 1919 if (cfi->addr_unlock1) {
1920 pr_debug( "reset unlock called %x %x \n", 1920 DEBUG( MTD_DEBUG_LEVEL3,
1921 "reset unlock called %x %x \n",
1921 cfi->addr_unlock1,cfi->addr_unlock2); 1922 cfi->addr_unlock1,cfi->addr_unlock2);
1922 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1923 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
1923 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 1924 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1940,7 +1941,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in
1940 uint8_t uaddr; 1941 uint8_t uaddr;
1941 1942
1942 if (!(jedec_table[index].devtypes & cfi->device_type)) { 1943 if (!(jedec_table[index].devtypes & cfi->device_type)) {
1943 pr_debug("Rejecting potential %s with incompatible %d-bit device type\n", 1944 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
1944 jedec_table[index].name, 4 * (1<<cfi->device_type)); 1945 jedec_table[index].name, 4 * (1<<cfi->device_type));
1945 return 0; 1946 return 0;
1946 } 1947 }
@@ -2020,7 +2021,7 @@ static inline int jedec_match( uint32_t base,
2020 * there aren't. 2021 * there aren't.
2021 */ 2022 */
2022 if (finfo->dev_id > 0xff) { 2023 if (finfo->dev_id > 0xff) {
2023 pr_debug("%s(): ID is not 8bit\n", 2024 DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n",
2024 __func__); 2025 __func__);
2025 goto match_done; 2026 goto match_done;
2026 } 2027 }
@@ -2044,10 +2045,12 @@ static inline int jedec_match( uint32_t base,
2044 } 2045 }
2045 2046
2046 /* the part size must fit in the memory window */ 2047 /* the part size must fit in the memory window */
2047 pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 2048 DEBUG( MTD_DEBUG_LEVEL3,
2049 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
2048 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); 2050 __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
2049 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { 2051 if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
2050 pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 2052 DEBUG( MTD_DEBUG_LEVEL3,
2053 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
2051 __func__, finfo->mfr_id, finfo->dev_id, 2054 __func__, finfo->mfr_id, finfo->dev_id,
2052 1 << finfo->dev_size ); 2055 1 << finfo->dev_size );
2053 goto match_done; 2056 goto match_done;
@@ -2058,12 +2061,13 @@ static inline int jedec_match( uint32_t base,
2058 2061
2059 uaddr = finfo->uaddr; 2062 uaddr = finfo->uaddr;
2060 2063
2061 pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 2064 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
2062 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 2065 __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
2063 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 2066 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
2064 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || 2067 && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
2065 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { 2068 unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
2066 pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n", 2069 DEBUG( MTD_DEBUG_LEVEL3,
2070 "MTD %s(): 0x%.4x 0x%.4x did not match\n",
2067 __func__, 2071 __func__,
2068 unlock_addrs[uaddr].addr1, 2072 unlock_addrs[uaddr].addr1,
2069 unlock_addrs[uaddr].addr2); 2073 unlock_addrs[uaddr].addr2);
@@ -2079,13 +2083,15 @@ static inline int jedec_match( uint32_t base,
2079 * FIXME - write a driver that takes all of the chip info as 2083 * FIXME - write a driver that takes all of the chip info as
2080 * module parameters, doesn't probe but forces a load. 2084 * module parameters, doesn't probe but forces a load.
2081 */ 2085 */
2082 pr_debug("MTD %s(): check ID's disappear when not in ID mode\n", 2086 DEBUG( MTD_DEBUG_LEVEL3,
2087 "MTD %s(): check ID's disappear when not in ID mode\n",
2083 __func__ ); 2088 __func__ );
2084 jedec_reset( base, map, cfi ); 2089 jedec_reset( base, map, cfi );
2085 mfr = jedec_read_mfr( map, base, cfi ); 2090 mfr = jedec_read_mfr( map, base, cfi );
2086 id = jedec_read_id( map, base, cfi ); 2091 id = jedec_read_id( map, base, cfi );
2087 if ( mfr == cfi->mfr && id == cfi->id ) { 2092 if ( mfr == cfi->mfr && id == cfi->id ) {
2088 pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" 2093 DEBUG( MTD_DEBUG_LEVEL3,
2094 "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
2089 "You might need to manually specify JEDEC parameters.\n", 2095 "You might need to manually specify JEDEC parameters.\n",
2090 __func__, cfi->mfr, cfi->id ); 2096 __func__, cfi->mfr, cfi->id );
2091 goto match_done; 2097 goto match_done;
@@ -2098,7 +2104,7 @@ static inline int jedec_match( uint32_t base,
2098 * Put the device back in ID mode - only need to do this if we 2104 * Put the device back in ID mode - only need to do this if we
2099 * were truly frobbing a real device. 2105 * were truly frobbing a real device.
2100 */ 2106 */
2101 pr_debug("MTD %s(): return to ID mode\n", __func__ ); 2107 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
2102 if (cfi->addr_unlock1) { 2108 if (cfi->addr_unlock1) {
2103 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 2109 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
2104 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 2110 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -2161,11 +2167,13 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2161 2167
2162 cfi->mfr = jedec_read_mfr(map, base, cfi); 2168 cfi->mfr = jedec_read_mfr(map, base, cfi);
2163 cfi->id = jedec_read_id(map, base, cfi); 2169 cfi->id = jedec_read_id(map, base, cfi);
2164 pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n", 2170 DEBUG(MTD_DEBUG_LEVEL3,
2171 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2165 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); 2172 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2166 for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { 2173 for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
2167 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { 2174 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
2168 pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", 2175 DEBUG( MTD_DEBUG_LEVEL3,
2176 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2169 __func__, cfi->mfr, cfi->id, 2177 __func__, cfi->mfr, cfi->id,
2170 cfi->addr_unlock1, cfi->addr_unlock2 ); 2178 cfi->addr_unlock1, cfi->addr_unlock2 );
2171 if (!cfi_jedec_setup(map, cfi, i)) 2179 if (!cfi_jedec_setup(map, cfi, i))
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index f7a5bca92ae..f2b87294687 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -55,10 +55,10 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
55 mtd->name = map->name; 55 mtd->name = map->name;
56 mtd->type = MTD_ABSENT; 56 mtd->type = MTD_ABSENT;
57 mtd->size = map->size; 57 mtd->size = map->size;
58 mtd->_erase = map_absent_erase; 58 mtd->erase = map_absent_erase;
59 mtd->_read = map_absent_read; 59 mtd->read = map_absent_read;
60 mtd->_write = map_absent_write; 60 mtd->write = map_absent_write;
61 mtd->_sync = map_absent_sync; 61 mtd->sync = map_absent_sync;
62 mtd->flags = 0; 62 mtd->flags = 0;
63 mtd->erasesize = PAGE_SIZE; 63 mtd->erasesize = PAGE_SIZE;
64 mtd->writesize = 1; 64 mtd->writesize = 1;
@@ -70,11 +70,13 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
70 70
71static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 71static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
72{ 72{
73 *retlen = 0;
73 return -ENODEV; 74 return -ENODEV;
74} 75}
75 76
76static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 77static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
77{ 78{
79 *retlen = 0;
78 return -ENODEV; 80 return -ENODEV;
79} 81}
80 82
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 991c2a1c05d..67640ccb2d4 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -64,11 +64,11 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
64 mtd->name = map->name; 64 mtd->name = map->name;
65 mtd->type = MTD_RAM; 65 mtd->type = MTD_RAM;
66 mtd->size = map->size; 66 mtd->size = map->size;
67 mtd->_erase = mapram_erase; 67 mtd->erase = mapram_erase;
68 mtd->_get_unmapped_area = mapram_unmapped_area; 68 mtd->get_unmapped_area = mapram_unmapped_area;
69 mtd->_read = mapram_read; 69 mtd->read = mapram_read;
70 mtd->_write = mapram_write; 70 mtd->write = mapram_write;
71 mtd->_sync = mapram_nop; 71 mtd->sync = mapram_nop;
72 mtd->flags = MTD_CAP_RAM; 72 mtd->flags = MTD_CAP_RAM;
73 mtd->writesize = 1; 73 mtd->writesize = 1;
74 74
@@ -122,10 +122,14 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
122 unsigned long i; 122 unsigned long i;
123 123
124 allff = map_word_ff(map); 124 allff = map_word_ff(map);
125
125 for (i=0; i<instr->len; i += map_bankwidth(map)) 126 for (i=0; i<instr->len; i += map_bankwidth(map))
126 map_write(map, allff, instr->addr + i); 127 map_write(map, allff, instr->addr + i);
128
127 instr->state = MTD_ERASE_DONE; 129 instr->state = MTD_ERASE_DONE;
130
128 mtd_erase_callback(instr); 131 mtd_erase_callback(instr);
132
129 return 0; 133 return 0;
130} 134}
131 135
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 47a43cf7e5c..593f73d480d 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -41,11 +41,11 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
41 mtd->name = map->name; 41 mtd->name = map->name;
42 mtd->type = MTD_ROM; 42 mtd->type = MTD_ROM;
43 mtd->size = map->size; 43 mtd->size = map->size;
44 mtd->_get_unmapped_area = maprom_unmapped_area; 44 mtd->get_unmapped_area = maprom_unmapped_area;
45 mtd->_read = maprom_read; 45 mtd->read = maprom_read;
46 mtd->_write = maprom_write; 46 mtd->write = maprom_write;
47 mtd->_sync = maprom_nop; 47 mtd->sync = maprom_nop;
48 mtd->_erase = maprom_erase; 48 mtd->erase = maprom_erase;
49 mtd->flags = MTD_CAP_ROM; 49 mtd->flags = MTD_CAP_ROM;
50 mtd->erasesize = map->size; 50 mtd->erasesize = map->size;
51 mtd->writesize = 1; 51 mtd->writesize = 1;
@@ -85,7 +85,8 @@ static void maprom_nop(struct mtd_info *mtd)
85 85
86static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 86static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
87{ 87{
88 return -EROFS; 88 printk(KERN_NOTICE "maprom_write called\n");
89 return -EIO;
89} 90}
90 91
91static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) 92static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index c533f27d863..e790f38893b 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -39,10 +39,10 @@
39 39
40#include <linux/kernel.h> 40#include <linux/kernel.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42
42#include <linux/mtd/mtd.h> 43#include <linux/mtd/mtd.h>
43#include <linux/mtd/partitions.h> 44#include <linux/mtd/partitions.h>
44#include <linux/module.h> 45#include <linux/bootmem.h>
45#include <linux/err.h>
46 46
47/* error message prefix */ 47/* error message prefix */
48#define ERRP "mtd: " 48#define ERRP "mtd: "
@@ -56,8 +56,8 @@
56 56
57 57
58/* special size referring to all the remaining space in a partition */ 58/* special size referring to all the remaining space in a partition */
59#define SIZE_REMAINING ULLONG_MAX 59#define SIZE_REMAINING UINT_MAX
60#define OFFSET_CONTINUOUS ULLONG_MAX 60#define OFFSET_CONTINUOUS UINT_MAX
61 61
62struct cmdline_mtd_partition { 62struct cmdline_mtd_partition {
63 struct cmdline_mtd_partition *next; 63 struct cmdline_mtd_partition *next;
@@ -69,9 +69,9 @@ struct cmdline_mtd_partition {
69/* mtdpart_setup() parses into here */ 69/* mtdpart_setup() parses into here */
70static struct cmdline_mtd_partition *partitions; 70static struct cmdline_mtd_partition *partitions;
71 71
72/* the command line passed to mtdpart_setup() */ 72/* the command line passed to mtdpart_setupd() */
73static char *cmdline; 73static char *cmdline;
74static int cmdline_parsed; 74static int cmdline_parsed = 0;
75 75
76/* 76/*
77 * Parse one partition definition for an MTD. Since there can be many 77 * Parse one partition definition for an MTD. Since there can be many
@@ -82,14 +82,15 @@ static int cmdline_parsed;
82 * syntax has been verified ok. 82 * syntax has been verified ok.
83 */ 83 */
84static struct mtd_partition * newpart(char *s, 84static struct mtd_partition * newpart(char *s,
85 char **retptr, 85 char **retptr,
86 int *num_parts, 86 int *num_parts,
87 int this_part, 87 int this_part,
88 unsigned char **extra_mem_ptr, 88 unsigned char **extra_mem_ptr,
89 int extra_mem_size) 89 int extra_mem_size)
90{ 90{
91 struct mtd_partition *parts; 91 struct mtd_partition *parts;
92 unsigned long long size, offset = OFFSET_CONTINUOUS; 92 unsigned long size;
93 unsigned long offset = OFFSET_CONTINUOUS;
93 char *name; 94 char *name;
94 int name_len; 95 int name_len;
95 unsigned char *extra_mem; 96 unsigned char *extra_mem;
@@ -97,107 +98,127 @@ static struct mtd_partition * newpart(char *s,
97 unsigned int mask_flags; 98 unsigned int mask_flags;
98 99
99 /* fetch the partition size */ 100 /* fetch the partition size */
100 if (*s == '-') { 101 if (*s == '-')
101 /* assign all remaining space to this partition */ 102 { /* assign all remaining space to this partition */
102 size = SIZE_REMAINING; 103 size = SIZE_REMAINING;
103 s++; 104 s++;
104 } else { 105 }
106 else
107 {
105 size = memparse(s, &s); 108 size = memparse(s, &s);
106 if (size < PAGE_SIZE) { 109 if (size < PAGE_SIZE)
107 printk(KERN_ERR ERRP "partition size too small (%llx)\n", 110 {
108 size); 111 printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
109 return ERR_PTR(-EINVAL); 112 return NULL;
110 } 113 }
111 } 114 }
112 115
113 /* fetch partition name and flags */ 116 /* fetch partition name and flags */
114 mask_flags = 0; /* this is going to be a regular partition */ 117 mask_flags = 0; /* this is going to be a regular partition */
115 delim = 0; 118 delim = 0;
116 119 /* check for offset */
117 /* check for offset */ 120 if (*s == '@')
118 if (*s == '@') { 121 {
119 s++; 122 s++;
120 offset = memparse(s, &s); 123 offset = memparse(s, &s);
121 } 124 }
122 125 /* now look for name */
123 /* now look for name */
124 if (*s == '(') 126 if (*s == '(')
127 {
125 delim = ')'; 128 delim = ')';
129 }
126 130
127 if (delim) { 131 if (delim)
132 {
128 char *p; 133 char *p;
129 134
130 name = ++s; 135 name = ++s;
131 p = strchr(name, delim); 136 p = strchr(name, delim);
132 if (!p) { 137 if (!p)
138 {
133 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim); 139 printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
134 return ERR_PTR(-EINVAL); 140 return NULL;
135 } 141 }
136 name_len = p - name; 142 name_len = p - name;
137 s = p + 1; 143 s = p + 1;
138 } else { 144 }
139 name = NULL; 145 else
146 {
147 name = NULL;
140 name_len = 13; /* Partition_000 */ 148 name_len = 13; /* Partition_000 */
141 } 149 }
142 150
143 /* record name length for memory allocation later */ 151 /* record name length for memory allocation later */
144 extra_mem_size += name_len + 1; 152 extra_mem_size += name_len + 1;
145 153
146 /* test for options */ 154 /* test for options */
147 if (strncmp(s, "ro", 2) == 0) { 155 if (strncmp(s, "ro", 2) == 0)
156 {
148 mask_flags |= MTD_WRITEABLE; 157 mask_flags |= MTD_WRITEABLE;
149 s += 2; 158 s += 2;
150 } 159 }
151 160
152 /* if lk is found do NOT unlock the MTD partition*/ 161 /* if lk is found do NOT unlock the MTD partition*/
153 if (strncmp(s, "lk", 2) == 0) { 162 if (strncmp(s, "lk", 2) == 0)
163 {
154 mask_flags |= MTD_POWERUP_LOCK; 164 mask_flags |= MTD_POWERUP_LOCK;
155 s += 2; 165 s += 2;
156 } 166 }
157 167
158 /* test if more partitions are following */ 168 /* test if more partitions are following */
159 if (*s == ',') { 169 if (*s == ',')
160 if (size == SIZE_REMAINING) { 170 {
171 if (size == SIZE_REMAINING)
172 {
161 printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n"); 173 printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
162 return ERR_PTR(-EINVAL); 174 return NULL;
163 } 175 }
164 /* more partitions follow, parse them */ 176 /* more partitions follow, parse them */
165 parts = newpart(s + 1, &s, num_parts, this_part + 1, 177 parts = newpart(s + 1, &s, num_parts, this_part + 1,
166 &extra_mem, extra_mem_size); 178 &extra_mem, extra_mem_size);
167 if (IS_ERR(parts)) 179 if (!parts)
168 return parts; 180 return NULL;
169 } else { 181 }
170 /* this is the last partition: allocate space for all */ 182 else
183 { /* this is the last partition: allocate space for all */
171 int alloc_size; 184 int alloc_size;
172 185
173 *num_parts = this_part + 1; 186 *num_parts = this_part + 1;
174 alloc_size = *num_parts * sizeof(struct mtd_partition) + 187 alloc_size = *num_parts * sizeof(struct mtd_partition) +
175 extra_mem_size; 188 extra_mem_size;
176
177 parts = kzalloc(alloc_size, GFP_KERNEL); 189 parts = kzalloc(alloc_size, GFP_KERNEL);
178 if (!parts) 190 if (!parts)
179 return ERR_PTR(-ENOMEM); 191 {
192 printk(KERN_ERR ERRP "out of memory\n");
193 return NULL;
194 }
180 extra_mem = (unsigned char *)(parts + *num_parts); 195 extra_mem = (unsigned char *)(parts + *num_parts);
181 } 196 }
182
183 /* enter this partition (offset will be calculated later if it is zero at this point) */ 197 /* enter this partition (offset will be calculated later if it is zero at this point) */
184 parts[this_part].size = size; 198 parts[this_part].size = size;
185 parts[this_part].offset = offset; 199 parts[this_part].offset = offset;
186 parts[this_part].mask_flags = mask_flags; 200 parts[this_part].mask_flags = mask_flags;
187 if (name) 201 if (name)
202 {
188 strlcpy(extra_mem, name, name_len + 1); 203 strlcpy(extra_mem, name, name_len + 1);
204 }
189 else 205 else
206 {
190 sprintf(extra_mem, "Partition_%03d", this_part); 207 sprintf(extra_mem, "Partition_%03d", this_part);
208 }
191 parts[this_part].name = extra_mem; 209 parts[this_part].name = extra_mem;
192 extra_mem += name_len + 1; 210 extra_mem += name_len + 1;
193 211
194 dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n", 212 dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
195 this_part, parts[this_part].name, parts[this_part].offset, 213 this_part,
196 parts[this_part].size, parts[this_part].mask_flags)); 214 parts[this_part].name,
215 parts[this_part].offset,
216 parts[this_part].size,
217 parts[this_part].mask_flags));
197 218
198 /* return (updated) pointer to extra_mem memory */ 219 /* return (updated) pointer to extra_mem memory */
199 if (extra_mem_ptr) 220 if (extra_mem_ptr)
200 *extra_mem_ptr = extra_mem; 221 *extra_mem_ptr = extra_mem;
201 222
202 /* return (updated) pointer command line string */ 223 /* return (updated) pointer command line string */
203 *retptr = s; 224 *retptr = s;
@@ -217,16 +238,16 @@ static int mtdpart_setup_real(char *s)
217 { 238 {
218 struct cmdline_mtd_partition *this_mtd; 239 struct cmdline_mtd_partition *this_mtd;
219 struct mtd_partition *parts; 240 struct mtd_partition *parts;
220 int mtd_id_len, num_parts; 241 int mtd_id_len;
242 int num_parts;
221 char *p, *mtd_id; 243 char *p, *mtd_id;
222 244
223 mtd_id = s; 245 mtd_id = s;
224
225 /* fetch <mtd-id> */ 246 /* fetch <mtd-id> */
226 p = strchr(s, ':'); 247 if (!(p = strchr(s, ':')))
227 if (!p) { 248 {
228 printk(KERN_ERR ERRP "no mtd-id\n"); 249 printk(KERN_ERR ERRP "no mtd-id\n");
229 return -EINVAL; 250 return 0;
230 } 251 }
231 mtd_id_len = p - mtd_id; 252 mtd_id_len = p - mtd_id;
232 253
@@ -243,7 +264,8 @@ static int mtdpart_setup_real(char *s)
243 (unsigned char**)&this_mtd, /* out: extra mem */ 264 (unsigned char**)&this_mtd, /* out: extra mem */
244 mtd_id_len + 1 + sizeof(*this_mtd) + 265 mtd_id_len + 1 + sizeof(*this_mtd) +
245 sizeof(void*)-1 /*alignment*/); 266 sizeof(void*)-1 /*alignment*/);
246 if (IS_ERR(parts)) { 267 if(!parts)
268 {
247 /* 269 /*
248 * An error occurred. We're either: 270 * An error occurred. We're either:
249 * a) out of memory, or 271 * a) out of memory, or
@@ -251,12 +273,12 @@ static int mtdpart_setup_real(char *s)
251 * Either way, this mtd is hosed and we're 273 * Either way, this mtd is hosed and we're
252 * unlikely to succeed in parsing any more 274 * unlikely to succeed in parsing any more
253 */ 275 */
254 return PTR_ERR(parts); 276 return 0;
255 } 277 }
256 278
257 /* align this_mtd */ 279 /* align this_mtd */
258 this_mtd = (struct cmdline_mtd_partition *) 280 this_mtd = (struct cmdline_mtd_partition *)
259 ALIGN((unsigned long)this_mtd, sizeof(void *)); 281 ALIGN((unsigned long)this_mtd, sizeof(void*));
260 /* enter results */ 282 /* enter results */
261 this_mtd->parts = parts; 283 this_mtd->parts = parts;
262 this_mtd->num_parts = num_parts; 284 this_mtd->num_parts = num_parts;
@@ -276,14 +298,14 @@ static int mtdpart_setup_real(char *s)
276 break; 298 break;
277 299
278 /* does another spec follow? */ 300 /* does another spec follow? */
279 if (*s != ';') { 301 if (*s != ';')
302 {
280 printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s); 303 printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
281 return -EINVAL; 304 return 0;
282 } 305 }
283 s++; 306 s++;
284 } 307 }
285 308 return 1;
286 return 0;
287} 309}
288 310
289/* 311/*
@@ -294,67 +316,49 @@ static int mtdpart_setup_real(char *s)
294 * the first one in the chain if a NULL mtd_id is passed in. 316 * the first one in the chain if a NULL mtd_id is passed in.
295 */ 317 */
296static int parse_cmdline_partitions(struct mtd_info *master, 318static int parse_cmdline_partitions(struct mtd_info *master,
297 struct mtd_partition **pparts, 319 struct mtd_partition **pparts,
298 struct mtd_part_parser_data *data) 320 unsigned long origin)
299{ 321{
300 unsigned long long offset; 322 unsigned long offset;
301 int i, err; 323 int i;
302 struct cmdline_mtd_partition *part; 324 struct cmdline_mtd_partition *part;
303 const char *mtd_id = master->name; 325 const char *mtd_id = master->name;
304 326
305 /* parse command line */ 327 /* parse command line */
306 if (!cmdline_parsed) { 328 if (!cmdline_parsed)
307 err = mtdpart_setup_real(cmdline); 329 mtdpart_setup_real(cmdline);
308 if (err)
309 return err;
310 }
311 330
312 /* 331 for(part = partitions; part; part = part->next)
313 * Search for the partition definition matching master->name. 332 {
314 * If master->name is not set, stop at first partition definition.
315 */
316 for (part = partitions; part; part = part->next) {
317 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) 333 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
318 break; 334 {
319 } 335 for(i = 0, offset = 0; i < part->num_parts; i++)
320 336 {
321 if (!part) 337 if (part->parts[i].offset == OFFSET_CONTINUOUS)
322 return 0; 338 part->parts[i].offset = offset;
323 339 else
324 for (i = 0, offset = 0; i < part->num_parts; i++) { 340 offset = part->parts[i].offset;
325 if (part->parts[i].offset == OFFSET_CONTINUOUS) 341 if (part->parts[i].size == SIZE_REMAINING)
326 part->parts[i].offset = offset; 342 part->parts[i].size = master->size - offset;
327 else 343 if (offset + part->parts[i].size > master->size)
328 offset = part->parts[i].offset; 344 {
329 345 printk(KERN_WARNING ERRP
330 if (part->parts[i].size == SIZE_REMAINING) 346 "%s: partitioning exceeds flash size, truncating\n",
331 part->parts[i].size = master->size - offset; 347 part->mtd_id);
332 348 part->parts[i].size = master->size - offset;
333 if (part->parts[i].size == 0) { 349 part->num_parts = i;
334 printk(KERN_WARNING ERRP 350 }
335 "%s: skipping zero sized partition\n", 351 offset += part->parts[i].size;
336 part->mtd_id); 352 }
337 part->num_parts--; 353 *pparts = kmemdup(part->parts,
338 memmove(&part->parts[i], &part->parts[i + 1], 354 sizeof(*part->parts) * part->num_parts,
339 sizeof(*part->parts) * (part->num_parts - i)); 355 GFP_KERNEL);
340 continue; 356 if (!*pparts)
357 return -ENOMEM;
358 return part->num_parts;
341 } 359 }
342
343 if (offset + part->parts[i].size > master->size) {
344 printk(KERN_WARNING ERRP
345 "%s: partitioning exceeds flash size, truncating\n",
346 part->mtd_id);
347 part->parts[i].size = master->size - offset;
348 }
349 offset += part->parts[i].size;
350 } 360 }
351 361 return 0;
352 *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
353 GFP_KERNEL);
354 if (!*pparts)
355 return -ENOMEM;
356
357 return part->num_parts;
358} 362}
359 363
360 364
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 27f80cd8aef..943d90f08c0 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -1,6 +1,5 @@
1menu "Self-contained MTD device drivers" 1menu "Self-contained MTD device drivers"
2 depends on MTD!=n 2 depends on MTD!=n
3 depends on HAS_IOMEM
4 3
5config MTD_PMC551 4config MTD_PMC551
6 tristate "Ramix PMC551 PCI Mezzanine RAM card support" 5 tristate "Ramix PMC551 PCI Mezzanine RAM card support"
@@ -79,6 +78,12 @@ config MTD_DATAFLASH_OTP
79 other key product data. The second half is programmed with a 78 other key product data. The second half is programmed with a
80 unique-to-each-chip bit pattern at the factory. 79 unique-to-each-chip bit pattern at the factory.
81 80
81config MTD_NAND_TEGRA
82 tristate "Support for NAND Controller on NVIDIA Tegra"
83 depends on ARCH_TEGRA
84 help
85 Enables NAND flash support for NVIDIA's Tegra family of chips.
86
82config MTD_M25P80 87config MTD_M25P80
83 tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)" 88 tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
84 depends on SPI_MASTER && EXPERIMENTAL 89 depends on SPI_MASTER && EXPERIMENTAL
@@ -97,19 +102,12 @@ config MTD_M25P80
97 doesn't support the JEDEC ID instruction. 102 doesn't support the JEDEC ID instruction.
98 103
99config M25PXX_USE_FAST_READ 104config M25PXX_USE_FAST_READ
100 bool "Use FAST_READ OPCode allowing SPI CLK >= 50MHz" 105 bool "Use FAST_READ OPCode allowing SPI CLK <= 50MHz"
101 depends on MTD_M25P80 106 depends on MTD_M25P80
102 default y 107 default y
103 help 108 help
104 This option enables FAST_READ access supported by ST M25Pxx. 109 This option enables FAST_READ access supported by ST M25Pxx.
105 110
106config MTD_SPEAR_SMI
107 tristate "SPEAR MTD NOR Support through SMI controller"
108 depends on PLAT_SPEAR
109 default y
110 help
111 This enable SNOR support on SPEAR platforms using SMI controller
112
113config MTD_SST25L 111config MTD_SST25L
114 tristate "Support SST25L (non JEDEC) SPI Flash chips" 112 tristate "Support SST25L (non JEDEC) SPI Flash chips"
115 depends on SPI_MASTER 113 depends on SPI_MASTER
@@ -120,14 +118,6 @@ config MTD_SST25L
120 Set up your spi devices with the right board-specific platform data, 118 Set up your spi devices with the right board-specific platform data,
121 if you want to specify device partitioning. 119 if you want to specify device partitioning.
122 120
123config MTD_BCM47XXSFLASH
124 tristate "R/O support for serial flash on BCMA bus"
125 depends on BCMA_SFLASH
126 help
127 BCMA bus can have various flash memories attached, they are
128 registered by bcma as platform devices. This enables driver for
129 serial flash memories (only read-only mode is implemented).
130
131config MTD_SLRAM 121config MTD_SLRAM
132 tristate "Uncached system RAM" 122 tristate "Uncached system RAM"
133 help 123 help
@@ -207,7 +197,6 @@ comment "Disk-On-Chip Device Drivers"
207 197
208config MTD_DOC2000 198config MTD_DOC2000
209 tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)" 199 tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
210 depends on MTD_NAND
211 select MTD_DOCPROBE 200 select MTD_DOCPROBE
212 select MTD_NAND_IDS 201 select MTD_NAND_IDS
213 ---help--- 202 ---help---
@@ -230,7 +219,6 @@ config MTD_DOC2000
230 219
231config MTD_DOC2001 220config MTD_DOC2001
232 tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)" 221 tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
233 depends on MTD_NAND
234 select MTD_DOCPROBE 222 select MTD_DOCPROBE
235 select MTD_NAND_IDS 223 select MTD_NAND_IDS
236 ---help--- 224 ---help---
@@ -252,7 +240,6 @@ config MTD_DOC2001
252 240
253config MTD_DOC2001PLUS 241config MTD_DOC2001PLUS
254 tristate "M-Systems Disk-On-Chip Millennium Plus" 242 tristate "M-Systems Disk-On-Chip Millennium Plus"
255 depends on MTD_NAND
256 select MTD_DOCPROBE 243 select MTD_DOCPROBE
257 select MTD_NAND_IDS 244 select MTD_NAND_IDS
258 ---help--- 245 ---help---
@@ -268,25 +255,6 @@ config MTD_DOC2001PLUS
268 under "NAND Flash Device Drivers" (currently that driver does not 255 under "NAND Flash Device Drivers" (currently that driver does not
269 support all Millennium Plus devices). 256 support all Millennium Plus devices).
270 257
271config MTD_DOCG3
272 tristate "M-Systems Disk-On-Chip G3"
273 select BCH
274 select BCH_CONST_PARAMS
275 ---help---
276 This provides an MTD device driver for the M-Systems DiskOnChip
277 G3 devices.
278
279 The driver provides access to G3 DiskOnChip, distributed by
280 M-Systems and now Sandisk. The support is very experimental,
281 and doesn't give access to any write operations.
282
283if MTD_DOCG3
284config BCH_CONST_M
285 default 14
286config BCH_CONST_T
287 default 4
288endif
289
290config MTD_DOCPROBE 258config MTD_DOCPROBE
291 tristate 259 tristate
292 select MTD_DOCECC 260 select MTD_DOCECC
@@ -306,7 +274,8 @@ config MTD_DOCPROBE_ADVANCED
306config MTD_DOCPROBE_ADDRESS 274config MTD_DOCPROBE_ADDRESS
307 hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED 275 hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
308 depends on MTD_DOCPROBE 276 depends on MTD_DOCPROBE
309 default "0x0" 277 default "0x0000" if MTD_DOCPROBE_ADVANCED
278 default "0" if !MTD_DOCPROBE_ADVANCED
310 ---help--- 279 ---help---
311 By default, the probe for DiskOnChip devices will look for a 280 By default, the probe for DiskOnChip devices will look for a
312 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000. 281 DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 395733a30ef..67345a00a5a 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,11 +1,11 @@
1# 1#
2# linux/drivers/mtd/devices/Makefile 2# linux/drivers/mtd/devices/Makefile
3# 3#
4GCOV_PROFILE := y
4 5
5obj-$(CONFIG_MTD_DOC2000) += doc2000.o 6obj-$(CONFIG_MTD_DOC2000) += doc2000.o
6obj-$(CONFIG_MTD_DOC2001) += doc2001.o 7obj-$(CONFIG_MTD_DOC2001) += doc2001.o
7obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o 8obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
8obj-$(CONFIG_MTD_DOCG3) += docg3.o
9obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o 9obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
10obj-$(CONFIG_MTD_DOCECC) += docecc.o 10obj-$(CONFIG_MTD_DOCECC) += docecc.o
11obj-$(CONFIG_MTD_SLRAM) += slram.o 11obj-$(CONFIG_MTD_SLRAM) += slram.o
@@ -17,8 +17,5 @@ obj-$(CONFIG_MTD_LART) += lart.o
17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
19obj-$(CONFIG_MTD_M25P80) += m25p80.o 19obj-$(CONFIG_MTD_M25P80) += m25p80.o
20obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
21obj-$(CONFIG_MTD_SST25L) += sst25l.o 20obj-$(CONFIG_MTD_SST25L) += sst25l.o
22obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o 21obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
23
24CFLAGS_docg3.o += -I$(src) \ No newline at end of file
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
deleted file mode 100644
index 4714584aa99..00000000000
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ /dev/null
@@ -1,105 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/slab.h>
4#include <linux/mtd/mtd.h>
5#include <linux/platform_device.h>
6#include <linux/bcma/bcma.h>
7
8MODULE_LICENSE("GPL");
9MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
10
11static const char *probes[] = { "bcm47xxpart", NULL };
12
13static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
14 size_t *retlen, u_char *buf)
15{
16 struct bcma_sflash *sflash = mtd->priv;
17
18 /* Check address range */
19 if ((from + len) > mtd->size)
20 return -EINVAL;
21
22 memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
23 len);
24
25 return len;
26}
27
28static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
29 struct mtd_info *mtd)
30{
31 mtd->priv = sflash;
32 mtd->name = "bcm47xxsflash";
33 mtd->owner = THIS_MODULE;
34 mtd->type = MTD_ROM;
35 mtd->size = sflash->size;
36 mtd->_read = bcm47xxsflash_read;
37
38 /* TODO: implement writing support and verify/change following code */
39 mtd->flags = MTD_CAP_ROM;
40 mtd->writebufsize = mtd->writesize = 1;
41}
42
43static int bcm47xxsflash_probe(struct platform_device *pdev)
44{
45 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
46 int err;
47
48 sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
49 if (!sflash->mtd) {
50 err = -ENOMEM;
51 goto out;
52 }
53 bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
54
55 err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
56 if (err) {
57 pr_err("Failed to register MTD device: %d\n", err);
58 goto err_dev_reg;
59 }
60
61 return 0;
62
63err_dev_reg:
64 kfree(sflash->mtd);
65out:
66 return err;
67}
68
69static int bcm47xxsflash_remove(struct platform_device *pdev)
70{
71 struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
72
73 mtd_device_unregister(sflash->mtd);
74 kfree(sflash->mtd);
75
76 return 0;
77}
78
79static struct platform_driver bcma_sflash_driver = {
80 .remove = bcm47xxsflash_remove,
81 .driver = {
82 .name = "bcma_sflash",
83 .owner = THIS_MODULE,
84 },
85};
86
87static int __init bcm47xxsflash_init(void)
88{
89 int err;
90
91 err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
92 if (err)
93 pr_err("Failed to register BCMA serial flash driver: %d\n",
94 err);
95
96 return err;
97}
98
99static void __exit bcm47xxsflash_exit(void)
100{
101 platform_driver_unregister(&bcma_sflash_driver);
102}
103
104module_init(bcm47xxsflash_init);
105module_exit(bcm47xxsflash_exit);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e081bfeaaf7..b78f23169d4 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -14,6 +14,7 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/buffer_head.h>
17#include <linux/mutex.h> 18#include <linux/mutex.h>
18#include <linux/mount.h> 19#include <linux/mount.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
@@ -52,6 +53,8 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
52 53
53 while (pages) { 54 while (pages) {
54 page = page_read(mapping, index); 55 page = page_read(mapping, index);
56 if (!page)
57 return -ENOMEM;
55 if (IS_ERR(page)) 58 if (IS_ERR(page))
56 return PTR_ERR(page); 59 return PTR_ERR(page);
57 60
@@ -62,7 +65,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
62 memset(page_address(page), 0xff, PAGE_SIZE); 65 memset(page_address(page), 0xff, PAGE_SIZE);
63 set_page_dirty(page); 66 set_page_dirty(page);
64 unlock_page(page); 67 unlock_page(page);
65 balance_dirty_pages_ratelimited(mapping);
66 break; 68 break;
67 } 69 }
68 70
@@ -103,6 +105,14 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
103 int offset = from & (PAGE_SIZE-1); 105 int offset = from & (PAGE_SIZE-1);
104 int cpylen; 106 int cpylen;
105 107
108 if (from > mtd->size)
109 return -EINVAL;
110 if (from + len > mtd->size)
111 len = mtd->size - from;
112
113 if (retlen)
114 *retlen = 0;
115
106 while (len) { 116 while (len) {
107 if ((offset + len) > PAGE_SIZE) 117 if ((offset + len) > PAGE_SIZE)
108 cpylen = PAGE_SIZE - offset; // multiple pages 118 cpylen = PAGE_SIZE - offset; // multiple pages
@@ -111,6 +121,8 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
111 len = len - cpylen; 121 len = len - cpylen;
112 122
113 page = page_read(dev->blkdev->bd_inode->i_mapping, index); 123 page = page_read(dev->blkdev->bd_inode->i_mapping, index);
124 if (!page)
125 return -ENOMEM;
114 if (IS_ERR(page)) 126 if (IS_ERR(page))
115 return PTR_ERR(page); 127 return PTR_ERR(page);
116 128
@@ -137,6 +149,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
137 int offset = to & ~PAGE_MASK; // page offset 149 int offset = to & ~PAGE_MASK; // page offset
138 int cpylen; 150 int cpylen;
139 151
152 if (retlen)
153 *retlen = 0;
140 while (len) { 154 while (len) {
141 if ((offset+len) > PAGE_SIZE) 155 if ((offset+len) > PAGE_SIZE)
142 cpylen = PAGE_SIZE - offset; // multiple pages 156 cpylen = PAGE_SIZE - offset; // multiple pages
@@ -145,6 +159,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
145 len = len - cpylen; 159 len = len - cpylen;
146 160
147 page = page_read(mapping, index); 161 page = page_read(mapping, index);
162 if (!page)
163 return -ENOMEM;
148 if (IS_ERR(page)) 164 if (IS_ERR(page))
149 return PTR_ERR(page); 165 return PTR_ERR(page);
150 166
@@ -153,7 +169,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
153 memcpy(page_address(page) + offset, buf, cpylen); 169 memcpy(page_address(page) + offset, buf, cpylen);
154 set_page_dirty(page); 170 set_page_dirty(page);
155 unlock_page(page); 171 unlock_page(page);
156 balance_dirty_pages_ratelimited(mapping);
157 } 172 }
158 page_cache_release(page); 173 page_cache_release(page);
159 174
@@ -174,6 +189,13 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
174 struct block2mtd_dev *dev = mtd->priv; 189 struct block2mtd_dev *dev = mtd->priv;
175 int err; 190 int err;
176 191
192 if (!len)
193 return 0;
194 if (to >= mtd->size)
195 return -ENOSPC;
196 if (to + len > mtd->size)
197 len = mtd->size - to;
198
177 mutex_lock(&dev->write_mutex); 199 mutex_lock(&dev->write_mutex);
178 err = _block2mtd_write(dev, buf, to, len, retlen); 200 err = _block2mtd_write(dev, buf, to, len, retlen);
179 mutex_unlock(&dev->write_mutex); 201 mutex_unlock(&dev->write_mutex);
@@ -262,13 +284,13 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
262 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; 284 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
263 dev->mtd.erasesize = erase_size; 285 dev->mtd.erasesize = erase_size;
264 dev->mtd.writesize = 1; 286 dev->mtd.writesize = 1;
265 dev->mtd.writebufsize = PAGE_SIZE;
266 dev->mtd.type = MTD_RAM; 287 dev->mtd.type = MTD_RAM;
267 dev->mtd.flags = MTD_CAP_RAM; 288 dev->mtd.flags = MTD_CAP_RAM;
268 dev->mtd._erase = block2mtd_erase; 289 dev->mtd.erase = block2mtd_erase;
269 dev->mtd._write = block2mtd_write; 290 dev->mtd.write = block2mtd_write;
270 dev->mtd._sync = block2mtd_sync; 291 dev->mtd.writev = default_mtd_writev;
271 dev->mtd._read = block2mtd_read; 292 dev->mtd.sync = block2mtd_sync;
293 dev->mtd.read = block2mtd_read;
272 dev->mtd.priv = dev; 294 dev->mtd.priv = dev;
273 dev->mtd.owner = THIS_MODULE; 295 dev->mtd.owner = THIS_MODULE;
274 296
@@ -435,7 +457,7 @@ static int __init block2mtd_init(void)
435} 457}
436 458
437 459
438static void block2mtd_exit(void) 460static void __devexit block2mtd_exit(void)
439{ 461{
440 struct list_head *pos, *next; 462 struct list_head *pos, *next;
441 463
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index a4eb8b5b85e..f7fbf6025ef 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -82,7 +82,8 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
82 void __iomem *docptr = doc->virtadr; 82 void __iomem *docptr = doc->virtadr;
83 unsigned long timeo = jiffies + (HZ * 10); 83 unsigned long timeo = jiffies + (HZ * 10);
84 84
85 pr_debug("_DoC_WaitReady called for out-of-line wait\n"); 85 DEBUG(MTD_DEBUG_LEVEL3,
86 "_DoC_WaitReady called for out-of-line wait\n");
86 87
87 /* Out-of-line routine to wait for chip response */ 88 /* Out-of-line routine to wait for chip response */
88 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { 89 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
@@ -91,7 +92,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
91 DoC_Delay(doc, 2); 92 DoC_Delay(doc, 2);
92 93
93 if (time_after(jiffies, timeo)) { 94 if (time_after(jiffies, timeo)) {
94 pr_debug("_DoC_WaitReady timed out.\n"); 95 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
95 return -EIO; 96 return -EIO;
96 } 97 }
97 udelay(1); 98 udelay(1);
@@ -322,7 +323,8 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
322 323
323 /* Reset the chip */ 324 /* Reset the chip */
324 if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { 325 if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
325 pr_debug("DoC_Command (reset) for %d,%d returned true\n", 326 DEBUG(MTD_DEBUG_LEVEL2,
327 "DoC_Command (reset) for %d,%d returned true\n",
326 floor, chip); 328 floor, chip);
327 return 0; 329 return 0;
328 } 330 }
@@ -330,7 +332,8 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
330 332
331 /* Read the NAND chip ID: 1. Send ReadID command */ 333 /* Read the NAND chip ID: 1. Send ReadID command */
332 if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { 334 if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
333 pr_debug("DoC_Command (ReadID) for %d,%d returned true\n", 335 DEBUG(MTD_DEBUG_LEVEL2,
336 "DoC_Command (ReadID) for %d,%d returned true\n",
334 floor, chip); 337 floor, chip);
335 return 0; 338 return 0;
336 } 339 }
@@ -562,15 +565,23 @@ void DoC2k_init(struct mtd_info *mtd)
562 565
563 mtd->type = MTD_NANDFLASH; 566 mtd->type = MTD_NANDFLASH;
564 mtd->flags = MTD_CAP_NANDFLASH; 567 mtd->flags = MTD_CAP_NANDFLASH;
565 mtd->writebufsize = mtd->writesize = 512; 568 mtd->size = 0;
569 mtd->erasesize = 0;
570 mtd->writesize = 512;
566 mtd->oobsize = 16; 571 mtd->oobsize = 16;
567 mtd->ecc_strength = 2;
568 mtd->owner = THIS_MODULE; 572 mtd->owner = THIS_MODULE;
569 mtd->_erase = doc_erase; 573 mtd->erase = doc_erase;
570 mtd->_read = doc_read; 574 mtd->point = NULL;
571 mtd->_write = doc_write; 575 mtd->unpoint = NULL;
572 mtd->_read_oob = doc_read_oob; 576 mtd->read = doc_read;
573 mtd->_write_oob = doc_write_oob; 577 mtd->write = doc_write;
578 mtd->read_oob = doc_read_oob;
579 mtd->write_oob = doc_write_oob;
580 mtd->sync = NULL;
581
582 this->totlen = 0;
583 this->numchips = 0;
584
574 this->curfloor = -1; 585 this->curfloor = -1;
575 this->curchip = -1; 586 this->curchip = -1;
576 mutex_init(&this->lock); 587 mutex_init(&this->lock);
@@ -603,7 +614,13 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
603 int i, len256 = 0, ret=0; 614 int i, len256 = 0, ret=0;
604 size_t left = len; 615 size_t left = len;
605 616
617 /* Don't allow read past end of device */
618 if (from >= this->totlen)
619 return -EINVAL;
620
606 mutex_lock(&this->lock); 621 mutex_lock(&this->lock);
622
623 *retlen = 0;
607 while (left) { 624 while (left) {
608 len = left; 625 len = left;
609 626
@@ -682,7 +699,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
682#ifdef ECC_DEBUG 699#ifdef ECC_DEBUG
683 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); 700 printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
684#endif 701#endif
685 /* Read the ECC syndrome through the DiskOnChip ECC 702 /* Read the ECC syndrom through the DiskOnChip ECC
686 logic. These syndrome will be all ZERO when there 703 logic. These syndrome will be all ZERO when there
687 is no error */ 704 is no error */
688 for (i = 0; i < 6; i++) { 705 for (i = 0; i < 6; i++) {
@@ -743,7 +760,13 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
743 size_t left = len; 760 size_t left = len;
744 int status; 761 int status;
745 762
763 /* Don't allow write past end of device */
764 if (to >= this->totlen)
765 return -EINVAL;
766
746 mutex_lock(&this->lock); 767 mutex_lock(&this->lock);
768
769 *retlen = 0;
747 while (left) { 770 while (left) {
748 len = left; 771 len = left;
749 772
@@ -907,7 +930,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
907 uint8_t *buf = ops->oobbuf; 930 uint8_t *buf = ops->oobbuf;
908 size_t len = ops->len; 931 size_t len = ops->len;
909 932
910 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 933 BUG_ON(ops->mode != MTD_OOB_PLACE);
911 934
912 ofs += ops->ooboffs; 935 ofs += ops->ooboffs;
913 936
@@ -1071,7 +1094,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1071 struct DiskOnChip *this = mtd->priv; 1094 struct DiskOnChip *this = mtd->priv;
1072 int ret; 1095 int ret;
1073 1096
1074 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 1097 BUG_ON(ops->mode != MTD_OOB_PLACE);
1075 1098
1076 mutex_lock(&this->lock); 1099 mutex_lock(&this->lock);
1077 ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len, 1100 ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index f6927955dab..241192f05bc 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -55,14 +55,15 @@ static int _DoC_WaitReady(void __iomem * docptr)
55{ 55{
56 unsigned short c = 0xffff; 56 unsigned short c = 0xffff;
57 57
58 pr_debug("_DoC_WaitReady called for out-of-line wait\n"); 58 DEBUG(MTD_DEBUG_LEVEL3,
59 "_DoC_WaitReady called for out-of-line wait\n");
59 60
60 /* Out-of-line routine to wait for chip response */ 61 /* Out-of-line routine to wait for chip response */
61 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) 62 while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
62 ; 63 ;
63 64
64 if (c == 0) 65 if (c == 0)
65 pr_debug("_DoC_WaitReady timed out.\n"); 66 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
66 67
67 return (c == 0); 68 return (c == 0);
68} 69}
@@ -343,18 +344,25 @@ void DoCMil_init(struct mtd_info *mtd)
343 344
344 mtd->type = MTD_NANDFLASH; 345 mtd->type = MTD_NANDFLASH;
345 mtd->flags = MTD_CAP_NANDFLASH; 346 mtd->flags = MTD_CAP_NANDFLASH;
347 mtd->size = 0;
346 348
347 /* FIXME: erase size is not always 8KiB */ 349 /* FIXME: erase size is not always 8KiB */
348 mtd->erasesize = 0x2000; 350 mtd->erasesize = 0x2000;
349 mtd->writebufsize = mtd->writesize = 512; 351
352 mtd->writesize = 512;
350 mtd->oobsize = 16; 353 mtd->oobsize = 16;
351 mtd->ecc_strength = 2;
352 mtd->owner = THIS_MODULE; 354 mtd->owner = THIS_MODULE;
353 mtd->_erase = doc_erase; 355 mtd->erase = doc_erase;
354 mtd->_read = doc_read; 356 mtd->point = NULL;
355 mtd->_write = doc_write; 357 mtd->unpoint = NULL;
356 mtd->_read_oob = doc_read_oob; 358 mtd->read = doc_read;
357 mtd->_write_oob = doc_write_oob; 359 mtd->write = doc_write;
360 mtd->read_oob = doc_read_oob;
361 mtd->write_oob = doc_write_oob;
362 mtd->sync = NULL;
363
364 this->totlen = 0;
365 this->numchips = 0;
358 this->curfloor = -1; 366 this->curfloor = -1;
359 this->curchip = -1; 367 this->curchip = -1;
360 368
@@ -384,6 +392,10 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
384 void __iomem *docptr = this->virtadr; 392 void __iomem *docptr = this->virtadr;
385 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 393 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
386 394
395 /* Don't allow read past end of device */
396 if (from >= this->totlen)
397 return -EINVAL;
398
387 /* Don't allow a single read to cross a 512-byte block boundary */ 399 /* Don't allow a single read to cross a 512-byte block boundary */
388 if (from + len > ((from | 0x1ff) + 1)) 400 if (from + len > ((from | 0x1ff) + 1))
389 len = ((from | 0x1ff) + 1) - from; 401 len = ((from | 0x1ff) + 1) - from;
@@ -452,7 +464,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
452#ifdef ECC_DEBUG 464#ifdef ECC_DEBUG
453 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 465 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
454#endif 466#endif
455 /* Read the ECC syndrome through the DiskOnChip ECC logic. 467 /* Read the ECC syndrom through the DiskOnChip ECC logic.
456 These syndrome will be all ZERO when there is no error */ 468 These syndrome will be all ZERO when there is no error */
457 for (i = 0; i < 6; i++) { 469 for (i = 0; i < 6; i++) {
458 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); 470 syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
@@ -491,6 +503,10 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
491 void __iomem *docptr = this->virtadr; 503 void __iomem *docptr = this->virtadr;
492 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 504 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
493 505
506 /* Don't allow write past end of device */
507 if (to >= this->totlen)
508 return -EINVAL;
509
494#if 0 510#if 0
495 /* Don't allow a single write to cross a 512-byte block boundary */ 511 /* Don't allow a single write to cross a 512-byte block boundary */
496 if (to + len > ( (to | 0x1ff) + 1)) 512 if (to + len > ( (to | 0x1ff) + 1))
@@ -592,6 +608,7 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
592 printk("Error programming flash\n"); 608 printk("Error programming flash\n");
593 /* Error in programming 609 /* Error in programming
594 FIXME: implement Bad Block Replacement (in nftl.c ??) */ 610 FIXME: implement Bad Block Replacement (in nftl.c ??) */
611 *retlen = 0;
595 ret = -EIO; 612 ret = -EIO;
596 } 613 }
597 dummy = ReadDOC(docptr, LastDataRead); 614 dummy = ReadDOC(docptr, LastDataRead);
@@ -615,7 +632,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
615 uint8_t *buf = ops->oobbuf; 632 uint8_t *buf = ops->oobbuf;
616 size_t len = ops->len; 633 size_t len = ops->len;
617 634
618 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 635 BUG_ON(ops->mode != MTD_OOB_PLACE);
619 636
620 ofs += ops->ooboffs; 637 ofs += ops->ooboffs;
621 638
@@ -673,7 +690,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
673 uint8_t *buf = ops->oobbuf; 690 uint8_t *buf = ops->oobbuf;
674 size_t len = ops->len; 691 size_t len = ops->len;
675 692
676 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 693 BUG_ON(ops->mode != MTD_OOB_PLACE);
677 694
678 ofs += ops->ooboffs; 695 ofs += ops->ooboffs;
679 696
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 4f2220ad892..09ae0adc3ad 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -61,14 +61,15 @@ static int _DoC_WaitReady(void __iomem * docptr)
61{ 61{
62 unsigned int c = 0xffff; 62 unsigned int c = 0xffff;
63 63
64 pr_debug("_DoC_WaitReady called for out-of-line wait\n"); 64 DEBUG(MTD_DEBUG_LEVEL3,
65 "_DoC_WaitReady called for out-of-line wait\n");
65 66
66 /* Out-of-line routine to wait for chip response */ 67 /* Out-of-line routine to wait for chip response */
67 while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) 68 while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
68 ; 69 ;
69 70
70 if (c == 0) 71 if (c == 0)
71 pr_debug("_DoC_WaitReady timed out.\n"); 72 DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
72 73
73 return (c == 0); 74 return (c == 0);
74} 75}
@@ -467,15 +468,23 @@ void DoCMilPlus_init(struct mtd_info *mtd)
467 468
468 mtd->type = MTD_NANDFLASH; 469 mtd->type = MTD_NANDFLASH;
469 mtd->flags = MTD_CAP_NANDFLASH; 470 mtd->flags = MTD_CAP_NANDFLASH;
470 mtd->writebufsize = mtd->writesize = 512; 471 mtd->size = 0;
472
473 mtd->erasesize = 0;
474 mtd->writesize = 512;
471 mtd->oobsize = 16; 475 mtd->oobsize = 16;
472 mtd->ecc_strength = 2;
473 mtd->owner = THIS_MODULE; 476 mtd->owner = THIS_MODULE;
474 mtd->_erase = doc_erase; 477 mtd->erase = doc_erase;
475 mtd->_read = doc_read; 478 mtd->point = NULL;
476 mtd->_write = doc_write; 479 mtd->unpoint = NULL;
477 mtd->_read_oob = doc_read_oob; 480 mtd->read = doc_read;
478 mtd->_write_oob = doc_write_oob; 481 mtd->write = doc_write;
482 mtd->read_oob = doc_read_oob;
483 mtd->write_oob = doc_write_oob;
484 mtd->sync = NULL;
485
486 this->totlen = 0;
487 this->numchips = 0;
479 this->curfloor = -1; 488 this->curfloor = -1;
480 this->curchip = -1; 489 this->curchip = -1;
481 490
@@ -582,6 +591,10 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
582 void __iomem * docptr = this->virtadr; 591 void __iomem * docptr = this->virtadr;
583 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 592 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
584 593
594 /* Don't allow read past end of device */
595 if (from >= this->totlen)
596 return -EINVAL;
597
585 /* Don't allow a single read to cross a 512-byte block boundary */ 598 /* Don't allow a single read to cross a 512-byte block boundary */
586 if (from + len > ((from | 0x1ff) + 1)) 599 if (from + len > ((from | 0x1ff) + 1))
587 len = ((from | 0x1ff) + 1) - from; 600 len = ((from | 0x1ff) + 1) - from;
@@ -642,7 +655,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
642#ifdef ECC_DEBUG 655#ifdef ECC_DEBUG
643 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); 656 printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
644#endif 657#endif
645 /* Read the ECC syndrome through the DiskOnChip ECC logic. 658 /* Read the ECC syndrom through the DiskOnChip ECC logic.
646 These syndrome will be all ZERO when there is no error */ 659 These syndrome will be all ZERO when there is no error */
647 for (i = 0; i < 6; i++) 660 for (i = 0; i < 6; i++)
648 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); 661 syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
@@ -659,15 +672,23 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
659#ifdef ECC_DEBUG 672#ifdef ECC_DEBUG
660 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", 673 printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
661 __FILE__, __LINE__, (int)from); 674 __FILE__, __LINE__, (int)from);
662 printk(" syndrome= %*phC\n", 6, syndrome); 675 printk(" syndrome= %02x:%02x:%02x:%02x:%02x:"
663 printk(" eccbuf= %*phC\n", 6, eccbuf); 676 "%02x\n",
677 syndrome[0], syndrome[1], syndrome[2],
678 syndrome[3], syndrome[4], syndrome[5]);
679 printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:"
680 "%02x\n",
681 eccbuf[0], eccbuf[1], eccbuf[2],
682 eccbuf[3], eccbuf[4], eccbuf[5]);
664#endif 683#endif
665 ret = -EIO; 684 ret = -EIO;
666 } 685 }
667 } 686 }
668 687
669#ifdef PSYCHO_DEBUG 688#ifdef PSYCHO_DEBUG
670 printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf); 689 printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
690 (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
691 eccbuf[4], eccbuf[5]);
671#endif 692#endif
672 /* disable the ECC engine */ 693 /* disable the ECC engine */
673 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf); 694 WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
@@ -689,6 +710,10 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
689 void __iomem * docptr = this->virtadr; 710 void __iomem * docptr = this->virtadr;
690 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 711 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
691 712
713 /* Don't allow write past end of device */
714 if (to >= this->totlen)
715 return -EINVAL;
716
692 /* Don't allow writes which aren't exactly one block (512 bytes) */ 717 /* Don't allow writes which aren't exactly one block (512 bytes) */
693 if ((to & 0x1ff) || (len != 0x200)) 718 if ((to & 0x1ff) || (len != 0x200))
694 return -EINVAL; 719 return -EINVAL;
@@ -785,6 +810,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
785 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); 810 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
786 /* Error in programming 811 /* Error in programming
787 FIXME: implement Bad Block Replacement (in nftl.c ??) */ 812 FIXME: implement Bad Block Replacement (in nftl.c ??) */
813 *retlen = 0;
788 ret = -EIO; 814 ret = -EIO;
789 } 815 }
790 dummy = ReadDOC(docptr, Mplus_LastDataRead); 816 dummy = ReadDOC(docptr, Mplus_LastDataRead);
@@ -809,7 +835,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
809 uint8_t *buf = ops->oobbuf; 835 uint8_t *buf = ops->oobbuf;
810 size_t len = ops->len; 836 size_t len = ops->len;
811 837
812 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 838 BUG_ON(ops->mode != MTD_OOB_PLACE);
813 839
814 ofs += ops->ooboffs; 840 ofs += ops->ooboffs;
815 841
@@ -894,7 +920,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
894 uint8_t *buf = ops->oobbuf; 920 uint8_t *buf = ops->oobbuf;
895 size_t len = ops->len; 921 size_t len = ops->len;
896 922
897 BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); 923 BUG_ON(ops->mode != MTD_OOB_PLACE);
898 924
899 ofs += ops->ooboffs; 925 ofs += ops->ooboffs;
900 926
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 4a1c39b6f37..37ef29a73ee 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -2,7 +2,7 @@
2 * ECC algorithm for M-systems disk on chip. We use the excellent Reed 2 * ECC algorithm for M-systems disk on chip. We use the excellent Reed
3 * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the 3 * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the
4 * GNU GPL License. The rest is simply to convert the disk on chip 4 * GNU GPL License. The rest is simply to convert the disk on chip
5 * syndrome into a standard syndome. 5 * syndrom into a standard syndom.
6 * 6 *
7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com) 7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
8 * Copyright (C) 2000 Netgem S.A. 8 * Copyright (C) 2000 Netgem S.A.
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
deleted file mode 100644
index 8510ccb9c6f..00000000000
--- a/drivers/mtd/devices/docg3.c
+++ /dev/null
@@ -1,2162 +0,0 @@
1/*
2 * Handles the M-Systems DiskOnChip G3 chip
3 *
4 * Copyright (C) 2011 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/errno.h>
25#include <linux/platform_device.h>
26#include <linux/string.h>
27#include <linux/slab.h>
28#include <linux/io.h>
29#include <linux/delay.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h>
32#include <linux/bitmap.h>
33#include <linux/bitrev.h>
34#include <linux/bch.h>
35
36#include <linux/debugfs.h>
37#include <linux/seq_file.h>
38
39#define CREATE_TRACE_POINTS
40#include "docg3.h"
41
42/*
43 * This driver handles the DiskOnChip G3 flash memory.
44 *
45 * As no specification is available from M-Systems/Sandisk, this drivers lacks
46 * several functions available on the chip, as :
47 * - IPL write
48 *
49 * The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
50 * the driver assumes a 16bits data bus.
51 *
52 * DocG3 relies on 2 ECC algorithms, which are handled in hardware :
53 * - a 1 byte Hamming code stored in the OOB for each page
54 * - a 7 bytes BCH code stored in the OOB for each page
55 * The BCH ECC is :
56 * - BCH is in GF(2^14)
57 * - BCH is over data of 520 bytes (512 page + 7 page_info bytes
58 * + 1 hamming byte)
59 * - BCH can correct up to 4 bits (t = 4)
60 * - BCH syndroms are calculated in hardware, and checked in hardware as well
61 *
62 */
63
64static unsigned int reliable_mode;
65module_param(reliable_mode, uint, 0);
66MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
67 "2=reliable) : MLC normal operations are in normal mode");
68
69/**
70 * struct docg3_oobinfo - DiskOnChip G3 OOB layout
71 * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
72 * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
73 * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15
74 * @oobavail: 8 available bytes remaining after ECC toll
75 */
76static struct nand_ecclayout docg3_oobinfo = {
77 .eccbytes = 8,
78 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
79 .oobfree = {{0, 7}, {15, 1} },
80 .oobavail = 8,
81};
82
83static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
84{
85 u8 val = readb(docg3->cascade->base + reg);
86
87 trace_docg3_io(0, 8, reg, (int)val);
88 return val;
89}
90
91static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
92{
93 u16 val = readw(docg3->cascade->base + reg);
94
95 trace_docg3_io(0, 16, reg, (int)val);
96 return val;
97}
98
99static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
100{
101 writeb(val, docg3->cascade->base + reg);
102 trace_docg3_io(1, 8, reg, val);
103}
104
105static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
106{
107 writew(val, docg3->cascade->base + reg);
108 trace_docg3_io(1, 16, reg, val);
109}
110
111static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
112{
113 doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
114}
115
116static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
117{
118 doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
119}
120
121static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
122{
123 doc_writeb(docg3, addr, DOC_FLASHADDRESS);
124}
125
126static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
127
128static int doc_register_readb(struct docg3 *docg3, int reg)
129{
130 u8 val;
131
132 doc_writew(docg3, reg, DOC_READADDRESS);
133 val = doc_readb(docg3, reg);
134 doc_vdbg("Read register %04x : %02x\n", reg, val);
135 return val;
136}
137
138static int doc_register_readw(struct docg3 *docg3, int reg)
139{
140 u16 val;
141
142 doc_writew(docg3, reg, DOC_READADDRESS);
143 val = doc_readw(docg3, reg);
144 doc_vdbg("Read register %04x : %04x\n", reg, val);
145 return val;
146}
147
148/**
149 * doc_delay - delay docg3 operations
150 * @docg3: the device
151 * @nbNOPs: the number of NOPs to issue
152 *
153 * As no specification is available, the right timings between chip commands are
154 * unknown. The only available piece of information are the observed nops on a
155 * working docg3 chip.
156 * Therefore, doc_delay relies on a busy loop of NOPs, instead of scheduler
157 * friendlier msleep() functions or blocking mdelay().
158 */
159static void doc_delay(struct docg3 *docg3, int nbNOPs)
160{
161 int i;
162
163 doc_vdbg("NOP x %d\n", nbNOPs);
164 for (i = 0; i < nbNOPs; i++)
165 doc_writeb(docg3, 0, DOC_NOP);
166}
167
168static int is_prot_seq_error(struct docg3 *docg3)
169{
170 int ctrl;
171
172 ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
173 return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
174}
175
176static int doc_is_ready(struct docg3 *docg3)
177{
178 int ctrl;
179
180 ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
181 return ctrl & DOC_CTRL_FLASHREADY;
182}
183
184static int doc_wait_ready(struct docg3 *docg3)
185{
186 int maxWaitCycles = 100;
187
188 do {
189 doc_delay(docg3, 4);
190 cpu_relax();
191 } while (!doc_is_ready(docg3) && maxWaitCycles--);
192 doc_delay(docg3, 2);
193 if (maxWaitCycles > 0)
194 return 0;
195 else
196 return -EIO;
197}
198
199static int doc_reset_seq(struct docg3 *docg3)
200{
201 int ret;
202
203 doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
204 doc_flash_sequence(docg3, DOC_SEQ_RESET);
205 doc_flash_command(docg3, DOC_CMD_RESET);
206 doc_delay(docg3, 2);
207 ret = doc_wait_ready(docg3);
208
209 doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
210 return ret;
211}
212
213/**
214 * doc_read_data_area - Read data from data area
215 * @docg3: the device
216 * @buf: the buffer to fill in (might be NULL is dummy reads)
217 * @len: the length to read
218 * @first: first time read, DOC_READADDRESS should be set
219 *
220 * Reads bytes from flash data. Handles the single byte / even bytes reads.
221 */
222static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
223 int first)
224{
225 int i, cdr, len4;
226 u16 data16, *dst16;
227 u8 data8, *dst8;
228
229 doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
230 cdr = len & 0x1;
231 len4 = len - cdr;
232
233 if (first)
234 doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
235 dst16 = buf;
236 for (i = 0; i < len4; i += 2) {
237 data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
238 if (dst16) {
239 *dst16 = data16;
240 dst16++;
241 }
242 }
243
244 if (cdr) {
245 doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
246 DOC_READADDRESS);
247 doc_delay(docg3, 1);
248 dst8 = (u8 *)dst16;
249 for (i = 0; i < cdr; i++) {
250 data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
251 if (dst8) {
252 *dst8 = data8;
253 dst8++;
254 }
255 }
256 }
257}
258
259/**
260 * doc_write_data_area - Write data into data area
261 * @docg3: the device
262 * @buf: the buffer to get input bytes from
263 * @len: the length to write
264 *
265 * Writes bytes into flash data. Handles the single byte / even bytes writes.
266 */
267static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
268{
269 int i, cdr, len4;
270 u16 *src16;
271 u8 *src8;
272
273 doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
274 cdr = len & 0x3;
275 len4 = len - cdr;
276
277 doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
278 src16 = (u16 *)buf;
279 for (i = 0; i < len4; i += 2) {
280 doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
281 src16++;
282 }
283
284 src8 = (u8 *)src16;
285 for (i = 0; i < cdr; i++) {
286 doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
287 DOC_READADDRESS);
288 doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
289 src8++;
290 }
291}
292
293/**
294 * doc_set_data_mode - Sets the flash to normal or reliable data mode
295 * @docg3: the device
296 *
297 * The reliable data mode is a bit slower than the fast mode, but less errors
298 * occur. Entering the reliable mode cannot be done without entering the fast
299 * mode first.
300 *
301 * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
302 * (4,5) make the hardware write also to page 1 of blocks blocks(4,5). Reading
303 * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
304 * result, which is a logical and between bytes from page 0 and page 1 (which is
305 * consistent with the fact that writing to a page is _clearing_ bits of that
306 * page).
307 */
308static void doc_set_reliable_mode(struct docg3 *docg3)
309{
310 static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
311
312 doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
313 switch (docg3->reliable) {
314 case 0:
315 break;
316 case 1:
317 doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
318 doc_flash_command(docg3, DOC_CMD_FAST_MODE);
319 break;
320 case 2:
321 doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
322 doc_flash_command(docg3, DOC_CMD_FAST_MODE);
323 doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
324 break;
325 default:
326 doc_err("doc_set_reliable_mode(): invalid mode\n");
327 break;
328 }
329 doc_delay(docg3, 2);
330}
331
332/**
333 * doc_set_asic_mode - Set the ASIC mode
334 * @docg3: the device
335 * @mode: the mode
336 *
337 * The ASIC can work in 3 modes :
338 * - RESET: all registers are zeroed
339 * - NORMAL: receives and handles commands
340 * - POWERDOWN: minimal poweruse, flash parts shut off
341 */
342static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
343{
344 int i;
345
346 for (i = 0; i < 12; i++)
347 doc_readb(docg3, DOC_IOSPACE_IPL);
348
349 mode |= DOC_ASICMODE_MDWREN;
350 doc_dbg("doc_set_asic_mode(%02x)\n", mode);
351 doc_writeb(docg3, mode, DOC_ASICMODE);
352 doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
353 doc_delay(docg3, 1);
354}
355
356/**
357 * doc_set_device_id - Sets the devices id for cascaded G3 chips
358 * @docg3: the device
359 * @id: the chip to select (amongst 0, 1, 2, 3)
360 *
361 * There can be 4 cascaded G3 chips. This function selects the one which will
362 * should be the active one.
363 */
364static void doc_set_device_id(struct docg3 *docg3, int id)
365{
366 u8 ctrl;
367
368 doc_dbg("doc_set_device_id(%d)\n", id);
369 doc_writeb(docg3, id, DOC_DEVICESELECT);
370 ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
371
372 ctrl &= ~DOC_CTRL_VIOLATION;
373 ctrl |= DOC_CTRL_CE;
374 doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
375}
376
377/**
378 * doc_set_extra_page_mode - Change flash page layout
379 * @docg3: the device
380 *
381 * Normally, the flash page is split into the data (512 bytes) and the out of
382 * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
383 * leveling counters are stored. To access this last area of 4 bytes, a special
384 * mode must be input to the flash ASIC.
385 *
386 * Returns 0 if no error occurred, -EIO else.
387 */
388static int doc_set_extra_page_mode(struct docg3 *docg3)
389{
390 int fctrl;
391
392 doc_dbg("doc_set_extra_page_mode()\n");
393 doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
394 doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
395 doc_delay(docg3, 2);
396
397 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
398 if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
399 return -EIO;
400 else
401 return 0;
402}
403
/**
 * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
 * @docg3: the device
 * @sector: the sector
 */
static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
{
	int shift;

	doc_delay(docg3, 1);
	/* Send the 24 bit sector number, least significant byte first */
	for (shift = 0; shift <= 16; shift += 8)
		doc_flash_address(docg3, (sector >> shift) & 0xff);
	doc_delay(docg3, 1);
}
417
/**
 * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
 * @docg3: the device
 * @sector: the sector
 * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
 */
static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
{
	int shift;

	/* The in-page offset is transmitted in units of 4 bytes */
	ofs >>= 2;
	doc_delay(docg3, 1);
	doc_flash_address(docg3, ofs & 0xff);
	/* Followed by the 24 bit sector number, least significant byte first */
	for (shift = 0; shift <= 16; shift += 8)
		doc_flash_address(docg3, (sector >> shift) & 0xff);
	doc_delay(docg3, 1);
}
434
/**
 * doc_read_seek - Set both flash planes to the specified block, page for reading
 * @docg3: the device
 * @block0: the first plane block index
 * @block1: the second plane block index
 * @page: the page index within the block
 * @wear: if true, read will occur on the 4 extra bytes of the wear area
 * @ofs: offset in page to read
 *
 * Programs the flash even and odd planes to the specific block and page.
 * Alternatively, programs the flash to the wear area of the specified page.
 *
 * Returns 0 if successful, or the doc_set_extra_page_mode() error when the
 * wear area was requested and could not be entered.
 */
static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
			 int wear, int ofs)
{
	int sector, ret = 0;

	doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
		block0, block1, page, ofs, wear);

	/* Select the plane holding the requested offset (wear reads use plane2) */
	if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
		doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
		doc_delay(docg3, 2);
	} else {
		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
		doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
		doc_delay(docg3, 2);
	}

	doc_set_reliable_mode(docg3);
	if (wear)
		ret = doc_set_extra_page_mode(docg3);
	if (ret)
		goto out;

	/* Program the sector address of both planes */
	doc_flash_sequence(docg3, DOC_SEQ_READ);
	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
	doc_setup_addr_sector(docg3, sector);

	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
	doc_setup_addr_sector(docg3, sector);
	doc_delay(docg3, 1);

out:
	return ret;
}
484
/**
 * doc_write_seek - Set both flash planes to the specified block, page for writing
 * @docg3: the device
 * @block0: the first plane block index
 * @block1: the second plane block index
 * @page: the page index within the block
 * @ofs: offset in page to write
 *
 * Programs the flash even and odd planes to the specific block and page.
 * Alternatively, programs the flash to the wear area of the specified page.
 *
 * Returns 0 if successful, or the doc_wait_ready() error after programming
 * the first plane failed.
 */
static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
			  int ofs)
{
	int ret = 0, sector;

	doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
		block0, block1, page, ofs);

	doc_set_reliable_mode(docg3);

	/* Select the plane which holds offset "ofs" of the double page */
	if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
		doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
		doc_delay(docg3, 2);
	} else {
		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
		doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
		doc_delay(docg3, 2);
	}

	doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
	doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);

	/* Program the first plane address, wait for the chip to take it */
	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
	doc_setup_writeaddr_sector(docg3, sector, ofs);

	doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
	doc_delay(docg3, 2);
	ret = doc_wait_ready(docg3);
	if (ret)
		goto out;

	/* Then the second plane address */
	doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
	doc_setup_writeaddr_sector(docg3, sector, ofs);
	doc_delay(docg3, 1);

out:
	return ret;
}
536
537
538/**
539 * doc_read_page_ecc_init - Initialize hardware ECC engine
540 * @docg3: the device
541 * @len: the number of bytes covered by the ECC (BCH covered)
542 *
543 * The function does initialize the hardware ECC engine to compute the Hamming
544 * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
545 *
546 * Return 0 if succeeded, -EIO on error
547 */
548static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
549{
550 doc_writew(docg3, DOC_ECCCONF0_READ_MODE
551 | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
552 | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
553 DOC_ECCCONF0);
554 doc_delay(docg3, 4);
555 doc_register_readb(docg3, DOC_FLASHCONTROL);
556 return doc_wait_ready(docg3);
557}
558
559/**
560 * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
561 * @docg3: the device
562 * @len: the number of bytes covered by the ECC (BCH covered)
563 *
564 * The function does initialize the hardware ECC engine to compute the Hamming
565 * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
566 *
567 * Return 0 if succeeded, -EIO on error
568 */
569static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
570{
571 doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
572 | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
573 | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
574 DOC_ECCCONF0);
575 doc_delay(docg3, 4);
576 doc_register_readb(docg3, DOC_FLASHCONTROL);
577 return doc_wait_ready(docg3);
578}
579
/**
 * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
 * @docg3: the device
 *
 * Disables the hardware ECC generator and checker, for unchecked reads (as when
 * reading OOB only or write status byte).
 */
static void doc_ecc_disable(struct docg3 *docg3)
{
	/* Read mode with neither BCH nor Hamming engine enabled */
	doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
	doc_delay(docg3, 4);
}
592
593/**
594 * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
595 * @docg3: the device
596 * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
597 *
598 * This function programs the ECC hardware to compute the hamming code on the
599 * last provided N bytes to the hardware generator.
600 */
601static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
602{
603 u8 ecc_conf1;
604
605 ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
606 ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
607 ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
608 doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
609}
610
611/**
612 * doc_ecc_bch_fix_data - Fix if need be read data from flash
613 * @docg3: the device
614 * @buf: the buffer of read data (512 + 7 + 1 bytes)
615 * @hwecc: the hardware calculated ECC.
616 * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
617 * area data, and calc_ecc the ECC calculated by the hardware generator.
618 *
619 * Checks if the received data matches the ECC, and if an error is detected,
620 * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
621 * understands the (data, ecc, syndroms) in an inverted order in comparison to
622 * the BCH library, the function reverses the order of bits (ie. bit7 and bit0,
623 * bit6 and bit 1, ...) for all ECC data.
624 *
625 * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
626 * algorithm is used to decode this. However the hw operates on page
627 * data in a bit order that is the reverse of that of the bch alg,
628 * requiring that the bits be reversed on the result. Thanks to Ivan
629 * Djelic for his analysis.
630 *
631 * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
632 * errors were detected and cannot be fixed.
633 */
634static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
635{
636 u8 ecc[DOC_ECC_BCH_SIZE];
637 int errorpos[DOC_ECC_BCH_T], i, numerrs;
638
639 for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
640 ecc[i] = bitrev8(hwecc[i]);
641 numerrs = decode_bch(docg3->cascade->bch, NULL,
642 DOC_ECC_BCH_COVERED_BYTES,
643 NULL, ecc, NULL, errorpos);
644 BUG_ON(numerrs == -EINVAL);
645 if (numerrs < 0)
646 goto out;
647
648 for (i = 0; i < numerrs; i++)
649 errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
650 for (i = 0; i < numerrs; i++)
651 if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
652 /* error is located in data, correct it */
653 change_bit(errorpos[i], buf);
654out:
655 doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
656 return numerrs;
657}
658
659
/**
 * doc_read_page_prepare - Prepares reading data from a flash page
 * @docg3: the device
 * @block0: the first plane block index on flash memory
 * @block1: the second plane block index on flash memory
 * @page: the page index in the block
 * @offset: the offset in the page (must be a multiple of 4)
 *
 * Prepares the page to be read in the flash memory :
 *   - tell ASIC to map the flash pages
 *   - tell ASIC to be in read mode
 *
 * After a call to this method, a call to doc_read_page_finish is mandatory,
 * to end the read cycle of the flash.
 *
 * Read data from a flash page. The length to be read must be between 0 and
 * (page_size + oob_size + wear_size), ie. 532, and a multiple of 4 (because
 * the extra bytes reading is not implemented).
 *
 * As pages are grouped by 2 (in 2 planes), reading from a page must be done
 * in two steps:
 *  - one read of 512 bytes at offset 0
 *  - one read of 512 bytes at offset 512 + 16
 *
 * Returns 0 if successful, -EIO if a read error occurred.
 */
static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
				 int page, int offset)
{
	int wear_area = 0, ret = 0;

	doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
		block0, block1, page, offset);
	/* Offsets beyond the OOB area address the 4-byte wear counters */
	if (offset >= DOC_LAYOUT_WEAR_OFFSET)
		wear_area = 1;
	if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
		return -EINVAL;

	doc_set_device_id(docg3, docg3->device_id);
	ret = doc_reset_seq(docg3);
	if (ret)
		goto err;

	/* Program the flash address block and page */
	ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
	if (ret)
		goto err;

	doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
	doc_delay(docg3, 2);
	doc_wait_ready(docg3);

	doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
	doc_delay(docg3, 1);
	/* The in-plane offset is relative to the selected 512 byte half */
	if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
		offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
	doc_flash_address(docg3, offset >> 2);
	doc_delay(docg3, 1);
	doc_wait_ready(docg3);

	doc_flash_command(docg3, DOC_CMD_READ_FLASH);

	return 0;
err:
	/* Close the page access cycle before reporting the failure */
	doc_writeb(docg3, 0, DOC_DATAEND);
	doc_delay(docg3, 2);
	return -EIO;
}
728
729/**
730 * doc_read_page_getbytes - Reads bytes from a prepared page
731 * @docg3: the device
732 * @len: the number of bytes to be read (must be a multiple of 4)
733 * @buf: the buffer to be filled in (or NULL is forget bytes)
734 * @first: 1 if first time read, DOC_READADDRESS should be set
735 * @last_odd: 1 if last read ended up on an odd byte
736 *
737 * Reads bytes from a prepared page. There is a trickery here : if the last read
738 * ended up on an odd offset in the 1024 bytes double page, ie. between the 2
739 * planes, the first byte must be read apart. If a word (16bit) read was used,
740 * the read would return the byte of plane 2 as low *and* high endian, which
741 * will mess the read.
742 *
743 */
744static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
745 int first, int last_odd)
746{
747 if (last_odd && len > 0) {
748 doc_read_data_area(docg3, buf, 1, first);
749 doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
750 } else {
751 doc_read_data_area(docg3, buf, len, first);
752 }
753 doc_delay(docg3, 2);
754 return len;
755}
756
/**
 * doc_write_page_putbytes - Writes bytes into a prepared page
 * @docg3: the device
 * @len: the number of bytes to be written
 * @buf: the buffer of input bytes
 *
 * The page must have been set up beforehand (see doc_write_seek() callers);
 * the bytes are pushed through the chip's data area window.
 */
static void doc_write_page_putbytes(struct docg3 *docg3, int len,
				    const u_char *buf)
{
	doc_write_data_area(docg3, buf, len);
	doc_delay(docg3, 2);
}
770
771/**
772 * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
773 * @docg3: the device
774 * @hwecc: the array of 7 integers where the hardware ecc will be stored
775 */
776static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
777{
778 int i;
779
780 for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
781 hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
782}
783
/**
 * doc_page_finish - Ends reading/writing of a flash page
 * @docg3: the device
 *
 * Writing 0 to DOC_DATAEND closes the current page access cycle.
 */
static void doc_page_finish(struct docg3 *docg3)
{
	doc_writeb(docg3, 0, DOC_DATAEND);
	doc_delay(docg3, 2);
}
793
/**
 * doc_read_page_finish - Ends reading of a flash page
 * @docg3: the device
 *
 * As a side effect, resets the chip selector to 0. This ensures that after each
 * read operation, the floor 0 is selected. Therefore, if the systems halts, the
 * reboot will boot on floor 0, where the IPL is.
 */
static void doc_read_page_finish(struct docg3 *docg3)
{
	doc_page_finish(docg3);
	/* Always leave floor 0 (IPL floor) selected */
	doc_set_device_id(docg3, 0);
}
807
808/**
809 * calc_block_sector - Calculate blocks, pages and ofs.
810
811 * @from: offset in flash
812 * @block0: first plane block index calculated
813 * @block1: second plane block index calculated
814 * @page: page calculated
815 * @ofs: offset in page
816 * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
817 * reliable mode.
818 *
819 * The calculation is based on the reliable/normal mode. In normal mode, the 64
820 * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
821 * clones, only 32 pages per block are available.
822 */
823static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
824 int *ofs, int reliable)
825{
826 uint sector, pages_biblock;
827
828 pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
829 if (reliable == 1 || reliable == 2)
830 pages_biblock /= 2;
831
832 sector = from / DOC_LAYOUT_PAGE_SIZE;
833 *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
834 *block1 = *block0 + 1;
835 *page = sector % pages_biblock;
836 *page /= DOC_LAYOUT_NBPLANES;
837 if (reliable == 1 || reliable == 2)
838 *page *= 2;
839 if (sector % 2)
840 *ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
841 else
842 *ofs = 0;
843}
844
/**
 * doc_read_oob - Read out of band bytes from flash
 * @mtd: the device
 * @from: the offset from first block and first page, in bytes, aligned on page
 *        size
 * @ops: the mtd oob structure
 *
 * Reads flash memory OOB area of pages.
 *
 * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
 */
static int doc_read_oob(struct mtd_info *mtd, loff_t from,
			struct mtd_oob_ops *ops)
{
	struct docg3 *docg3 = mtd->priv;
	int block0, block1, page, ret, skip, ofs = 0;
	u8 *oobbuf = ops->oobbuf;
	u8 *buf = ops->datbuf;
	size_t len, ooblen, nbdata, nboob;
	u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
	int max_bitflips = 0;

	/* A NULL data (resp. oob) buffer means none of it is wanted */
	if (buf)
		len = ops->len;
	else
		len = 0;
	if (oobbuf)
		ooblen = ops->ooblen;
	else
		ooblen = 0;

	if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
		oobbuf += ops->ooboffs;

	doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
		from, ops->mode, buf, len, oobbuf, ooblen);
	/* OOB is only read in whole 16 byte chunks */
	if (ooblen % DOC_LAYOUT_OOB_SIZE)
		return -EINVAL;

	if (from + len > mtd->size)
		return -EINVAL;

	ops->oobretlen = 0;
	ops->retlen = 0;
	ret = 0;
	/* Bytes of the first page before "from", to read and discard */
	skip = from % DOC_LAYOUT_PAGE_SIZE;
	mutex_lock(&docg3->cascade->lock);
	while (ret >= 0 && (len > 0 || ooblen > 0)) {
		calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
			docg3->reliable);
		nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
		nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
		ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
		if (ret < 0)
			goto out;
		ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
		if (ret < 0)
			goto err_in_read;
		/* Discard the leading "skip" bytes, then read the data */
		ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
		if (ret < skip)
			goto err_in_read;
		ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
		if (ret < nbdata)
			goto err_in_read;
		/* Drain the rest of the page so the whole page feeds the ECC */
		doc_read_page_getbytes(docg3,
				       DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
				       NULL, 0, (skip + nbdata) % 2);
		ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
		if (ret < nboob)
			goto err_in_read;
		doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
				       NULL, 0, nboob % 2);

		doc_get_bch_hw_ecc(docg3, hwecc);
		eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);

		if (nboob >= DOC_LAYOUT_OOB_SIZE) {
			doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
			doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
			doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
			doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
		}
		doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
		doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);

		ret = -EIO;
		if (is_prot_seq_error(docg3))
			goto err_in_read;
		ret = 0;
		/* BCH correction only for full data pages of the data area */
		if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
		    (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
		    (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
		    (ops->mode != MTD_OPS_RAW) &&
		    (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
			ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
				ret = -EBADMSG;
			}
			if (ret > 0) {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(max_bitflips, ret);
				ret = max_bitflips;
			}
		}

		doc_read_page_finish(docg3);
		ops->retlen += nbdata;
		ops->oobretlen += nboob;
		buf += nbdata;
		oobbuf += nboob;
		len -= nbdata;
		ooblen -= nboob;
		from += DOC_LAYOUT_PAGE_SIZE;
		skip = 0;
	}

out:
	mutex_unlock(&docg3->cascade->lock);
	return ret;
err_in_read:
	doc_read_page_finish(docg3);
	goto out;
}
969
970/**
971 * doc_read - Read bytes from flash
972 * @mtd: the device
973 * @from: the offset from first block and first page, in bytes, aligned on page
974 * size
975 * @len: the number of bytes to read (must be a multiple of 4)
976 * @retlen: the number of bytes actually read
977 * @buf: the filled in buffer
978 *
979 * Reads flash memory pages. This function does not read the OOB chunk, but only
980 * the page data.
981 *
982 * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
983 */
984static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
985 size_t *retlen, u_char *buf)
986{
987 struct mtd_oob_ops ops;
988 size_t ret;
989
990 memset(&ops, 0, sizeof(ops));
991 ops.datbuf = buf;
992 ops.len = len;
993 ops.mode = MTD_OPS_AUTO_OOB;
994
995 ret = doc_read_oob(mtd, from, &ops);
996 *retlen = ops.retlen;
997 return ret;
998}
999
/**
 * doc_reload_bbt - Reload the bad block table from flash into RAM
 * @docg3: the device
 *
 * Reads the pages holding the bad block table (one bit per block, see
 * doc_block_isbad()) from the reserved BBT block into docg3->bbt.
 *
 * Returns 0 if successful, a negative error code otherwise.
 */
static int doc_reload_bbt(struct docg3 *docg3)
{
	int block = DOC_LAYOUT_BLOCK_BBT;
	int ret = 0, nbpages, page;
	u_char *buf = docg3->bbt;

	/* One bit per block, hence the division by 8 * page size */
	nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
	for (page = 0; !ret && (page < nbpages); page++) {
		ret = doc_read_page_prepare(docg3, block, block + 1,
					    page + DOC_LAYOUT_PAGE_BBT, 0);
		if (!ret)
			ret = doc_read_page_ecc_init(docg3,
						     DOC_LAYOUT_PAGE_SIZE);
		if (!ret)
			doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
					       buf, 1, 0);
		buf += DOC_LAYOUT_PAGE_SIZE;
	}
	doc_read_page_finish(docg3);
	return ret;
}
1021
1022/**
1023 * doc_block_isbad - Checks whether a block is good or not
1024 * @mtd: the device
1025 * @from: the offset to find the correct block
1026 *
1027 * Returns 1 if block is bad, 0 if block is good
1028 */
1029static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
1030{
1031 struct docg3 *docg3 = mtd->priv;
1032 int block0, block1, page, ofs, is_good;
1033
1034 calc_block_sector(from, &block0, &block1, &page, &ofs,
1035 docg3->reliable);
1036 doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
1037 from, block0, block1, page, ofs);
1038
1039 if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
1040 return 0;
1041 if (block1 > docg3->max_block)
1042 return -EINVAL;
1043
1044 is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
1045 return !is_good;
1046}
1047
#if 0
/**
 * doc_get_erase_count - Get block erase count
 * @docg3: the device
 * @from: the offset in which the block is.
 *
 * Get the number of times a block was erased. The number is the maximum of
 * erase times between first and second plane (which should be equal normally).
 *
 * Returns The number of erases, or -EINVAL or -EIO on error.
 *
 * NOTE(review): this function is compiled out. doc_read_page_prepare() takes
 * 5 arguments but is called below with 6 (the trailing 0), so re-enabling
 * this block will not compile as-is — confirm the intended wear/offset
 * argument before reviving it.
 */
static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
{
	u8 buf[DOC_LAYOUT_WEAR_SIZE];
	int ret, plane1_erase_count, plane2_erase_count;
	int block0, block1, page, ofs;

	doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
	if (from % DOC_LAYOUT_PAGE_SIZE)
		return -EINVAL;
	calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
	if (block1 > docg3->max_block)
		return -EINVAL;

	ret = doc_reset_seq(docg3);
	if (!ret)
		ret = doc_read_page_prepare(docg3, block0, block1, page,
					    ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
	if (!ret)
		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
					     buf, 1, 0);
	doc_read_page_finish(docg3);

	if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
		return -EIO;
	/* Counters are stored bit-inverted, spread over several wear bytes */
	plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
			     | ((u8)(~buf[5]) << 16);
	plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
			     | ((u8)(~buf[7]) << 16);

	return max(plane1_erase_count, plane2_erase_count);
}
#endif
1091
/**
 * doc_get_op_status - get erase/write operation status
 * @docg3: the device
 *
 * Queries the status from the chip, and returns it
 *
 * Returns the status (bits DOC_PLANES_STATUS_*)
 */
static int doc_get_op_status(struct docg3 *docg3)
{
	u8 status;

	doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
	doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
	doc_delay(docg3, 5);

	/* The status byte is read with the ECC engines disabled */
	doc_ecc_disable(docg3);
	doc_read_data_area(docg3, &status, 1, 1);
	return status;
}
1112
/**
 * doc_write_erase_wait_status - wait for write or erase completion
 * @docg3: the device
 *
 * Wait for the chip to be ready again after erase or write operation, and check
 * erase/write status.
 *
 * Returns 0 if erase successful, -EIO if erase/write issue, -EAGAIN if the
 * chip did not become ready within the polling window
 */
static int doc_write_erase_wait_status(struct docg3 *docg3)
{
	int i, status, ret = 0;

	/* Poll readiness at most 5 times, 20ms apart */
	for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
		msleep(20);
	if (!doc_is_ready(docg3)) {
		doc_dbg("Timeout reached and the chip is still not ready\n");
		ret = -EAGAIN;
		goto out;
	}

	status = doc_get_op_status(docg3);
	if (status & DOC_PLANES_STATUS_FAIL) {
		doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
			status);
		ret = -EIO;
	}

out:
	/* Always close the page access cycle, even on failure */
	doc_page_finish(docg3);
	return ret;
}
1146
/**
 * doc_erase_block - Erase a couple of blocks
 * @docg3: the device
 * @block0: the first block to erase (leftmost plane)
 * @block1: the second block to erase (rightmost plane)
 *
 * Erase both blocks, and return operation status
 *
 * Returns 0 if erase successful, -EIO if erase issue, -EAGAIN if chip not
 * ready for too long (see doc_write_erase_wait_status())
 */
static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
{
	int ret, sector;

	doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
	ret = doc_reset_seq(docg3);
	if (ret)
		return -EIO;

	doc_set_reliable_mode(docg3);
	doc_flash_sequence(docg3, DOC_SEQ_ERASE);

	/* Queue the block address of both planes (page part is 0) */
	sector = block0 << DOC_ADDR_BLOCK_SHIFT;
	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
	doc_setup_addr_sector(docg3, sector);
	sector = block1 << DOC_ADDR_BLOCK_SHIFT;
	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
	doc_setup_addr_sector(docg3, sector);
	doc_delay(docg3, 1);

	doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
	doc_delay(docg3, 2);

	if (is_prot_seq_error(docg3)) {
		doc_err("Erase blocks %d,%d error\n", block0, block1);
		return -EIO;
	}

	return doc_write_erase_wait_status(docg3);
}
1188
/**
 * doc_erase - Erase a portion of the chip
 * @mtd: the device
 * @info: the erase info
 *
 * Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
 * split into 2 pages of 512 bytes on 2 contiguous blocks.
 *
 * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
 * issue
 */
static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
{
	struct docg3 *docg3 = mtd->priv;
	uint64_t len;
	int block0, block1, page, ret, ofs = 0;

	doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);

	info->state = MTD_ERASE_PENDING;
	/* The end of the range must fall exactly on a block pair boundary */
	calc_block_sector(info->addr + info->len, &block0, &block1, &page,
			  &ofs, docg3->reliable);
	ret = -EINVAL;
	if (info->addr + info->len > mtd->size || page || ofs)
		goto reset_err;

	ret = 0;
	calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
			  docg3->reliable);
	mutex_lock(&docg3->cascade->lock);
	doc_set_device_id(docg3, docg3->device_id);
	doc_set_reliable_mode(docg3);
	/* Erase one block pair per mtd erasesize chunk */
	for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
		info->state = MTD_ERASING;
		ret = doc_erase_block(docg3, block0, block1);
		block0 += 2;
		block1 += 2;
	}
	mutex_unlock(&docg3->cascade->lock);

	if (ret)
		goto reset_err;

	info->state = MTD_ERASE_DONE;
	return 0;

reset_err:
	info->state = MTD_ERASE_FAILED;
	return ret;
}
1239
/**
 * doc_write_page - Write a single page to the chip
 * @docg3: the device
 * @to: the offset from first block and first page, in bytes, aligned on page
 *      size
 * @buf: buffer to get bytes from
 * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
 *       written)
 * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
 *           BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
 *           remaining ones are filled with hardware Hamming and BCH
 *           computations. Its value is not meaningful if oob == NULL.
 *
 * Write one full page (ie. 1 page split on two planes), of 512 bytes, with the
 * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
 * BCH generator if autoecc is not null.
 *
 * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
 */
static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
			  const u_char *oob, int autoecc)
{
	int block0, block1, page, ret, ofs = 0;
	u8 hwecc[DOC_ECC_BCH_SIZE], hamming;

	doc_dbg("doc_write_page(to=%lld)\n", to);
	calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);

	doc_set_device_id(docg3, docg3->device_id);
	ret = doc_reset_seq(docg3);
	if (ret)
		goto err;

	/* Program the flash address block and page */
	ret = doc_write_seek(docg3, block0, block1, page, ofs);
	if (ret)
		goto err;

	doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
	doc_delay(docg3, 2);
	doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);

	if (oob && autoecc) {
		/* Page info bytes, then HW-computed Hamming and BCH bytes */
		doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
		doc_delay(docg3, 2);
		oob += DOC_LAYOUT_OOB_UNUSED_OFS;

		hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
		doc_delay(docg3, 2);
		doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
					&hamming);
		doc_delay(docg3, 2);

		doc_get_bch_hw_ecc(docg3, hwecc);
		doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
		doc_delay(docg3, 2);

		/* Finally the caller-provided "unused" byte */
		doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
	}
	if (oob && !autoecc)
		doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);

	doc_delay(docg3, 2);
	doc_page_finish(docg3);
	doc_delay(docg3, 2);
	doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
	doc_delay(docg3, 2);

	/*
	 * The wait status will perform another doc_page_finish() call, but that
	 * seems to please the docg3, so leave it.
	 */
	ret = doc_write_erase_wait_status(docg3);
	return ret;
err:
	doc_read_page_finish(docg3);
	return ret;
}
1318
1319/**
1320 * doc_guess_autoecc - Guess autoecc mode from mbd_oob_ops
1321 * @ops: the oob operations
1322 *
1323 * Returns 0 or 1 if success, -EINVAL if invalid oob mode
1324 */
1325static int doc_guess_autoecc(struct mtd_oob_ops *ops)
1326{
1327 int autoecc;
1328
1329 switch (ops->mode) {
1330 case MTD_OPS_PLACE_OOB:
1331 case MTD_OPS_AUTO_OOB:
1332 autoecc = 1;
1333 break;
1334 case MTD_OPS_RAW:
1335 autoecc = 0;
1336 break;
1337 default:
1338 autoecc = -EINVAL;
1339 }
1340 return autoecc;
1341}
1342
/**
 * doc_fill_autooob - Fill a 16 bytes OOB from 8 non-ECC bytes
 * @dst: the target 16 bytes OOB buffer
 * @oobsrc: the source 8 bytes non-ECC OOB buffer
 *
 * Copies the leading page-info bytes to the head of @dst and places the last
 * source byte at the "unused" offset; the ECC bytes in between are left for
 * the hardware to fill in at write time (see doc_write_page()).
 */
static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
{
	memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
	dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
}
1354
1355/**
1356 * doc_backup_oob - Backup OOB into docg3 structure
1357 * @docg3: the device
1358 * @to: the page offset in the chip
1359 * @ops: the OOB size and buffer
1360 *
1361 * As the docg3 should write a page with its OOB in one pass, and some userland
1362 * applications do write_oob() to setup the OOB and then write(), store the OOB
1363 * into a temporary storage. This is very dangerous, as 2 concurrent
1364 * applications could store an OOB, and then write their pages (which will
1365 * result into one having its OOB corrupted).
1366 *
1367 * The only reliable way would be for userland to call doc_write_oob() with both
1368 * the page data _and_ the OOB area.
1369 *
1370 * Returns 0 if success, -EINVAL if ops content invalid
1371 */
1372static int doc_backup_oob(struct docg3 *docg3, loff_t to,
1373 struct mtd_oob_ops *ops)
1374{
1375 int ooblen = ops->ooblen, autoecc;
1376
1377 if (ooblen != DOC_LAYOUT_OOB_SIZE)
1378 return -EINVAL;
1379 autoecc = doc_guess_autoecc(ops);
1380 if (autoecc < 0)
1381 return autoecc;
1382
1383 docg3->oob_write_ofs = to;
1384 docg3->oob_autoecc = autoecc;
1385 if (ops->mode == MTD_OPS_AUTO_OOB) {
1386 doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
1387 ops->oobretlen = 8;
1388 } else {
1389 memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
1390 ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
1391 }
1392 return 0;
1393}
1394
/**
 * doc_write_oob - Write out of band bytes to flash
 * @mtd: the device
 * @ofs: the offset from first block and first page, in bytes, aligned on page
 *       size
 * @ops: the mtd oob structure
 *
 * Either write OOB data into a temporary buffer, for the subsequent write
 * page. The provided OOB should be 16 bytes long. If a data buffer is provided
 * as well, issue the page write.
 * Or provide data without OOB, and then a all zeroed OOB will be used (ECC will
 * still be filled in if asked for).
 *
 * Returns 0 if successful, -EINVAL when the lengths are not page/OOB-size
 * multiples, data and OOB describe different page counts, the write runs past
 * the device end, or the oob mode is unknown.
 */
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
			 struct mtd_oob_ops *ops)
{
	struct docg3 *docg3 = mtd->priv;
	int ret, autoecc, oobdelta;
	u8 *oobbuf = ops->oobbuf;
	u8 *buf = ops->datbuf;
	size_t len, ooblen;
	u8 oob[DOC_LAYOUT_OOB_SIZE];

	if (buf)
		len = ops->len;
	else
		len = 0;
	if (oobbuf)
		ooblen = ops->ooblen;
	else
		ooblen = 0;

	if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
		oobbuf += ops->ooboffs;

	doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
		ofs, ops->mode, buf, len, oobbuf, ooblen);
	/* Per-page OOB stride: raw/place modes consume the full OOB area,
	 * auto mode only the ECC-free bytes. */
	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		oobdelta = mtd->oobsize;
		break;
	case MTD_OPS_AUTO_OOB:
		oobdelta = mtd->ecclayout->oobavail;
		break;
	default:
		return -EINVAL;
	}
	/* Data and OOB must describe the same whole number of pages */
	if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
	    (ofs % DOC_LAYOUT_PAGE_SIZE))
		return -EINVAL;
	if (len && ooblen &&
	    (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
		return -EINVAL;
	if (ofs + len > mtd->size)
		return -EINVAL;

	ops->oobretlen = 0;
	ops->retlen = 0;
	ret = 0;
	if (len == 0 && ooblen == 0)
		return -EINVAL;
	if (len == 0 && ooblen > 0)
		/* OOB only: stash it for a later page write */
		return doc_backup_oob(docg3, ofs, ops);

	autoecc = doc_guess_autoecc(ops);
	if (autoecc < 0)
		return autoecc;

	mutex_lock(&docg3->cascade->lock);
	while (!ret && len > 0) {
		memset(oob, 0, sizeof(oob));
		/* An OOB previously backed up for this offset wins over the
		 * caller-provided one */
		if (ofs == docg3->oob_write_ofs)
			memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
		else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
			doc_fill_autooob(oob, oobbuf);
		else if (ooblen > 0)
			memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
		ret = doc_write_page(docg3, ofs, buf, oob, autoecc);

		ofs += DOC_LAYOUT_PAGE_SIZE;
		len -= DOC_LAYOUT_PAGE_SIZE;
		buf += DOC_LAYOUT_PAGE_SIZE;
		if (ooblen) {
			oobbuf += oobdelta;
			ooblen -= oobdelta;
			ops->oobretlen += oobdelta;
		}
		ops->retlen += DOC_LAYOUT_PAGE_SIZE;
	}

	doc_set_device_id(docg3, 0);
	mutex_unlock(&docg3->cascade->lock);
	return ret;
}
1492
1493/**
1494 * doc_write - Write a buffer to the chip
1495 * @mtd: the device
1496 * @to: the offset from first block and first page, in bytes, aligned on page
1497 * size
1498 * @len: the number of bytes to write (must be a full page size, ie. 512)
1499 * @retlen: the number of bytes actually written (0 or 512)
1500 * @buf: the buffer to get bytes from
1501 *
1502 * Writes data to the chip.
1503 *
1504 * Returns 0 if write successful, -EIO if write error
1505 */
1506static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
1507 size_t *retlen, const u_char *buf)
1508{
1509 struct docg3 *docg3 = mtd->priv;
1510 int ret;
1511 struct mtd_oob_ops ops;
1512
1513 doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
1514 ops.datbuf = (char *)buf;
1515 ops.len = len;
1516 ops.mode = MTD_OPS_PLACE_OOB;
1517 ops.oobbuf = NULL;
1518 ops.ooblen = 0;
1519 ops.ooboffs = 0;
1520
1521 ret = doc_write_oob(mtd, to, &ops);
1522 *retlen = ops.retlen;
1523 return ret;
1524}
1525
1526static struct docg3 *sysfs_dev2docg3(struct device *dev,
1527 struct device_attribute *attr)
1528{
1529 int floor;
1530 struct platform_device *pdev = to_platform_device(dev);
1531 struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
1532
1533 floor = attr->attr.name[1] - '0';
1534 if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
1535 return NULL;
1536 else
1537 return docg3_floors[floor]->priv;
1538}
1539
1540static ssize_t dps0_is_key_locked(struct device *dev,
1541 struct device_attribute *attr, char *buf)
1542{
1543 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1544 int dps0;
1545
1546 mutex_lock(&docg3->cascade->lock);
1547 doc_set_device_id(docg3, docg3->device_id);
1548 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
1549 doc_set_device_id(docg3, 0);
1550 mutex_unlock(&docg3->cascade->lock);
1551
1552 return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
1553}
1554
1555static ssize_t dps1_is_key_locked(struct device *dev,
1556 struct device_attribute *attr, char *buf)
1557{
1558 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1559 int dps1;
1560
1561 mutex_lock(&docg3->cascade->lock);
1562 doc_set_device_id(docg3, docg3->device_id);
1563 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
1564 doc_set_device_id(docg3, 0);
1565 mutex_unlock(&docg3->cascade->lock);
1566
1567 return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
1568}
1569
1570static ssize_t dps0_insert_key(struct device *dev,
1571 struct device_attribute *attr,
1572 const char *buf, size_t count)
1573{
1574 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1575 int i;
1576
1577 if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
1578 return -EINVAL;
1579
1580 mutex_lock(&docg3->cascade->lock);
1581 doc_set_device_id(docg3, docg3->device_id);
1582 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
1583 doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
1584 doc_set_device_id(docg3, 0);
1585 mutex_unlock(&docg3->cascade->lock);
1586 return count;
1587}
1588
1589static ssize_t dps1_insert_key(struct device *dev,
1590 struct device_attribute *attr,
1591 const char *buf, size_t count)
1592{
1593 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1594 int i;
1595
1596 if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
1597 return -EINVAL;
1598
1599 mutex_lock(&docg3->cascade->lock);
1600 doc_set_device_id(docg3, docg3->device_id);
1601 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
1602 doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
1603 doc_set_device_id(docg3, 0);
1604 mutex_unlock(&docg3->cascade->lock);
1605 return count;
1606}
1607
1608#define FLOOR_SYSFS(id) { \
1609 __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
1610 __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
1611 __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
1612 __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
1613}
1614
1615static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
1616 FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
1617};
1618
/**
 * doc_register_sysfs - Create the DPS key/lock sysfs files
 * @pdev: the platform device
 * @cascade: the cascade of docg3 floors
 *
 * Creates the 4 attribute files (see doc_sys_attrs) for every populated
 * floor. On the first creation failure, the files created so far are removed
 * again and the error is returned.
 *
 * Returns 0 on success, the device_create_file() error otherwise.
 */
static int doc_register_sysfs(struct platform_device *pdev,
			      struct docg3_cascade *cascade)
{
	int ret = 0, floor, i = 0;
	struct device *dev = &pdev->dev;

	for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
		     cascade->floors[floor]; floor++)
		for (i = 0; !ret && i < 4; i++)
			ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
	if (!ret)
		return 0;
	/*
	 * Unwind in reverse. NOTE(review): on failure both counters sit one
	 * past the failing attribute (the for-loops increment before the
	 * condition stops them), so this also calls device_remove_file() on
	 * files that were never created — this appears to rely on removal of
	 * an absent sysfs file being harmless; verify against the sysfs core.
	 */
	do {
		while (--i >= 0)
			device_remove_file(dev, &doc_sys_attrs[floor][i]);
		i = 4;
	} while (--floor >= 0);
	return ret;
}
1638
1639static void doc_unregister_sysfs(struct platform_device *pdev,
1640 struct docg3_cascade *cascade)
1641{
1642 struct device *dev = &pdev->dev;
1643 int floor, i;
1644
1645 for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
1646 floor++)
1647 for (i = 0; i < 4; i++)
1648 device_remove_file(dev, &doc_sys_attrs[floor][i]);
1649}
1650
1651/*
1652 * Debug sysfs entries
1653 */
1654static int dbg_flashctrl_show(struct seq_file *s, void *p)
1655{
1656 struct docg3 *docg3 = (struct docg3 *)s->private;
1657
1658 int pos = 0;
1659 u8 fctrl;
1660
1661 mutex_lock(&docg3->cascade->lock);
1662 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
1663 mutex_unlock(&docg3->cascade->lock);
1664
1665 pos += seq_printf(s,
1666 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
1667 fctrl,
1668 fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
1669 fctrl & DOC_CTRL_CE ? "active" : "inactive",
1670 fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
1671 fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
1672 fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
1673 return pos;
1674}
1675DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
1676
1677static int dbg_asicmode_show(struct seq_file *s, void *p)
1678{
1679 struct docg3 *docg3 = (struct docg3 *)s->private;
1680
1681 int pos = 0, pctrl, mode;
1682
1683 mutex_lock(&docg3->cascade->lock);
1684 pctrl = doc_register_readb(docg3, DOC_ASICMODE);
1685 mode = pctrl & 0x03;
1686 mutex_unlock(&docg3->cascade->lock);
1687
1688 pos += seq_printf(s,
1689 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
1690 pctrl,
1691 pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
1692 pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
1693 pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
1694 pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
1695 pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
1696 mode >> 1, mode & 0x1);
1697
1698 switch (mode) {
1699 case DOC_ASICMODE_RESET:
1700 pos += seq_printf(s, "reset");
1701 break;
1702 case DOC_ASICMODE_NORMAL:
1703 pos += seq_printf(s, "normal");
1704 break;
1705 case DOC_ASICMODE_POWERDOWN:
1706 pos += seq_printf(s, "powerdown");
1707 break;
1708 }
1709 pos += seq_printf(s, ")\n");
1710 return pos;
1711}
1712DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
1713
1714static int dbg_device_id_show(struct seq_file *s, void *p)
1715{
1716 struct docg3 *docg3 = (struct docg3 *)s->private;
1717 int pos = 0;
1718 int id;
1719
1720 mutex_lock(&docg3->cascade->lock);
1721 id = doc_register_readb(docg3, DOC_DEVICESELECT);
1722 mutex_unlock(&docg3->cascade->lock);
1723
1724 pos += seq_printf(s, "DeviceId = %d\n", id);
1725 return pos;
1726}
1727DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
1728
1729static int dbg_protection_show(struct seq_file *s, void *p)
1730{
1731 struct docg3 *docg3 = (struct docg3 *)s->private;
1732 int pos = 0;
1733 int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
1734
1735 mutex_lock(&docg3->cascade->lock);
1736 protect = doc_register_readb(docg3, DOC_PROTECTION);
1737 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
1738 dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
1739 dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
1740 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
1741 dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
1742 dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
1743 mutex_unlock(&docg3->cascade->lock);
1744
1745 pos += seq_printf(s, "Protection = 0x%02x (",
1746 protect);
1747 if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
1748 pos += seq_printf(s, "FOUNDRY_OTP_LOCK,");
1749 if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
1750 pos += seq_printf(s, "CUSTOMER_OTP_LOCK,");
1751 if (protect & DOC_PROTECT_LOCK_INPUT)
1752 pos += seq_printf(s, "LOCK_INPUT,");
1753 if (protect & DOC_PROTECT_STICKY_LOCK)
1754 pos += seq_printf(s, "STICKY_LOCK,");
1755 if (protect & DOC_PROTECT_PROTECTION_ENABLED)
1756 pos += seq_printf(s, "PROTECTION ON,");
1757 if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
1758 pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,");
1759 if (protect & DOC_PROTECT_PROTECTION_ERROR)
1760 pos += seq_printf(s, "PROTECT_ERR,");
1761 else
1762 pos += seq_printf(s, "NO_PROTECT_ERR");
1763 pos += seq_printf(s, ")\n");
1764
1765 pos += seq_printf(s, "DPS0 = 0x%02x : "
1766 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
1767 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
1768 dps0, dps0_low, dps0_high,
1769 !!(dps0 & DOC_DPS_OTP_PROTECTED),
1770 !!(dps0 & DOC_DPS_READ_PROTECTED),
1771 !!(dps0 & DOC_DPS_WRITE_PROTECTED),
1772 !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
1773 !!(dps0 & DOC_DPS_KEY_OK));
1774 pos += seq_printf(s, "DPS1 = 0x%02x : "
1775 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
1776 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
1777 dps1, dps1_low, dps1_high,
1778 !!(dps1 & DOC_DPS_OTP_PROTECTED),
1779 !!(dps1 & DOC_DPS_READ_PROTECTED),
1780 !!(dps1 & DOC_DPS_WRITE_PROTECTED),
1781 !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
1782 !!(dps1 & DOC_DPS_KEY_OK));
1783 return pos;
1784}
1785DEBUGFS_RO_ATTR(protection, dbg_protection_show);
1786
1787static int __init doc_dbg_register(struct docg3 *docg3)
1788{
1789 struct dentry *root, *entry;
1790
1791 root = debugfs_create_dir("docg3", NULL);
1792 if (!root)
1793 return -ENOMEM;
1794
1795 entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3,
1796 &flashcontrol_fops);
1797 if (entry)
1798 entry = debugfs_create_file("asic_mode", S_IRUSR, root,
1799 docg3, &asic_mode_fops);
1800 if (entry)
1801 entry = debugfs_create_file("device_id", S_IRUSR, root,
1802 docg3, &device_id_fops);
1803 if (entry)
1804 entry = debugfs_create_file("protection", S_IRUSR, root,
1805 docg3, &protection_fops);
1806 if (entry) {
1807 docg3->debugfs_root = root;
1808 return 0;
1809 } else {
1810 debugfs_remove_recursive(root);
1811 return -ENOMEM;
1812 }
1813}
1814
/**
 * doc_dbg_unregister - Tear down the docg3 debugfs entries
 * @docg3: the device
 *
 * Removes the whole debugfs directory created by doc_dbg_register().
 */
static void __exit doc_dbg_unregister(struct docg3 *docg3)
{
	debugfs_remove_recursive(docg3->debugfs_root);
}
1819
1820/**
1821 * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
1822 * @chip_id: The chip ID of the supported chip
1823 * @mtd: The structure to fill
1824 */
1825static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1826{
1827 struct docg3 *docg3 = mtd->priv;
1828 int cfg;
1829
1830 cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
1831 docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
1832 docg3->reliable = reliable_mode;
1833
1834 switch (chip_id) {
1835 case DOC_CHIPID_G3:
1836 mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
1837 docg3->device_id);
1838 docg3->max_block = 2047;
1839 break;
1840 }
1841 mtd->type = MTD_NANDFLASH;
1842 mtd->flags = MTD_CAP_NANDFLASH;
1843 mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
1844 if (docg3->reliable == 2)
1845 mtd->size /= 2;
1846 mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
1847 if (docg3->reliable == 2)
1848 mtd->erasesize /= 2;
1849 mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
1850 mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
1851 mtd->owner = THIS_MODULE;
1852 mtd->_erase = doc_erase;
1853 mtd->_read = doc_read;
1854 mtd->_write = doc_write;
1855 mtd->_read_oob = doc_read_oob;
1856 mtd->_write_oob = doc_write_oob;
1857 mtd->_block_isbad = doc_block_isbad;
1858 mtd->ecclayout = &docg3_oobinfo;
1859 mtd->ecc_strength = DOC_ECC_BCH_T;
1860}
1861
1862/**
1863 * doc_probe_device - Check if a device is available
1864 * @base: the io space where the device is probed
1865 * @floor: the floor of the probed device
1866 * @dev: the device
1867 * @cascade: the cascade of chips this devices will belong to
1868 *
1869 * Checks whether a device at the specified IO range, and floor is available.
1870 *
1871 * Returns a mtd_info struct if there is a device, ENODEV if none found, ENOMEM
1872 * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
1873 * launched.
1874 */
1875static struct mtd_info * __init
1876doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
1877{
1878 int ret, bbt_nbpages;
1879 u16 chip_id, chip_id_inv;
1880 struct docg3 *docg3;
1881 struct mtd_info *mtd;
1882
1883 ret = -ENOMEM;
1884 docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
1885 if (!docg3)
1886 goto nomem1;
1887 mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
1888 if (!mtd)
1889 goto nomem2;
1890 mtd->priv = docg3;
1891 bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
1892 8 * DOC_LAYOUT_PAGE_SIZE);
1893 docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
1894 if (!docg3->bbt)
1895 goto nomem3;
1896
1897 docg3->dev = dev;
1898 docg3->device_id = floor;
1899 docg3->cascade = cascade;
1900 doc_set_device_id(docg3, docg3->device_id);
1901 if (!floor)
1902 doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
1903 doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
1904
1905 chip_id = doc_register_readw(docg3, DOC_CHIPID);
1906 chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
1907
1908 ret = 0;
1909 if (chip_id != (u16)(~chip_id_inv)) {
1910 goto nomem3;
1911 }
1912
1913 switch (chip_id) {
1914 case DOC_CHIPID_G3:
1915 doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
1916 docg3->cascade->base, floor);
1917 break;
1918 default:
1919 doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
1920 goto nomem3;
1921 }
1922
1923 doc_set_driver_info(chip_id, mtd);
1924
1925 doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
1926 doc_reload_bbt(docg3);
1927 return mtd;
1928
1929nomem3:
1930 kfree(mtd);
1931nomem2:
1932 kfree(docg3);
1933nomem1:
1934 return ERR_PTR(ret);
1935}
1936
1937/**
1938 * doc_release_device - Release a docg3 floor
1939 * @mtd: the device
1940 */
1941static void doc_release_device(struct mtd_info *mtd)
1942{
1943 struct docg3 *docg3 = mtd->priv;
1944
1945 mtd_device_unregister(mtd);
1946 kfree(docg3->bbt);
1947 kfree(docg3);
1948 kfree(mtd->name);
1949 kfree(mtd);
1950}
1951
1952/**
1953 * docg3_resume - Awakens docg3 floor
1954 * @pdev: platfrom device
1955 *
1956 * Returns 0 (always successful)
1957 */
1958static int docg3_resume(struct platform_device *pdev)
1959{
1960 int i;
1961 struct docg3_cascade *cascade;
1962 struct mtd_info **docg3_floors, *mtd;
1963 struct docg3 *docg3;
1964
1965 cascade = platform_get_drvdata(pdev);
1966 docg3_floors = cascade->floors;
1967 mtd = docg3_floors[0];
1968 docg3 = mtd->priv;
1969
1970 doc_dbg("docg3_resume()\n");
1971 for (i = 0; i < 12; i++)
1972 doc_readb(docg3, DOC_IOSPACE_IPL);
1973 return 0;
1974}
1975
/**
 * docg3_suspend - Put in low power mode the docg3 floor
 * @pdev: platform device
 * @state: power state
 *
 * Shuts off most of docg3 circuitery to lower power consumption.
 *
 * Returns 0 if suspend succeeded, -EIO if chip refused suspend
 */
static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
{
	int floor, i;
	struct docg3_cascade *cascade;
	struct mtd_info **docg3_floors, *mtd;
	struct docg3 *docg3;
	u8 ctrl, pwr_down;

	cascade = platform_get_drvdata(pdev);
	docg3_floors = cascade->floors;
	for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
		mtd = docg3_floors[floor];
		if (!mtd)
			continue;
		docg3 = mtd->priv;

		/* Deselect the floor: clear CE# (and the violation flag) */
		doc_writeb(docg3, floor, DOC_DEVICESELECT);
		ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
		ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
		doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);

		/* Poll up to 10 times (3-4ms apart) for powerdown-ready */
		for (i = 0; i < 10; i++) {
			usleep_range(3000, 4000);
			pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
			if (pwr_down & DOC_POWERDOWN_READY)
				break;
		}
		if (pwr_down & DOC_POWERDOWN_READY) {
			doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
				floor);
		} else {
			doc_err("docg3_suspend(): floor %d powerdown failed\n",
				floor);
			return -EIO;
		}
	}

	/* Finally switch the whole cascade (floor 0 ASIC) to powerdown */
	mtd = docg3_floors[0];
	docg3 = mtd->priv;
	doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
	return 0;
}
2027
2028/**
2029 * doc_probe - Probe the IO space for a DiskOnChip G3 chip
2030 * @pdev: platform device
2031 *
2032 * Probes for a G3 chip at the specified IO space in the platform data
2033 * ressources. The floor 0 must be available.
2034 *
2035 * Returns 0 on success, -ENOMEM, -ENXIO on error
2036 */
2037static int __init docg3_probe(struct platform_device *pdev)
2038{
2039 struct device *dev = &pdev->dev;
2040 struct mtd_info *mtd;
2041 struct resource *ress;
2042 void __iomem *base;
2043 int ret, floor, found = 0;
2044 struct docg3_cascade *cascade;
2045
2046 ret = -ENXIO;
2047 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2048 if (!ress) {
2049 dev_err(dev, "No I/O memory resource defined\n");
2050 goto noress;
2051 }
2052 base = ioremap(ress->start, DOC_IOSPACE_SIZE);
2053
2054 ret = -ENOMEM;
2055 cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
2056 GFP_KERNEL);
2057 if (!cascade)
2058 goto nomem1;
2059 cascade->base = base;
2060 mutex_init(&cascade->lock);
2061 cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
2062 DOC_ECC_BCH_PRIMPOLY);
2063 if (!cascade->bch)
2064 goto nomem2;
2065
2066 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
2067 mtd = doc_probe_device(cascade, floor, dev);
2068 if (IS_ERR(mtd)) {
2069 ret = PTR_ERR(mtd);
2070 goto err_probe;
2071 }
2072 if (!mtd) {
2073 if (floor == 0)
2074 goto notfound;
2075 else
2076 continue;
2077 }
2078 cascade->floors[floor] = mtd;
2079 ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
2080 0);
2081 if (ret)
2082 goto err_probe;
2083 found++;
2084 }
2085
2086 ret = doc_register_sysfs(pdev, cascade);
2087 if (ret)
2088 goto err_probe;
2089 if (!found)
2090 goto notfound;
2091
2092 platform_set_drvdata(pdev, cascade);
2093 doc_dbg_register(cascade->floors[0]->priv);
2094 return 0;
2095
2096notfound:
2097 ret = -ENODEV;
2098 dev_info(dev, "No supported DiskOnChip found\n");
2099err_probe:
2100 kfree(cascade->bch);
2101 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
2102 if (cascade->floors[floor])
2103 doc_release_device(cascade->floors[floor]);
2104nomem2:
2105 kfree(cascade);
2106nomem1:
2107 iounmap(base);
2108noress:
2109 return ret;
2110}
2111
2112/**
2113 * docg3_release - Release the driver
2114 * @pdev: the platform device
2115 *
2116 * Returns 0
2117 */
2118static int __exit docg3_release(struct platform_device *pdev)
2119{
2120 struct docg3_cascade *cascade = platform_get_drvdata(pdev);
2121 struct docg3 *docg3 = cascade->floors[0]->priv;
2122 void __iomem *base = cascade->base;
2123 int floor;
2124
2125 doc_unregister_sysfs(pdev, cascade);
2126 doc_dbg_unregister(docg3);
2127 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
2128 if (cascade->floors[floor])
2129 doc_release_device(cascade->floors[floor]);
2130
2131 free_bch(docg3->cascade->bch);
2132 kfree(cascade);
2133 iounmap(base);
2134 return 0;
2135}
2136
/*
 * Platform driver glue. There is deliberately no .probe member: probing is
 * done once via platform_driver_probe() in docg3_init(), which allows
 * docg3_probe() to live in __init text.
 */
static struct platform_driver g3_driver = {
	.driver = {
		.name = "docg3",
		.owner = THIS_MODULE,
	},
	.suspend = docg3_suspend,
	.resume = docg3_resume,
	.remove = __exit_p(docg3_release),
};
2146
/* Module entry point: bind docg3_probe() to any "docg3" platform device. */
static int __init docg3_init(void)
{
	return platform_driver_probe(&g3_driver, docg3_probe);
}
module_init(docg3_init);
2152
2153
/* Module exit point: unregister the platform driver. */
static void __exit docg3_exit(void)
{
	platform_driver_unregister(&g3_driver);
}
module_exit(docg3_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
deleted file mode 100644
index 19fb93f96a3..00000000000
--- a/drivers/mtd/devices/docg3.h
+++ /dev/null
@@ -1,370 +0,0 @@
1/*
2 * Handles the M-Systems DiskOnChip G3 chip
3 *
4 * Copyright (C) 2011 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#ifndef _MTD_DOCG3_H
23#define _MTD_DOCG3_H
24
25#include <linux/mtd/mtd.h>
26
/*
 * Flash memory areas :
 *   - 0x0000 .. 0x07ff : IPL
 *   - 0x0800 .. 0x0fff : Data area
 *   - 0x1000 .. 0x17ff : Registers
 *   - 0x1800 .. 0x1fff : Unknown
 */
#define DOC_IOSPACE_IPL			0x0000
#define DOC_IOSPACE_DATA		0x0800
#define DOC_IOSPACE_SIZE		0x2000

/*
 * DOC G3 layout and addressing scheme
 *   A page address for the block "b", plane "P" and page "p":
 *   address = [bbbb bPpp pppp]
 */

#define DOC_ADDR_PAGE_MASK		0x3f
#define DOC_ADDR_BLOCK_SHIFT		6
#define DOC_LAYOUT_NBPLANES		2
#define DOC_LAYOUT_PAGES_PER_BLOCK	64
#define DOC_LAYOUT_PAGE_SIZE		512
#define DOC_LAYOUT_OOB_SIZE		16
#define DOC_LAYOUT_WEAR_SIZE		8
#define DOC_LAYOUT_PAGE_OOB_SIZE \
	(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
#define DOC_LAYOUT_WEAR_OFFSET		(DOC_LAYOUT_PAGE_OOB_SIZE * 2)
#define DOC_LAYOUT_BLOCK_SIZE \
	(DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)

/*
 * ECC related constants
 * BCH(m=14, t=4) over the page data plus the page-info and hamming OOB
 * bytes; the code needs DOC_LAYOUT_OOB_BCH_SZ (7) redundancy bytes.
 */
#define DOC_ECC_BCH_M			14
#define DOC_ECC_BCH_T			4
#define DOC_ECC_BCH_PRIMPOLY		0x4443
#define DOC_ECC_BCH_SIZE		7
#define DOC_ECC_BCH_COVERED_BYTES \
	(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
	 DOC_LAYOUT_OOB_HAMMING_SZ)
#define DOC_ECC_BCH_TOTAL_BYTES \
	(DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)

/*
 * Blocks distribution
 */
#define DOC_LAYOUT_BLOCK_BBT		0
#define DOC_LAYOUT_BLOCK_OTP		0
#define DOC_LAYOUT_BLOCK_FIRST_DATA	6

#define DOC_LAYOUT_PAGE_BBT		4

/*
 * Extra page OOB (16 bytes wide) layout:
 *   bytes 0..6  : page info
 *   byte  7     : hamming parity
 *   bytes 8..14 : BCH ECC
 *   byte  15    : unused
 */
#define DOC_LAYOUT_OOB_PAGEINFO_OFS	0
#define DOC_LAYOUT_OOB_HAMMING_OFS	7
#define DOC_LAYOUT_OOB_BCH_OFS		8
#define DOC_LAYOUT_OOB_UNUSED_OFS	15
#define DOC_LAYOUT_OOB_PAGEINFO_SZ	7
#define DOC_LAYOUT_OOB_HAMMING_SZ	1
#define DOC_LAYOUT_OOB_BCH_SZ		7
#define DOC_LAYOUT_OOB_UNUSED_SZ	1


#define DOC_CHIPID_G3			0x200
#define DOC_ERASE_MARK			0xaa
#define DOC_MAX_NBFLOORS		4
/*
 * Flash registers
 * (offsets within the chip I/O window; they fall in the 0x1000..0x17ff
 * register area, see the memory map above)
 */
#define DOC_CHIPID			0x1000
#define DOC_TEST			0x1004
#define DOC_BUSLOCK			0x1006
#define DOC_ENDIANCONTROL		0x1008
#define DOC_DEVICESELECT		0x100a
#define DOC_ASICMODE			0x100c
#define DOC_CONFIGURATION		0x100e
#define DOC_INTERRUPTCONTROL		0x1010
#define DOC_READADDRESS			0x101a
#define DOC_DATAEND			0x101e
#define DOC_INTERRUPTSTATUS		0x1020

#define DOC_FLASHSEQUENCE		0x1032
#define DOC_FLASHCOMMAND		0x1034
#define DOC_FLASHADDRESS		0x1036
#define DOC_FLASHCONTROL		0x1038
#define DOC_NOP				0x103e

#define DOC_ECCCONF0			0x1040
#define DOC_ECCCONF1			0x1042
#define DOC_ECCPRESET			0x1044
#define DOC_HAMMINGPARITY		0x1046
#define DOC_BCH_HW_ECC(idx)		(0x1048 + idx)

#define DOC_PROTECTION			0x1056
#define DOC_DPS0_KEY			0x105c
#define DOC_DPS1_KEY			0x105e
#define DOC_DPS0_ADDRLOW		0x1060
#define DOC_DPS0_ADDRHIGH		0x1062
#define DOC_DPS1_ADDRLOW		0x1064
#define DOC_DPS1_ADDRHIGH		0x1066
#define DOC_DPS0_STATUS			0x106c
#define DOC_DPS1_STATUS			0x106e

#define DOC_ASICMODECONFIRM		0x1072
#define DOC_CHIPID_INV			0x1074
#define DOC_POWERMODE			0x107c
135
/*
 * Flash sequences
 * A sequence is preset before one or more commands are input to the chip.
 */
#define DOC_SEQ_RESET			0x00
#define DOC_SEQ_PAGE_SIZE_532		0x03
#define DOC_SEQ_SET_FASTMODE		0x05
#define DOC_SEQ_SET_RELIABLEMODE	0x09
#define DOC_SEQ_READ			0x12
#define DOC_SEQ_SET_PLANE1		0x0e
#define DOC_SEQ_SET_PLANE2		0x10
#define DOC_SEQ_PAGE_SETUP		0x1d
#define DOC_SEQ_ERASE			0x27
#define DOC_SEQ_PLANES_STATUS		0x31

/*
 * Flash commands
 */
#define DOC_CMD_READ_PLANE1		0x00
#define DOC_CMD_SET_ADDR_READ		0x05
#define DOC_CMD_READ_ALL_PLANES		0x30
#define DOC_CMD_READ_PLANE2		0x50
#define DOC_CMD_READ_FLASH		0xe0
#define DOC_CMD_PAGE_SIZE_532		0x3c

#define DOC_CMD_PROG_BLOCK_ADDR		0x60
#define DOC_CMD_PROG_CYCLE1		0x80
#define DOC_CMD_PROG_CYCLE2		0x10
#define DOC_CMD_PROG_CYCLE3		0x11
#define DOC_CMD_ERASECYCLE2		0xd0
#define DOC_CMD_READ_STATUS		0x70
#define DOC_CMD_PLANES_STATUS		0x71

#define DOC_CMD_RELIABLE_MODE		0x22
#define DOC_CMD_FAST_MODE		0xa2

#define DOC_CMD_RESET			0xff

/*
 * Flash register : DOC_FLASHCONTROL
 */
#define DOC_CTRL_VIOLATION		0x20
#define DOC_CTRL_CE			0x10
#define DOC_CTRL_UNKNOWN_BITS		0x08
#define DOC_CTRL_PROTECTION_ERROR	0x04
#define DOC_CTRL_SEQUENCE_ERROR		0x02
#define DOC_CTRL_FLASHREADY		0x01

/*
 * Flash register : DOC_ASICMODE
 * The low two bits select the mode (reset/normal/powerdown), the upper
 * bits are independent control flags.
 */
#define DOC_ASICMODE_RESET		0x00
#define DOC_ASICMODE_NORMAL		0x01
#define DOC_ASICMODE_POWERDOWN		0x02
#define DOC_ASICMODE_MDWREN		0x04
#define DOC_ASICMODE_BDETCT_RESET	0x08
#define DOC_ASICMODE_RSTIN_RESET	0x10
#define DOC_ASICMODE_RAM_WE		0x20

/*
 * Flash register : DOC_ECCCONF0
 */
#define DOC_ECCCONF0_WRITE_MODE		0x0000
#define DOC_ECCCONF0_READ_MODE		0x8000
#define DOC_ECCCONF0_AUTO_ECC_ENABLE	0x4000
#define DOC_ECCCONF0_HAMMING_ENABLE	0x1000
#define DOC_ECCCONF0_BCH_ENABLE		0x0800
#define DOC_ECCCONF0_DATA_BYTES_MASK	0x07ff

/*
 * Flash register : DOC_ECCCONF1
 * NOTE(review): the UNKOWN1/UNKOWN3 bits are presumably reverse-engineered
 * and their function is not documented anywhere in this driver.
 */
#define DOC_ECCCONF1_BCH_SYNDROM_ERR	0x80
#define DOC_ECCCONF1_UNKOWN1		0x40
#define DOC_ECCCONF1_PAGE_IS_WRITTEN	0x20
#define DOC_ECCCONF1_UNKOWN3		0x10
#define DOC_ECCCONF1_HAMMING_BITS_MASK	0x0f

/*
 * Flash register : DOC_PROTECTION
 */
#define DOC_PROTECT_FOUNDRY_OTP_LOCK	0x01
#define DOC_PROTECT_CUSTOMER_OTP_LOCK	0x02
#define DOC_PROTECT_LOCK_INPUT		0x04
#define DOC_PROTECT_STICKY_LOCK		0x08
#define DOC_PROTECT_PROTECTION_ENABLED	0x10
#define DOC_PROTECT_IPL_DOWNLOAD_LOCK	0x20
#define DOC_PROTECT_PROTECTION_ERROR	0x80

/*
 * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
 */
#define DOC_DPS_OTP_PROTECTED		0x01
#define DOC_DPS_READ_PROTECTED		0x02
#define DOC_DPS_WRITE_PROTECTED		0x04
#define DOC_DPS_HW_LOCK_ENABLED		0x08
#define DOC_DPS_KEY_OK			0x80

/*
 * Flash register : DOC_CONFIGURATION
 */
#define DOC_CONF_IF_CFG			0x80
#define DOC_CONF_MAX_ID_MASK		0x30
#define DOC_CONF_VCCQ_3V		0x01

/*
 * Flash register : DOC_READADDRESS
 */
#define DOC_READADDR_INC		0x8000
#define DOC_READADDR_ONE_BYTE		0x4000
#define DOC_READADDR_ADDR_MASK		0x1fff

/*
 * Flash register : DOC_POWERMODE
 */
#define DOC_POWERDOWN_READY		0x80

/*
 * Status of erase and write operation
 */
#define DOC_PLANES_STATUS_FAIL		0x01
#define DOC_PLANES_STATUS_PLANE0_KO	0x02
#define DOC_PLANES_STATUS_PLANE1_KO	0x04

/*
 * DPS key management
 *
 * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
 * across block boundaries, and define whether these blocks can be read or
 * written.
 * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
 * page 0 of blocks (4,5) for DPS1.
 */
#define DOC_LAYOUT_DPS_KEY_LENGTH	8
270
271/**
272 * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
273 * @floors: floors (ie. one physical docg3 chip is one floor)
274 * @base: IO space to access all chips in the cascade
275 * @bch: the BCH correcting control structure
276 * @lock: lock to protect docg3 IO space from concurrent accesses
277 */
278struct docg3_cascade {
279 struct mtd_info *floors[DOC_MAX_NBFLOORS];
280 void __iomem *base;
281 struct bch_control *bch;
282 struct mutex lock;
283};
284
285/**
286 * struct docg3 - DiskOnChip driver private data
287 * @dev: the device currently under control
288 * @cascade: the cascade this device belongs to
289 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
290 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
291
292 * @reliable: if 0, docg3 in normal mode, if 1 docg3 in fast mode, if 2 in
293 * reliable mode
294 * Fast mode implies more errors than normal mode.
295 * Reliable mode implies that page 2*n and 2*n+1 are clones.
296 * @bbt: bad block table cache
297 * @oob_write_ofs: offset of the MTD where this OOB should belong (ie. in next
298 * page_write)
299 * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC
300 * if 0, use all the 16 bytes.
301 * @oob_write_buf: prepared OOB for next page_write
302 * @debugfs_root: debugfs root node
303 */
304struct docg3 {
305 struct device *dev;
306 struct docg3_cascade *cascade;
307 unsigned int device_id:4;
308 unsigned int if_cfg:1;
309 unsigned int reliable:2;
310 int max_block;
311 u8 *bbt;
312 loff_t oob_write_ofs;
313 int oob_autoecc;
314 u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
315 struct dentry *debugfs_root;
316};
317
318#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
319#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
320#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
321#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
322
323#define DEBUGFS_RO_ATTR(name, show_fct) \
324 static int name##_open(struct inode *inode, struct file *file) \
325 { return single_open(file, show_fct, inode->i_private); } \
326 static const struct file_operations name##_fops = { \
327 .owner = THIS_MODULE, \
328 .open = name##_open, \
329 .llseek = seq_lseek, \
330 .read = seq_read, \
331 .release = single_release \
332 };
333#endif
334
335/*
336 * Trace events part
337 */
338#undef TRACE_SYSTEM
339#define TRACE_SYSTEM docg3
340
341#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
342#define _MTD_DOCG3_TRACE
343
344#include <linux/tracepoint.h>
345
346TRACE_EVENT(docg3_io,
347 TP_PROTO(int op, int width, u16 reg, int val),
348 TP_ARGS(op, width, reg, val),
349 TP_STRUCT__entry(
350 __field(int, op)
351 __field(unsigned char, width)
352 __field(u16, reg)
353 __field(int, val)),
354 TP_fast_assign(
355 __entry->op = op;
356 __entry->width = width;
357 __entry->reg = reg;
358 __entry->val = val;),
359 TP_printk("docg3: %s%02d reg=%04x, val=%04x",
360 __entry->op ? "write" : "read", __entry->width,
361 __entry->reg, __entry->val)
362 );
363#endif
364
365/* This part must be outside protection */
366#undef TRACE_INCLUDE_PATH
367#undef TRACE_INCLUDE_FILE
368#define TRACE_INCLUDE_PATH .
369#define TRACE_INCLUDE_FILE docg3
370#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 88b3fd3e18a..d374603493a 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -50,6 +50,11 @@
50#include <linux/mtd/nand.h> 50#include <linux/mtd/nand.h>
51#include <linux/mtd/doc2000.h> 51#include <linux/mtd/doc2000.h>
52 52
53/* Where to look for the devices? */
54#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
55#define CONFIG_MTD_DOCPROBE_ADDRESS 0
56#endif
57
53 58
54static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; 59static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
55module_param(doc_config_location, ulong, 0); 60module_param(doc_config_location, ulong, 0);
@@ -70,6 +75,8 @@ static unsigned long __initdata doc_locations[] = {
70 0xe0000, 0xe2000, 0xe4000, 0xe6000, 75 0xe0000, 0xe2000, 0xe4000, 0xe6000,
71 0xe8000, 0xea000, 0xec000, 0xee000, 76 0xe8000, 0xea000, 0xec000, 0xee000,
72#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 77#endif /* CONFIG_MTD_DOCPROBE_HIGH */
78#else
79#warning Unknown architecture for DiskOnChip. No default probe locations defined
73#endif 80#endif
74 0xffffffff }; 81 0xffffffff };
75 82
@@ -239,7 +246,8 @@ static void __init DoC_Probe(unsigned long physadr)
239 return; 246 return;
240 } 247 }
241 docfound = 1; 248 docfound = 1;
242 mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL); 249 mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
250
243 if (!mtd) { 251 if (!mtd) {
244 printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n"); 252 printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
245 iounmap(docptr); 253 iounmap(docptr);
@@ -247,6 +255,10 @@ static void __init DoC_Probe(unsigned long physadr)
247 } 255 }
248 256
249 this = (struct DiskOnChip *)(&mtd[1]); 257 this = (struct DiskOnChip *)(&mtd[1]);
258
259 memset((char *)mtd,0, sizeof(struct mtd_info));
260 memset((char *)this, 0, sizeof(struct DiskOnChip));
261
250 mtd->priv = this; 262 mtd->priv = this;
251 this->virtadr = docptr; 263 this->virtadr = docptr;
252 this->physadr = physadr; 264 this->physadr = physadr;
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 82bd00af5cc..772a0ff89e0 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -34,6 +34,9 @@
34/* debugging */ 34/* debugging */
35//#define LART_DEBUG 35//#define LART_DEBUG
36 36
37/* partition support */
38#define HAVE_PARTITIONS
39
37#include <linux/kernel.h> 40#include <linux/kernel.h>
38#include <linux/module.h> 41#include <linux/module.h>
39#include <linux/types.h> 42#include <linux/types.h>
@@ -41,7 +44,9 @@
41#include <linux/errno.h> 44#include <linux/errno.h>
42#include <linux/string.h> 45#include <linux/string.h>
43#include <linux/mtd/mtd.h> 46#include <linux/mtd/mtd.h>
47#ifdef HAVE_PARTITIONS
44#include <linux/mtd/partitions.h> 48#include <linux/mtd/partitions.h>
49#endif
45 50
46#ifndef CONFIG_SA1100_LART 51#ifndef CONFIG_SA1100_LART
47#error This is for LART architecture only 52#error This is for LART architecture only
@@ -367,6 +372,9 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
367 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len); 372 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
368#endif 373#endif
369 374
375 /* sanity checks */
376 if (instr->addr + instr->len > mtd->size) return (-EINVAL);
377
370 /* 378 /*
371 * check that both start and end of the requested erase are 379 * check that both start and end of the requested erase are
372 * aligned with the erasesize at the appropriate addresses. 380 * aligned with the erasesize at the appropriate addresses.
@@ -437,6 +445,10 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle
437 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len); 445 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
438#endif 446#endif
439 447
448 /* sanity checks */
449 if (!len) return (0);
450 if (from + len > mtd->size) return (-EINVAL);
451
440 /* we always read len bytes */ 452 /* we always read len bytes */
441 *retlen = len; 453 *retlen = len;
442 454
@@ -515,8 +527,11 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
515 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len); 527 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
516#endif 528#endif
517 529
530 *retlen = 0;
531
518 /* sanity checks */ 532 /* sanity checks */
519 if (!len) return (0); 533 if (!len) return (0);
534 if (to + len > mtd->size) return (-EINVAL);
520 535
521 /* first, we write a 0xFF.... padded byte until we reach a dword boundary */ 536 /* first, we write a 0xFF.... padded byte until we reach a dword boundary */
522 if (to & (BUSWIDTH - 1)) 537 if (to & (BUSWIDTH - 1))
@@ -583,6 +598,7 @@ static struct mtd_erase_region_info erase_regions[] = {
583 } 598 }
584}; 599};
585 600
601#ifdef HAVE_PARTITIONS
586static struct mtd_partition lart_partitions[] = { 602static struct mtd_partition lart_partitions[] = {
587 /* blob */ 603 /* blob */
588 { 604 {
@@ -603,7 +619,7 @@ static struct mtd_partition lart_partitions[] = {
603 .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ 619 .size = INITRD_LEN, /* MTDPART_SIZ_FULL */
604 } 620 }
605}; 621};
606#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions) 622#endif
607 623
608static int __init lart_flash_init (void) 624static int __init lart_flash_init (void)
609{ 625{
@@ -620,15 +636,14 @@ static int __init lart_flash_init (void)
620 mtd.name = module_name; 636 mtd.name = module_name;
621 mtd.type = MTD_NORFLASH; 637 mtd.type = MTD_NORFLASH;
622 mtd.writesize = 1; 638 mtd.writesize = 1;
623 mtd.writebufsize = 4;
624 mtd.flags = MTD_CAP_NORFLASH; 639 mtd.flags = MTD_CAP_NORFLASH;
625 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; 640 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
626 mtd.erasesize = FLASH_BLOCKSIZE_MAIN; 641 mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
627 mtd.numeraseregions = ARRAY_SIZE(erase_regions); 642 mtd.numeraseregions = ARRAY_SIZE(erase_regions);
628 mtd.eraseregions = erase_regions; 643 mtd.eraseregions = erase_regions;
629 mtd._erase = flash_erase; 644 mtd.erase = flash_erase;
630 mtd._read = flash_read; 645 mtd.read = flash_read;
631 mtd._write = flash_write; 646 mtd.write = flash_write;
632 mtd.owner = THIS_MODULE; 647 mtd.owner = THIS_MODULE;
633 648
634#ifdef LART_DEBUG 649#ifdef LART_DEBUG
@@ -653,6 +668,7 @@ static int __init lart_flash_init (void)
653 result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024, 668 result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
654 result,mtd.eraseregions[result].numblocks); 669 result,mtd.eraseregions[result].numblocks);
655 670
671#ifdef HAVE_PARTITIONS
656 printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions)); 672 printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
657 673
658 for (result = 0; result < ARRAY_SIZE(lart_partitions); result++) 674 for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
@@ -665,16 +681,25 @@ static int __init lart_flash_init (void)
665 result,lart_partitions[result].offset, 681 result,lart_partitions[result].offset,
666 result,lart_partitions[result].size,lart_partitions[result].size / 1024); 682 result,lart_partitions[result].size,lart_partitions[result].size / 1024);
667#endif 683#endif
684#endif
668 685
686#ifndef HAVE_PARTITIONS
687 result = mtd_device_register(&mtd, NULL, 0);
688#else
669 result = mtd_device_register(&mtd, lart_partitions, 689 result = mtd_device_register(&mtd, lart_partitions,
670 ARRAY_SIZE(lart_partitions)); 690 ARRAY_SIZE(lart_partitions));
691#endif
671 692
672 return (result); 693 return (result);
673} 694}
674 695
675static void __exit lart_flash_exit (void) 696static void __exit lart_flash_exit (void)
676{ 697{
698#ifndef HAVE_PARTITIONS
699 mtd_device_unregister(&mtd);
700#else
677 mtd_device_unregister(&mtd); 701 mtd_device_unregister(&mtd);
702#endif
678} 703}
679 704
680module_init (lart_flash_init); 705module_init (lart_flash_init);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4eeeb2d7f6e..35180e475c4 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -30,7 +30,6 @@
30#include <linux/mtd/cfi.h> 30#include <linux/mtd/cfi.h>
31#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
32#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
33#include <linux/of_platform.h>
34 33
35#include <linux/spi/spi.h> 34#include <linux/spi/spi.h>
36#include <linux/spi/flash.h> 35#include <linux/spi/flash.h>
@@ -73,6 +72,14 @@
73#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 72#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
74#define MAX_CMD_SIZE 5 73#define MAX_CMD_SIZE 5
75 74
75#ifdef CONFIG_M25PXX_USE_FAST_READ
76#define OPCODE_READ OPCODE_FAST_READ
77#define FAST_READ_DUMMY_BYTE 1
78#else
79#define OPCODE_READ OPCODE_NORM_READ
80#define FAST_READ_DUMMY_BYTE 0
81#endif
82
76#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) 83#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
77 84
78/****************************************************************************/ 85/****************************************************************************/
@@ -81,11 +88,11 @@ struct m25p {
81 struct spi_device *spi; 88 struct spi_device *spi;
82 struct mutex lock; 89 struct mutex lock;
83 struct mtd_info mtd; 90 struct mtd_info mtd;
91 unsigned partitioned:1;
84 u16 page_size; 92 u16 page_size;
85 u16 addr_width; 93 u16 addr_width;
86 u8 erase_opcode; 94 u8 erase_opcode;
87 u8 *command; 95 u8 *command;
88 bool fast_read;
89}; 96};
90 97
91static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) 98static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -161,7 +168,6 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
161{ 168{
162 switch (JEDEC_MFR(jedec_id)) { 169 switch (JEDEC_MFR(jedec_id)) {
163 case CFI_MFR_MACRONIX: 170 case CFI_MFR_MACRONIX:
164 case 0xEF /* winbond */:
165 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; 171 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
166 return spi_write(flash->spi, flash->command, 1); 172 return spi_write(flash->spi, flash->command, 1);
167 default: 173 default:
@@ -203,8 +209,9 @@ static int wait_till_ready(struct m25p *flash)
203 */ 209 */
204static int erase_chip(struct m25p *flash) 210static int erase_chip(struct m25p *flash)
205{ 211{
206 pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__, 212 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
207 (long long)(flash->mtd.size >> 10)); 213 dev_name(&flash->spi->dev), __func__,
214 (long long)(flash->mtd.size >> 10));
208 215
209 /* Wait until finished previous write command. */ 216 /* Wait until finished previous write command. */
210 if (wait_till_ready(flash)) 217 if (wait_till_ready(flash))
@@ -243,8 +250,9 @@ static int m25p_cmdsz(struct m25p *flash)
243 */ 250 */
244static int erase_sector(struct m25p *flash, u32 offset) 251static int erase_sector(struct m25p *flash, u32 offset)
245{ 252{
246 pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev), 253 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
247 __func__, flash->mtd.erasesize / 1024, offset); 254 dev_name(&flash->spi->dev), __func__,
255 flash->mtd.erasesize / 1024, offset);
248 256
249 /* Wait until finished previous write command. */ 257 /* Wait until finished previous write command. */
250 if (wait_till_ready(flash)) 258 if (wait_till_ready(flash))
@@ -278,10 +286,13 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
278 u32 addr,len; 286 u32 addr,len;
279 uint32_t rem; 287 uint32_t rem;
280 288
281 pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev), 289 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
282 __func__, (long long)instr->addr, 290 dev_name(&flash->spi->dev), __func__, "at",
283 (long long)instr->len); 291 (long long)instr->addr, (long long)instr->len);
284 292
293 /* sanity checks */
294 if (instr->addr + instr->len > flash->mtd.size)
295 return -EINVAL;
285 div_u64_rem(instr->len, mtd->erasesize, &rem); 296 div_u64_rem(instr->len, mtd->erasesize, &rem);
286 if (rem) 297 if (rem)
287 return -EINVAL; 298 return -EINVAL;
@@ -336,10 +347,17 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
336 struct m25p *flash = mtd_to_m25p(mtd); 347 struct m25p *flash = mtd_to_m25p(mtd);
337 struct spi_transfer t[2]; 348 struct spi_transfer t[2];
338 struct spi_message m; 349 struct spi_message m;
339 uint8_t opcode;
340 350
341 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 351 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
342 __func__, (u32)from, len); 352 dev_name(&flash->spi->dev), __func__, "from",
353 (u32)from, len);
354
355 /* sanity checks */
356 if (!len)
357 return 0;
358
359 if (from + len > flash->mtd.size)
360 return -EINVAL;
343 361
344 spi_message_init(&m); 362 spi_message_init(&m);
345 memset(t, 0, (sizeof t)); 363 memset(t, 0, (sizeof t));
@@ -349,13 +367,16 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
349 * Should add 1 byte DUMMY_BYTE. 367 * Should add 1 byte DUMMY_BYTE.
350 */ 368 */
351 t[0].tx_buf = flash->command; 369 t[0].tx_buf = flash->command;
352 t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0); 370 t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
353 spi_message_add_tail(&t[0], &m); 371 spi_message_add_tail(&t[0], &m);
354 372
355 t[1].rx_buf = buf; 373 t[1].rx_buf = buf;
356 t[1].len = len; 374 t[1].len = len;
357 spi_message_add_tail(&t[1], &m); 375 spi_message_add_tail(&t[1], &m);
358 376
377 /* Byte count starts at zero. */
378 *retlen = 0;
379
359 mutex_lock(&flash->lock); 380 mutex_lock(&flash->lock);
360 381
361 /* Wait till previous write/erase is done. */ 382 /* Wait till previous write/erase is done. */
@@ -371,14 +392,12 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
371 */ 392 */
372 393
373 /* Set up the write data buffer. */ 394 /* Set up the write data buffer. */
374 opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ; 395 flash->command[0] = OPCODE_READ;
375 flash->command[0] = opcode;
376 m25p_addr2cmd(flash, from, flash->command); 396 m25p_addr2cmd(flash, from, flash->command);
377 397
378 spi_sync(flash->spi, &m); 398 spi_sync(flash->spi, &m);
379 399
380 *retlen = m.actual_length - m25p_cmdsz(flash) - 400 *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
381 (flash->fast_read ? 1 : 0);
382 401
383 mutex_unlock(&flash->lock); 402 mutex_unlock(&flash->lock);
384 403
@@ -398,8 +417,18 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
398 struct spi_transfer t[2]; 417 struct spi_transfer t[2];
399 struct spi_message m; 418 struct spi_message m;
400 419
401 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 420 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
402 __func__, (u32)to, len); 421 dev_name(&flash->spi->dev), __func__, "to",
422 (u32)to, len);
423
424 *retlen = 0;
425
426 /* sanity checks */
427 if (!len)
428 return(0);
429
430 if (to + len > flash->mtd.size)
431 return -EINVAL;
403 432
404 spi_message_init(&m); 433 spi_message_init(&m);
405 memset(t, 0, (sizeof t)); 434 memset(t, 0, (sizeof t));
@@ -481,8 +510,18 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
481 size_t actual; 510 size_t actual;
482 int cmd_sz, ret; 511 int cmd_sz, ret;
483 512
484 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 513 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
485 __func__, (u32)to, len); 514 dev_name(&flash->spi->dev), __func__, "to",
515 (u32)to, len);
516
517 *retlen = 0;
518
519 /* sanity checks */
520 if (!len)
521 return 0;
522
523 if (to + len > flash->mtd.size)
524 return -EINVAL;
486 525
487 spi_message_init(&m); 526 spi_message_init(&m);
488 memset(t, 0, (sizeof t)); 527 memset(t, 0, (sizeof t));
@@ -622,7 +661,6 @@ static const struct spi_device_id m25p_ids[] = {
622 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, 661 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
623 662
624 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, 663 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
625 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
626 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, 664 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
627 665
628 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, 666 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
@@ -630,26 +668,17 @@ static const struct spi_device_id m25p_ids[] = {
630 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, 668 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
631 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, 669 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
632 670
633 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
634
635 /* EON -- en25xxx */ 671 /* EON -- en25xxx */
636 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, 672 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
637 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, 673 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
638 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
639 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, 674 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
640 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
641
642 /* Everspin */
643 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
644 675
645 /* Intel/Numonyx -- xxxs33b */ 676 /* Intel/Numonyx -- xxxs33b */
646 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, 677 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
647 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, 678 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
648 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, 679 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
649 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
650 680
651 /* Macronix */ 681 /* Macronix */
652 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
653 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, 682 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
654 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, 683 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
655 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, 684 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
@@ -660,16 +689,15 @@ static const struct spi_device_id m25p_ids[] = {
660 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, 689 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
661 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, 690 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
662 691
663 /* Micron */
664 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
665 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
666 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
667
668 /* Spansion -- single (large) sector size only, at least 692 /* Spansion -- single (large) sector size only, at least
669 * for the chips listed here (without boot sectors). 693 * for the chips listed here (without boot sectors).
670 */ 694 */
671 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) }, 695 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
672 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) }, 696 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
697 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
698 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
699 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) },
700 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
673 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, 701 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
674 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) }, 702 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
675 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) }, 703 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
@@ -678,11 +706,6 @@ static const struct spi_device_id m25p_ids[] = {
678 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, 706 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
679 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, 707 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
680 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, 708 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
681 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
682 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
683 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
684 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
685 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
686 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) }, 709 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
687 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 710 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
688 711
@@ -706,7 +729,6 @@ static const struct spi_device_id m25p_ids[] = {
706 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, 729 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
707 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, 730 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
708 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, 731 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
709 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
710 732
711 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, 733 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
712 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, 734 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
@@ -722,7 +744,6 @@ static const struct spi_device_id m25p_ids[] = {
722 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, 744 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
723 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, 745 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
724 746
725 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
726 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, 747 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
727 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, 748 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
728 749
@@ -739,12 +760,8 @@ static const struct spi_device_id m25p_ids[] = {
739 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, 760 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
740 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 761 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
741 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 762 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
742 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
743 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 763 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
744 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 764 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
745 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
746 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
747 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
748 765
749 /* Catalyst / On Semiconductor -- non-JEDEC */ 766 /* Catalyst / On Semiconductor -- non-JEDEC */
750 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, 767 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -756,7 +773,7 @@ static const struct spi_device_id m25p_ids[] = {
756}; 773};
757MODULE_DEVICE_TABLE(spi, m25p_ids); 774MODULE_DEVICE_TABLE(spi, m25p_ids);
758 775
759static const struct spi_device_id *jedec_probe(struct spi_device *spi) 776static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
760{ 777{
761 int tmp; 778 int tmp;
762 u8 code = OPCODE_RDID; 779 u8 code = OPCODE_RDID;
@@ -771,8 +788,8 @@ static const struct spi_device_id *jedec_probe(struct spi_device *spi)
771 */ 788 */
772 tmp = spi_write_then_read(spi, &code, 1, id, 5); 789 tmp = spi_write_then_read(spi, &code, 1, id, 5);
773 if (tmp < 0) { 790 if (tmp < 0) {
774 pr_debug("%s: error %d reading JEDEC ID\n", 791 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
775 dev_name(&spi->dev), tmp); 792 dev_name(&spi->dev), tmp);
776 return ERR_PTR(tmp); 793 return ERR_PTR(tmp);
777 } 794 }
778 jedec = id[0]; 795 jedec = id[0];
@@ -801,20 +818,15 @@ static const struct spi_device_id *jedec_probe(struct spi_device *spi)
801 * matches what the READ command supports, at least until this driver 818 * matches what the READ command supports, at least until this driver
802 * understands FAST_READ (for clocks over 25 MHz). 819 * understands FAST_READ (for clocks over 25 MHz).
803 */ 820 */
804static int m25p_probe(struct spi_device *spi) 821static int __devinit m25p_probe(struct spi_device *spi)
805{ 822{
806 const struct spi_device_id *id = spi_get_device_id(spi); 823 const struct spi_device_id *id = spi_get_device_id(spi);
807 struct flash_platform_data *data; 824 struct flash_platform_data *data;
808 struct m25p *flash; 825 struct m25p *flash;
809 struct flash_info *info; 826 struct flash_info *info;
810 unsigned i; 827 unsigned i;
811 struct mtd_part_parser_data ppdata; 828 struct mtd_partition *parts = NULL;
812 struct device_node __maybe_unused *np = spi->dev.of_node; 829 int nr_parts = 0;
813
814#ifdef CONFIG_MTD_OF_PARTS
815 if (!of_device_is_available(np))
816 return -ENODEV;
817#endif
818 830
819 /* Platform data helps sort out which chip type we have, as 831 /* Platform data helps sort out which chip type we have, as
820 * well as how this board partitions it. If we don't have 832 * well as how this board partitions it. If we don't have
@@ -864,8 +876,7 @@ static int m25p_probe(struct spi_device *spi)
864 flash = kzalloc(sizeof *flash, GFP_KERNEL); 876 flash = kzalloc(sizeof *flash, GFP_KERNEL);
865 if (!flash) 877 if (!flash)
866 return -ENOMEM; 878 return -ENOMEM;
867 flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0), 879 flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
868 GFP_KERNEL);
869 if (!flash->command) { 880 if (!flash->command) {
870 kfree(flash); 881 kfree(flash);
871 return -ENOMEM; 882 return -ENOMEM;
@@ -896,14 +907,14 @@ static int m25p_probe(struct spi_device *spi)
896 flash->mtd.writesize = 1; 907 flash->mtd.writesize = 1;
897 flash->mtd.flags = MTD_CAP_NORFLASH; 908 flash->mtd.flags = MTD_CAP_NORFLASH;
898 flash->mtd.size = info->sector_size * info->n_sectors; 909 flash->mtd.size = info->sector_size * info->n_sectors;
899 flash->mtd._erase = m25p80_erase; 910 flash->mtd.erase = m25p80_erase;
900 flash->mtd._read = m25p80_read; 911 flash->mtd.read = m25p80_read;
901 912
902 /* sst flash chips use AAI word program */ 913 /* sst flash chips use AAI word program */
903 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) 914 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
904 flash->mtd._write = sst_write; 915 flash->mtd.write = sst_write;
905 else 916 else
906 flash->mtd._write = m25p80_write; 917 flash->mtd.write = m25p80_write;
907 918
908 /* prefer "small sector" erase if possible */ 919 /* prefer "small sector" erase if possible */
909 if (info->flags & SECT_4K) { 920 if (info->flags & SECT_4K) {
@@ -917,20 +928,8 @@ static int m25p_probe(struct spi_device *spi)
917 if (info->flags & M25P_NO_ERASE) 928 if (info->flags & M25P_NO_ERASE)
918 flash->mtd.flags |= MTD_NO_ERASE; 929 flash->mtd.flags |= MTD_NO_ERASE;
919 930
920 ppdata.of_node = spi->dev.of_node;
921 flash->mtd.dev.parent = &spi->dev; 931 flash->mtd.dev.parent = &spi->dev;
922 flash->page_size = info->page_size; 932 flash->page_size = info->page_size;
923 flash->mtd.writebufsize = flash->page_size;
924
925 flash->fast_read = false;
926#ifdef CONFIG_OF
927 if (np && of_property_read_bool(np, "m25p,fast-read"))
928 flash->fast_read = true;
929#endif
930
931#ifdef CONFIG_M25PXX_USE_FAST_READ
932 flash->fast_read = true;
933#endif
934 933
935 if (info->addr_width) 934 if (info->addr_width)
936 flash->addr_width = info->addr_width; 935 flash->addr_width = info->addr_width;
@@ -946,7 +945,8 @@ static int m25p_probe(struct spi_device *spi)
946 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, 945 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
947 (long long)flash->mtd.size >> 10); 946 (long long)flash->mtd.size >> 10);
948 947
949 pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " 948 DEBUG(MTD_DEBUG_LEVEL2,
949 "mtd .name = %s, .size = 0x%llx (%lldMiB) "
950 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 950 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
951 flash->mtd.name, 951 flash->mtd.name,
952 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), 952 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -955,7 +955,8 @@ static int m25p_probe(struct spi_device *spi)
955 955
956 if (flash->mtd.numeraseregions) 956 if (flash->mtd.numeraseregions)
957 for (i = 0; i < flash->mtd.numeraseregions; i++) 957 for (i = 0; i < flash->mtd.numeraseregions; i++)
958 pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, " 958 DEBUG(MTD_DEBUG_LEVEL2,
959 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
959 ".erasesize = 0x%.8x (%uKiB), " 960 ".erasesize = 0x%.8x (%uKiB), "
960 ".numblocks = %d }\n", 961 ".numblocks = %d }\n",
961 i, (long long)flash->mtd.eraseregions[i].offset, 962 i, (long long)flash->mtd.eraseregions[i].offset,
@@ -967,13 +968,45 @@ static int m25p_probe(struct spi_device *spi)
967 /* partitions should match sector boundaries; and it may be good to 968 /* partitions should match sector boundaries; and it may be good to
968 * use readonly partitions for writeprotected sectors (BP2..BP0). 969 * use readonly partitions for writeprotected sectors (BP2..BP0).
969 */ 970 */
970 return mtd_device_parse_register(&flash->mtd, NULL, &ppdata, 971 if (mtd_has_cmdlinepart()) {
971 data ? data->parts : NULL, 972 static const char *part_probes[]
972 data ? data->nr_parts : 0); 973 = { "cmdlinepart", NULL, };
974
975 nr_parts = parse_mtd_partitions(&flash->mtd,
976 part_probes, &parts, 0);
977 }
978
979 if (nr_parts <= 0 && data && data->parts) {
980 parts = data->parts;
981 nr_parts = data->nr_parts;
982 }
983
984#ifdef CONFIG_MTD_OF_PARTS
985 if (nr_parts <= 0 && spi->dev.of_node) {
986 nr_parts = of_mtd_parse_partitions(&spi->dev,
987 spi->dev.of_node, &parts);
988 }
989#endif
990
991 if (nr_parts > 0) {
992 for (i = 0; i < nr_parts; i++) {
993 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
994 "{.name = %s, .offset = 0x%llx, "
995 ".size = 0x%llx (%lldKiB) }\n",
996 i, parts[i].name,
997 (long long)parts[i].offset,
998 (long long)parts[i].size,
999 (long long)(parts[i].size >> 10));
1000 }
1001 flash->partitioned = 1;
1002 }
1003
1004 return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
1005 -ENODEV : 0;
973} 1006}
974 1007
975 1008
976static int m25p_remove(struct spi_device *spi) 1009static int __devexit m25p_remove(struct spi_device *spi)
977{ 1010{
978 struct m25p *flash = dev_get_drvdata(&spi->dev); 1011 struct m25p *flash = dev_get_drvdata(&spi->dev);
979 int status; 1012 int status;
@@ -991,11 +1024,12 @@ static int m25p_remove(struct spi_device *spi)
991static struct spi_driver m25p80_driver = { 1024static struct spi_driver m25p80_driver = {
992 .driver = { 1025 .driver = {
993 .name = "m25p80", 1026 .name = "m25p80",
1027 .bus = &spi_bus_type,
994 .owner = THIS_MODULE, 1028 .owner = THIS_MODULE,
995 }, 1029 },
996 .id_table = m25p_ids, 1030 .id_table = m25p_ids,
997 .probe = m25p_probe, 1031 .probe = m25p_probe,
998 .remove = m25p_remove, 1032 .remove = __devexit_p(m25p_remove),
999 1033
1000 /* REVISIT: many of these chips have deep power-down modes, which 1034 /* REVISIT: many of these chips have deep power-down modes, which
1001 * should clearly be entered on suspend() to minimize power use. 1035 * should clearly be entered on suspend() to minimize power use.
@@ -1003,7 +1037,21 @@ static struct spi_driver m25p80_driver = {
1003 */ 1037 */
1004}; 1038};
1005 1039
1006module_spi_driver(m25p80_driver); 1040
1041static int __init m25p80_init(void)
1042{
1043 return spi_register_driver(&m25p80_driver);
1044}
1045
1046
1047static void __exit m25p80_exit(void)
1048{
1049 spi_unregister_driver(&m25p80_driver);
1050}
1051
1052
1053module_init(m25p80_init);
1054module_exit(m25p80_exit);
1007 1055
1008MODULE_LICENSE("GPL"); 1056MODULE_LICENSE("GPL");
1009MODULE_AUTHOR("Mike Lavender"); 1057MODULE_AUTHOR("Mike Lavender");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 182849d39c6..8423fb6d4f2 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -59,8 +59,12 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from,
59{ 59{
60 struct ms02nv_private *mp = mtd->priv; 60 struct ms02nv_private *mp = mtd->priv;
61 61
62 if (from + len > mtd->size)
63 return -EINVAL;
64
62 memcpy(buf, mp->uaddr + from, len); 65 memcpy(buf, mp->uaddr + from, len);
63 *retlen = len; 66 *retlen = len;
67
64 return 0; 68 return 0;
65} 69}
66 70
@@ -69,8 +73,12 @@ static int ms02nv_write(struct mtd_info *mtd, loff_t to,
69{ 73{
70 struct ms02nv_private *mp = mtd->priv; 74 struct ms02nv_private *mp = mtd->priv;
71 75
76 if (to + len > mtd->size)
77 return -EINVAL;
78
72 memcpy(mp->uaddr + to, buf, len); 79 memcpy(mp->uaddr + to, buf, len);
73 *retlen = len; 80 *retlen = len;
81
74 return 0; 82 return 0;
75} 83}
76 84
@@ -207,8 +215,8 @@ static int __init ms02nv_init_one(ulong addr)
207 mtd->size = fixsize; 215 mtd->size = fixsize;
208 mtd->name = (char *)ms02nv_name; 216 mtd->name = (char *)ms02nv_name;
209 mtd->owner = THIS_MODULE; 217 mtd->owner = THIS_MODULE;
210 mtd->_read = ms02nv_read; 218 mtd->read = ms02nv_read;
211 mtd->_write = ms02nv_write; 219 mtd->write = ms02nv_write;
212 mtd->writesize = 1; 220 mtd->writesize = 1;
213 221
214 ret = -EIO; 222 ret = -EIO;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 945c9f76234..13749d458a3 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -17,8 +17,6 @@
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/math64.h> 19#include <linux/math64.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22 20
23#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
24#include <linux/spi/flash.h> 22#include <linux/spi/flash.h>
@@ -26,6 +24,7 @@
26#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
28 26
27
29/* 28/*
30 * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in 29 * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
31 * each chip, which may be used for double buffered I/O; but this driver 30 * each chip, which may be used for double buffered I/O; but this driver
@@ -99,16 +98,6 @@ struct dataflash {
99 struct mtd_info mtd; 98 struct mtd_info mtd;
100}; 99};
101 100
102#ifdef CONFIG_OF
103static const struct of_device_id dataflash_dt_ids[] = {
104 { .compatible = "atmel,at45", },
105 { .compatible = "atmel,dataflash", },
106 { /* sentinel */ }
107};
108#else
109#define dataflash_dt_ids NULL
110#endif
111
112/* ......................................................................... */ 101/* ......................................................................... */
113 102
114/* 103/*
@@ -133,7 +122,7 @@ static int dataflash_waitready(struct spi_device *spi)
133 for (;;) { 122 for (;;) {
134 status = dataflash_status(spi); 123 status = dataflash_status(spi);
135 if (status < 0) { 124 if (status < 0) {
136 pr_debug("%s: status %d?\n", 125 DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
137 dev_name(&spi->dev), status); 126 dev_name(&spi->dev), status);
138 status = 0; 127 status = 0;
139 } 128 }
@@ -160,10 +149,13 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
160 uint8_t *command; 149 uint8_t *command;
161 uint32_t rem; 150 uint32_t rem;
162 151
163 pr_debug("%s: erase addr=0x%llx len 0x%llx\n", 152 DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
164 dev_name(&spi->dev), (long long)instr->addr, 153 dev_name(&spi->dev), (long long)instr->addr,
165 (long long)instr->len); 154 (long long)instr->len);
166 155
156 /* Sanity checks */
157 if (instr->addr + instr->len > mtd->size)
158 return -EINVAL;
167 div_u64_rem(instr->len, priv->page_size, &rem); 159 div_u64_rem(instr->len, priv->page_size, &rem);
168 if (rem) 160 if (rem)
169 return -EINVAL; 161 return -EINVAL;
@@ -195,7 +187,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
195 command[2] = (uint8_t)(pageaddr >> 8); 187 command[2] = (uint8_t)(pageaddr >> 8);
196 command[3] = 0; 188 command[3] = 0;
197 189
198 pr_debug("ERASE %s: (%x) %x %x %x [%i]\n", 190 DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
199 do_block ? "block" : "page", 191 do_block ? "block" : "page",
200 command[0], command[1], command[2], command[3], 192 command[0], command[1], command[2], command[3],
201 pageaddr); 193 pageaddr);
@@ -246,8 +238,16 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
246 uint8_t *command; 238 uint8_t *command;
247 int status; 239 int status;
248 240
249 pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), 241 DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
250 (unsigned)from, (unsigned)(from + len)); 242 dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
243
244 *retlen = 0;
245
246 /* Sanity checks */
247 if (!len)
248 return 0;
249 if (from + len > mtd->size)
250 return -EINVAL;
251 251
252 /* Calculate flash page/byte address */ 252 /* Calculate flash page/byte address */
253 addr = (((unsigned)from / priv->page_size) << priv->page_offset) 253 addr = (((unsigned)from / priv->page_size) << priv->page_offset)
@@ -255,7 +255,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
255 255
256 command = priv->command; 256 command = priv->command;
257 257
258 pr_debug("READ: (%x) %x %x %x\n", 258 DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
259 command[0], command[1], command[2], command[3]); 259 command[0], command[1], command[2], command[3]);
260 260
261 spi_message_init(&msg); 261 spi_message_init(&msg);
@@ -287,7 +287,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
287 *retlen = msg.actual_length - 8; 287 *retlen = msg.actual_length - 8;
288 status = 0; 288 status = 0;
289 } else 289 } else
290 pr_debug("%s: read %x..%x --> %d\n", 290 DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
291 dev_name(&priv->spi->dev), 291 dev_name(&priv->spi->dev),
292 (unsigned)from, (unsigned)(from + len), 292 (unsigned)from, (unsigned)(from + len),
293 status); 293 status);
@@ -314,9 +314,17 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
314 int status = -EINVAL; 314 int status = -EINVAL;
315 uint8_t *command; 315 uint8_t *command;
316 316
317 pr_debug("%s: write 0x%x..0x%x\n", 317 DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
318 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); 318 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
319 319
320 *retlen = 0;
321
322 /* Sanity checks */
323 if (!len)
324 return 0;
325 if ((to + len) > mtd->size)
326 return -EINVAL;
327
320 spi_message_init(&msg); 328 spi_message_init(&msg);
321 329
322 x[0].tx_buf = command = priv->command; 330 x[0].tx_buf = command = priv->command;
@@ -332,7 +340,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
332 340
333 mutex_lock(&priv->lock); 341 mutex_lock(&priv->lock);
334 while (remaining > 0) { 342 while (remaining > 0) {
335 pr_debug("write @ %i:%i len=%i\n", 343 DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n",
336 pageaddr, offset, writelen); 344 pageaddr, offset, writelen);
337 345
338 /* REVISIT: 346 /* REVISIT:
@@ -360,12 +368,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
360 command[2] = (addr & 0x0000FF00) >> 8; 368 command[2] = (addr & 0x0000FF00) >> 8;
361 command[3] = 0; 369 command[3] = 0;
362 370
363 pr_debug("TRANSFER: (%x) %x %x %x\n", 371 DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
364 command[0], command[1], command[2], command[3]); 372 command[0], command[1], command[2], command[3]);
365 373
366 status = spi_sync(spi, &msg); 374 status = spi_sync(spi, &msg);
367 if (status < 0) 375 if (status < 0)
368 pr_debug("%s: xfer %u -> %d\n", 376 DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
369 dev_name(&spi->dev), addr, status); 377 dev_name(&spi->dev), addr, status);
370 378
371 (void) dataflash_waitready(priv->spi); 379 (void) dataflash_waitready(priv->spi);
@@ -378,7 +386,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
378 command[2] = (addr & 0x0000FF00) >> 8; 386 command[2] = (addr & 0x0000FF00) >> 8;
379 command[3] = (addr & 0x000000FF); 387 command[3] = (addr & 0x000000FF);
380 388
381 pr_debug("PROGRAM: (%x) %x %x %x\n", 389 DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n",
382 command[0], command[1], command[2], command[3]); 390 command[0], command[1], command[2], command[3]);
383 391
384 x[1].tx_buf = writebuf; 392 x[1].tx_buf = writebuf;
@@ -387,7 +395,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
387 status = spi_sync(spi, &msg); 395 status = spi_sync(spi, &msg);
388 spi_transfer_del(x + 1); 396 spi_transfer_del(x + 1);
389 if (status < 0) 397 if (status < 0)
390 pr_debug("%s: pgm %u/%u -> %d\n", 398 DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
391 dev_name(&spi->dev), addr, writelen, status); 399 dev_name(&spi->dev), addr, writelen, status);
392 400
393 (void) dataflash_waitready(priv->spi); 401 (void) dataflash_waitready(priv->spi);
@@ -402,12 +410,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
402 command[2] = (addr & 0x0000FF00) >> 8; 410 command[2] = (addr & 0x0000FF00) >> 8;
403 command[3] = 0; 411 command[3] = 0;
404 412
405 pr_debug("COMPARE: (%x) %x %x %x\n", 413 DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
406 command[0], command[1], command[2], command[3]); 414 command[0], command[1], command[2], command[3]);
407 415
408 status = spi_sync(spi, &msg); 416 status = spi_sync(spi, &msg);
409 if (status < 0) 417 if (status < 0)
410 pr_debug("%s: compare %u -> %d\n", 418 DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
411 dev_name(&spi->dev), addr, status); 419 dev_name(&spi->dev), addr, status);
412 420
413 status = dataflash_waitready(priv->spi); 421 status = dataflash_waitready(priv->spi);
@@ -471,6 +479,8 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base,
471 479
472 if ((off + len) > 64) 480 if ((off + len) > 64)
473 len = 64 - off; 481 len = 64 - off;
482 if (len == 0)
483 return len;
474 484
475 spi_message_init(&m); 485 spi_message_init(&m);
476 486
@@ -590,16 +600,16 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
590 600
591static char *otp_setup(struct mtd_info *device, char revision) 601static char *otp_setup(struct mtd_info *device, char revision)
592{ 602{
593 device->_get_fact_prot_info = dataflash_get_otp_info; 603 device->get_fact_prot_info = dataflash_get_otp_info;
594 device->_read_fact_prot_reg = dataflash_read_fact_otp; 604 device->read_fact_prot_reg = dataflash_read_fact_otp;
595 device->_get_user_prot_info = dataflash_get_otp_info; 605 device->get_user_prot_info = dataflash_get_otp_info;
596 device->_read_user_prot_reg = dataflash_read_user_otp; 606 device->read_user_prot_reg = dataflash_read_user_otp;
597 607
598 /* rev c parts (at45db321c and at45db1281 only!) use a 608 /* rev c parts (at45db321c and at45db1281 only!) use a
599 * different write procedure; not (yet?) implemented. 609 * different write procedure; not (yet?) implemented.
600 */ 610 */
601 if (revision > 'c') 611 if (revision > 'c')
602 device->_write_user_prot_reg = dataflash_write_user_otp; 612 device->write_user_prot_reg = dataflash_write_user_otp;
603 613
604 return ", OTP"; 614 return ", OTP";
605} 615}
@@ -618,15 +628,17 @@ static char *otp_setup(struct mtd_info *device, char revision)
618/* 628/*
619 * Register DataFlash device with MTD subsystem. 629 * Register DataFlash device with MTD subsystem.
620 */ 630 */
621static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages, 631static int __devinit
622 int pagesize, int pageoffset, char revision) 632add_dataflash_otp(struct spi_device *spi, char *name,
633 int nr_pages, int pagesize, int pageoffset, char revision)
623{ 634{
624 struct dataflash *priv; 635 struct dataflash *priv;
625 struct mtd_info *device; 636 struct mtd_info *device;
626 struct mtd_part_parser_data ppdata;
627 struct flash_platform_data *pdata = spi->dev.platform_data; 637 struct flash_platform_data *pdata = spi->dev.platform_data;
628 char *otp_tag = ""; 638 char *otp_tag = "";
629 int err = 0; 639 int err = 0;
640 struct mtd_partition *parts;
641 int nr_parts = 0;
630 642
631 priv = kzalloc(sizeof *priv, GFP_KERNEL); 643 priv = kzalloc(sizeof *priv, GFP_KERNEL);
632 if (!priv) 644 if (!priv)
@@ -650,9 +662,9 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
650 device->owner = THIS_MODULE; 662 device->owner = THIS_MODULE;
651 device->type = MTD_DATAFLASH; 663 device->type = MTD_DATAFLASH;
652 device->flags = MTD_WRITEABLE; 664 device->flags = MTD_WRITEABLE;
653 device->_erase = dataflash_erase; 665 device->erase = dataflash_erase;
654 device->_read = dataflash_read; 666 device->read = dataflash_read;
655 device->_write = dataflash_write; 667 device->write = dataflash_write;
656 device->priv = priv; 668 device->priv = priv;
657 669
658 device->dev.parent = &spi->dev; 670 device->dev.parent = &spi->dev;
@@ -665,11 +677,28 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
665 pagesize, otp_tag); 677 pagesize, otp_tag);
666 dev_set_drvdata(&spi->dev, priv); 678 dev_set_drvdata(&spi->dev, priv);
667 679
668 ppdata.of_node = spi->dev.of_node; 680 if (mtd_has_cmdlinepart()) {
669 err = mtd_device_parse_register(device, NULL, &ppdata, 681 static const char *part_probes[] = { "cmdlinepart", NULL, };
670 pdata ? pdata->parts : NULL,
671 pdata ? pdata->nr_parts : 0);
672 682
683 nr_parts = parse_mtd_partitions(device, part_probes, &parts,
684 0);
685 }
686
687 if (nr_parts <= 0 && pdata && pdata->parts) {
688 parts = pdata->parts;
689 nr_parts = pdata->nr_parts;
690 }
691
692 if (nr_parts > 0) {
693 priv->partitioned = 1;
694 err = mtd_device_register(device, parts, nr_parts);
695 goto out;
696 }
697
698 if (mtd_device_register(device, NULL, 0) == 1)
699 err = -ENODEV;
700
701out:
673 if (!err) 702 if (!err)
674 return 0; 703 return 0;
675 704
@@ -678,8 +707,9 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
678 return err; 707 return err;
679} 708}
680 709
681static inline int add_dataflash(struct spi_device *spi, char *name, 710static inline int __devinit
682 int nr_pages, int pagesize, int pageoffset) 711add_dataflash(struct spi_device *spi, char *name,
712 int nr_pages, int pagesize, int pageoffset)
683{ 713{
684 return add_dataflash_otp(spi, name, nr_pages, pagesize, 714 return add_dataflash_otp(spi, name, nr_pages, pagesize,
685 pageoffset, 0); 715 pageoffset, 0);
@@ -703,7 +733,7 @@ struct flash_info {
703#define IS_POW2PS 0x0001 /* uses 2^N byte pages */ 733#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
704}; 734};
705 735
706static struct flash_info dataflash_data[] = { 736static struct flash_info __devinitdata dataflash_data [] = {
707 737
708 /* 738 /*
709 * NOTE: chips with SUP_POW2PS (rev D and up) need two entries, 739 * NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
@@ -738,7 +768,7 @@ static struct flash_info dataflash_data[] = {
738 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, 768 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
739}; 769};
740 770
741static struct flash_info *jedec_probe(struct spi_device *spi) 771static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
742{ 772{
743 int tmp; 773 int tmp;
744 uint8_t code = OP_READ_ID; 774 uint8_t code = OP_READ_ID;
@@ -757,7 +787,7 @@ static struct flash_info *jedec_probe(struct spi_device *spi)
757 */ 787 */
758 tmp = spi_write_then_read(spi, &code, 1, id, 3); 788 tmp = spi_write_then_read(spi, &code, 1, id, 3);
759 if (tmp < 0) { 789 if (tmp < 0) {
760 pr_debug("%s: error %d reading JEDEC ID\n", 790 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
761 dev_name(&spi->dev), tmp); 791 dev_name(&spi->dev), tmp);
762 return ERR_PTR(tmp); 792 return ERR_PTR(tmp);
763 } 793 }
@@ -774,7 +804,7 @@ static struct flash_info *jedec_probe(struct spi_device *spi)
774 tmp < ARRAY_SIZE(dataflash_data); 804 tmp < ARRAY_SIZE(dataflash_data);
775 tmp++, info++) { 805 tmp++, info++) {
776 if (info->jedec_id == jedec) { 806 if (info->jedec_id == jedec) {
777 pr_debug("%s: OTP, sector protect%s\n", 807 DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n",
778 dev_name(&spi->dev), 808 dev_name(&spi->dev),
779 (info->flags & SUP_POW2PS) 809 (info->flags & SUP_POW2PS)
780 ? ", binary pagesize" : "" 810 ? ", binary pagesize" : ""
@@ -782,7 +812,8 @@ static struct flash_info *jedec_probe(struct spi_device *spi)
782 if (info->flags & SUP_POW2PS) { 812 if (info->flags & SUP_POW2PS) {
783 status = dataflash_status(spi); 813 status = dataflash_status(spi);
784 if (status < 0) { 814 if (status < 0) {
785 pr_debug("%s: status error %d\n", 815 DEBUG(MTD_DEBUG_LEVEL1,
816 "%s: status error %d\n",
786 dev_name(&spi->dev), status); 817 dev_name(&spi->dev), status);
787 return ERR_PTR(status); 818 return ERR_PTR(status);
788 } 819 }
@@ -821,7 +852,7 @@ static struct flash_info *jedec_probe(struct spi_device *spi)
821 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11 852 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
822 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11 853 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
823 */ 854 */
824static int dataflash_probe(struct spi_device *spi) 855static int __devinit dataflash_probe(struct spi_device *spi)
825{ 856{
826 int status; 857 int status;
827 struct flash_info *info; 858 struct flash_info *info;
@@ -847,7 +878,7 @@ static int dataflash_probe(struct spi_device *spi)
847 */ 878 */
848 status = dataflash_status(spi); 879 status = dataflash_status(spi);
849 if (status <= 0 || status == 0xff) { 880 if (status <= 0 || status == 0xff) {
850 pr_debug("%s: status error %d\n", 881 DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
851 dev_name(&spi->dev), status); 882 dev_name(&spi->dev), status);
852 if (status == 0 || status == 0xff) 883 if (status == 0 || status == 0xff)
853 status = -ENODEV; 884 status = -ENODEV;
@@ -883,24 +914,24 @@ static int dataflash_probe(struct spi_device *spi)
883 break; 914 break;
884 /* obsolete AT45DB1282 not (yet?) supported */ 915 /* obsolete AT45DB1282 not (yet?) supported */
885 default: 916 default:
886 pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev), 917 DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
887 status & 0x3c); 918 dev_name(&spi->dev), status & 0x3c);
888 status = -ENODEV; 919 status = -ENODEV;
889 } 920 }
890 921
891 if (status < 0) 922 if (status < 0)
892 pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev), 923 DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
893 status); 924 dev_name(&spi->dev), status);
894 925
895 return status; 926 return status;
896} 927}
897 928
898static int dataflash_remove(struct spi_device *spi) 929static int __devexit dataflash_remove(struct spi_device *spi)
899{ 930{
900 struct dataflash *flash = dev_get_drvdata(&spi->dev); 931 struct dataflash *flash = dev_get_drvdata(&spi->dev);
901 int status; 932 int status;
902 933
903 pr_debug("%s: remove\n", dev_name(&spi->dev)); 934 DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
904 935
905 status = mtd_device_unregister(&flash->mtd); 936 status = mtd_device_unregister(&flash->mtd);
906 if (status == 0) { 937 if (status == 0) {
@@ -913,17 +944,28 @@ static int dataflash_remove(struct spi_device *spi)
913static struct spi_driver dataflash_driver = { 944static struct spi_driver dataflash_driver = {
914 .driver = { 945 .driver = {
915 .name = "mtd_dataflash", 946 .name = "mtd_dataflash",
947 .bus = &spi_bus_type,
916 .owner = THIS_MODULE, 948 .owner = THIS_MODULE,
917 .of_match_table = dataflash_dt_ids,
918 }, 949 },
919 950
920 .probe = dataflash_probe, 951 .probe = dataflash_probe,
921 .remove = dataflash_remove, 952 .remove = __devexit_p(dataflash_remove),
922 953
923 /* FIXME: investigate suspend and resume... */ 954 /* FIXME: investigate suspend and resume... */
924}; 955};
925 956
926module_spi_driver(dataflash_driver); 957static int __init dataflash_init(void)
958{
959 return spi_register_driver(&dataflash_driver);
960}
961module_init(dataflash_init);
962
963static void __exit dataflash_exit(void)
964{
965 spi_unregister_driver(&dataflash_driver);
966}
967module_exit(dataflash_exit);
968
927 969
928MODULE_LICENSE("GPL"); 970MODULE_LICENSE("GPL");
929MODULE_AUTHOR("Andrew Victor, David Brownell"); 971MODULE_AUTHOR("Andrew Victor, David Brownell");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index ec59d65897f..2562689ba6b 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -34,23 +34,34 @@ static struct mtd_info *mtd_info;
34 34
35static int ram_erase(struct mtd_info *mtd, struct erase_info *instr) 35static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
36{ 36{
37 if (instr->addr + instr->len > mtd->size)
38 return -EINVAL;
39
37 memset((char *)mtd->priv + instr->addr, 0xff, instr->len); 40 memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
41
38 instr->state = MTD_ERASE_DONE; 42 instr->state = MTD_ERASE_DONE;
39 mtd_erase_callback(instr); 43 mtd_erase_callback(instr);
44
40 return 0; 45 return 0;
41} 46}
42 47
43static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, 48static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
44 size_t *retlen, void **virt, resource_size_t *phys) 49 size_t *retlen, void **virt, resource_size_t *phys)
45{ 50{
51 if (from + len > mtd->size)
52 return -EINVAL;
53
54 /* can we return a physical address with this driver? */
55 if (phys)
56 return -EINVAL;
57
46 *virt = mtd->priv + from; 58 *virt = mtd->priv + from;
47 *retlen = len; 59 *retlen = len;
48 return 0; 60 return 0;
49} 61}
50 62
51static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 63static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
52{ 64{
53 return 0;
54} 65}
55 66
56/* 67/*
@@ -69,7 +80,11 @@ static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
69static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, 80static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
70 size_t *retlen, u_char *buf) 81 size_t *retlen, u_char *buf)
71{ 82{
83 if (from + len > mtd->size)
84 return -EINVAL;
85
72 memcpy(buf, mtd->priv + from, len); 86 memcpy(buf, mtd->priv + from, len);
87
73 *retlen = len; 88 *retlen = len;
74 return 0; 89 return 0;
75} 90}
@@ -77,7 +92,11 @@ static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
77static int ram_write(struct mtd_info *mtd, loff_t to, size_t len, 92static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
78 size_t *retlen, const u_char *buf) 93 size_t *retlen, const u_char *buf)
79{ 94{
95 if (to + len > mtd->size)
96 return -EINVAL;
97
80 memcpy((char *)mtd->priv + to, buf, len); 98 memcpy((char *)mtd->priv + to, buf, len);
99
81 *retlen = len; 100 *retlen = len;
82 return 0; 101 return 0;
83} 102}
@@ -107,12 +126,12 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
107 mtd->priv = mapped_address; 126 mtd->priv = mapped_address;
108 127
109 mtd->owner = THIS_MODULE; 128 mtd->owner = THIS_MODULE;
110 mtd->_erase = ram_erase; 129 mtd->erase = ram_erase;
111 mtd->_point = ram_point; 130 mtd->point = ram_point;
112 mtd->_unpoint = ram_unpoint; 131 mtd->unpoint = ram_unpoint;
113 mtd->_get_unmapped_area = ram_get_unmapped_area; 132 mtd->get_unmapped_area = ram_get_unmapped_area;
114 mtd->_read = ram_read; 133 mtd->read = ram_read;
115 mtd->_write = ram_write; 134 mtd->write = ram_write;
116 135
117 if (mtd_device_register(mtd, NULL, 0)) 136 if (mtd_device_register(mtd, NULL, 0))
118 return -EIO; 137 return -EIO;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 67823de68db..23423bd00b0 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -33,33 +33,45 @@ struct phram_mtd_list {
33 33
34static LIST_HEAD(phram_list); 34static LIST_HEAD(phram_list);
35 35
36
36static int phram_erase(struct mtd_info *mtd, struct erase_info *instr) 37static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
37{ 38{
38 u_char *start = mtd->priv; 39 u_char *start = mtd->priv;
39 40
41 if (instr->addr + instr->len > mtd->size)
42 return -EINVAL;
43
40 memset(start + instr->addr, 0xff, instr->len); 44 memset(start + instr->addr, 0xff, instr->len);
41 45
42 /* 46 /* This'll catch a few races. Free the thing before returning :)
43 * This'll catch a few races. Free the thing before returning :)
44 * I don't feel at all ashamed. This kind of thing is possible anyway 47 * I don't feel at all ashamed. This kind of thing is possible anyway
45 * with flash, but unlikely. 48 * with flash, but unlikely.
46 */ 49 */
50
47 instr->state = MTD_ERASE_DONE; 51 instr->state = MTD_ERASE_DONE;
52
48 mtd_erase_callback(instr); 53 mtd_erase_callback(instr);
54
49 return 0; 55 return 0;
50} 56}
51 57
52static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, 58static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
53 size_t *retlen, void **virt, resource_size_t *phys) 59 size_t *retlen, void **virt, resource_size_t *phys)
54{ 60{
61 if (from + len > mtd->size)
62 return -EINVAL;
63
64 /* can we return a physical address with this driver? */
65 if (phys)
66 return -EINVAL;
67
55 *virt = mtd->priv + from; 68 *virt = mtd->priv + from;
56 *retlen = len; 69 *retlen = len;
57 return 0; 70 return 0;
58} 71}
59 72
60static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 73static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
61{ 74{
62 return 0;
63} 75}
64 76
65static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, 77static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -67,7 +79,14 @@ static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
67{ 79{
68 u_char *start = mtd->priv; 80 u_char *start = mtd->priv;
69 81
82 if (from >= mtd->size)
83 return -EINVAL;
84
85 if (len > mtd->size - from)
86 len = mtd->size - from;
87
70 memcpy(buf, start + from, len); 88 memcpy(buf, start + from, len);
89
71 *retlen = len; 90 *retlen = len;
72 return 0; 91 return 0;
73} 92}
@@ -77,11 +96,20 @@ static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
77{ 96{
78 u_char *start = mtd->priv; 97 u_char *start = mtd->priv;
79 98
99 if (to >= mtd->size)
100 return -EINVAL;
101
102 if (len > mtd->size - to)
103 len = mtd->size - to;
104
80 memcpy(start + to, buf, len); 105 memcpy(start + to, buf, len);
106
81 *retlen = len; 107 *retlen = len;
82 return 0; 108 return 0;
83} 109}
84 110
111
112
85static void unregister_devices(void) 113static void unregister_devices(void)
86{ 114{
87 struct phram_mtd_list *this, *safe; 115 struct phram_mtd_list *this, *safe;
@@ -114,11 +142,11 @@ static int register_device(char *name, unsigned long start, unsigned long len)
114 new->mtd.name = name; 142 new->mtd.name = name;
115 new->mtd.size = len; 143 new->mtd.size = len;
116 new->mtd.flags = MTD_CAP_RAM; 144 new->mtd.flags = MTD_CAP_RAM;
117 new->mtd._erase = phram_erase; 145 new->mtd.erase = phram_erase;
118 new->mtd._point = phram_point; 146 new->mtd.point = phram_point;
119 new->mtd._unpoint = phram_unpoint; 147 new->mtd.unpoint = phram_unpoint;
120 new->mtd._read = phram_read; 148 new->mtd.read = phram_read;
121 new->mtd._write = phram_write; 149 new->mtd.write = phram_write;
122 new->mtd.owner = THIS_MODULE; 150 new->mtd.owner = THIS_MODULE;
123 new->mtd.type = MTD_RAM; 151 new->mtd.type = MTD_RAM;
124 new->mtd.erasesize = PAGE_SIZE; 152 new->mtd.erasesize = PAGE_SIZE;
@@ -205,17 +233,7 @@ static inline void kill_final_newline(char *str)
205 return 1; \ 233 return 1; \
206} while (0) 234} while (0)
207 235
208/* 236static int phram_setup(const char *val, struct kernel_param *kp)
209 * This shall contain the module parameter if any. It is of the form:
210 * - phram=<device>,<address>,<size> for module case
211 * - phram.phram=<device>,<address>,<size> for built-in case
212 * We leave 64 bytes for the device name, 12 for the address and 12 for the
213 * size.
214 * Example: phram.phram=rootfs,0xa0000000,512Mi
215 */
216static __initdata char phram_paramline[64+12+12];
217
218static int __init phram_setup(const char *val)
219{ 237{
220 char buf[64+12+12], *str = buf; 238 char buf[64+12+12], *str = buf;
221 char *token[3]; 239 char *token[3];
@@ -264,28 +282,12 @@ static int __init phram_setup(const char *val)
264 return ret; 282 return ret;
265} 283}
266 284
267static int __init phram_param_call(const char *val, struct kernel_param *kp) 285module_param_call(phram, phram_setup, NULL, NULL, 000);
268{
269 /*
270 * This function is always called before 'init_phram()', whether
271 * built-in or module.
272 */
273 if (strlen(val) >= sizeof(phram_paramline))
274 return -ENOSPC;
275 strcpy(phram_paramline, val);
276
277 return 0;
278}
279
280module_param_call(phram, phram_param_call, NULL, NULL, 000);
281MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\""); 286MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
282 287
283 288
284static int __init init_phram(void) 289static int __init init_phram(void)
285{ 290{
286 if (phram_paramline[0])
287 return phram_setup(phram_paramline);
288
289 return 0; 291 return 0;
290} 292}
291 293
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 0c51b988e1f..ecff765579d 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -93,49 +93,14 @@
93#include <linux/fs.h> 93#include <linux/fs.h>
94#include <linux/ioctl.h> 94#include <linux/ioctl.h>
95#include <asm/io.h> 95#include <asm/io.h>
96#include <asm/system.h>
96#include <linux/pci.h> 97#include <linux/pci.h>
97#include <linux/mtd/mtd.h>
98 98
99#define PMC551_VERSION \ 99#include <linux/mtd/mtd.h>
100 "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n" 100#include <linux/mtd/pmc551.h>
101
102#define PCI_VENDOR_ID_V3_SEMI 0x11b0
103#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
104
105#define PMC551_PCI_MEM_MAP0 0x50
106#define PMC551_PCI_MEM_MAP1 0x54
107#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
108#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
109#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
110#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
111
112#define PMC551_SDRAM_MA 0x60
113#define PMC551_SDRAM_CMD 0x62
114#define PMC551_DRAM_CFG 0x64
115#define PMC551_SYS_CTRL_REG 0x78
116
117#define PMC551_DRAM_BLK0 0x68
118#define PMC551_DRAM_BLK1 0x6c
119#define PMC551_DRAM_BLK2 0x70
120#define PMC551_DRAM_BLK3 0x74
121#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
122#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
123#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
124
125struct mypriv {
126 struct pci_dev *dev;
127 u_char *start;
128 u32 base_map0;
129 u32 curr_map0;
130 u32 asize;
131 struct mtd_info *nextpmc551;
132};
133 101
134static struct mtd_info *pmc551list; 102static struct mtd_info *pmc551list;
135 103
136static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
137 size_t *retlen, void **virt, resource_size_t *phys);
138
139static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) 104static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
140{ 105{
141 struct mypriv *priv = mtd->priv; 106 struct mypriv *priv = mtd->priv;
@@ -151,6 +116,16 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
151#endif 116#endif
152 117
153 end = instr->addr + instr->len - 1; 118 end = instr->addr + instr->len - 1;
119
120 /* Is it past the end? */
121 if (end > mtd->size) {
122#ifdef CONFIG_MTD_PMC551_DEBUG
123 printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
124 (long)end, (long)mtd->size);
125#endif
126 return -EINVAL;
127 }
128
154 eoff_hi = end & ~(priv->asize - 1); 129 eoff_hi = end & ~(priv->asize - 1);
155 soff_hi = instr->addr & ~(priv->asize - 1); 130 soff_hi = instr->addr & ~(priv->asize - 1);
156 eoff_lo = end & (priv->asize - 1); 131 eoff_lo = end & (priv->asize - 1);
@@ -204,6 +179,18 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
204 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); 179 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
205#endif 180#endif
206 181
182 if (from + len > mtd->size) {
183#ifdef CONFIG_MTD_PMC551_DEBUG
184 printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
185 (long)from + len, (long)mtd->size);
186#endif
187 return -EINVAL;
188 }
189
190 /* can we return a physical address with this driver? */
191 if (phys)
192 return -EINVAL;
193
207 soff_hi = from & ~(priv->asize - 1); 194 soff_hi = from & ~(priv->asize - 1);
208 soff_lo = from & (priv->asize - 1); 195 soff_lo = from & (priv->asize - 1);
209 196
@@ -219,12 +206,11 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
219 return 0; 206 return 0;
220} 207}
221 208
222static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 209static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
223{ 210{
224#ifdef CONFIG_MTD_PMC551_DEBUG 211#ifdef CONFIG_MTD_PMC551_DEBUG
225 printk(KERN_DEBUG "pmc551_unpoint()\n"); 212 printk(KERN_DEBUG "pmc551_unpoint()\n");
226#endif 213#endif
227 return 0;
228} 214}
229 215
230static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, 216static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -243,6 +229,16 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
243#endif 229#endif
244 230
245 end = from + len - 1; 231 end = from + len - 1;
232
233 /* Is it past the end? */
234 if (end > mtd->size) {
235#ifdef CONFIG_MTD_PMC551_DEBUG
236 printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
237 (long)end, (long)mtd->size);
238#endif
239 return -EINVAL;
240 }
241
246 soff_hi = from & ~(priv->asize - 1); 242 soff_hi = from & ~(priv->asize - 1);
247 eoff_hi = end & ~(priv->asize - 1); 243 eoff_hi = end & ~(priv->asize - 1);
248 soff_lo = from & (priv->asize - 1); 244 soff_lo = from & (priv->asize - 1);
@@ -300,6 +296,16 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
300#endif 296#endif
301 297
302 end = to + len - 1; 298 end = to + len - 1;
299 /* Is it past the end? or did the u32 wrap? */
300 if (end > mtd->size) {
301#ifdef CONFIG_MTD_PMC551_DEBUG
302 printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
303 "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
304 (long)to);
305#endif
306 return -EINVAL;
307 }
308
303 soff_hi = to & ~(priv->asize - 1); 309 soff_hi = to & ~(priv->asize - 1);
304 eoff_hi = end & ~(priv->asize - 1); 310 eoff_hi = end & ~(priv->asize - 1);
305 soff_lo = to & (priv->asize - 1); 311 soff_lo = to & (priv->asize - 1);
@@ -353,7 +359,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
353 * mechanism 359 * mechanism
354 * returns the size of the memory region found. 360 * returns the size of the memory region found.
355 */ 361 */
356static int fixup_pmc551(struct pci_dev *dev) 362static u32 fixup_pmc551(struct pci_dev *dev)
357{ 363{
358#ifdef CONFIG_MTD_PMC551_BUGFIX 364#ifdef CONFIG_MTD_PMC551_BUGFIX
359 u32 dram_data; 365 u32 dram_data;
@@ -663,7 +669,7 @@ static int __init init_pmc551(void)
663 struct mypriv *priv; 669 struct mypriv *priv;
664 int found = 0; 670 int found = 0;
665 struct mtd_info *mtd; 671 struct mtd_info *mtd;
666 int length = 0; 672 u32 length = 0;
667 673
668 if (msize) { 674 if (msize) {
669 msize = (1 << (ffs(msize) - 1)) << 20; 675 msize = (1 << (ffs(msize) - 1)) << 20;
@@ -781,11 +787,11 @@ static int __init init_pmc551(void)
781 787
782 mtd->size = msize; 788 mtd->size = msize;
783 mtd->flags = MTD_CAP_RAM; 789 mtd->flags = MTD_CAP_RAM;
784 mtd->_erase = pmc551_erase; 790 mtd->erase = pmc551_erase;
785 mtd->_read = pmc551_read; 791 mtd->read = pmc551_read;
786 mtd->_write = pmc551_write; 792 mtd->write = pmc551_write;
787 mtd->_point = pmc551_point; 793 mtd->point = pmc551_point;
788 mtd->_unpoint = pmc551_unpoint; 794 mtd->unpoint = pmc551_unpoint;
789 mtd->type = MTD_RAM; 795 mtd->type = MTD_RAM;
790 mtd->name = "PMC551 RAM board"; 796 mtd->name = "PMC551 RAM board";
791 mtd->erasesize = 0x10000; 797 mtd->erasesize = 0x10000;
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 5a5cd2ace4a..e585263161b 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -42,6 +42,7 @@
42#include <linux/ioctl.h> 42#include <linux/ioctl.h>
43#include <linux/init.h> 43#include <linux/init.h>
44#include <asm/io.h> 44#include <asm/io.h>
45#include <asm/system.h>
45 46
46#include <linux/mtd/mtd.h> 47#include <linux/mtd/mtd.h>
47 48
@@ -75,7 +76,7 @@ static slram_mtd_list_t *slram_mtdlist = NULL;
75static int slram_erase(struct mtd_info *, struct erase_info *); 76static int slram_erase(struct mtd_info *, struct erase_info *);
76static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **, 77static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
77 resource_size_t *); 78 resource_size_t *);
78static int slram_unpoint(struct mtd_info *, loff_t, size_t); 79static void slram_unpoint(struct mtd_info *, loff_t, size_t);
79static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); 80static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
80static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 81static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
81 82
@@ -83,13 +84,21 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
83{ 84{
84 slram_priv_t *priv = mtd->priv; 85 slram_priv_t *priv = mtd->priv;
85 86
87 if (instr->addr + instr->len > mtd->size) {
88 return(-EINVAL);
89 }
90
86 memset(priv->start + instr->addr, 0xff, instr->len); 91 memset(priv->start + instr->addr, 0xff, instr->len);
92
87 /* This'll catch a few races. Free the thing before returning :) 93 /* This'll catch a few races. Free the thing before returning :)
88 * I don't feel at all ashamed. This kind of thing is possible anyway 94 * I don't feel at all ashamed. This kind of thing is possible anyway
89 * with flash, but unlikely. 95 * with flash, but unlikely.
90 */ 96 */
97
91 instr->state = MTD_ERASE_DONE; 98 instr->state = MTD_ERASE_DONE;
99
92 mtd_erase_callback(instr); 100 mtd_erase_callback(instr);
101
93 return(0); 102 return(0);
94} 103}
95 104
@@ -98,14 +107,20 @@ static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
98{ 107{
99 slram_priv_t *priv = mtd->priv; 108 slram_priv_t *priv = mtd->priv;
100 109
110 /* can we return a physical address with this driver? */
111 if (phys)
112 return -EINVAL;
113
114 if (from + len > mtd->size)
115 return -EINVAL;
116
101 *virt = priv->start + from; 117 *virt = priv->start + from;
102 *retlen = len; 118 *retlen = len;
103 return(0); 119 return(0);
104} 120}
105 121
106static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 122static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
107{ 123{
108 return 0;
109} 124}
110 125
111static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, 126static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -113,7 +128,14 @@ static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
113{ 128{
114 slram_priv_t *priv = mtd->priv; 129 slram_priv_t *priv = mtd->priv;
115 130
131 if (from > mtd->size)
132 return -EINVAL;
133
134 if (from + len > mtd->size)
135 len = mtd->size - from;
136
116 memcpy(buf, priv->start + from, len); 137 memcpy(buf, priv->start + from, len);
138
117 *retlen = len; 139 *retlen = len;
118 return(0); 140 return(0);
119} 141}
@@ -123,7 +145,11 @@ static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
123{ 145{
124 slram_priv_t *priv = mtd->priv; 146 slram_priv_t *priv = mtd->priv;
125 147
148 if (to + len > mtd->size)
149 return -EINVAL;
150
126 memcpy(priv->start + to, buf, len); 151 memcpy(priv->start + to, buf, len);
152
127 *retlen = len; 153 *retlen = len;
128 return(0); 154 return(0);
129} 155}
@@ -174,11 +200,11 @@ static int register_device(char *name, unsigned long start, unsigned long length
174 (*curmtd)->mtdinfo->name = name; 200 (*curmtd)->mtdinfo->name = name;
175 (*curmtd)->mtdinfo->size = length; 201 (*curmtd)->mtdinfo->size = length;
176 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM; 202 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
177 (*curmtd)->mtdinfo->_erase = slram_erase; 203 (*curmtd)->mtdinfo->erase = slram_erase;
178 (*curmtd)->mtdinfo->_point = slram_point; 204 (*curmtd)->mtdinfo->point = slram_point;
179 (*curmtd)->mtdinfo->_unpoint = slram_unpoint; 205 (*curmtd)->mtdinfo->unpoint = slram_unpoint;
180 (*curmtd)->mtdinfo->_read = slram_read; 206 (*curmtd)->mtdinfo->read = slram_read;
181 (*curmtd)->mtdinfo->_write = slram_write; 207 (*curmtd)->mtdinfo->write = slram_write;
182 (*curmtd)->mtdinfo->owner = THIS_MODULE; 208 (*curmtd)->mtdinfo->owner = THIS_MODULE;
183 (*curmtd)->mtdinfo->type = MTD_RAM; 209 (*curmtd)->mtdinfo->type = MTD_RAM;
184 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; 210 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
@@ -240,7 +266,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
240 266
241 if (*(szlength) != '+') { 267 if (*(szlength) != '+') {
242 devlength = simple_strtoul(szlength, &buffer, 0); 268 devlength = simple_strtoul(szlength, &buffer, 0);
243 devlength = handle_unit(devlength, buffer); 269 devlength = handle_unit(devlength, buffer) - devstart;
244 if (devlength < devstart) 270 if (devlength < devstart)
245 goto err_out; 271 goto err_out;
246 272
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
deleted file mode 100644
index 2aabd96bf0f..00000000000
--- a/drivers/mtd/devices/spear_smi.c
+++ /dev/null
@@ -1,1101 +0,0 @@
1/*
2 * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
3 * SPEAr platform
4 * The serial nor interface is largely based on drivers/mtd/m25p80.c,
5 * however the SPI interface has been replaced by SMI.
6 *
7 * Copyright © 2010 STMicroelectronics.
8 * Ashish Priyadarshi
9 * Shiraz Hashim <shiraz.hashim@st.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/err.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/ioport.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/param.h>
28#include <linux/platform_device.h>
29#include <linux/pm.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h>
32#include <linux/mtd/spear_smi.h>
33#include <linux/mutex.h>
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/wait.h>
37#include <linux/of.h>
38#include <linux/of_address.h>
39
40/* SMI clock rate */
41#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
42
43/* MAX time out to safely come out of a erase or write busy conditions */
44#define SMI_PROBE_TIMEOUT (HZ / 10)
45#define SMI_MAX_TIME_OUT (3 * HZ)
46
47/* timeout for command completion */
48#define SMI_CMD_TIMEOUT (HZ / 10)
49
50/* registers of smi */
51#define SMI_CR1 0x0 /* SMI control register 1 */
52#define SMI_CR2 0x4 /* SMI control register 2 */
53#define SMI_SR 0x8 /* SMI status register */
54#define SMI_TR 0xC /* SMI transmit register */
55#define SMI_RR 0x10 /* SMI receive register */
56
57/* defines for control_reg 1 */
58#define BANK_EN (0xF << 0) /* enables all banks */
59#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
60#define SW_MODE (0x1 << 28) /* enables SW Mode */
61#define WB_MODE (0x1 << 29) /* Write Burst Mode */
62#define FAST_MODE (0x1 << 15) /* Fast Mode */
63#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
64
65/* defines for control_reg 2 */
66#define SEND (0x1 << 7) /* Send data */
67#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
68#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
69#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
70#define WE (0x1 << 11) /* Write Enable */
71
72#define TX_LEN_SHIFT 0
73#define RX_LEN_SHIFT 4
74#define BANK_SHIFT 12
75
76/* defines for status register */
77#define SR_WIP 0x1 /* Write in progress */
78#define SR_WEL 0x2 /* Write enable latch */
79#define SR_BP0 0x4 /* Block protect 0 */
80#define SR_BP1 0x8 /* Block protect 1 */
81#define SR_BP2 0x10 /* Block protect 2 */
82#define SR_SRWD 0x80 /* SR write protect */
83#define TFF 0x100 /* Transfer Finished Flag */
84#define WCF 0x200 /* Transfer Finished Flag */
85#define ERF1 0x400 /* Forbidden Write Request */
86#define ERF2 0x800 /* Forbidden Access */
87
88#define WM_SHIFT 12
89
90/* flash opcodes */
91#define OPCODE_RDID 0x9f /* Read JEDEC ID */
92
93/* Flash Device Ids maintenance section */
94
95/* data structure to maintain flash ids from different vendors */
96struct flash_device {
97 char *name;
98 u8 erase_cmd;
99 u32 device_id;
100 u32 pagesize;
101 unsigned long sectorsize;
102 unsigned long size_in_bytes;
103};
104
105#define FLASH_ID(n, es, id, psize, ssize, size) \
106{ \
107 .name = n, \
108 .erase_cmd = es, \
109 .device_id = id, \
110 .pagesize = psize, \
111 .sectorsize = ssize, \
112 .size_in_bytes = size \
113}
114
115static struct flash_device flash_devices[] = {
116 FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
117 FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
118 FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
119 FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
120 FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
121 FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
122 FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
123 FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
124 FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
125 FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
126 FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
127 FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
128 FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
129 FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
130 FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
131 FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
132 FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
133 FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
134 FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
135 FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
136 FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
137 FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
138 FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
139 FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
140 FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
141 FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
142 FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
143 FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
144 FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
145 FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
146 FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
147 FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
148 FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
149 FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
150};
151
152/* Define spear specific structures */
153
154struct spear_snor_flash;
155
156/**
157 * struct spear_smi - Structure for SMI Device
158 *
159 * @clk: functional clock
160 * @status: current status register of SMI.
161 * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
162 * @lock: lock to prevent parallel access of SMI.
163 * @io_base: base address for registers of SMI.
164 * @pdev: platform device
165 * @cmd_complete: queue to wait for command completion of NOR-flash.
166 * @num_flashes: number of flashes actually present on board.
167 * @flash: separate structure for each Serial NOR-flash attached to SMI.
168 */
169struct spear_smi {
170 struct clk *clk;
171 u32 status;
172 unsigned long clk_rate;
173 struct mutex lock;
174 void __iomem *io_base;
175 struct platform_device *pdev;
176 wait_queue_head_t cmd_complete;
177 u32 num_flashes;
178 struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
179};
180
181/**
182 * struct spear_snor_flash - Structure for Serial NOR Flash
183 *
184 * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
185 * @dev_id: Device ID of NOR-flash.
186 * @lock: lock to manage flash read, write and erase operations
187 * @mtd: MTD info for each NOR-flash.
188 * @num_parts: Total number of partition in each bank of NOR-flash.
189 * @parts: Partition info for each bank of NOR-flash.
190 * @page_size: Page size of NOR-flash.
191 * @base_addr: Base address of NOR-flash.
192 * @erase_cmd: erase command may vary on different flash types
193 * @fast_mode: flash supports read in fast mode
194 */
195struct spear_snor_flash {
196 u32 bank;
197 u32 dev_id;
198 struct mutex lock;
199 struct mtd_info mtd;
200 u32 num_parts;
201 struct mtd_partition *parts;
202 u32 page_size;
203 void __iomem *base_addr;
204 u8 erase_cmd;
205 u8 fast_mode;
206};
207
208static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
209{
210 return container_of(mtd, struct spear_snor_flash, mtd);
211}
212
213/**
214 * spear_smi_read_sr - Read status register of flash through SMI
215 * @dev: structure of SMI information.
216 * @bank: bank to which flash is connected
217 *
218 * This routine will return the status register of the flash chip present at the
219 * given bank.
220 */
221static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
222{
223 int ret;
224 u32 ctrlreg1;
225
226 mutex_lock(&dev->lock);
227 dev->status = 0; /* Will be set in interrupt handler */
228
229 ctrlreg1 = readl(dev->io_base + SMI_CR1);
230 /* program smi in hw mode */
231 writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
232
233 /* performing a rsr instruction in hw mode */
234 writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
235 dev->io_base + SMI_CR2);
236
237 /* wait for tff */
238 ret = wait_event_interruptible_timeout(dev->cmd_complete,
239 dev->status & TFF, SMI_CMD_TIMEOUT);
240
241 /* copy dev->status (lower 16 bits) in order to release lock */
242 if (ret > 0)
243 ret = dev->status & 0xffff;
244 else if (ret == 0)
245 ret = -ETIMEDOUT;
246
247 /* restore the ctrl regs state */
248 writel(ctrlreg1, dev->io_base + SMI_CR1);
249 writel(0, dev->io_base + SMI_CR2);
250 mutex_unlock(&dev->lock);
251
252 return ret;
253}
254
255/**
256 * spear_smi_wait_till_ready - wait till flash is ready
257 * @dev: structure of SMI information.
258 * @bank: flash corresponding to this bank
259 * @timeout: timeout for busy wait condition
260 *
261 * This routine checks for WIP (write in progress) bit in Status register
262 * If successful the routine returns 0 else -EBUSY
263 */
264static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
265 unsigned long timeout)
266{
267 unsigned long finish;
268 int status;
269
270 finish = jiffies + timeout;
271 do {
272 status = spear_smi_read_sr(dev, bank);
273 if (status < 0) {
274 if (status == -ETIMEDOUT)
275 continue; /* try till finish */
276 return status;
277 } else if (!(status & SR_WIP)) {
278 return 0;
279 }
280
281 cond_resched();
282 } while (!time_after_eq(jiffies, finish));
283
284 dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
285 return -EBUSY;
286}
287
288/**
289 * spear_smi_int_handler - SMI Interrupt Handler.
290 * @irq: irq number
291 * @dev_id: structure of SMI device, embedded in dev_id.
292 *
293 * The handler clears all interrupt conditions and records the status in
294 * dev->status which is used by the driver later.
295 */
296static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
297{
298 u32 status = 0;
299 struct spear_smi *dev = dev_id;
300
301 status = readl(dev->io_base + SMI_SR);
302
303 if (unlikely(!status))
304 return IRQ_NONE;
305
306 /* clear all interrupt conditions */
307 writel(0, dev->io_base + SMI_SR);
308
309 /* copy the status register in dev->status */
310 dev->status |= status;
311
312 /* send the completion */
313 wake_up_interruptible(&dev->cmd_complete);
314
315 return IRQ_HANDLED;
316}
317
318/**
319 * spear_smi_hw_init - initializes the smi controller.
320 * @dev: structure of smi device
321 *
322 * this routine initializes the smi controller wit the default values
323 */
324static void spear_smi_hw_init(struct spear_smi *dev)
325{
326 unsigned long rate = 0;
327 u32 prescale = 0;
328 u32 val;
329
330 rate = clk_get_rate(dev->clk);
331
332 /* functional clock of smi */
333 prescale = DIV_ROUND_UP(rate, dev->clk_rate);
334
335 /*
336 * setting the standard values, fast mode, prescaler for
337 * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
338 */
339 val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
340
341 mutex_lock(&dev->lock);
342 /* clear all interrupt conditions */
343 writel(0, dev->io_base + SMI_SR);
344
345 writel(val, dev->io_base + SMI_CR1);
346 mutex_unlock(&dev->lock);
347}
348
349/**
350 * get_flash_index - match chip id from a flash list.
351 * @flash_id: a valid nor flash chip id obtained from board.
352 *
353 * try to validate the chip id by matching from a list, if not found then simply
354 * returns negative. In case of success returns index in to the flash devices
355 * array.
356 */
357static int get_flash_index(u32 flash_id)
358{
359 int index;
360
361 /* Matches chip-id to entire list of 'serial-nor flash' ids */
362 for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
363 if (flash_devices[index].device_id == flash_id)
364 return index;
365 }
366
367 /* Memory chip is not listed and not supported */
368 return -ENODEV;
369}
370
371/**
372 * spear_smi_write_enable - Enable the flash to do write operation
373 * @dev: structure of SMI device
374 * @bank: enable write for flash connected to this bank
375 *
376 * Set write enable latch with Write Enable command.
377 * Returns 0 on success.
378 */
379static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
380{
381 int ret;
382 u32 ctrlreg1;
383
384 mutex_lock(&dev->lock);
385 dev->status = 0; /* Will be set in interrupt handler */
386
387 ctrlreg1 = readl(dev->io_base + SMI_CR1);
388 /* program smi in h/w mode */
389 writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
390
391 /* give the flash, write enable command */
392 writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
393
394 ret = wait_event_interruptible_timeout(dev->cmd_complete,
395 dev->status & TFF, SMI_CMD_TIMEOUT);
396
397 /* restore the ctrl regs state */
398 writel(ctrlreg1, dev->io_base + SMI_CR1);
399 writel(0, dev->io_base + SMI_CR2);
400
401 if (ret == 0) {
402 ret = -EIO;
403 dev_err(&dev->pdev->dev,
404 "smi controller failed on write enable\n");
405 } else if (ret > 0) {
406 /* check whether write mode status is set for required bank */
407 if (dev->status & (1 << (bank + WM_SHIFT)))
408 ret = 0;
409 else {
410 dev_err(&dev->pdev->dev, "couldn't enable write\n");
411 ret = -EIO;
412 }
413 }
414
415 mutex_unlock(&dev->lock);
416 return ret;
417}
418
419static inline u32
420get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
421{
422 u32 cmd;
423 u8 *x = (u8 *)&cmd;
424
425 x[0] = flash->erase_cmd;
426 x[1] = offset >> 16;
427 x[2] = offset >> 8;
428 x[3] = offset;
429
430 return cmd;
431}
432
433/**
434 * spear_smi_erase_sector - erase one sector of flash
435 * @dev: structure of SMI information
436 * @command: erase command to be send
437 * @bank: bank to which this command needs to be send
438 * @bytes: size of command
439 *
440 * Erase one sector of flash memory at offset ``offset'' which is any
441 * address within the sector which should be erased.
442 * Returns 0 if successful, non-zero otherwise.
443 */
444static int spear_smi_erase_sector(struct spear_smi *dev,
445 u32 bank, u32 command, u32 bytes)
446{
447 u32 ctrlreg1 = 0;
448 int ret;
449
450 ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
451 if (ret)
452 return ret;
453
454 ret = spear_smi_write_enable(dev, bank);
455 if (ret)
456 return ret;
457
458 mutex_lock(&dev->lock);
459
460 ctrlreg1 = readl(dev->io_base + SMI_CR1);
461 writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
462
463 /* send command in sw mode */
464 writel(command, dev->io_base + SMI_TR);
465
466 writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
467 dev->io_base + SMI_CR2);
468
469 ret = wait_event_interruptible_timeout(dev->cmd_complete,
470 dev->status & TFF, SMI_CMD_TIMEOUT);
471
472 if (ret == 0) {
473 ret = -EIO;
474 dev_err(&dev->pdev->dev, "sector erase failed\n");
475 } else if (ret > 0)
476 ret = 0; /* success */
477
478 /* restore ctrl regs */
479 writel(ctrlreg1, dev->io_base + SMI_CR1);
480 writel(0, dev->io_base + SMI_CR2);
481
482 mutex_unlock(&dev->lock);
483 return ret;
484}
485
486/**
487 * spear_mtd_erase - perform flash erase operation as requested by user
488 * @mtd: Provides the memory characteristics
489 * @e_info: Provides the erase information
490 *
491 * Erase an address range on the flash chip. The address range may extend
492 * one or more erase sectors. Return an error is there is a problem erasing.
493 */
494static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
495{
496 struct spear_snor_flash *flash = get_flash_data(mtd);
497 struct spear_smi *dev = mtd->priv;
498 u32 addr, command, bank;
499 int len, ret;
500
501 if (!flash || !dev)
502 return -ENODEV;
503
504 bank = flash->bank;
505 if (bank > dev->num_flashes - 1) {
506 dev_err(&dev->pdev->dev, "Invalid Bank Num");
507 return -EINVAL;
508 }
509
510 addr = e_info->addr;
511 len = e_info->len;
512
513 mutex_lock(&flash->lock);
514
515 /* now erase sectors in loop */
516 while (len) {
517 command = get_sector_erase_cmd(flash, addr);
518 /* preparing the command for flash */
519 ret = spear_smi_erase_sector(dev, bank, command, 4);
520 if (ret) {
521 e_info->state = MTD_ERASE_FAILED;
522 mutex_unlock(&flash->lock);
523 return ret;
524 }
525 addr += mtd->erasesize;
526 len -= mtd->erasesize;
527 }
528
529 mutex_unlock(&flash->lock);
530 e_info->state = MTD_ERASE_DONE;
531 mtd_erase_callback(e_info);
532
533 return 0;
534}
535
536/**
537 * spear_mtd_read - performs flash read operation as requested by the user
538 * @mtd: MTD information of the memory bank
539 * @from: Address from which to start read
540 * @len: Number of bytes to be read
541 * @retlen: Fills the Number of bytes actually read
542 * @buf: Fills this after reading
543 *
544 * Read an address range from the flash chip. The address range
545 * may be any size provided it is within the physical boundaries.
546 * Returns 0 on success, non zero otherwise
547 */
548static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
549 size_t *retlen, u8 *buf)
550{
551 struct spear_snor_flash *flash = get_flash_data(mtd);
552 struct spear_smi *dev = mtd->priv;
553 void *src;
554 u32 ctrlreg1, val;
555 int ret;
556
557 if (!flash || !dev)
558 return -ENODEV;
559
560 if (flash->bank > dev->num_flashes - 1) {
561 dev_err(&dev->pdev->dev, "Invalid Bank Num");
562 return -EINVAL;
563 }
564
565 /* select address as per bank number */
566 src = flash->base_addr + from;
567
568 mutex_lock(&flash->lock);
569
570 /* wait till previous write/erase is done. */
571 ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
572 if (ret) {
573 mutex_unlock(&flash->lock);
574 return ret;
575 }
576
577 mutex_lock(&dev->lock);
578 /* put smi in hw mode not wbt mode */
579 ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
580 val &= ~(SW_MODE | WB_MODE);
581 if (flash->fast_mode)
582 val |= FAST_MODE;
583
584 writel(val, dev->io_base + SMI_CR1);
585
586 memcpy_fromio(buf, (u8 *)src, len);
587
588 /* restore ctrl reg1 */
589 writel(ctrlreg1, dev->io_base + SMI_CR1);
590 mutex_unlock(&dev->lock);
591
592 *retlen = len;
593 mutex_unlock(&flash->lock);
594
595 return 0;
596}
597
/**
 * spear_smi_cpy_toio - copy one burst of data into the flash window
 * @dev: structure of SMI information
 * @bank: bank whose write-enable latch must be set first
 * @dest: destination inside the memory-mapped flash window
 * @src: source buffer in memory
 * @len: number of bytes to copy
 *
 * Waits for the previous command, sets write enable, then performs the
 * copy with the controller in hardware write-burst mode. The CR1 mode
 * bits are restored afterwards. Returns 0 on success, non-zero otherwise.
 */
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
		void *dest, const void *src, size_t len)
{
	int ret;
	u32 ctrlreg1;

	/* wait until finished previous write command. */
	ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
	if (ret)
		return ret;

	/* put smi in write enable */
	ret = spear_smi_write_enable(dev, bank);
	if (ret)
		return ret;

	/* put smi in hw, write burst mode */
	mutex_lock(&dev->lock);

	/* set write-burst, clear software mode, before touching the window */
	ctrlreg1 = readl(dev->io_base + SMI_CR1);
	writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);

	memcpy_toio(dest, src, len);

	/* restore the original mode bits */
	writel(ctrlreg1, dev->io_base + SMI_CR1);

	mutex_unlock(&dev->lock);
	return 0;
}
627
628/**
629 * spear_mtd_write - performs write operation as requested by the user.
630 * @mtd: MTD information of the memory bank.
631 * @to: Address to write.
632 * @len: Number of bytes to be written.
633 * @retlen: Number of bytes actually wrote.
634 * @buf: Buffer from which the data to be taken.
635 *
636 * Write an address range to the flash chip. Data must be written in
637 * flash_page_size chunks. The address range may be any size provided
638 * it is within the physical boundaries.
639 * Returns 0 on success, non zero otherwise
640 */
641static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
642 size_t *retlen, const u8 *buf)
643{
644 struct spear_snor_flash *flash = get_flash_data(mtd);
645 struct spear_smi *dev = mtd->priv;
646 void *dest;
647 u32 page_offset, page_size;
648 int ret;
649
650 if (!flash || !dev)
651 return -ENODEV;
652
653 if (flash->bank > dev->num_flashes - 1) {
654 dev_err(&dev->pdev->dev, "Invalid Bank Num");
655 return -EINVAL;
656 }
657
658 /* select address as per bank number */
659 dest = flash->base_addr + to;
660 mutex_lock(&flash->lock);
661
662 page_offset = (u32)to % flash->page_size;
663
664 /* do if all the bytes fit onto one page */
665 if (page_offset + len <= flash->page_size) {
666 ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
667 if (!ret)
668 *retlen += len;
669 } else {
670 u32 i;
671
672 /* the size of data remaining on the first page */
673 page_size = flash->page_size - page_offset;
674
675 ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
676 page_size);
677 if (ret)
678 goto err_write;
679 else
680 *retlen += page_size;
681
682 /* write everything in pagesize chunks */
683 for (i = page_size; i < len; i += page_size) {
684 page_size = len - i;
685 if (page_size > flash->page_size)
686 page_size = flash->page_size;
687
688 ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
689 buf + i, page_size);
690 if (ret)
691 break;
692 else
693 *retlen += page_size;
694 }
695 }
696
697err_write:
698 mutex_unlock(&flash->lock);
699
700 return ret;
701}
702
703/**
704 * spear_smi_probe_flash - Detects the NOR Flash chip.
705 * @dev: structure of SMI information.
706 * @bank: bank on which flash must be probed
707 *
708 * This routine will check whether there exists a flash chip on a given memory
709 * bank ID.
710 * Return index of the probed flash in flash devices structure
711 */
712static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
713{
714 int ret;
715 u32 val = 0;
716
717 ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
718 if (ret)
719 return ret;
720
721 mutex_lock(&dev->lock);
722
723 dev->status = 0; /* Will be set in interrupt handler */
724 /* put smi in sw mode */
725 val = readl(dev->io_base + SMI_CR1);
726 writel(val | SW_MODE, dev->io_base + SMI_CR1);
727
728 /* send readid command in sw mode */
729 writel(OPCODE_RDID, dev->io_base + SMI_TR);
730
731 val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
732 (3 << RX_LEN_SHIFT) | TFIE;
733 writel(val, dev->io_base + SMI_CR2);
734
735 /* wait for TFF */
736 ret = wait_event_interruptible_timeout(dev->cmd_complete,
737 dev->status & TFF, SMI_CMD_TIMEOUT);
738 if (ret <= 0) {
739 ret = -ENODEV;
740 goto err_probe;
741 }
742
743 /* get memory chip id */
744 val = readl(dev->io_base + SMI_RR);
745 val &= 0x00ffffff;
746 ret = get_flash_index(val);
747
748err_probe:
749 /* clear sw mode */
750 val = readl(dev->io_base + SMI_CR1);
751 writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
752
753 mutex_unlock(&dev->lock);
754 return ret;
755}
756
757
758#ifdef CONFIG_OF
759static int spear_smi_probe_config_dt(struct platform_device *pdev,
760 struct device_node *np)
761{
762 struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
763 struct device_node *pp = NULL;
764 const __be32 *addr;
765 u32 val;
766 int len;
767 int i = 0;
768
769 if (!np)
770 return -ENODEV;
771
772 of_property_read_u32(np, "clock-rate", &val);
773 pdata->clk_rate = val;
774
775 pdata->board_flash_info = devm_kzalloc(&pdev->dev,
776 sizeof(*pdata->board_flash_info),
777 GFP_KERNEL);
778
779 /* Fill structs for each subnode (flash device) */
780 while ((pp = of_get_next_child(np, pp))) {
781 struct spear_smi_flash_info *flash_info;
782
783 flash_info = &pdata->board_flash_info[i];
784 pdata->np[i] = pp;
785
786 /* Read base-addr and size from DT */
787 addr = of_get_property(pp, "reg", &len);
788 pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
789 pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
790
791 if (of_get_property(pp, "st,smi-fast-mode", NULL))
792 pdata->board_flash_info->fast_mode = 1;
793
794 i++;
795 }
796
797 pdata->num_flashes = i;
798
799 return 0;
800}
801#else
/* Stub for !CONFIG_OF builds: device-tree probing is not supported. */
static int spear_smi_probe_config_dt(struct platform_device *pdev,
		struct device_node *np)
{
	return -ENOSYS;
}
807#endif
808
809static int spear_smi_setup_banks(struct platform_device *pdev,
810 u32 bank, struct device_node *np)
811{
812 struct spear_smi *dev = platform_get_drvdata(pdev);
813 struct mtd_part_parser_data ppdata = {};
814 struct spear_smi_flash_info *flash_info;
815 struct spear_smi_plat_data *pdata;
816 struct spear_snor_flash *flash;
817 struct mtd_partition *parts = NULL;
818 int count = 0;
819 int flash_index;
820 int ret = 0;
821
822 pdata = dev_get_platdata(&pdev->dev);
823 if (bank > pdata->num_flashes - 1)
824 return -EINVAL;
825
826 flash_info = &pdata->board_flash_info[bank];
827 if (!flash_info)
828 return -ENODEV;
829
830 flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
831 if (!flash)
832 return -ENOMEM;
833 flash->bank = bank;
834 flash->fast_mode = flash_info->fast_mode ? 1 : 0;
835 mutex_init(&flash->lock);
836
837 /* verify whether nor flash is really present on board */
838 flash_index = spear_smi_probe_flash(dev, bank);
839 if (flash_index < 0) {
840 dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
841 return flash_index;
842 }
843 /* map the memory for nor flash chip */
844 flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
845 flash_info->size);
846 if (!flash->base_addr)
847 return -EIO;
848
849 dev->flash[bank] = flash;
850 flash->mtd.priv = dev;
851
852 if (flash_info->name)
853 flash->mtd.name = flash_info->name;
854 else
855 flash->mtd.name = flash_devices[flash_index].name;
856
857 flash->mtd.type = MTD_NORFLASH;
858 flash->mtd.writesize = 1;
859 flash->mtd.flags = MTD_CAP_NORFLASH;
860 flash->mtd.size = flash_info->size;
861 flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
862 flash->page_size = flash_devices[flash_index].pagesize;
863 flash->mtd.writebufsize = flash->page_size;
864 flash->erase_cmd = flash_devices[flash_index].erase_cmd;
865 flash->mtd._erase = spear_mtd_erase;
866 flash->mtd._read = spear_mtd_read;
867 flash->mtd._write = spear_mtd_write;
868 flash->dev_id = flash_devices[flash_index].device_id;
869
870 dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
871 flash->mtd.name, flash->mtd.size,
872 flash->mtd.size / (1024 * 1024));
873
874 dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
875 flash->mtd.erasesize, flash->mtd.erasesize / 1024);
876
877#ifndef CONFIG_OF
878 if (flash_info->partitions) {
879 parts = flash_info->partitions;
880 count = flash_info->nr_partitions;
881 }
882#endif
883 ppdata.of_node = np;
884
885 ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
886 count);
887 if (ret) {
888 dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
889 return ret;
890 }
891
892 return 0;
893}
894
895/**
896 * spear_smi_probe - Entry routine
897 * @pdev: platform device structure
898 *
899 * This is the first routine which gets invoked during booting and does all
900 * initialization/allocation work. The routine looks for available memory banks,
901 * and do proper init for any found one.
902 * Returns 0 on success, non zero otherwise
903 */
904static int spear_smi_probe(struct platform_device *pdev)
905{
906 struct device_node *np = pdev->dev.of_node;
907 struct spear_smi_plat_data *pdata = NULL;
908 struct spear_smi *dev;
909 struct resource *smi_base;
910 int irq, ret = 0;
911 int i;
912
913 if (np) {
914 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
915 if (!pdata) {
916 pr_err("%s: ERROR: no memory", __func__);
917 ret = -ENOMEM;
918 goto err;
919 }
920 pdev->dev.platform_data = pdata;
921 ret = spear_smi_probe_config_dt(pdev, np);
922 if (ret) {
923 ret = -ENODEV;
924 dev_err(&pdev->dev, "no platform data\n");
925 goto err;
926 }
927 } else {
928 pdata = dev_get_platdata(&pdev->dev);
929 if (!pdata) {
930 ret = -ENODEV;
931 dev_err(&pdev->dev, "no platform data\n");
932 goto err;
933 }
934 }
935
936 irq = platform_get_irq(pdev, 0);
937 if (irq < 0) {
938 ret = -ENODEV;
939 dev_err(&pdev->dev, "invalid smi irq\n");
940 goto err;
941 }
942
943 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_ATOMIC);
944 if (!dev) {
945 ret = -ENOMEM;
946 dev_err(&pdev->dev, "mem alloc fail\n");
947 goto err;
948 }
949
950 smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
951
952 dev->io_base = devm_request_and_ioremap(&pdev->dev, smi_base);
953 if (!dev->io_base) {
954 ret = -EIO;
955 dev_err(&pdev->dev, "devm_request_and_ioremap fail\n");
956 goto err;
957 }
958
959 dev->pdev = pdev;
960 dev->clk_rate = pdata->clk_rate;
961
962 if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
963 dev->clk_rate = SMI_MAX_CLOCK_FREQ;
964
965 dev->num_flashes = pdata->num_flashes;
966
967 if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
968 dev_err(&pdev->dev, "exceeding max number of flashes\n");
969 dev->num_flashes = MAX_NUM_FLASH_CHIP;
970 }
971
972 dev->clk = devm_clk_get(&pdev->dev, NULL);
973 if (IS_ERR(dev->clk)) {
974 ret = PTR_ERR(dev->clk);
975 goto err;
976 }
977
978 ret = clk_prepare_enable(dev->clk);
979 if (ret)
980 goto err;
981
982 ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
983 pdev->name, dev);
984 if (ret) {
985 dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
986 goto err_irq;
987 }
988
989 mutex_init(&dev->lock);
990 init_waitqueue_head(&dev->cmd_complete);
991 spear_smi_hw_init(dev);
992 platform_set_drvdata(pdev, dev);
993
994 /* loop for each serial nor-flash which is connected to smi */
995 for (i = 0; i < dev->num_flashes; i++) {
996 ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
997 if (ret) {
998 dev_err(&dev->pdev->dev, "bank setup failed\n");
999 goto err_bank_setup;
1000 }
1001 }
1002
1003 return 0;
1004
1005err_bank_setup:
1006 platform_set_drvdata(pdev, NULL);
1007err_irq:
1008 clk_disable_unprepare(dev->clk);
1009err:
1010 return ret;
1011}
1012
1013/**
1014 * spear_smi_remove - Exit routine
1015 * @pdev: platform device structure
1016 *
1017 * free all allocations and delete the partitions.
1018 */
1019static int spear_smi_remove(struct platform_device *pdev)
1020{
1021 struct spear_smi *dev;
1022 struct spear_snor_flash *flash;
1023 int ret, i;
1024
1025 dev = platform_get_drvdata(pdev);
1026 if (!dev) {
1027 dev_err(&pdev->dev, "dev is null\n");
1028 return -ENODEV;
1029 }
1030
1031 /* clean up for all nor flash */
1032 for (i = 0; i < dev->num_flashes; i++) {
1033 flash = dev->flash[i];
1034 if (!flash)
1035 continue;
1036
1037 /* clean up mtd stuff */
1038 ret = mtd_device_unregister(&flash->mtd);
1039 if (ret)
1040 dev_err(&pdev->dev, "error removing mtd\n");
1041 }
1042
1043 clk_disable_unprepare(dev->clk);
1044 platform_set_drvdata(pdev, NULL);
1045
1046 return 0;
1047}
1048
1049#ifdef CONFIG_PM
1050static int spear_smi_suspend(struct device *dev)
1051{
1052 struct spear_smi *sdev = dev_get_drvdata(dev);
1053
1054 if (sdev && sdev->clk)
1055 clk_disable_unprepare(sdev->clk);
1056
1057 return 0;
1058}
1059
1060static int spear_smi_resume(struct device *dev)
1061{
1062 struct spear_smi *sdev = dev_get_drvdata(dev);
1063 int ret = -EPERM;
1064
1065 if (sdev && sdev->clk)
1066 ret = clk_prepare_enable(sdev->clk);
1067
1068 if (!ret)
1069 spear_smi_hw_init(sdev);
1070 return ret;
1071}
1072
1073static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
1074#endif
1075
1076#ifdef CONFIG_OF
1077static const struct of_device_id spear_smi_id_table[] = {
1078 { .compatible = "st,spear600-smi" },
1079 {}
1080};
1081MODULE_DEVICE_TABLE(of, spear_smi_id_table);
1082#endif
1083
1084static struct platform_driver spear_smi_driver = {
1085 .driver = {
1086 .name = "smi",
1087 .bus = &platform_bus_type,
1088 .owner = THIS_MODULE,
1089 .of_match_table = of_match_ptr(spear_smi_id_table),
1090#ifdef CONFIG_PM
1091 .pm = &spear_smi_pm_ops,
1092#endif
1093 },
1094 .probe = spear_smi_probe,
1095 .remove = spear_smi_remove,
1096};
1097module_platform_driver(spear_smi_driver);
1098
1099MODULE_LICENSE("GPL");
1100MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
1101MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 8091b016369..83e80c65d6e 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -52,6 +52,8 @@ struct sst25l_flash {
52 struct spi_device *spi; 52 struct spi_device *spi;
53 struct mutex lock; 53 struct mutex lock;
54 struct mtd_info mtd; 54 struct mtd_info mtd;
55
56 int partitioned;
55}; 57};
56 58
57struct flash_info { 59struct flash_info {
@@ -64,7 +66,7 @@ struct flash_info {
64 66
65#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd) 67#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
66 68
67static struct flash_info sst25l_flash_info[] = { 69static struct flash_info __devinitdata sst25l_flash_info[] = {
68 {"sst25lf020a", 0xbf43, 256, 1024, 4096}, 70 {"sst25lf020a", 0xbf43, 256, 1024, 4096},
69 {"sst25lf040a", 0xbf44, 256, 2048, 4096}, 71 {"sst25lf040a", 0xbf44, 256, 2048, 4096},
70}; 72};
@@ -175,6 +177,9 @@ static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
175 int err; 177 int err;
176 178
177 /* Sanity checks */ 179 /* Sanity checks */
180 if (instr->addr + instr->len > flash->mtd.size)
181 return -EINVAL;
182
178 if ((uint32_t)instr->len % mtd->erasesize) 183 if ((uint32_t)instr->len % mtd->erasesize)
179 return -EINVAL; 184 return -EINVAL;
180 185
@@ -220,6 +225,16 @@ static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
220 unsigned char command[4]; 225 unsigned char command[4];
221 int ret; 226 int ret;
222 227
228 /* Sanity checking */
229 if (len == 0)
230 return 0;
231
232 if (from + len > flash->mtd.size)
233 return -EINVAL;
234
235 if (retlen)
236 *retlen = 0;
237
223 spi_message_init(&message); 238 spi_message_init(&message);
224 memset(&transfer, 0, sizeof(transfer)); 239 memset(&transfer, 0, sizeof(transfer));
225 240
@@ -261,6 +276,13 @@ static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
261 int i, j, ret, bytes, copied = 0; 276 int i, j, ret, bytes, copied = 0;
262 unsigned char command[5]; 277 unsigned char command[5];
263 278
279 /* Sanity checks */
280 if (!len)
281 return 0;
282
283 if (to + len > flash->mtd.size)
284 return -EINVAL;
285
264 if ((uint32_t)to % mtd->writesize) 286 if ((uint32_t)to % mtd->writesize)
265 return -EINVAL; 287 return -EINVAL;
266 288
@@ -313,7 +335,7 @@ out:
313 return ret; 335 return ret;
314} 336}
315 337
316static struct flash_info *sst25l_match_device(struct spi_device *spi) 338static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
317{ 339{
318 struct flash_info *flash_info = NULL; 340 struct flash_info *flash_info = NULL;
319 struct spi_message m; 341 struct spi_message m;
@@ -353,12 +375,14 @@ static struct flash_info *sst25l_match_device(struct spi_device *spi)
353 return flash_info; 375 return flash_info;
354} 376}
355 377
356static int sst25l_probe(struct spi_device *spi) 378static int __devinit sst25l_probe(struct spi_device *spi)
357{ 379{
358 struct flash_info *flash_info; 380 struct flash_info *flash_info;
359 struct sst25l_flash *flash; 381 struct sst25l_flash *flash;
360 struct flash_platform_data *data; 382 struct flash_platform_data *data;
361 int ret; 383 int ret, i;
384 struct mtd_partition *parts = NULL;
385 int nr_parts = 0;
362 386
363 flash_info = sst25l_match_device(spi); 387 flash_info = sst25l_match_device(spi);
364 if (!flash_info) 388 if (!flash_info)
@@ -382,16 +406,16 @@ static int sst25l_probe(struct spi_device *spi)
382 flash->mtd.flags = MTD_CAP_NORFLASH; 406 flash->mtd.flags = MTD_CAP_NORFLASH;
383 flash->mtd.erasesize = flash_info->erase_size; 407 flash->mtd.erasesize = flash_info->erase_size;
384 flash->mtd.writesize = flash_info->page_size; 408 flash->mtd.writesize = flash_info->page_size;
385 flash->mtd.writebufsize = flash_info->page_size;
386 flash->mtd.size = flash_info->page_size * flash_info->nr_pages; 409 flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
387 flash->mtd._erase = sst25l_erase; 410 flash->mtd.erase = sst25l_erase;
388 flash->mtd._read = sst25l_read; 411 flash->mtd.read = sst25l_read;
389 flash->mtd._write = sst25l_write; 412 flash->mtd.write = sst25l_write;
390 413
391 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, 414 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
392 (long long)flash->mtd.size >> 10); 415 (long long)flash->mtd.size >> 10);
393 416
394 pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " 417 DEBUG(MTD_DEBUG_LEVEL2,
418 "mtd .name = %s, .size = 0x%llx (%lldMiB) "
395 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 419 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
396 flash->mtd.name, 420 flash->mtd.name,
397 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), 421 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -399,10 +423,37 @@ static int sst25l_probe(struct spi_device *spi)
399 flash->mtd.numeraseregions); 423 flash->mtd.numeraseregions);
400 424
401 425
402 ret = mtd_device_parse_register(&flash->mtd, NULL, NULL, 426 if (mtd_has_cmdlinepart()) {
403 data ? data->parts : NULL, 427 static const char *part_probes[] = {"cmdlinepart", NULL};
404 data ? data->nr_parts : 0); 428
405 if (ret) { 429 nr_parts = parse_mtd_partitions(&flash->mtd,
430 part_probes,
431 &parts, 0);
432 }
433
434 if (nr_parts <= 0 && data && data->parts) {
435 parts = data->parts;
436 nr_parts = data->nr_parts;
437 }
438
439 if (nr_parts > 0) {
440 for (i = 0; i < nr_parts; i++) {
441 DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
442 "{.name = %s, .offset = 0x%llx, "
443 ".size = 0x%llx (%lldKiB) }\n",
444 i, parts[i].name,
445 (long long)parts[i].offset,
446 (long long)parts[i].size,
447 (long long)(parts[i].size >> 10));
448 }
449
450 flash->partitioned = 1;
451 return mtd_device_register(&flash->mtd, parts,
452 nr_parts);
453 }
454
455 ret = mtd_device_register(&flash->mtd, NULL, 0);
456 if (ret == 1) {
406 kfree(flash); 457 kfree(flash);
407 dev_set_drvdata(&spi->dev, NULL); 458 dev_set_drvdata(&spi->dev, NULL);
408 return -ENODEV; 459 return -ENODEV;
@@ -411,7 +462,7 @@ static int sst25l_probe(struct spi_device *spi)
411 return 0; 462 return 0;
412} 463}
413 464
414static int sst25l_remove(struct spi_device *spi) 465static int __devexit sst25l_remove(struct spi_device *spi)
415{ 466{
416 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev); 467 struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
417 int ret; 468 int ret;
@@ -425,13 +476,25 @@ static int sst25l_remove(struct spi_device *spi)
425static struct spi_driver sst25l_driver = { 476static struct spi_driver sst25l_driver = {
426 .driver = { 477 .driver = {
427 .name = "sst25l", 478 .name = "sst25l",
479 .bus = &spi_bus_type,
428 .owner = THIS_MODULE, 480 .owner = THIS_MODULE,
429 }, 481 },
430 .probe = sst25l_probe, 482 .probe = sst25l_probe,
431 .remove = sst25l_remove, 483 .remove = __devexit_p(sst25l_remove),
432}; 484};
433 485
434module_spi_driver(sst25l_driver); 486static int __init sst25l_init(void)
487{
488 return spi_register_driver(&sst25l_driver);
489}
490
491static void __exit sst25l_exit(void)
492{
493 spi_unregister_driver(&sst25l_driver);
494}
495
496module_init(sst25l_init);
497module_exit(sst25l_exit);
435 498
436MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); 499MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
437MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " 500MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 19d637266fc..037b399df3f 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -168,8 +168,8 @@ static int scan_header(partition_t *part)
168 (offset + sizeof(header)) < max_offset; 168 (offset + sizeof(header)) < max_offset;
169 offset += part->mbd.mtd->erasesize ? : 0x2000) { 169 offset += part->mbd.mtd->erasesize ? : 0x2000) {
170 170
171 err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret, 171 err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
172 (unsigned char *)&header); 172 (unsigned char *)&header);
173 173
174 if (err) 174 if (err)
175 return err; 175 return err;
@@ -224,8 +224,8 @@ static int build_maps(partition_t *part)
224 for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) { 224 for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
225 offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN)) 225 offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
226 << part->header.EraseUnitSize); 226 << part->header.EraseUnitSize);
227 ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval, 227 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
228 (unsigned char *)&header); 228 (unsigned char *)&header);
229 229
230 if (ret) 230 if (ret)
231 goto out_XferInfo; 231 goto out_XferInfo;
@@ -289,9 +289,9 @@ static int build_maps(partition_t *part)
289 part->EUNInfo[i].Deleted = 0; 289 part->EUNInfo[i].Deleted = 0;
290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset); 290 offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
291 291
292 ret = mtd_read(part->mbd.mtd, offset, 292 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
293 part->BlocksPerUnit * sizeof(uint32_t), &retval, 293 part->BlocksPerUnit * sizeof(uint32_t), &retval,
294 (unsigned char *)part->bam_cache); 294 (unsigned char *)part->bam_cache);
295 295
296 if (ret) 296 if (ret)
297 goto out_bam_cache; 297 goto out_bam_cache;
@@ -339,7 +339,7 @@ static int erase_xfer(partition_t *part,
339 struct erase_info *erase; 339 struct erase_info *erase;
340 340
341 xfer = &part->XferInfo[xfernum]; 341 xfer = &part->XferInfo[xfernum];
342 pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset); 342 DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
343 xfer->state = XFER_ERASING; 343 xfer->state = XFER_ERASING;
344 344
345 /* Is there a free erase slot? Always in MTD. */ 345 /* Is there a free erase slot? Always in MTD. */
@@ -355,7 +355,7 @@ static int erase_xfer(partition_t *part,
355 erase->len = 1 << part->header.EraseUnitSize; 355 erase->len = 1 << part->header.EraseUnitSize;
356 erase->priv = (u_long)part; 356 erase->priv = (u_long)part;
357 357
358 ret = mtd_erase(part->mbd.mtd, erase); 358 ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
359 359
360 if (!ret) 360 if (!ret)
361 xfer->EraseCount++; 361 xfer->EraseCount++;
@@ -415,15 +415,15 @@ static int prepare_xfer(partition_t *part, int i)
415 xfer = &part->XferInfo[i]; 415 xfer = &part->XferInfo[i];
416 xfer->state = XFER_FAILED; 416 xfer->state = XFER_FAILED;
417 417
418 pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset); 418 DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
419 419
420 /* Write the transfer unit header */ 420 /* Write the transfer unit header */
421 header = part->header; 421 header = part->header;
422 header.LogicalEUN = cpu_to_le16(0xffff); 422 header.LogicalEUN = cpu_to_le16(0xffff);
423 header.EraseCount = cpu_to_le32(xfer->EraseCount); 423 header.EraseCount = cpu_to_le32(xfer->EraseCount);
424 424
425 ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen, 425 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
426 (u_char *)&header); 426 &retlen, (u_char *)&header);
427 427
428 if (ret) { 428 if (ret) {
429 return ret; 429 return ret;
@@ -438,8 +438,8 @@ static int prepare_xfer(partition_t *part, int i)
438 438
439 for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) { 439 for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
440 440
441 ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, 441 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
442 (u_char *)&ctl); 442 &retlen, (u_char *)&ctl);
443 443
444 if (ret) 444 if (ret)
445 return ret; 445 return ret;
@@ -476,7 +476,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
476 476
477 eun = &part->EUNInfo[srcunit]; 477 eun = &part->EUNInfo[srcunit];
478 xfer = &part->XferInfo[xferunit]; 478 xfer = &part->XferInfo[xferunit];
479 pr_debug("ftl_cs: copying block 0x%x to 0x%x\n", 479 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
480 eun->Offset, xfer->Offset); 480 eun->Offset, xfer->Offset);
481 481
482 482
@@ -485,9 +485,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
485 485
486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset); 486 offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
487 487
488 ret = mtd_read(part->mbd.mtd, offset, 488 ret = part->mbd.mtd->read(part->mbd.mtd, offset,
489 part->BlocksPerUnit * sizeof(uint32_t), &retlen, 489 part->BlocksPerUnit * sizeof(uint32_t),
490 (u_char *)(part->bam_cache)); 490 &retlen, (u_char *) (part->bam_cache));
491 491
492 /* mark the cache bad, in case we get an error later */ 492 /* mark the cache bad, in case we get an error later */
493 part->bam_index = 0xffff; 493 part->bam_index = 0xffff;
@@ -503,8 +503,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
503 offset = xfer->Offset + 20; /* Bad! */ 503 offset = xfer->Offset + 20; /* Bad! */
504 unit = cpu_to_le16(0x7fff); 504 unit = cpu_to_le16(0x7fff);
505 505
506 ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen, 506 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
507 (u_char *)&unit); 507 &retlen, (u_char *) &unit);
508 508
509 if (ret) { 509 if (ret) {
510 printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n"); 510 printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
@@ -523,16 +523,16 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
523 break; 523 break;
524 case BLOCK_DATA: 524 case BLOCK_DATA:
525 case BLOCK_REPLACEMENT: 525 case BLOCK_REPLACEMENT:
526 ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen, 526 ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
527 (u_char *)buf); 527 &retlen, (u_char *) buf);
528 if (ret) { 528 if (ret) {
529 printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n"); 529 printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
530 return ret; 530 return ret;
531 } 531 }
532 532
533 533
534 ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen, 534 ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
535 (u_char *)buf); 535 &retlen, (u_char *) buf);
536 if (ret) { 536 if (ret) {
537 printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n"); 537 printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
538 return ret; 538 return ret;
@@ -550,11 +550,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
550 } 550 }
551 551
552 /* Write the BAM to the transfer unit */ 552 /* Write the BAM to the transfer unit */
553 ret = mtd_write(part->mbd.mtd, 553 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
554 xfer->Offset + le32_to_cpu(part->header.BAMOffset), 554 part->BlocksPerUnit * sizeof(int32_t), &retlen,
555 part->BlocksPerUnit * sizeof(int32_t), 555 (u_char *)part->bam_cache);
556 &retlen,
557 (u_char *)part->bam_cache);
558 if (ret) { 556 if (ret) {
559 printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n"); 557 printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
560 return ret; 558 return ret;
@@ -562,8 +560,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
562 560
563 561
564 /* All clear? Then update the LogicalEUN again */ 562 /* All clear? Then update the LogicalEUN again */
565 ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t), 563 ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
566 &retlen, (u_char *)&srcunitswap); 564 &retlen, (u_char *)&srcunitswap);
567 565
568 if (ret) { 566 if (ret) {
569 printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n"); 567 printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
@@ -600,7 +598,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
600 unit with the fewest erases, and usually pick the data unit with 598 unit with the fewest erases, and usually pick the data unit with
601 the most deleted blocks. But with a small probability, pick the 599 the most deleted blocks. But with a small probability, pick the
602 oldest data unit instead. This means that we generally postpone 600 oldest data unit instead. This means that we generally postpone
603 the next reclamation as long as possible, but shuffle static 601 the next reclaimation as long as possible, but shuffle static
604 stuff around a bit for wear leveling. 602 stuff around a bit for wear leveling.
605 603
606======================================================================*/ 604======================================================================*/
@@ -611,8 +609,8 @@ static int reclaim_block(partition_t *part)
611 uint32_t best; 609 uint32_t best;
612 int queued, ret; 610 int queued, ret;
613 611
614 pr_debug("ftl_cs: reclaiming space...\n"); 612 DEBUG(0, "ftl_cs: reclaiming space...\n");
615 pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits); 613 DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);
616 /* Pick the least erased transfer unit */ 614 /* Pick the least erased transfer unit */
617 best = 0xffffffff; xfer = 0xffff; 615 best = 0xffffffff; xfer = 0xffff;
618 do { 616 do {
@@ -620,22 +618,22 @@ static int reclaim_block(partition_t *part)
620 for (i = 0; i < part->header.NumTransferUnits; i++) { 618 for (i = 0; i < part->header.NumTransferUnits; i++) {
621 int n=0; 619 int n=0;
622 if (part->XferInfo[i].state == XFER_UNKNOWN) { 620 if (part->XferInfo[i].state == XFER_UNKNOWN) {
623 pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i); 621 DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i);
624 n=1; 622 n=1;
625 erase_xfer(part, i); 623 erase_xfer(part, i);
626 } 624 }
627 if (part->XferInfo[i].state == XFER_ERASING) { 625 if (part->XferInfo[i].state == XFER_ERASING) {
628 pr_debug("XferInfo[%d].state == XFER_ERASING\n",i); 626 DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i);
629 n=1; 627 n=1;
630 queued = 1; 628 queued = 1;
631 } 629 }
632 else if (part->XferInfo[i].state == XFER_ERASED) { 630 else if (part->XferInfo[i].state == XFER_ERASED) {
633 pr_debug("XferInfo[%d].state == XFER_ERASED\n",i); 631 DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i);
634 n=1; 632 n=1;
635 prepare_xfer(part, i); 633 prepare_xfer(part, i);
636 } 634 }
637 if (part->XferInfo[i].state == XFER_PREPARED) { 635 if (part->XferInfo[i].state == XFER_PREPARED) {
638 pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i); 636 DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i);
639 n=1; 637 n=1;
640 if (part->XferInfo[i].EraseCount <= best) { 638 if (part->XferInfo[i].EraseCount <= best) {
641 best = part->XferInfo[i].EraseCount; 639 best = part->XferInfo[i].EraseCount;
@@ -643,21 +641,22 @@ static int reclaim_block(partition_t *part)
643 } 641 }
644 } 642 }
645 if (!n) 643 if (!n)
646 pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state); 644 DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
647 645
648 } 646 }
649 if (xfer == 0xffff) { 647 if (xfer == 0xffff) {
650 if (queued) { 648 if (queued) {
651 pr_debug("ftl_cs: waiting for transfer " 649 DEBUG(1, "ftl_cs: waiting for transfer "
652 "unit to be prepared...\n"); 650 "unit to be prepared...\n");
653 mtd_sync(part->mbd.mtd); 651 if (part->mbd.mtd->sync)
652 part->mbd.mtd->sync(part->mbd.mtd);
654 } else { 653 } else {
655 static int ne = 0; 654 static int ne = 0;
656 if (++ne < 5) 655 if (++ne < 5)
657 printk(KERN_NOTICE "ftl_cs: reclaim failed: no " 656 printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
658 "suitable transfer units!\n"); 657 "suitable transfer units!\n");
659 else 658 else
660 pr_debug("ftl_cs: reclaim failed: no " 659 DEBUG(1, "ftl_cs: reclaim failed: no "
661 "suitable transfer units!\n"); 660 "suitable transfer units!\n");
662 661
663 return -EIO; 662 return -EIO;
@@ -667,7 +666,7 @@ static int reclaim_block(partition_t *part)
667 666
668 eun = 0; 667 eun = 0;
669 if ((jiffies % shuffle_freq) == 0) { 668 if ((jiffies % shuffle_freq) == 0) {
670 pr_debug("ftl_cs: recycling freshest block...\n"); 669 DEBUG(1, "ftl_cs: recycling freshest block...\n");
671 best = 0xffffffff; 670 best = 0xffffffff;
672 for (i = 0; i < part->DataUnits; i++) 671 for (i = 0; i < part->DataUnits; i++)
673 if (part->EUNInfo[i].EraseCount <= best) { 672 if (part->EUNInfo[i].EraseCount <= best) {
@@ -687,7 +686,7 @@ static int reclaim_block(partition_t *part)
687 printk(KERN_NOTICE "ftl_cs: reclaim failed: " 686 printk(KERN_NOTICE "ftl_cs: reclaim failed: "
688 "no free blocks!\n"); 687 "no free blocks!\n");
689 else 688 else
690 pr_debug("ftl_cs: reclaim failed: " 689 DEBUG(1,"ftl_cs: reclaim failed: "
691 "no free blocks!\n"); 690 "no free blocks!\n");
692 691
693 return -EIO; 692 return -EIO;
@@ -748,11 +747,10 @@ static uint32_t find_free(partition_t *part)
748 /* Invalidate cache */ 747 /* Invalidate cache */
749 part->bam_index = 0xffff; 748 part->bam_index = 0xffff;
750 749
751 ret = mtd_read(part->mbd.mtd, 750 ret = part->mbd.mtd->read(part->mbd.mtd,
752 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset), 751 part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
753 part->BlocksPerUnit * sizeof(uint32_t), 752 part->BlocksPerUnit * sizeof(uint32_t),
754 &retlen, 753 &retlen, (u_char *) (part->bam_cache));
755 (u_char *)(part->bam_cache));
756 754
757 if (ret) { 755 if (ret) {
758 printk(KERN_WARNING"ftl: Error reading BAM in find_free\n"); 756 printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
@@ -773,7 +771,7 @@ static uint32_t find_free(partition_t *part)
773 printk(KERN_NOTICE "ftl_cs: bad free list!\n"); 771 printk(KERN_NOTICE "ftl_cs: bad free list!\n");
774 return 0; 772 return 0;
775 } 773 }
776 pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun); 774 DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
777 return blk; 775 return blk;
778 776
779} /* find_free */ 777} /* find_free */
@@ -793,7 +791,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
793 int ret; 791 int ret;
794 size_t offset, retlen; 792 size_t offset, retlen;
795 793
796 pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n", 794 DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
797 part, sector, nblocks); 795 part, sector, nblocks);
798 if (!(part->state & FTL_FORMATTED)) { 796 if (!(part->state & FTL_FORMATTED)) {
799 printk(KERN_NOTICE "ftl_cs: bad partition\n"); 797 printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -812,8 +810,8 @@ static int ftl_read(partition_t *part, caddr_t buffer,
812 else { 810 else {
813 offset = (part->EUNInfo[log_addr / bsize].Offset 811 offset = (part->EUNInfo[log_addr / bsize].Offset
814 + (log_addr % bsize)); 812 + (log_addr % bsize));
815 ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, 813 ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
816 (u_char *)buffer); 814 &retlen, (u_char *) buffer);
817 815
818 if (ret) { 816 if (ret) {
819 printk(KERN_WARNING "Error reading MTD device in ftl_read()\n"); 817 printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
@@ -842,7 +840,7 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
842 int ret; 840 int ret;
843 size_t retlen, offset; 841 size_t retlen, offset;
844 842
845 pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", 843 DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
846 part, log_addr, virt_addr); 844 part, log_addr, virt_addr);
847 bsize = 1 << part->header.EraseUnitSize; 845 bsize = 1 << part->header.EraseUnitSize;
848 eun = log_addr / bsize; 846 eun = log_addr / bsize;
@@ -851,8 +849,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
851 le32_to_cpu(part->header.BAMOffset)); 849 le32_to_cpu(part->header.BAMOffset));
852 850
853#ifdef PSYCHO_DEBUG 851#ifdef PSYCHO_DEBUG
854 ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, 852 ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
855 (u_char *)&old_addr); 853 &retlen, (u_char *)&old_addr);
856 if (ret) { 854 if (ret) {
857 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); 855 printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
858 return ret; 856 return ret;
@@ -888,8 +886,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
888#endif 886#endif
889 part->bam_cache[blk] = le_virt_addr; 887 part->bam_cache[blk] = le_virt_addr;
890 } 888 }
891 ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, 889 ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
892 (u_char *)&le_virt_addr); 890 &retlen, (u_char *)&le_virt_addr);
893 891
894 if (ret) { 892 if (ret) {
895 printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n"); 893 printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
@@ -907,7 +905,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
907 int ret; 905 int ret;
908 size_t retlen, offset; 906 size_t retlen, offset;
909 907
910 pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n", 908 DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
911 part, sector, nblocks); 909 part, sector, nblocks);
912 if (!(part->state & FTL_FORMATTED)) { 910 if (!(part->state & FTL_FORMATTED)) {
913 printk(KERN_NOTICE "ftl_cs: bad partition\n"); 911 printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -948,7 +946,8 @@ static int ftl_write(partition_t *part, caddr_t buffer,
948 part->EUNInfo[part->bam_index].Deleted++; 946 part->EUNInfo[part->bam_index].Deleted++;
949 offset = (part->EUNInfo[part->bam_index].Offset + 947 offset = (part->EUNInfo[part->bam_index].Offset +
950 blk * SECTOR_SIZE); 948 blk * SECTOR_SIZE);
951 ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer); 949 ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
950 buffer);
952 951
953 if (ret) { 952 if (ret) {
954 printk(KERN_NOTICE "ftl_cs: block write failed!\n"); 953 printk(KERN_NOTICE "ftl_cs: block write failed!\n");
@@ -1012,7 +1011,7 @@ static int ftl_discardsect(struct mtd_blktrans_dev *dev,
1012 partition_t *part = (void *)dev; 1011 partition_t *part = (void *)dev;
1013 uint32_t bsize = 1 << part->header.EraseUnitSize; 1012 uint32_t bsize = 1 << part->header.EraseUnitSize;
1014 1013
1015 pr_debug("FTL erase sector %ld for %d sectors\n", 1014 DEBUG(1, "FTL erase sector %ld for %d sectors\n",
1016 sector, nr_sects); 1015 sector, nr_sects);
1017 1016
1018 while (nr_sects) { 1017 while (nr_sects) {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 3af35148409..d7592e67d04 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -56,19 +56,21 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
57 return; 57 return;
58 58
59 if (!mtd->_block_isbad) { 59 if (!mtd->block_isbad) {
60 printk(KERN_ERR 60 printk(KERN_ERR
61"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" 61"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
62"Please use the new diskonchip driver under the NAND subsystem.\n"); 62"Please use the new diskonchip driver under the NAND subsystem.\n");
63 return; 63 return;
64 } 64 }
65 65
66 pr_debug("INFTL: add_mtd for %s\n", mtd->name); 66 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
67 67
68 inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); 68 inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
69 69
70 if (!inftl) 70 if (!inftl) {
71 printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
71 return; 72 return;
73 }
72 74
73 inftl->mbd.mtd = mtd; 75 inftl->mbd.mtd = mtd;
74 inftl->mbd.devnum = -1; 76 inftl->mbd.devnum = -1;
@@ -131,7 +133,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
131{ 133{
132 struct INFTLrecord *inftl = (void *)dev; 134 struct INFTLrecord *inftl = (void *)dev;
133 135
134 pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum); 136 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum);
135 137
136 del_mtd_blktrans_dev(dev); 138 del_mtd_blktrans_dev(dev);
137 139
@@ -152,13 +154,13 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
152 struct mtd_oob_ops ops; 154 struct mtd_oob_ops ops;
153 int res; 155 int res;
154 156
155 ops.mode = MTD_OPS_PLACE_OOB; 157 ops.mode = MTD_OOB_PLACE;
156 ops.ooboffs = offs & (mtd->writesize - 1); 158 ops.ooboffs = offs & (mtd->writesize - 1);
157 ops.ooblen = len; 159 ops.ooblen = len;
158 ops.oobbuf = buf; 160 ops.oobbuf = buf;
159 ops.datbuf = NULL; 161 ops.datbuf = NULL;
160 162
161 res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 163 res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
162 *retlen = ops.oobretlen; 164 *retlen = ops.oobretlen;
163 return res; 165 return res;
164} 166}
@@ -172,13 +174,13 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
172 struct mtd_oob_ops ops; 174 struct mtd_oob_ops ops;
173 int res; 175 int res;
174 176
175 ops.mode = MTD_OPS_PLACE_OOB; 177 ops.mode = MTD_OOB_PLACE;
176 ops.ooboffs = offs & (mtd->writesize - 1); 178 ops.ooboffs = offs & (mtd->writesize - 1);
177 ops.ooblen = len; 179 ops.ooblen = len;
178 ops.oobbuf = buf; 180 ops.oobbuf = buf;
179 ops.datbuf = NULL; 181 ops.datbuf = NULL;
180 182
181 res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 183 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
182 *retlen = ops.oobretlen; 184 *retlen = ops.oobretlen;
183 return res; 185 return res;
184} 186}
@@ -192,14 +194,14 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
192 struct mtd_oob_ops ops; 194 struct mtd_oob_ops ops;
193 int res; 195 int res;
194 196
195 ops.mode = MTD_OPS_PLACE_OOB; 197 ops.mode = MTD_OOB_PLACE;
196 ops.ooboffs = offs; 198 ops.ooboffs = offs;
197 ops.ooblen = mtd->oobsize; 199 ops.ooblen = mtd->oobsize;
198 ops.oobbuf = oob; 200 ops.oobbuf = oob;
199 ops.datbuf = buf; 201 ops.datbuf = buf;
200 ops.len = len; 202 ops.len = len;
201 203
202 res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 204 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
203 *retlen = ops.retlen; 205 *retlen = ops.retlen;
204 return res; 206 return res;
205} 207}
@@ -213,16 +215,16 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
213 u16 pot = inftl->LastFreeEUN; 215 u16 pot = inftl->LastFreeEUN;
214 int silly = inftl->nb_blocks; 216 int silly = inftl->nb_blocks;
215 217
216 pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n", 218 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
217 inftl, desperate); 219 "desperate=%d)\n", inftl, desperate);
218 220
219 /* 221 /*
220 * Normally, we force a fold to happen before we run out of free 222 * Normally, we force a fold to happen before we run out of free
221 * blocks completely. 223 * blocks completely.
222 */ 224 */
223 if (!desperate && inftl->numfreeEUNs < 2) { 225 if (!desperate && inftl->numfreeEUNs < 2) {
224 pr_debug("INFTL: there are too few free EUNs (%d)\n", 226 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
225 inftl->numfreeEUNs); 227 "EUNs (%d)\n", inftl->numfreeEUNs);
226 return BLOCK_NIL; 228 return BLOCK_NIL;
227 } 229 }
228 230
@@ -257,8 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
257 struct inftl_oob oob; 259 struct inftl_oob oob;
258 size_t retlen; 260 size_t retlen;
259 261
260 pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n", 262 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
261 inftl, thisVUC, pendingblock); 263 "pending=%d)\n", inftl, thisVUC, pendingblock);
262 264
263 memset(BlockMap, 0xff, sizeof(BlockMap)); 265 memset(BlockMap, 0xff, sizeof(BlockMap));
264 memset(BlockDeleted, 0, sizeof(BlockDeleted)); 266 memset(BlockDeleted, 0, sizeof(BlockDeleted));
@@ -321,7 +323,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
321 * Chain, and the Erase Unit into which we are supposed to be copying. 323 * Chain, and the Erase Unit into which we are supposed to be copying.
322 * Go for it. 324 * Go for it.
323 */ 325 */
324 pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN); 326 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
327 thisVUC, targetEUN);
325 328
326 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { 329 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
327 unsigned char movebuf[SECTORSIZE]; 330 unsigned char movebuf[SECTORSIZE];
@@ -343,19 +346,17 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
343 if (BlockMap[block] == BLOCK_NIL) 346 if (BlockMap[block] == BLOCK_NIL)
344 continue; 347 continue;
345 348
346 ret = mtd_read(mtd, 349 ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
347 (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), 350 (block * SECTORSIZE), SECTORSIZE, &retlen,
348 SECTORSIZE, 351 movebuf);
349 &retlen, 352 if (ret < 0 && ret != -EUCLEAN) {
350 movebuf); 353 ret = mtd->read(mtd,
351 if (ret < 0 && !mtd_is_bitflip(ret)) { 354 (inftl->EraseSize * BlockMap[block]) +
352 ret = mtd_read(mtd, 355 (block * SECTORSIZE), SECTORSIZE,
353 (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE), 356 &retlen, movebuf);
354 SECTORSIZE,
355 &retlen,
356 movebuf);
357 if (ret != -EIO) 357 if (ret != -EIO)
358 pr_debug("INFTL: error went away on retry?\n"); 358 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
359 "away on retry?\n");
359 } 360 }
360 memset(&oob, 0xff, sizeof(struct inftl_oob)); 361 memset(&oob, 0xff, sizeof(struct inftl_oob));
361 oob.b.Status = oob.b.Status1 = SECTOR_USED; 362 oob.b.Status = oob.b.Status1 = SECTOR_USED;
@@ -371,7 +372,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
371 * is important, by doing oldest first if we crash/reboot then it 372 * is important, by doing oldest first if we crash/reboot then it
372 * it is relatively simple to clean up the mess). 373 * it is relatively simple to clean up the mess).
373 */ 374 */
374 pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC); 375 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
376 thisVUC);
375 377
376 for (;;) { 378 for (;;) {
377 /* Find oldest unit in chain. */ 379 /* Find oldest unit in chain. */
@@ -419,7 +421,7 @@ static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
419 u16 ChainLength = 0, thislen; 421 u16 ChainLength = 0, thislen;
420 u16 chain, EUN; 422 u16 chain, EUN;
421 423
422 pr_debug("INFTL: INFTL_makefreeblock(inftl=%p," 424 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
423 "pending=%d)\n", inftl, pendingblock); 425 "pending=%d)\n", inftl, pendingblock);
424 426
425 for (chain = 0; chain < inftl->nb_blocks; chain++) { 427 for (chain = 0; chain < inftl->nb_blocks; chain++) {
@@ -482,8 +484,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
482 size_t retlen; 484 size_t retlen;
483 int silly, silly2 = 3; 485 int silly, silly2 = 3;
484 486
485 pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n", 487 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
486 inftl, block); 488 "block=%d)\n", inftl, block);
487 489
488 do { 490 do {
489 /* 491 /*
@@ -499,8 +501,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
499 blockofs, 8, &retlen, (char *)&bci); 501 blockofs, 8, &retlen, (char *)&bci);
500 502
501 status = bci.Status | bci.Status1; 503 status = bci.Status | bci.Status1;
502 pr_debug("INFTL: status of block %d in EUN %d is %x\n", 504 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
503 block , writeEUN, status); 505 "EUN %d is %x\n", block , writeEUN, status);
504 506
505 switch(status) { 507 switch(status) {
506 case SECTOR_FREE: 508 case SECTOR_FREE:
@@ -553,9 +555,9 @@ hitused:
553 * Hopefully we free something, lets try again. 555 * Hopefully we free something, lets try again.
554 * This time we are desperate... 556 * This time we are desperate...
555 */ 557 */
556 pr_debug("INFTL: using desperate==1 to find free EUN " 558 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
557 "to accommodate write to VUC %d\n", 559 "to find free EUN to accommodate write to "
558 thisVUC); 560 "VUC %d\n", thisVUC);
559 writeEUN = INFTL_findfreeblock(inftl, 1); 561 writeEUN = INFTL_findfreeblock(inftl, 1);
560 if (writeEUN == BLOCK_NIL) { 562 if (writeEUN == BLOCK_NIL) {
561 /* 563 /*
@@ -645,7 +647,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
645 struct inftl_bci bci; 647 struct inftl_bci bci;
646 size_t retlen; 648 size_t retlen;
647 649
648 pr_debug("INFTL: INFTL_trydeletechain(inftl=%p," 650 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
649 "thisVUC=%d)\n", inftl, thisVUC); 651 "thisVUC=%d)\n", inftl, thisVUC);
650 652
651 memset(BlockUsed, 0, sizeof(BlockUsed)); 653 memset(BlockUsed, 0, sizeof(BlockUsed));
@@ -709,7 +711,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
709 * For each block in the chain free it and make it available 711 * For each block in the chain free it and make it available
710 * for future use. Erase from the oldest unit first. 712 * for future use. Erase from the oldest unit first.
711 */ 713 */
712 pr_debug("INFTL: deleting empty VUC %d\n", thisVUC); 714 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);
713 715
714 for (;;) { 716 for (;;) {
715 u16 *prevEUN = &inftl->VUtable[thisVUC]; 717 u16 *prevEUN = &inftl->VUtable[thisVUC];
@@ -717,7 +719,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
717 719
718 /* If the chain is all gone already, we're done */ 720 /* If the chain is all gone already, we're done */
719 if (thisEUN == BLOCK_NIL) { 721 if (thisEUN == BLOCK_NIL) {
720 pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); 722 DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
721 return; 723 return;
722 } 724 }
723 725
@@ -729,7 +731,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
729 thisEUN = *prevEUN; 731 thisEUN = *prevEUN;
730 } 732 }
731 733
732 pr_debug("Deleting EUN %d from VUC %d\n", 734 DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
733 thisEUN, thisVUC); 735 thisEUN, thisVUC);
734 736
735 if (INFTL_formatblock(inftl, thisEUN) < 0) { 737 if (INFTL_formatblock(inftl, thisEUN) < 0) {
@@ -765,7 +767,7 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
765 size_t retlen; 767 size_t retlen;
766 struct inftl_bci bci; 768 struct inftl_bci bci;
767 769
768 pr_debug("INFTL: INFTL_deleteblock(inftl=%p," 770 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
769 "block=%d)\n", inftl, block); 771 "block=%d)\n", inftl, block);
770 772
771 while (thisEUN < inftl->nb_blocks) { 773 while (thisEUN < inftl->nb_blocks) {
@@ -824,7 +826,7 @@ static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
824 struct inftl_oob oob; 826 struct inftl_oob oob;
825 char *p, *pend; 827 char *p, *pend;
826 828
827 pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld," 829 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
828 "buffer=%p)\n", inftl, block, buffer); 830 "buffer=%p)\n", inftl, block, buffer);
829 831
830 /* Is block all zero? */ 832 /* Is block all zero? */
@@ -874,7 +876,7 @@ static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
874 struct inftl_bci bci; 876 struct inftl_bci bci;
875 size_t retlen; 877 size_t retlen;
876 878
877 pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld," 879 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
878 "buffer=%p)\n", inftl, block, buffer); 880 "buffer=%p)\n", inftl, block, buffer);
879 881
880 while (thisEUN < inftl->nb_blocks) { 882 while (thisEUN < inftl->nb_blocks) {
@@ -917,10 +919,10 @@ foundit:
917 } else { 919 } else {
918 size_t retlen; 920 size_t retlen;
919 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs; 921 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
920 int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer); 922 int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
921 923
922 /* Handle corrected bit flips gracefully */ 924 /* Handle corrected bit flips gracefully */
923 if (ret < 0 && !mtd_is_bitflip(ret)) 925 if (ret < 0 && ret != -EUCLEAN)
924 return -EIO; 926 return -EIO;
925 } 927 }
926 return 0; 928 return 0;
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 4adc0374fb6..104052e774b 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -53,7 +53,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
53 struct INFTLPartition *ip; 53 struct INFTLPartition *ip;
54 size_t retlen; 54 size_t retlen;
55 55
56 pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl); 56 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
57 57
58 /* 58 /*
59 * Assume logical EraseSize == physical erasesize for starting the 59 * Assume logical EraseSize == physical erasesize for starting the
@@ -73,8 +73,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
73 * Check for BNAND header first. Then whinge if it's found 73 * Check for BNAND header first. Then whinge if it's found
74 * but later checks fail. 74 * but later checks fail.
75 */ 75 */
76 ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE, 76 ret = mtd->read(mtd, block * inftl->EraseSize,
77 &retlen, buf); 77 SECTORSIZE, &retlen, buf);
78 /* We ignore ret in case the ECC of the MediaHeader is invalid 78 /* We ignore ret in case the ECC of the MediaHeader is invalid
79 (which is apparently acceptable) */ 79 (which is apparently acceptable) */
80 if (retlen != SECTORSIZE) { 80 if (retlen != SECTORSIZE) {
@@ -118,8 +118,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
118 memcpy(mh, buf, sizeof(struct INFTLMediaHeader)); 118 memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
119 119
120 /* Read the spare media header at offset 4096 */ 120 /* Read the spare media header at offset 4096 */
121 mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE, 121 mtd->read(mtd, block * inftl->EraseSize + 4096,
122 &retlen, buf); 122 SECTORSIZE, &retlen, buf);
123 if (retlen != SECTORSIZE) { 123 if (retlen != SECTORSIZE) {
124 printk(KERN_WARNING "INFTL: Unable to read spare " 124 printk(KERN_WARNING "INFTL: Unable to read spare "
125 "Media Header\n"); 125 "Media Header\n");
@@ -139,20 +139,24 @@ static int find_boot_record(struct INFTLrecord *inftl)
139 mh->FormatFlags = le32_to_cpu(mh->FormatFlags); 139 mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
140 mh->PercentUsed = le32_to_cpu(mh->PercentUsed); 140 mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
141 141
142 pr_debug("INFTL: Media Header ->\n" 142#ifdef CONFIG_MTD_DEBUG_VERBOSE
143 " bootRecordID = %s\n" 143 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
144 " NoOfBootImageBlocks = %d\n" 144 printk("INFTL: Media Header ->\n"
145 " NoOfBinaryPartitions = %d\n" 145 " bootRecordID = %s\n"
146 " NoOfBDTLPartitions = %d\n" 146 " NoOfBootImageBlocks = %d\n"
147 " BlockMultiplerBits = %d\n" 147 " NoOfBinaryPartitions = %d\n"
148 " FormatFlgs = %d\n" 148 " NoOfBDTLPartitions = %d\n"
149 " OsakVersion = 0x%x\n" 149 " BlockMultiplerBits = %d\n"
150 " PercentUsed = %d\n", 150 " FormatFlgs = %d\n"
151 mh->bootRecordID, mh->NoOfBootImageBlocks, 151 " OsakVersion = 0x%x\n"
152 mh->NoOfBinaryPartitions, 152 " PercentUsed = %d\n",
153 mh->NoOfBDTLPartitions, 153 mh->bootRecordID, mh->NoOfBootImageBlocks,
154 mh->BlockMultiplierBits, mh->FormatFlags, 154 mh->NoOfBinaryPartitions,
155 mh->OsakVersion, mh->PercentUsed); 155 mh->NoOfBDTLPartitions,
156 mh->BlockMultiplierBits, mh->FormatFlags,
157 mh->OsakVersion, mh->PercentUsed);
158 }
159#endif
156 160
157 if (mh->NoOfBDTLPartitions == 0) { 161 if (mh->NoOfBDTLPartitions == 0) {
158 printk(KERN_WARNING "INFTL: Media Header sanity check " 162 printk(KERN_WARNING "INFTL: Media Header sanity check "
@@ -196,15 +200,19 @@ static int find_boot_record(struct INFTLrecord *inftl)
196 ip->spareUnits = le32_to_cpu(ip->spareUnits); 200 ip->spareUnits = le32_to_cpu(ip->spareUnits);
197 ip->Reserved0 = le32_to_cpu(ip->Reserved0); 201 ip->Reserved0 = le32_to_cpu(ip->Reserved0);
198 202
199 pr_debug(" PARTITION[%d] ->\n" 203#ifdef CONFIG_MTD_DEBUG_VERBOSE
200 " virtualUnits = %d\n" 204 if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
201 " firstUnit = %d\n" 205 printk(" PARTITION[%d] ->\n"
202 " lastUnit = %d\n" 206 " virtualUnits = %d\n"
203 " flags = 0x%x\n" 207 " firstUnit = %d\n"
204 " spareUnits = %d\n", 208 " lastUnit = %d\n"
205 i, ip->virtualUnits, ip->firstUnit, 209 " flags = 0x%x\n"
206 ip->lastUnit, ip->flags, 210 " spareUnits = %d\n",
207 ip->spareUnits); 211 i, ip->virtualUnits, ip->firstUnit,
212 ip->lastUnit, ip->flags,
213 ip->spareUnits);
214 }
215#endif
208 216
209 if (ip->Reserved0 != ip->firstUnit) { 217 if (ip->Reserved0 != ip->firstUnit) {
210 struct erase_info *instr = &inftl->instr; 218 struct erase_info *instr = &inftl->instr;
@@ -220,7 +228,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
220 */ 228 */
221 instr->addr = ip->Reserved0 * inftl->EraseSize; 229 instr->addr = ip->Reserved0 * inftl->EraseSize;
222 instr->len = inftl->EraseSize; 230 instr->len = inftl->EraseSize;
223 mtd_erase(mtd, instr); 231 mtd->erase(mtd, instr);
224 } 232 }
225 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) { 233 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
226 printk(KERN_WARNING "INFTL: Media Header " 234 printk(KERN_WARNING "INFTL: Media Header "
@@ -306,8 +314,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
306 /* If any of the physical eraseblocks are bad, don't 314 /* If any of the physical eraseblocks are bad, don't
307 use the unit. */ 315 use the unit. */
308 for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) { 316 for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
309 if (mtd_block_isbad(inftl->mbd.mtd, 317 if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
310 i * inftl->EraseSize + physblock))
311 inftl->PUtable[i] = BLOCK_RESERVED; 318 inftl->PUtable[i] = BLOCK_RESERVED;
312 } 319 }
313 } 320 }
@@ -343,7 +350,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
343 int i; 350 int i;
344 351
345 for (i = 0; i < len; i += SECTORSIZE) { 352 for (i = 0; i < len; i += SECTORSIZE) {
346 if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf)) 353 if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
347 return -1; 354 return -1;
348 if (memcmpb(buf, 0xff, SECTORSIZE) != 0) 355 if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
349 return -1; 356 return -1;
@@ -368,7 +375,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
368 * 375 *
369 * Return: 0 when succeed, -1 on error. 376 * Return: 0 when succeed, -1 on error.
370 * 377 *
371 * ToDo: 1. Is it necessary to check_free_sector after erasing ?? 378 * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
372 */ 379 */
373int INFTL_formatblock(struct INFTLrecord *inftl, int block) 380int INFTL_formatblock(struct INFTLrecord *inftl, int block)
374{ 381{
@@ -378,7 +385,8 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
378 struct mtd_info *mtd = inftl->mbd.mtd; 385 struct mtd_info *mtd = inftl->mbd.mtd;
379 int physblock; 386 int physblock;
380 387
381 pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block); 388 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
389 "block=%d)\n", inftl, block);
382 390
383 memset(instr, 0, sizeof(struct erase_info)); 391 memset(instr, 0, sizeof(struct erase_info));
384 392
@@ -394,7 +402,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
394 mark only the failed block in the bbt. */ 402 mark only the failed block in the bbt. */
395 for (physblock = 0; physblock < inftl->EraseSize; 403 for (physblock = 0; physblock < inftl->EraseSize;
396 physblock += instr->len, instr->addr += instr->len) { 404 physblock += instr->len, instr->addr += instr->len) {
397 mtd_erase(inftl->mbd.mtd, instr); 405 mtd->erase(inftl->mbd.mtd, instr);
398 406
399 if (instr->state == MTD_ERASE_FAILED) { 407 if (instr->state == MTD_ERASE_FAILED) {
400 printk(KERN_WARNING "INFTL: error while formatting block %d\n", 408 printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -424,7 +432,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
424fail: 432fail:
425 /* could not format, update the bad block table (caller is responsible 433 /* could not format, update the bad block table (caller is responsible
426 for setting the PUtable to BLOCK_RESERVED on failure) */ 434 for setting the PUtable to BLOCK_RESERVED on failure) */
427 mtd_block_markbad(inftl->mbd.mtd, instr->addr); 435 inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
428 return -1; 436 return -1;
429} 437}
430 438
@@ -468,30 +476,30 @@ void INFTL_dumptables(struct INFTLrecord *s)
468{ 476{
469 int i; 477 int i;
470 478
471 pr_debug("-------------------------------------------" 479 printk("-------------------------------------------"
472 "----------------------------------\n"); 480 "----------------------------------\n");
473 481
474 pr_debug("VUtable[%d] ->", s->nb_blocks); 482 printk("VUtable[%d] ->", s->nb_blocks);
475 for (i = 0; i < s->nb_blocks; i++) { 483 for (i = 0; i < s->nb_blocks; i++) {
476 if ((i % 8) == 0) 484 if ((i % 8) == 0)
477 pr_debug("\n%04x: ", i); 485 printk("\n%04x: ", i);
478 pr_debug("%04x ", s->VUtable[i]); 486 printk("%04x ", s->VUtable[i]);
479 } 487 }
480 488
481 pr_debug("\n-------------------------------------------" 489 printk("\n-------------------------------------------"
482 "----------------------------------\n"); 490 "----------------------------------\n");
483 491
484 pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); 492 printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
485 for (i = 0; i <= s->lastEUN; i++) { 493 for (i = 0; i <= s->lastEUN; i++) {
486 if ((i % 8) == 0) 494 if ((i % 8) == 0)
487 pr_debug("\n%04x: ", i); 495 printk("\n%04x: ", i);
488 pr_debug("%04x ", s->PUtable[i]); 496 printk("%04x ", s->PUtable[i]);
489 } 497 }
490 498
491 pr_debug("\n-------------------------------------------" 499 printk("\n-------------------------------------------"
492 "----------------------------------\n"); 500 "----------------------------------\n");
493 501
494 pr_debug("INFTL ->\n" 502 printk("INFTL ->\n"
495 " EraseSize = %d\n" 503 " EraseSize = %d\n"
496 " h/s/c = %d/%d/%d\n" 504 " h/s/c = %d/%d/%d\n"
497 " numvunits = %d\n" 505 " numvunits = %d\n"
@@ -505,7 +513,7 @@ void INFTL_dumptables(struct INFTLrecord *s)
505 s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs, 513 s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
506 s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks); 514 s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
507 515
508 pr_debug("\n-------------------------------------------" 516 printk("\n-------------------------------------------"
509 "----------------------------------\n"); 517 "----------------------------------\n");
510} 518}
511 519
@@ -513,25 +521,25 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
513{ 521{
514 int logical, block, i; 522 int logical, block, i;
515 523
516 pr_debug("-------------------------------------------" 524 printk("-------------------------------------------"
517 "----------------------------------\n"); 525 "----------------------------------\n");
518 526
519 pr_debug("INFTL Virtual Unit Chains:\n"); 527 printk("INFTL Virtual Unit Chains:\n");
520 for (logical = 0; logical < s->nb_blocks; logical++) { 528 for (logical = 0; logical < s->nb_blocks; logical++) {
521 block = s->VUtable[logical]; 529 block = s->VUtable[logical];
522 if (block > s->nb_blocks) 530 if (block > s->nb_blocks)
523 continue; 531 continue;
524 pr_debug(" LOGICAL %d --> %d ", logical, block); 532 printk(" LOGICAL %d --> %d ", logical, block);
525 for (i = 0; i < s->nb_blocks; i++) { 533 for (i = 0; i < s->nb_blocks; i++) {
526 if (s->PUtable[block] == BLOCK_NIL) 534 if (s->PUtable[block] == BLOCK_NIL)
527 break; 535 break;
528 block = s->PUtable[block]; 536 block = s->PUtable[block];
529 pr_debug("%d ", block); 537 printk("%d ", block);
530 } 538 }
531 pr_debug("\n"); 539 printk("\n");
532 } 540 }
533 541
534 pr_debug("-------------------------------------------" 542 printk("-------------------------------------------"
535 "----------------------------------\n"); 543 "----------------------------------\n");
536} 544}
537 545
@@ -547,7 +555,7 @@ int INFTL_mount(struct INFTLrecord *s)
547 int i; 555 int i;
548 u8 *ANACtable, ANAC; 556 u8 *ANACtable, ANAC;
549 557
550 pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s); 558 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
551 559
552 /* Search for INFTL MediaHeader and Spare INFTL Media Header */ 560 /* Search for INFTL MediaHeader and Spare INFTL Media Header */
553 if (find_boot_record(s) < 0) { 561 if (find_boot_record(s) < 0) {
@@ -577,7 +585,7 @@ int INFTL_mount(struct INFTLrecord *s)
577 * NOTEXPLORED state. Then at the end we will try to format it and 585 * NOTEXPLORED state. Then at the end we will try to format it and
578 * mark it as free. 586 * mark it as free.
579 */ 587 */
580 pr_debug("INFTL: pass 1, explore each unit\n"); 588 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n");
581 for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) { 589 for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
582 if (s->PUtable[first_block] != BLOCK_NOTEXPLORED) 590 if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
583 continue; 591 continue;
@@ -709,14 +717,17 @@ int INFTL_mount(struct INFTLrecord *s)
709 logical_block = BLOCK_NIL; 717 logical_block = BLOCK_NIL;
710 } 718 }
711 719
712 INFTL_dumptables(s); 720#ifdef CONFIG_MTD_DEBUG_VERBOSE
721 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
722 INFTL_dumptables(s);
723#endif
713 724
714 /* 725 /*
715 * Second pass, check for infinite loops in chains. These are 726 * Second pass, check for infinite loops in chains. These are
716 * possible because we don't update the previous pointers when 727 * possible because we don't update the previous pointers when
717 * we fold chains. No big deal, just fix them up in PUtable. 728 * we fold chains. No big deal, just fix them up in PUtable.
718 */ 729 */
719 pr_debug("INFTL: pass 2, validate virtual chains\n"); 730 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n");
720 for (logical_block = 0; logical_block < s->numvunits; logical_block++) { 731 for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
721 block = s->VUtable[logical_block]; 732 block = s->VUtable[logical_block];
722 last_block = BLOCK_NIL; 733 last_block = BLOCK_NIL;
@@ -761,8 +772,12 @@ int INFTL_mount(struct INFTLrecord *s)
761 } 772 }
762 } 773 }
763 774
764 INFTL_dumptables(s); 775#ifdef CONFIG_MTD_DEBUG_VERBOSE
765 INFTL_dumpVUchains(s); 776 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
777 INFTL_dumptables(s);
778 if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
779 INFTL_dumpVUchains(s);
780#endif
766 781
767 /* 782 /*
768 * Third pass, format unreferenced blocks and init free block count. 783 * Third pass, format unreferenced blocks and init free block count.
@@ -770,7 +785,7 @@ int INFTL_mount(struct INFTLrecord *s)
770 s->numfreeEUNs = 0; 785 s->numfreeEUNs = 0;
771 s->LastFreeEUN = BLOCK_NIL; 786 s->LastFreeEUN = BLOCK_NIL;
772 787
773 pr_debug("INFTL: pass 3, format unused blocks\n"); 788 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n");
774 for (block = s->firstEUN; block <= s->lastEUN; block++) { 789 for (block = s->firstEUN; block <= s->lastEUN; block++) {
775 if (s->PUtable[block] == BLOCK_NOTEXPLORED) { 790 if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
776 printk("INFTL: unreferenced block %d, formatting it\n", 791 printk("INFTL: unreferenced block %d, formatting it\n",
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index d3cfe26beea..65655dd59e1 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -27,7 +27,6 @@
27#include <linux/mtd/pfow.h> 27#include <linux/mtd/pfow.h>
28#include <linux/mtd/qinfo.h> 28#include <linux/mtd/qinfo.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/module.h>
31 30
32static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, 31static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
33 size_t *retlen, u_char *buf); 32 size_t *retlen, u_char *buf);
@@ -40,7 +39,7 @@ static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
40static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 39static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
41static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, 40static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
42 size_t *retlen, void **mtdbuf, resource_size_t *phys); 41 size_t *retlen, void **mtdbuf, resource_size_t *phys);
43static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); 42static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
44static int get_chip(struct map_info *map, struct flchip *chip, int mode); 43static int get_chip(struct map_info *map, struct flchip *chip, int mode);
45static int chip_ready(struct map_info *map, struct flchip *chip, int mode); 44static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
46static void put_chip(struct map_info *map, struct flchip *chip); 45static void put_chip(struct map_info *map, struct flchip *chip);
@@ -63,19 +62,26 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
63 mtd->type = MTD_NORFLASH; 62 mtd->type = MTD_NORFLASH;
64 63
65 /* Fill in the default mtd operations */ 64 /* Fill in the default mtd operations */
66 mtd->_read = lpddr_read; 65 mtd->read = lpddr_read;
67 mtd->type = MTD_NORFLASH; 66 mtd->type = MTD_NORFLASH;
68 mtd->flags = MTD_CAP_NORFLASH; 67 mtd->flags = MTD_CAP_NORFLASH;
69 mtd->flags &= ~MTD_BIT_WRITEABLE; 68 mtd->flags &= ~MTD_BIT_WRITEABLE;
70 mtd->_erase = lpddr_erase; 69 mtd->erase = lpddr_erase;
71 mtd->_write = lpddr_write_buffers; 70 mtd->write = lpddr_write_buffers;
72 mtd->_writev = lpddr_writev; 71 mtd->writev = lpddr_writev;
73 mtd->_lock = lpddr_lock; 72 mtd->read_oob = NULL;
74 mtd->_unlock = lpddr_unlock; 73 mtd->write_oob = NULL;
74 mtd->sync = NULL;
75 mtd->lock = lpddr_lock;
76 mtd->unlock = lpddr_unlock;
77 mtd->suspend = NULL;
78 mtd->resume = NULL;
75 if (map_is_linear(map)) { 79 if (map_is_linear(map)) {
76 mtd->_point = lpddr_point; 80 mtd->point = lpddr_point;
77 mtd->_unpoint = lpddr_unpoint; 81 mtd->unpoint = lpddr_unpoint;
78 } 82 }
83 mtd->block_isbad = NULL;
84 mtd->block_markbad = NULL;
79 mtd->size = 1 << lpddr->qinfo->DevSizeShift; 85 mtd->size = 1 << lpddr->qinfo->DevSizeShift;
80 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; 86 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
81 mtd->writesize = 1 << lpddr->qinfo->BufSizeShift; 87 mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
@@ -530,12 +536,14 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
530 struct flchip *chip = &lpddr->chips[chipnum]; 536 struct flchip *chip = &lpddr->chips[chipnum];
531 int ret = 0; 537 int ret = 0;
532 538
533 if (!map->virt) 539 if (!map->virt || (adr + len > mtd->size))
534 return -EINVAL; 540 return -EINVAL;
535 541
536 /* ofs: offset within the first chip that the first read should start */ 542 /* ofs: offset within the first chip that the first read should start */
537 ofs = adr - (chipnum << lpddr->chipshift); 543 ofs = adr - (chipnum << lpddr->chipshift);
544
538 *mtdbuf = (void *)map->virt + chip->start + ofs; 545 *mtdbuf = (void *)map->virt + chip->start + ofs;
546 *retlen = 0;
539 547
540 while (len) { 548 while (len) {
541 unsigned long thislen; 549 unsigned long thislen;
@@ -573,11 +581,11 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
573 return 0; 581 return 0;
574} 582}
575 583
576static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) 584static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
577{ 585{
578 struct map_info *map = mtd->priv; 586 struct map_info *map = mtd->priv;
579 struct lpddr_private *lpddr = map->fldrv_priv; 587 struct lpddr_private *lpddr = map->fldrv_priv;
580 int chipnum = adr >> lpddr->chipshift, err = 0; 588 int chipnum = adr >> lpddr->chipshift;
581 unsigned long ofs; 589 unsigned long ofs;
582 590
583 /* ofs: offset within the first chip that the first read should start */ 591 /* ofs: offset within the first chip that the first read should start */
@@ -601,11 +609,9 @@ static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
601 chip->ref_point_counter--; 609 chip->ref_point_counter--;
602 if (chip->ref_point_counter == 0) 610 if (chip->ref_point_counter == 0)
603 chip->state = FL_READY; 611 chip->state = FL_READY;
604 } else { 612 } else
605 printk(KERN_WARNING "%s: Warning: unpoint called on non" 613 printk(KERN_WARNING "%s: Warning: unpoint called on non"
606 "pointed region\n", map->name); 614 "pointed region\n", map->name);
607 err = -EINVAL;
608 }
609 615
610 put_chip(map, chip); 616 put_chip(map, chip);
611 mutex_unlock(&chip->mutex); 617 mutex_unlock(&chip->mutex);
@@ -614,8 +620,6 @@ static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
614 ofs = 0; 620 ofs = 0;
615 chipnum++; 621 chipnum++;
616 } 622 }
617
618 return err;
619} 623}
620 624
621static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 625static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
@@ -639,11 +643,13 @@ static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
639 int chipnum; 643 int chipnum;
640 unsigned long ofs, vec_seek, i; 644 unsigned long ofs, vec_seek, i;
641 int wbufsize = 1 << lpddr->qinfo->BufSizeShift; 645 int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
646
642 size_t len = 0; 647 size_t len = 0;
643 648
644 for (i = 0; i < count; i++) 649 for (i = 0; i < count; i++)
645 len += vecs[i].iov_len; 650 len += vecs[i].iov_len;
646 651
652 *retlen = 0;
647 if (!len) 653 if (!len)
648 return 0; 654 return 0;
649 655
@@ -688,6 +694,9 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
688 ofs = instr->addr; 694 ofs = instr->addr;
689 len = instr->len; 695 len = instr->len;
690 696
697 if (ofs > mtd->size || (len + ofs) > mtd->size)
698 return -EINVAL;
699
691 while (len > 0) { 700 while (len > 0) {
692 ret = do_erase_oneblock(mtd, ofs); 701 ret = do_erase_oneblock(mtd, ofs);
693 if (ret) 702 if (ret)
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 45abed67f1e..dbfe17baf04 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
57 57
58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) 58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
59{ 59{
60 int qinfo_lines = ARRAY_SIZE(qinfo_array); 60 int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
61 int i; 61 int i;
62 int bankwidth = map_bankwidth(map) * 8; 62 int bankwidth = map_bankwidth(map) * 8;
63 int major, minor; 63 int major, minor;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 62ba82c396c..299e67c039f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -1,6 +1,5 @@
1menu "Mapping drivers for chip access" 1menu "Mapping drivers for chip access"
2 depends on MTD!=n 2 depends on MTD!=n
3 depends on HAS_IOMEM
4 3
5config MTD_COMPLEX_MAPPINGS 4config MTD_COMPLEX_MAPPINGS
6 bool "Support non-linear mappings of flash chips" 5 bool "Support non-linear mappings of flash chips"
@@ -42,6 +41,8 @@ config MTD_PHYSMAP_START
42 are mapped on your particular target board. Refer to the 41 are mapped on your particular target board. Refer to the
43 memory map which should hopefully be in the documentation for 42 memory map which should hopefully be in the documentation for
44 your board. 43 your board.
44 Ignore this option if you use run-time physmap configuration
45 (i.e., run-time calling physmap_configure()).
45 46
46config MTD_PHYSMAP_LEN 47config MTD_PHYSMAP_LEN
47 hex "Physical length of flash mapping" 48 hex "Physical length of flash mapping"
@@ -54,6 +55,8 @@ config MTD_PHYSMAP_LEN
54 than the total amount of flash present. Refer to the memory 55 than the total amount of flash present. Refer to the memory
55 map which should hopefully be in the documentation for your 56 map which should hopefully be in the documentation for your
56 board. 57 board.
58 Ignore this option if you use run-time physmap configuration
59 (i.e., run-time calling physmap_configure()).
57 60
58config MTD_PHYSMAP_BANKWIDTH 61config MTD_PHYSMAP_BANKWIDTH
59 int "Bank width in octets" 62 int "Bank width in octets"
@@ -64,6 +67,8 @@ config MTD_PHYSMAP_BANKWIDTH
64 in octets. For example, if you have a data bus width of 32 67 in octets. For example, if you have a data bus width of 32
65 bits, you would set the bus width octet value to 4. This is 68 bits, you would set the bus width octet value to 4. This is
66 used internally by the CFI drivers. 69 used internally by the CFI drivers.
70 Ignore this option if you use run-time physmap configuration
71 (i.e., run-time calling physmap_configure()).
67 72
68config MTD_PHYSMAP_OF 73config MTD_PHYSMAP_OF
69 tristate "Flash device in physical memory map based on OF description" 74 tristate "Flash device in physical memory map based on OF description"
@@ -224,7 +229,7 @@ config MTD_CK804XROM
224 229
225config MTD_SCB2_FLASH 230config MTD_SCB2_FLASH
226 tristate "BIOS flash chip on Intel SCB2 boards" 231 tristate "BIOS flash chip on Intel SCB2 boards"
227 depends on X86 && MTD_JEDECPROBE && PCI 232 depends on X86 && MTD_JEDECPROBE
228 help 233 help
229 Support for treating the BIOS flash chip on Intel SCB2 boards 234 Support for treating the BIOS flash chip on Intel SCB2 boards
230 as an MTD device - with this you can reprogram your BIOS. 235 as an MTD device - with this you can reprogram your BIOS.
@@ -243,9 +248,19 @@ config MTD_NETtel
243 help 248 help
244 Support for flash chips on NETtel/SecureEdge/SnapGear boards. 249 Support for flash chips on NETtel/SecureEdge/SnapGear boards.
245 250
251config MTD_BCM963XX
252 tristate "Map driver for Broadcom BCM963xx boards"
253 depends on BCM63XX
254 select MTD_MAP_BANK_WIDTH_2
255 select MTD_CFI_I1
256 help
257 Support for parsing CFE image tag and creating MTD partitions on
258 Broadcom BCM63xx boards.
259
246config MTD_LANTIQ 260config MTD_LANTIQ
247 tristate "Lantiq SoC NOR support" 261 tristate "Lantiq SoC NOR support"
248 depends on LANTIQ 262 depends on LANTIQ
263 select MTD_PARTITIONS
249 help 264 help
250 Support for NOR flash attached to the Lantiq SoC's External Bus Unit. 265 Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
251 266
@@ -324,6 +339,17 @@ config MTD_SOLUTIONENGINE
324 This enables access to the flash chips on the Hitachi SolutionEngine and 339 This enables access to the flash chips on the Hitachi SolutionEngine and
325 similar boards. Say 'Y' if you are building a kernel for such a board. 340 similar boards. Say 'Y' if you are building a kernel for such a board.
326 341
342config MTD_ARM_INTEGRATOR
343 tristate "CFI Flash device mapped on ARM Integrator/P720T"
344 depends on ARM && MTD_CFI
345
346config MTD_CDB89712
347 tristate "Cirrus CDB89712 evaluation board mappings"
348 depends on MTD_CFI && ARCH_CDB89712
349 help
350 This enables access to the flash or ROM chips on the CDB89712 board.
351 If you have such a board, say 'Y'.
352
327config MTD_SA1100 353config MTD_SA1100
328 tristate "CFI Flash device mapped on StrongARM SA11x0" 354 tristate "CFI Flash device mapped on StrongARM SA11x0"
329 depends on MTD_CFI && ARCH_SA1100 355 depends on MTD_CFI && ARCH_SA1100
@@ -358,13 +384,27 @@ config MTD_IXP2000
358 IXP2000 based board and would like to use the flash chips on it, 384 IXP2000 based board and would like to use the flash chips on it,
359 say 'Y'. 385 say 'Y'.
360 386
387config MTD_FORTUNET
388 tristate "CFI Flash device mapped on the FortuNet board"
389 depends on MTD_CFI && SA1100_FORTUNET
390 help
391 This enables access to the Flash on the FortuNet board. If you
392 have such a board, say 'Y'.
393
361config MTD_AUTCPU12 394config MTD_AUTCPU12
362 bool "NV-RAM mapping AUTCPU12 board" 395 tristate "NV-RAM mapping AUTCPU12 board"
363 depends on ARCH_AUTCPU12 396 depends on ARCH_AUTCPU12
364 help 397 help
365 This enables access to the NV-RAM on autronix autcpu12 board. 398 This enables access to the NV-RAM on autronix autcpu12 board.
366 If you have such a board, say 'Y'. 399 If you have such a board, say 'Y'.
367 400
401config MTD_EDB7312
402 tristate "CFI Flash device mapped on EDB7312"
403 depends on ARCH_EDB7312 && MTD_CFI
404 help
405 This enables access to the CFI Flash on the Cogent EDB7312 board.
406 If you have such a board, say 'Y' here.
407
368config MTD_IMPA7 408config MTD_IMPA7
369 tristate "JEDEC Flash device mapped on impA7" 409 tristate "JEDEC Flash device mapped on impA7"
370 depends on ARM && MTD_JEDECPROBE 410 depends on ARM && MTD_JEDECPROBE
@@ -372,6 +412,14 @@ config MTD_IMPA7
372 This enables access to the NOR Flash on the impA7 board of 412 This enables access to the NOR Flash on the impA7 board of
373 implementa GmbH. If you have such a board, say 'Y' here. 413 implementa GmbH. If you have such a board, say 'Y' here.
374 414
415config MTD_CEIVA
416 tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame"
417 depends on MTD_JEDECPROBE && ARCH_CEIVA
418 help
419 This enables access to the flash chips on the Ceiva/Polaroid
420 PhotoMax Digital Picture Frame.
421 If you have such a device, say 'Y'.
422
375config MTD_H720X 423config MTD_H720X
376 tristate "Hynix evaluation board mappings" 424 tristate "Hynix evaluation board mappings"
377 depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) 425 depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
@@ -429,10 +477,22 @@ config MTD_GPIO_ADDR
429 477
430config MTD_UCLINUX 478config MTD_UCLINUX
431 bool "Generic uClinux RAM/ROM filesystem support" 479 bool "Generic uClinux RAM/ROM filesystem support"
432 depends on MTD_RAM=y && (!MMU || COLDFIRE) 480 depends on MTD_RAM=y && !MMU
433 help 481 help
434 Map driver to support image based filesystems for uClinux. 482 Map driver to support image based filesystems for uClinux.
435 483
484config MTD_WRSBC8260
485 tristate "Map driver for WindRiver PowerQUICC II MPC82xx board"
486 depends on (SBC82xx || SBC8560)
487 select MTD_MAP_BANK_WIDTH_4
488 select MTD_MAP_BANK_WIDTH_1
489 select MTD_CFI_I1
490 select MTD_CFI_I4
491 help
492 Map driver for WindRiver PowerQUICC II MPC82xx board. Drives
493 all three flash regions on CS0, CS1 and CS6 if they are configured
494 correctly by the boot loader.
495
436config MTD_DMV182 496config MTD_DMV182
437 tristate "Map driver for Dy-4 SVME/DMV-182 board." 497 tristate "Map driver for Dy-4 SVME/DMV-182 board."
438 depends on DMV182 498 depends on DMV182
@@ -501,4 +561,11 @@ config MTD_LATCH_ADDR
501 561
502 If compiled as a module, it will be called latch-addr-flash. 562 If compiled as a module, it will be called latch-addr-flash.
503 563
564config MTD_NOR_TEGRA
565 bool "NOR Flash mapping driver for NVIDIA Tegra based boards"
566 depends on MTD_COMPLEX_MAPPINGS && ARCH_TEGRA
567 help
568 This enables access routines for the flash chips on the NVIDIA Tegra
569 based boards.
570
504endmenu 571endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 4ded28711bc..bb5eef14a36 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MTD) += map_funcs.o
7endif 7endif
8 8
9# Chip mappings 9# Chip mappings
10obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
10obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o 11obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
11obj-$(CONFIG_MTD_DC21285) += dc21285.o 12obj-$(CONFIG_MTD_DC21285) += dc21285.o
12obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o 13obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
18obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o 19obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
19obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o 20obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
20obj-$(CONFIG_MTD_MBX860) += mbx860.o 21obj-$(CONFIG_MTD_MBX860) += mbx860.o
22obj-$(CONFIG_MTD_CEIVA) += ceiva.o
21obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 23obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
22obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 24obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
23obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 25obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
@@ -38,13 +40,16 @@ obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
38obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o 40obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
39obj-$(CONFIG_MTD_PCI) += pci.o 41obj-$(CONFIG_MTD_PCI) += pci.o
40obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o 42obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
43obj-$(CONFIG_MTD_EDB7312) += edb7312.o
41obj-$(CONFIG_MTD_IMPA7) += impa7.o 44obj-$(CONFIG_MTD_IMPA7) += impa7.o
45obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
42obj-$(CONFIG_MTD_UCLINUX) += uclinux.o 46obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
43obj-$(CONFIG_MTD_NETtel) += nettel.o 47obj-$(CONFIG_MTD_NETtel) += nettel.o
44obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o 48obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
45obj-$(CONFIG_MTD_H720X) += h720x-flash.o 49obj-$(CONFIG_MTD_H720X) += h720x-flash.o
46obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o 50obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
47obj-$(CONFIG_MTD_IXP2000) += ixp2000.o 51obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
52obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
48obj-$(CONFIG_MTD_DMV182) += dmv182.o 53obj-$(CONFIG_MTD_DMV182) += dmv182.o
49obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o 54obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
50obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o 55obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
@@ -52,5 +57,7 @@ obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
52obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o 57obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
53obj-$(CONFIG_MTD_VMU) += vmu-flash.o 58obj-$(CONFIG_MTD_VMU) += vmu-flash.o
54obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o 59obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
60obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
55obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o 61obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
56obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o 62obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
63obj-$(CONFIG_MTD_NOR_TEGRA) += tegra_nor.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index f7207b0a76d..e2875d6fe12 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -100,8 +100,8 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
100} 100}
101 101
102 102
103static int amd76xrom_init_one(struct pci_dev *pdev, 103static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
104 const struct pci_device_id *ent) 104 const struct pci_device_id *ent)
105{ 105{
106 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 106 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
107 u8 byte; 107 u8 byte;
@@ -289,7 +289,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
289} 289}
290 290
291 291
292static void amd76xrom_remove_one(struct pci_dev *pdev) 292static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
293{ 293{
294 struct amd76xrom_window *window = &amd76xrom_window; 294 struct amd76xrom_window *window = &amd76xrom_window;
295 295
@@ -347,3 +347,4 @@ module_exit(cleanup_amd76xrom);
347MODULE_LICENSE("GPL"); 347MODULE_LICENSE("GPL");
348MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>"); 348MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
349MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge"); 349MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
350
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index a2dc2ae4b24..e5bfd0e093b 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -15,54 +15,43 @@
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
18 */ 19 */
19#include <linux/sizes.h>
20 20
21#include <linux/module.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/ioport.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <linux/device.h> 26#include <asm/io.h>
25#include <linux/module.h> 27#include <asm/sizes.h>
26#include <linux/platform_device.h> 28#include <mach/hardware.h>
27 29#include <mach/autcpu12.h>
28#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
29#include <linux/mtd/map.h> 31#include <linux/mtd/map.h>
32#include <linux/mtd/partitions.h>
30 33
31struct autcpu12_nvram_priv {
32 struct mtd_info *mtd;
33 struct map_info map;
34};
35
36static int autcpu12_nvram_probe(struct platform_device *pdev)
37{
38 map_word tmp, save0, save1;
39 struct resource *res;
40 struct autcpu12_nvram_priv *priv;
41 34
42 priv = devm_kzalloc(&pdev->dev, 35static struct mtd_info *sram_mtd;
43 sizeof(struct autcpu12_nvram_priv), GFP_KERNEL);
44 if (!priv)
45 return -ENOMEM;
46 36
47 platform_set_drvdata(pdev, priv); 37struct map_info autcpu12_sram_map = {
38 .name = "SRAM",
39 .size = 32768,
40 .bankwidth = 4,
41 .phys = 0x12000000,
42};
48 43
49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 44static int __init init_autcpu12_sram (void)
50 if (!res) { 45{
51 dev_err(&pdev->dev, "failed to get memory resource\n"); 46 int err, save0, save1;
52 return -ENOENT;
53 }
54 47
55 priv->map.bankwidth = 4; 48 autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
56 priv->map.phys = res->start; 49 if (!autcpu12_sram_map.virt) {
57 priv->map.size = resource_size(res); 50 printk("Failed to ioremap autcpu12 NV-RAM space\n");
58 priv->map.virt = devm_request_and_ioremap(&pdev->dev, res); 51 err = -EIO;
59 strcpy((char *)priv->map.name, res->name); 52 goto out;
60 if (!priv->map.virt) {
61 dev_err(&pdev->dev, "failed to remap mem resource\n");
62 return -EBUSY;
63 } 53 }
64 54 simple_map_init(&autcpu_sram_map);
65 simple_map_init(&priv->map);
66 55
67 /* 56 /*
68 * Check for 32K/128K 57 * Check for 32K/128K
@@ -72,59 +61,65 @@ static int autcpu12_nvram_probe(struct platform_device *pdev)
72 * Read and check result on ofs 0x0 61 * Read and check result on ofs 0x0
73 * Restore contents 62 * Restore contents
74 */ 63 */
75 save0 = map_read(&priv->map, 0); 64 save0 = map_read32(&autcpu12_sram_map,0);
76 save1 = map_read(&priv->map, 0x10000); 65 save1 = map_read32(&autcpu12_sram_map,0x10000);
77 tmp.x[0] = ~save0.x[0]; 66 map_write32(&autcpu12_sram_map,~save0,0x10000);
78 map_write(&priv->map, tmp, 0x10000); 67 /* if we find this pattern on 0x0, we have 32K size
79 tmp = map_read(&priv->map, 0); 68 * restore contents and exit
80 /* if we find this pattern on 0x0, we have 32K size */ 69 */
81 if (!map_word_equal(&priv->map, tmp, save0)) { 70 if ( map_read32(&autcpu12_sram_map,0) != save0) {
82 map_write(&priv->map, save0, 0x0); 71 map_write32(&autcpu12_sram_map,save0,0x0);
83 priv->map.size = SZ_32K; 72 goto map;
84 } else
85 map_write(&priv->map, save1, 0x10000);
86
87 priv->mtd = do_map_probe("map_ram", &priv->map);
88 if (!priv->mtd) {
89 dev_err(&pdev->dev, "probing failed\n");
90 return -ENXIO;
91 } 73 }
92 74 /* We have a 128K found, restore 0x10000 and set size
93 priv->mtd->owner = THIS_MODULE; 75 * to 128K
94 priv->mtd->erasesize = 16; 76 */
95 priv->mtd->dev.parent = &pdev->dev; 77 map_write32(&autcpu12_sram_map,save1,0x10000);
96 if (!mtd_device_register(priv->mtd, NULL, 0)) { 78 autcpu12_sram_map.size = SZ_128K;
97 dev_info(&pdev->dev, 79
98 "NV-RAM device size %ldKiB registered on AUTCPU12\n", 80map:
99 priv->map.size / SZ_1K); 81 sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
100 return 0; 82 if (!sram_mtd) {
83 printk("NV-RAM probe failed\n");
84 err = -ENXIO;
85 goto out_ioremap;
101 } 86 }
102 87
103 map_destroy(priv->mtd); 88 sram_mtd->owner = THIS_MODULE;
104 dev_err(&pdev->dev, "NV-RAM device addition failed\n"); 89 sram_mtd->erasesize = 16;
105 return -ENOMEM;
106}
107 90
108static int autcpu12_nvram_remove(struct platform_device *pdev) 91 if (mtd_device_register(sram_mtd, NULL, 0)) {
109{ 92 printk("NV-RAM device addition failed\n");
110 struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev); 93 err = -ENOMEM;
94 goto out_probe;
95 }
111 96
112 mtd_device_unregister(priv->mtd); 97 printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
113 map_destroy(priv->mtd);
114 98
115 return 0; 99 return 0;
100
101out_probe:
102 map_destroy(sram_mtd);
103 sram_mtd = 0;
104
105out_ioremap:
106 iounmap((void *)autcpu12_sram_map.virt);
107out:
108 return err;
116} 109}
117 110
118static struct platform_driver autcpu12_nvram_driver = { 111static void __exit cleanup_autcpu12_maps(void)
119 .driver = { 112{
120 .name = "autcpu12_nvram", 113 if (sram_mtd) {
121 .owner = THIS_MODULE, 114 mtd_device_unregister(sram_mtd);
122 }, 115 map_destroy(sram_mtd);
123 .probe = autcpu12_nvram_probe, 116 iounmap((void *)autcpu12_sram_map.virt);
124 .remove = autcpu12_nvram_remove, 117 }
125}; 118}
126module_platform_driver(autcpu12_nvram_driver); 119
120module_init(init_autcpu12_sram);
121module_exit(cleanup_autcpu12_maps);
127 122
128MODULE_AUTHOR("Thomas Gleixner"); 123MODULE_AUTHOR("Thomas Gleixner");
129MODULE_DESCRIPTION("autcpu12 NVRAM map driver"); 124MODULE_DESCRIPTION("autcpu12 NV-RAM map driver");
130MODULE_LICENSE("GPL"); 125MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index f833edfaab7..67815eed2f0 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -30,8 +30,7 @@
30#include <linux/io.h> 30#include <linux/io.h>
31#include <asm/unaligned.h> 31#include <asm/unaligned.h>
32 32
33#define pr_devinit(fmt, args...) \ 33#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
34 ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
35 34
36#define DRIVER_NAME "bfin-async-flash" 35#define DRIVER_NAME "bfin-async-flash"
37 36
@@ -42,6 +41,7 @@ struct async_state {
42 uint32_t flash_ambctl0, flash_ambctl1; 41 uint32_t flash_ambctl0, flash_ambctl1;
43 uint32_t save_ambctl0, save_ambctl1; 42 uint32_t save_ambctl0, save_ambctl1;
44 unsigned long irq_flags; 43 unsigned long irq_flags;
44 struct mtd_partition *parts;
45}; 45};
46 46
47static void switch_to_flash(struct async_state *state) 47static void switch_to_flash(struct async_state *state)
@@ -124,7 +124,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
124 124
125static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; 125static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
126 126
127static int bfin_flash_probe(struct platform_device *pdev) 127static int __devinit bfin_flash_probe(struct platform_device *pdev)
128{ 128{
129 int ret; 129 int ret;
130 struct physmap_flash_data *pdata = pdev->dev.platform_data; 130 struct physmap_flash_data *pdata = pdev->dev.platform_data;
@@ -165,19 +165,30 @@ static int bfin_flash_probe(struct platform_device *pdev)
165 return -ENXIO; 165 return -ENXIO;
166 } 166 }
167 167
168 mtd_device_parse_register(state->mtd, part_probe_types, NULL, 168 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
169 pdata->parts, pdata->nr_parts); 169 if (ret > 0) {
170 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
171 mtd_device_register(state->mtd, pdata->parts, ret);
172 state->parts = pdata->parts;
173 } else if (pdata->nr_parts) {
174 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
175 mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
176 } else {
177 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
178 mtd_device_register(state->mtd, NULL, 0);
179 }
170 180
171 platform_set_drvdata(pdev, state); 181 platform_set_drvdata(pdev, state);
172 182
173 return 0; 183 return 0;
174} 184}
175 185
176static int bfin_flash_remove(struct platform_device *pdev) 186static int __devexit bfin_flash_remove(struct platform_device *pdev)
177{ 187{
178 struct async_state *state = platform_get_drvdata(pdev); 188 struct async_state *state = platform_get_drvdata(pdev);
179 gpio_free(state->enet_flash_pin); 189 gpio_free(state->enet_flash_pin);
180 mtd_device_unregister(state->mtd); 190 mtd_device_unregister(state->mtd);
191 kfree(state->parts);
181 map_destroy(state->mtd); 192 map_destroy(state->mtd);
182 kfree(state); 193 kfree(state);
183 return 0; 194 return 0;
@@ -185,13 +196,23 @@ static int bfin_flash_remove(struct platform_device *pdev)
185 196
186static struct platform_driver bfin_flash_driver = { 197static struct platform_driver bfin_flash_driver = {
187 .probe = bfin_flash_probe, 198 .probe = bfin_flash_probe,
188 .remove = bfin_flash_remove, 199 .remove = __devexit_p(bfin_flash_remove),
189 .driver = { 200 .driver = {
190 .name = DRIVER_NAME, 201 .name = DRIVER_NAME,
191 }, 202 },
192}; 203};
193 204
194module_platform_driver(bfin_flash_driver); 205static int __init bfin_flash_init(void)
206{
207 return platform_driver_register(&bfin_flash_driver);
208}
209module_init(bfin_flash_init);
210
211static void __exit bfin_flash_exit(void)
212{
213 platform_driver_unregister(&bfin_flash_driver);
214}
215module_exit(bfin_flash_exit);
195 216
196MODULE_LICENSE("GPL"); 217MODULE_LICENSE("GPL");
197MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank"); 218MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 586a1c77e48..3d0e762fa5f 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
112} 112}
113 113
114 114
115static int ck804xrom_init_one(struct pci_dev *pdev, 115static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
116 const struct pci_device_id *ent) 116 const struct pci_device_id *ent)
117{ 117{
118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 118 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
119 u8 byte; 119 u8 byte;
@@ -320,7 +320,7 @@ static int ck804xrom_init_one(struct pci_dev *pdev,
320} 320}
321 321
322 322
323static void ck804xrom_remove_one(struct pci_dev *pdev) 323static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
324{ 324{
325 struct ck804xrom_window *window = &ck804xrom_window; 325 struct ck804xrom_window *window = &ck804xrom_window;
326 326
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 080f06053bd..7a9e1989c97 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,10 +145,14 @@ static struct map_info dc21285_map = {
145 145
146 146
147/* Partition stuff */ 147/* Partition stuff */
148static struct mtd_partition *dc21285_parts;
148static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 149static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
149 150
150static int __init init_dc21285(void) 151static int __init init_dc21285(void)
151{ 152{
153
154 int nrparts;
155
152 /* Determine bankwidth */ 156 /* Determine bankwidth */
153 switch (*CSR_SA110_CNTL & (3<<14)) { 157 switch (*CSR_SA110_CNTL & (3<<14)) {
154 case SA110_CNTL_ROMWIDTH_8: 158 case SA110_CNTL_ROMWIDTH_8:
@@ -196,7 +200,8 @@ static int __init init_dc21285(void)
196 200
197 dc21285_mtd->owner = THIS_MODULE; 201 dc21285_mtd->owner = THIS_MODULE;
198 202
199 mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0); 203 nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
204 mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
200 205
201 if(machine_is_ebsa285()) { 206 if(machine_is_ebsa285()) {
202 /* 207 /*
@@ -219,6 +224,8 @@ static int __init init_dc21285(void)
219static void __exit cleanup_dc21285(void) 224static void __exit cleanup_dc21285(void)
220{ 225{
221 mtd_device_unregister(dc21285_mtd); 226 mtd_device_unregister(dc21285_mtd);
227 if (dc21285_parts)
228 kfree(dc21285_parts);
222 map_destroy(dc21285_mtd); 229 map_destroy(dc21285_mtd);
223 iounmap(dc21285_map.virt); 230 iounmap(dc21285_map.virt);
224} 231}
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index f784cf0caa1..08322b1c3e8 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
144 pci_dev_put(window->pdev); 144 pci_dev_put(window->pdev);
145} 145}
146 146
147static int esb2rom_init_one(struct pci_dev *pdev, 147static int __devinit esb2rom_init_one(struct pci_dev *pdev,
148 const struct pci_device_id *ent) 148 const struct pci_device_id *ent)
149{ 149{
150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 150 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
151 struct esb2rom_window *window = &esb2rom_window; 151 struct esb2rom_window *window = &esb2rom_window;
@@ -378,13 +378,13 @@ static int esb2rom_init_one(struct pci_dev *pdev,
378 return 0; 378 return 0;
379} 379}
380 380
381static void esb2rom_remove_one(struct pci_dev *pdev) 381static void __devexit esb2rom_remove_one (struct pci_dev *pdev)
382{ 382{
383 struct esb2rom_window *window = &esb2rom_window; 383 struct esb2rom_window *window = &esb2rom_window;
384 esb2rom_cleanup(window); 384 esb2rom_cleanup(window);
385} 385}
386 386
387static struct pci_device_id esb2rom_pci_tbl[] = { 387static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = {
388 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, 388 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
389 PCI_ANY_ID, PCI_ANY_ID, }, 389 PCI_ANY_ID, PCI_ANY_ID, },
390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, 390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7b643de2500..7568c5f8b8a 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -26,8 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/types.h> 27#include <linux/types.h>
28 28
29#define pr_devinit(fmt, args...) \ 29#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
30 ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
31 30
32#define DRIVER_NAME "gpio-addr-flash" 31#define DRIVER_NAME "gpio-addr-flash"
33#define PFX DRIVER_NAME ": " 32#define PFX DRIVER_NAME ": "
@@ -143,8 +142,7 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
143 * 142 *
144 * See gf_copy_from() caveat. 143 * See gf_copy_from() caveat.
145 */ 144 */
146static void gf_copy_to(struct map_info *map, unsigned long to, 145static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
147 const void *from, ssize_t len)
148{ 146{
149 struct async_state *state = gf_map_info_to_state(map); 147 struct async_state *state = gf_map_info_to_state(map);
150 148
@@ -187,8 +185,9 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
187 * ... 185 * ...
188 * }; 186 * };
189 */ 187 */
190static int gpio_flash_probe(struct platform_device *pdev) 188static int __devinit gpio_flash_probe(struct platform_device *pdev)
191{ 189{
190 int nr_parts;
192 size_t i, arr_size; 191 size_t i, arr_size;
193 struct physmap_flash_data *pdata; 192 struct physmap_flash_data *pdata;
194 struct resource *memory; 193 struct resource *memory;
@@ -253,14 +252,25 @@ static int gpio_flash_probe(struct platform_device *pdev)
253 return -ENXIO; 252 return -ENXIO;
254 } 253 }
255 254
255 nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
256 &pdata->parts, 0);
257 if (nr_parts > 0) {
258 pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
259 kfree(pdata->parts);
260 } else if (pdata->nr_parts) {
261 pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
262 nr_parts = pdata->nr_parts;
263 } else {
264 pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
265 nr_parts = 0;
266 }
256 267
257 mtd_device_parse_register(state->mtd, part_probe_types, NULL, 268 mtd_device_register(state->mtd, pdata->parts, nr_parts);
258 pdata->parts, pdata->nr_parts);
259 269
260 return 0; 270 return 0;
261} 271}
262 272
263static int gpio_flash_remove(struct platform_device *pdev) 273static int __devexit gpio_flash_remove(struct platform_device *pdev)
264{ 274{
265 struct async_state *state = platform_get_drvdata(pdev); 275 struct async_state *state = platform_get_drvdata(pdev);
266 size_t i = 0; 276 size_t i = 0;
@@ -275,13 +285,23 @@ static int gpio_flash_remove(struct platform_device *pdev)
275 285
276static struct platform_driver gpio_flash_driver = { 286static struct platform_driver gpio_flash_driver = {
277 .probe = gpio_flash_probe, 287 .probe = gpio_flash_probe,
278 .remove = gpio_flash_remove, 288 .remove = __devexit_p(gpio_flash_remove),
279 .driver = { 289 .driver = {
280 .name = DRIVER_NAME, 290 .name = DRIVER_NAME,
281 }, 291 },
282}; 292};
283 293
284module_platform_driver(gpio_flash_driver); 294static int __init gpio_flash_init(void)
295{
296 return platform_driver_register(&gpio_flash_driver);
297}
298module_init(gpio_flash_init);
299
300static void __exit gpio_flash_exit(void)
301{
302 platform_driver_unregister(&gpio_flash_driver);
303}
304module_exit(gpio_flash_exit);
285 305
286MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>"); 306MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
287MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios"); 307MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 8ed6cb4529d..7f035860a36 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -58,11 +58,18 @@ static struct mtd_partition h720x_partitions[] = {
58 58
59#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) 59#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
60 60
61static int nr_mtd_parts;
62static struct mtd_partition *mtd_parts;
63static const char *probes[] = { "cmdlinepart", NULL };
64
61/* 65/*
62 * Initialize FLASH support 66 * Initialize FLASH support
63 */ 67 */
64static int __init h720x_mtd_init(void) 68static int __init h720x_mtd_init(void)
65{ 69{
70
71 char *part_type = NULL;
72
66 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); 73 h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
67 74
68 if (!h720x_map.virt) { 75 if (!h720x_map.virt) {
@@ -85,8 +92,16 @@ static int __init h720x_mtd_init(void)
85 if (mymtd) { 92 if (mymtd) {
86 mymtd->owner = THIS_MODULE; 93 mymtd->owner = THIS_MODULE;
87 94
88 mtd_device_parse_register(mymtd, NULL, NULL, 95 nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
89 h720x_partitions, NUM_PARTITIONS); 96 if (nr_mtd_parts > 0)
97 part_type = "command line";
98 if (nr_mtd_parts <= 0) {
99 mtd_parts = h720x_partitions;
100 nr_mtd_parts = NUM_PARTITIONS;
101 part_type = "builtin";
102 }
103 printk(KERN_INFO "Using %s partition table\n", part_type);
104 mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
90 return 0; 105 return 0;
91 } 106 }
92 107
@@ -105,6 +120,10 @@ static void __exit h720x_mtd_cleanup(void)
105 map_destroy(mymtd); 120 map_destroy(mymtd);
106 } 121 }
107 122
123 /* Free partition info, if commandline partition was used */
124 if (mtd_parts && (mtd_parts != h720x_partitions))
125 kfree (mtd_parts);
126
108 if (h720x_map.virt) { 127 if (h720x_map.virt) {
109 iounmap((void *)h720x_map.virt); 128 iounmap((void *)h720x_map.virt);
110 h720x_map.virt = 0; 129 h720x_map.virt = 0;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index c7478e18f48..6689dcb3124 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
84} 84}
85 85
86 86
87static int ichxrom_init_one(struct pci_dev *pdev, 87static int __devinit ichxrom_init_one (struct pci_dev *pdev,
88 const struct pci_device_id *ent) 88 const struct pci_device_id *ent)
89{ 89{
90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 90 static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
91 struct ichxrom_window *window = &ichxrom_window; 91 struct ichxrom_window *window = &ichxrom_window;
@@ -315,13 +315,13 @@ static int ichxrom_init_one(struct pci_dev *pdev,
315} 315}
316 316
317 317
318static void ichxrom_remove_one(struct pci_dev *pdev) 318static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
319{ 319{
320 struct ichxrom_window *window = &ichxrom_window; 320 struct ichxrom_window *window = &ichxrom_window;
321 ichxrom_cleanup(window); 321 ichxrom_cleanup(window);
322} 322}
323 323
324static struct pci_device_id ichxrom_pci_tbl[] = { 324static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, 325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
326 PCI_ANY_ID, PCI_ANY_ID, }, 326 PCI_ANY_ID, PCI_ANY_ID, },
327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, 327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 834a06c56f5..404a50cbafa 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -49,7 +49,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
49/* 49/*
50 * MTD partitioning stuff 50 * MTD partitioning stuff
51 */ 51 */
52static struct mtd_partition partitions[] = 52static struct mtd_partition static_partitions[] =
53{ 53{
54 { 54 {
55 .name = "FileSystem", 55 .name = "FileSystem",
@@ -58,10 +58,16 @@ static struct mtd_partition partitions[] =
58 }, 58 },
59}; 59};
60 60
61static int mtd_parts_nb[NUM_FLASHBANKS];
62static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
63
64static const char *probes[] = { "cmdlinepart", NULL };
65
61static int __init init_impa7(void) 66static int __init init_impa7(void)
62{ 67{
63 static const char *rom_probe_types[] = PROBETYPES; 68 static const char *rom_probe_types[] = PROBETYPES;
64 const char **type; 69 const char **type;
70 const char *part_type = 0;
65 int i; 71 int i;
66 static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { 72 static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
67 { WINDOW_ADDR0, WINDOW_SIZE0 }, 73 { WINDOW_ADDR0, WINDOW_SIZE0 },
@@ -91,9 +97,23 @@ static int __init init_impa7(void)
91 if (impa7_mtd[i]) { 97 if (impa7_mtd[i]) {
92 impa7_mtd[i]->owner = THIS_MODULE; 98 impa7_mtd[i]->owner = THIS_MODULE;
93 devicesfound++; 99 devicesfound++;
94 mtd_device_parse_register(impa7_mtd[i], NULL, NULL, 100 mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
95 partitions, 101 probes,
96 ARRAY_SIZE(partitions)); 102 &mtd_parts[i],
103 0);
104 if (mtd_parts_nb[i] > 0) {
105 part_type = "command line";
106 } else {
107 mtd_parts[i] = static_partitions;
108 mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
109 part_type = "static";
110 }
111
112 printk(KERN_NOTICE MSG_PREFIX
113 "using %s partition definition\n",
114 part_type);
115 mtd_device_register(impa7_mtd[i],
116 mtd_parts[i], mtd_parts_nb[i]);
97 } 117 }
98 else 118 else
99 iounmap((void *)impa7_map[i].virt); 119 iounmap((void *)impa7_map[i].virt);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index b14053b2502..d2f47be8754 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -44,6 +44,7 @@ struct vr_nor_mtd {
44 void __iomem *csr_base; 44 void __iomem *csr_base;
45 struct map_info map; 45 struct map_info map;
46 struct mtd_info *info; 46 struct mtd_info *info;
47 int nr_parts;
47 struct pci_dev *dev; 48 struct pci_dev *dev;
48}; 49};
49 50
@@ -63,24 +64,28 @@ struct vr_nor_mtd {
63#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */ 64#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
64#define TIMING_MASK 0x3FFF0000 65#define TIMING_MASK 0x3FFF0000
65 66
66static void vr_nor_destroy_partitions(struct vr_nor_mtd *p) 67static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
67{ 68{
68 mtd_device_unregister(p->info); 69 mtd_device_unregister(p->info);
69} 70}
70 71
71static int vr_nor_init_partitions(struct vr_nor_mtd *p) 72static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
72{ 73{
74 struct mtd_partition *parts;
75 static const char *part_probes[] = { "cmdlinepart", NULL };
76
73 /* register the flash bank */ 77 /* register the flash bank */
74 /* partition the flash bank */ 78 /* partition the flash bank */
75 return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0); 79 p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
80 return mtd_device_register(p->info, parts, p->nr_parts);
76} 81}
77 82
78static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 83static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
79{ 84{
80 map_destroy(p->info); 85 map_destroy(p->info);
81} 86}
82 87
83static int vr_nor_mtd_setup(struct vr_nor_mtd *p) 88static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
84{ 89{
85 static const char *probe_types[] = 90 static const char *probe_types[] =
86 { "cfi_probe", "jedec_probe", NULL }; 91 { "cfi_probe", "jedec_probe", NULL };
@@ -96,7 +101,7 @@ static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
96 return 0; 101 return 0;
97} 102}
98 103
99static void vr_nor_destroy_maps(struct vr_nor_mtd *p) 104static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
100{ 105{
101 unsigned int exp_timing_cs0; 106 unsigned int exp_timing_cs0;
102 107
@@ -116,7 +121,7 @@ static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
116 * Initialize the map_info structure and map the flash. 121 * Initialize the map_info structure and map the flash.
117 * Returns 0 on success, nonzero otherwise. 122 * Returns 0 on success, nonzero otherwise.
118 */ 123 */
119static int vr_nor_init_maps(struct vr_nor_mtd *p) 124static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
120{ 125{
121 unsigned long csr_phys, csr_len; 126 unsigned long csr_phys, csr_len;
122 unsigned long win_phys, win_len; 127 unsigned long win_phys, win_len;
@@ -176,7 +181,7 @@ static struct pci_device_id vr_nor_pci_ids[] = {
176 {0,} 181 {0,}
177}; 182};
178 183
179static void vr_nor_pci_remove(struct pci_dev *dev) 184static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
180{ 185{
181 struct vr_nor_mtd *p = pci_get_drvdata(dev); 186 struct vr_nor_mtd *p = pci_get_drvdata(dev);
182 187
@@ -189,7 +194,8 @@ static void vr_nor_pci_remove(struct pci_dev *dev)
189 pci_disable_device(dev); 194 pci_disable_device(dev);
190} 195}
191 196
192static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 197static int __devinit
198vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
193{ 199{
194 struct vr_nor_mtd *p = NULL; 200 struct vr_nor_mtd *p = NULL;
195 unsigned int exp_timing_cs0; 201 unsigned int exp_timing_cs0;
@@ -255,11 +261,22 @@ static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
255static struct pci_driver vr_nor_pci_driver = { 261static struct pci_driver vr_nor_pci_driver = {
256 .name = DRV_NAME, 262 .name = DRV_NAME,
257 .probe = vr_nor_pci_probe, 263 .probe = vr_nor_pci_probe,
258 .remove = vr_nor_pci_remove, 264 .remove = __devexit_p(vr_nor_pci_remove),
259 .id_table = vr_nor_pci_ids, 265 .id_table = vr_nor_pci_ids,
260}; 266};
261 267
262module_pci_driver(vr_nor_pci_driver); 268static int __init vr_nor_mtd_init(void)
269{
270 return pci_register_driver(&vr_nor_pci_driver);
271}
272
273static void __exit vr_nor_mtd_exit(void)
274{
275 pci_unregister_driver(&vr_nor_pci_driver);
276}
277
278module_init(vr_nor_mtd_init);
279module_exit(vr_nor_mtd_exit);
263 280
264MODULE_AUTHOR("Andy Lowe"); 281MODULE_AUTHOR("Andy Lowe");
265MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range"); 282MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 4a41ced0f71..1594a802631 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -38,6 +38,7 @@
38struct ixp2000_flash_info { 38struct ixp2000_flash_info {
39 struct mtd_info *mtd; 39 struct mtd_info *mtd;
40 struct map_info map; 40 struct map_info map;
41 struct mtd_partition *partitions;
41 struct resource *res; 42 struct resource *res;
42}; 43};
43 44
@@ -124,6 +125,8 @@ static int ixp2000_flash_remove(struct platform_device *dev)
124 if (info->map.map_priv_1) 125 if (info->map.map_priv_1)
125 iounmap((void *) info->map.map_priv_1); 126 iounmap((void *) info->map.map_priv_1);
126 127
128 kfree(info->partitions);
129
127 if (info->res) { 130 if (info->res) {
128 release_resource(info->res); 131 release_resource(info->res);
129 kfree(info->res); 132 kfree(info->res);
@@ -226,7 +229,13 @@ static int ixp2000_flash_probe(struct platform_device *dev)
226 } 229 }
227 info->mtd->owner = THIS_MODULE; 230 info->mtd->owner = THIS_MODULE;
228 231
229 err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0); 232 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
233 if (err > 0) {
234 err = mtd_device_register(info->mtd, info->partitions, err);
235 if(err)
236 dev_err(&dev->dev, "Could not parse partitions\n");
237 }
238
230 if (err) 239 if (err)
231 goto Error; 240 goto Error;
232 241
@@ -246,8 +255,18 @@ static struct platform_driver ixp2000_flash_driver = {
246 }, 255 },
247}; 256};
248 257
249module_platform_driver(ixp2000_flash_driver); 258static int __init ixp2000_flash_init(void)
259{
260 return platform_driver_register(&ixp2000_flash_driver);
261}
262
263static void __exit ixp2000_flash_exit(void)
264{
265 platform_driver_unregister(&ixp2000_flash_driver);
266}
250 267
268module_init(ixp2000_flash_init);
269module_exit(ixp2000_flash_exit);
251MODULE_LICENSE("GPL"); 270MODULE_LICENSE("GPL");
252MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); 271MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
253MODULE_ALIAS("platform:IXP2000-Flash"); 272MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index e864fc6c58f..155b21942f4 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -145,6 +145,7 @@ static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
145struct ixp4xx_flash_info { 145struct ixp4xx_flash_info {
146 struct mtd_info *mtd; 146 struct mtd_info *mtd;
147 struct map_info map; 147 struct map_info map;
148 struct mtd_partition *partitions;
148 struct resource *res; 149 struct resource *res;
149}; 150};
150 151
@@ -167,6 +168,8 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
167 if (info->map.virt) 168 if (info->map.virt)
168 iounmap(info->map.virt); 169 iounmap(info->map.virt);
169 170
171 kfree(info->partitions);
172
170 if (info->res) { 173 if (info->res) {
171 release_resource(info->res); 174 release_resource(info->res);
172 kfree(info->res); 175 kfree(info->res);
@@ -182,9 +185,8 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
182{ 185{
183 struct flash_platform_data *plat = dev->dev.platform_data; 186 struct flash_platform_data *plat = dev->dev.platform_data;
184 struct ixp4xx_flash_info *info; 187 struct ixp4xx_flash_info *info;
185 struct mtd_part_parser_data ppdata = { 188 const char *part_type = NULL;
186 .origin = dev->resource->start, 189 int nr_parts = 0;
187 };
188 int err = -1; 190 int err = -1;
189 191
190 if (!plat) 192 if (!plat)
@@ -250,12 +252,28 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
250 /* Use the fast version */ 252 /* Use the fast version */
251 info->map.write = ixp4xx_write16; 253 info->map.write = ixp4xx_write16;
252 254
253 err = mtd_device_parse_register(info->mtd, probes, &ppdata, 255 nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
254 plat->parts, plat->nr_parts); 256 dev->resource->start);
255 if (err) { 257 if (nr_parts > 0) {
258 part_type = "dynamic";
259 } else {
260 info->partitions = plat->parts;
261 nr_parts = plat->nr_parts;
262 part_type = "static";
263 }
264 if (nr_parts == 0)
265 printk(KERN_NOTICE "IXP4xx flash: no partition info "
266 "available, registering whole flash\n");
267 else
268 printk(KERN_NOTICE "IXP4xx flash: using %s partition "
269 "definition\n", part_type);
270
271 err = mtd_device_register(info->mtd, info->partitions, nr_parts);
272 if (err)
256 printk(KERN_ERR "Could not parse partitions\n"); 273 printk(KERN_ERR "Could not parse partitions\n");
274
275 if (err)
257 goto Error; 276 goto Error;
258 }
259 277
260 return 0; 278 return 0;
261 279
@@ -273,7 +291,19 @@ static struct platform_driver ixp4xx_flash_driver = {
273 }, 291 },
274}; 292};
275 293
276module_platform_driver(ixp4xx_flash_driver); 294static int __init ixp4xx_flash_init(void)
295{
296 return platform_driver_register(&ixp4xx_flash_driver);
297}
298
299static void __exit ixp4xx_flash_exit(void)
300{
301 platform_driver_unregister(&ixp4xx_flash_driver);
302}
303
304
305module_init(ixp4xx_flash_init);
306module_exit(ixp4xx_flash_exit);
277 307
278MODULE_LICENSE("GPL"); 308MODULE_LICENSE("GPL");
279MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems"); 309MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 74bd98ee635..dd0360ba241 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -27,21 +27,17 @@ static struct mtd_info *mymtd;
27 27
28 28
29/* Is this really the vpp port? */ 29/* Is this really the vpp port? */
30static DEFINE_SPINLOCK(l440gx_vpp_lock);
31static int l440gx_vpp_refcnt;
32static void l440gx_set_vpp(struct map_info *map, int vpp) 30static void l440gx_set_vpp(struct map_info *map, int vpp)
33{ 31{
34 unsigned long flags; 32 unsigned long l;
35 33
36 spin_lock_irqsave(&l440gx_vpp_lock, flags); 34 l = inl(VPP_PORT);
37 if (vpp) { 35 if (vpp) {
38 if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */ 36 l |= 1;
39 outl(inl(VPP_PORT) | 1, VPP_PORT);
40 } else { 37 } else {
41 if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */ 38 l &= ~1;
42 outl(inl(VPP_PORT) & ~1, VPP_PORT);
43 } 39 }
44 spin_unlock_irqrestore(&l440gx_vpp_lock, flags); 40 outl(l, VPP_PORT);
45} 41}
46 42
47static struct map_info l440gx_map = { 43static struct map_info l440gx_map = {
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 3c3c791eb96..a90cabd7b84 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
19#include <linux/mtd/cfi.h> 19#include <linux/mtd/cfi.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mtd/physmap.h> 21#include <linux/mtd/physmap.h>
22#include <linux/of.h>
23 22
24#include <lantiq_soc.h> 23#include <lantiq_soc.h>
24#include <lantiq_platform.h>
25 25
26/* 26/*
27 * The NOR flash is connected to the same external bus unit (EBU) as PCI. 27 * The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,9 +44,7 @@ struct ltq_mtd {
44 struct map_info *map; 44 struct map_info *map;
45}; 45};
46 46
47static const char ltq_map_name[] = "ltq_nor"; 47static char ltq_map_name[] = "ltq_nor";
48static const char *ltq_probe_types[] = {
49 "cmdlinepart", "ofpart", NULL };
50 48
51static map_word 49static map_word
52ltq_read16(struct map_info *map, unsigned long adr) 50ltq_read16(struct map_info *map, unsigned long adr)
@@ -109,40 +107,48 @@ ltq_copy_to(struct map_info *map, unsigned long to,
109 spin_unlock_irqrestore(&ebu_lock, flags); 107 spin_unlock_irqrestore(&ebu_lock, flags);
110} 108}
111 109
112static int 110static const char const *part_probe_types[] = { "cmdlinepart", NULL };
111
112static int __init
113ltq_mtd_probe(struct platform_device *pdev) 113ltq_mtd_probe(struct platform_device *pdev)
114{ 114{
115 struct mtd_part_parser_data ppdata; 115 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
116 struct ltq_mtd *ltq_mtd; 116 struct ltq_mtd *ltq_mtd;
117 struct mtd_partition *parts;
118 struct resource *res;
119 int nr_parts = 0;
117 struct cfi_private *cfi; 120 struct cfi_private *cfi;
118 int err; 121 int err;
119 122
120 if (of_machine_is_compatible("lantiq,falcon") &&
121 (ltq_boot_select() != BS_FLASH)) {
122 dev_err(&pdev->dev, "invalid bootstrap options\n");
123 return -ENODEV;
124 }
125
126 ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); 123 ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
127 platform_set_drvdata(pdev, ltq_mtd); 124 platform_set_drvdata(pdev, ltq_mtd);
128 125
129 ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 126 ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
130 if (!ltq_mtd->res) { 127 if (!ltq_mtd->res) {
131 dev_err(&pdev->dev, "failed to get memory resource\n"); 128 dev_err(&pdev->dev, "failed to get memory resource");
132 err = -ENOENT; 129 err = -ENOENT;
133 goto err_out; 130 goto err_out;
134 } 131 }
135 132
136 ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); 133 res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
137 ltq_mtd->map->phys = ltq_mtd->res->start; 134 resource_size(ltq_mtd->res), dev_name(&pdev->dev));
138 ltq_mtd->map->size = resource_size(ltq_mtd->res); 135 if (!ltq_mtd->res) {
139 ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res); 136 dev_err(&pdev->dev, "failed to request mem resource");
140 if (!ltq_mtd->map->virt) {
141 dev_err(&pdev->dev, "failed to remap mem resource\n");
142 err = -EBUSY; 137 err = -EBUSY;
143 goto err_out; 138 goto err_out;
144 } 139 }
145 140
141 ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
142 ltq_mtd->map->phys = res->start;
143 ltq_mtd->map->size = resource_size(res);
144 ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
145 ltq_mtd->map->phys, ltq_mtd->map->size);
146 if (!ltq_mtd->map->virt) {
147 dev_err(&pdev->dev, "failed to ioremap!\n");
148 err = -ENOMEM;
149 goto err_free;
150 }
151
146 ltq_mtd->map->name = ltq_map_name; 152 ltq_mtd->map->name = ltq_map_name;
147 ltq_mtd->map->bankwidth = 2; 153 ltq_mtd->map->bankwidth = 2;
148 ltq_mtd->map->read = ltq_read16; 154 ltq_mtd->map->read = ltq_read16;
@@ -157,7 +163,7 @@ ltq_mtd_probe(struct platform_device *pdev)
157 if (!ltq_mtd->mtd) { 163 if (!ltq_mtd->mtd) {
158 dev_err(&pdev->dev, "probing failed\n"); 164 dev_err(&pdev->dev, "probing failed\n");
159 err = -ENXIO; 165 err = -ENXIO;
160 goto err_free; 166 goto err_unmap;
161 } 167 }
162 168
163 ltq_mtd->mtd->owner = THIS_MODULE; 169 ltq_mtd->mtd->owner = THIS_MODULE;
@@ -166,9 +172,17 @@ ltq_mtd_probe(struct platform_device *pdev)
166 cfi->addr_unlock1 ^= 1; 172 cfi->addr_unlock1 ^= 1;
167 cfi->addr_unlock2 ^= 1; 173 cfi->addr_unlock2 ^= 1;
168 174
169 ppdata.of_node = pdev->dev.of_node; 175 nr_parts = parse_mtd_partitions(ltq_mtd->mtd,
170 err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, 176 part_probe_types, &parts, 0);
171 &ppdata, NULL, 0); 177 if (nr_parts > 0) {
178 dev_info(&pdev->dev,
179 "using %d partitions from cmdline", nr_parts);
180 } else {
181 nr_parts = ltq_mtd_data->nr_parts;
182 parts = ltq_mtd_data->parts;
183 }
184
185 err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts);
172 if (err) { 186 if (err) {
173 dev_err(&pdev->dev, "failed to add partitions\n"); 187 dev_err(&pdev->dev, "failed to add partitions\n");
174 goto err_destroy; 188 goto err_destroy;
@@ -178,6 +192,8 @@ ltq_mtd_probe(struct platform_device *pdev)
178 192
179err_destroy: 193err_destroy:
180 map_destroy(ltq_mtd->mtd); 194 map_destroy(ltq_mtd->mtd);
195err_unmap:
196 iounmap(ltq_mtd->map->virt);
181err_free: 197err_free:
182 kfree(ltq_mtd->map); 198 kfree(ltq_mtd->map);
183err_out: 199err_out:
@@ -185,39 +201,50 @@ err_out:
185 return err; 201 return err;
186} 202}
187 203
188static int 204static int __devexit
189ltq_mtd_remove(struct platform_device *pdev) 205ltq_mtd_remove(struct platform_device *pdev)
190{ 206{
191 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); 207 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
192 208
193 if (ltq_mtd) { 209 if (ltq_mtd) {
194 if (ltq_mtd->mtd) { 210 if (ltq_mtd->mtd) {
195 mtd_device_unregister(ltq_mtd->mtd); 211 del_mtd_partitions(ltq_mtd->mtd);
196 map_destroy(ltq_mtd->mtd); 212 map_destroy(ltq_mtd->mtd);
197 } 213 }
214 if (ltq_mtd->map->virt)
215 iounmap(ltq_mtd->map->virt);
198 kfree(ltq_mtd->map); 216 kfree(ltq_mtd->map);
199 kfree(ltq_mtd); 217 kfree(ltq_mtd);
200 } 218 }
201 return 0; 219 return 0;
202} 220}
203 221
204static const struct of_device_id ltq_mtd_match[] = {
205 { .compatible = "lantiq,nor" },
206 {},
207};
208MODULE_DEVICE_TABLE(of, ltq_mtd_match);
209
210static struct platform_driver ltq_mtd_driver = { 222static struct platform_driver ltq_mtd_driver = {
211 .probe = ltq_mtd_probe, 223 .remove = __devexit_p(ltq_mtd_remove),
212 .remove = ltq_mtd_remove,
213 .driver = { 224 .driver = {
214 .name = "ltq-nor", 225 .name = "ltq_nor",
215 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
216 .of_match_table = ltq_mtd_match,
217 }, 227 },
218}; 228};
219 229
220module_platform_driver(ltq_mtd_driver); 230static int __init
231init_ltq_mtd(void)
232{
233 int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
234
235 if (ret)
236 pr_err("ltq_nor: error registering platform driver");
237 return ret;
238}
239
240static void __exit
241exit_ltq_mtd(void)
242{
243 platform_driver_unregister(&ltq_mtd_driver);
244}
245
246module_init(init_ltq_mtd);
247module_exit(exit_ltq_mtd);
221 248
222MODULE_LICENSE("GPL"); 249MODULE_LICENSE("GPL");
223MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); 250MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index ab0fead56b8..5936c466e90 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -33,6 +33,9 @@ struct latch_addr_flash_info {
33 /* cache; could be found out of res */ 33 /* cache; could be found out of res */
34 unsigned long win_mask; 34 unsigned long win_mask;
35 35
36 int nr_parts;
37 struct mtd_partition *parts;
38
36 spinlock_t lock; 39 spinlock_t lock;
37}; 40};
38 41
@@ -94,6 +97,8 @@ static void lf_copy_from(struct map_info *map, void *to,
94 97
95static char *rom_probe_types[] = { "cfi_probe", NULL }; 98static char *rom_probe_types[] = { "cfi_probe", NULL };
96 99
100static char *part_probe_types[] = { "cmdlinepart", NULL };
101
97static int latch_addr_flash_remove(struct platform_device *dev) 102static int latch_addr_flash_remove(struct platform_device *dev)
98{ 103{
99 struct latch_addr_flash_info *info; 104 struct latch_addr_flash_info *info;
@@ -107,6 +112,8 @@ static int latch_addr_flash_remove(struct platform_device *dev)
107 latch_addr_data = dev->dev.platform_data; 112 latch_addr_data = dev->dev.platform_data;
108 113
109 if (info->mtd != NULL) { 114 if (info->mtd != NULL) {
115 if (info->nr_parts)
116 kfree(info->parts);
110 mtd_device_unregister(info->mtd); 117 mtd_device_unregister(info->mtd);
111 map_destroy(info->mtd); 118 map_destroy(info->mtd);
112 } 119 }
@@ -125,7 +132,7 @@ static int latch_addr_flash_remove(struct platform_device *dev)
125 return 0; 132 return 0;
126} 133}
127 134
128static int latch_addr_flash_probe(struct platform_device *dev) 135static int __devinit latch_addr_flash_probe(struct platform_device *dev)
129{ 136{
130 struct latch_addr_flash_data *latch_addr_data; 137 struct latch_addr_flash_data *latch_addr_data;
131 struct latch_addr_flash_info *info; 138 struct latch_addr_flash_info *info;
@@ -199,9 +206,21 @@ static int latch_addr_flash_probe(struct platform_device *dev)
199 } 206 }
200 info->mtd->owner = THIS_MODULE; 207 info->mtd->owner = THIS_MODULE;
201 208
202 mtd_device_parse_register(info->mtd, NULL, NULL, 209 err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
203 latch_addr_data->parts, 210 &info->parts, 0);
204 latch_addr_data->nr_parts); 211 if (err > 0) {
212 mtd_device_register(info->mtd, info->parts, err);
213 return 0;
214 }
215 if (latch_addr_data->nr_parts) {
216 pr_notice("Using latch-addr-flash partition information\n");
217 mtd_device_register(info->mtd,
218 latch_addr_data->parts,
219 latch_addr_data->nr_parts);
220 return 0;
221 }
222
223 mtd_device_register(info->mtd, NULL, 0);
205 return 0; 224 return 0;
206 225
207iounmap: 226iounmap:
@@ -218,13 +237,23 @@ done:
218 237
219static struct platform_driver latch_addr_flash_driver = { 238static struct platform_driver latch_addr_flash_driver = {
220 .probe = latch_addr_flash_probe, 239 .probe = latch_addr_flash_probe,
221 .remove = latch_addr_flash_remove, 240 .remove = __devexit_p(latch_addr_flash_remove),
222 .driver = { 241 .driver = {
223 .name = DRIVER_NAME, 242 .name = DRIVER_NAME,
224 }, 243 },
225}; 244};
226 245
227module_platform_driver(latch_addr_flash_driver); 246static int __init latch_addr_flash_init(void)
247{
248 return platform_driver_register(&latch_addr_flash_driver);
249}
250module_init(latch_addr_flash_init);
251
252static void __exit latch_addr_flash_exit(void)
253{
254 platform_driver_unregister(&latch_addr_flash_driver);
255}
256module_exit(latch_addr_flash_exit);
228 257
229MODULE_AUTHOR("David Griego <dgriego@mvista.com>"); 258MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
230MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper " 259MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index c3aebd5da5d..1d005a3e9b4 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -43,14 +43,26 @@ static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
43 struct map_pci_info *map = (struct map_pci_info *)_map; 43 struct map_pci_info *map = (struct map_pci_info *)_map;
44 map_word val; 44 map_word val;
45 val.x[0]= readb(map->base + map->translate(map, ofs)); 45 val.x[0]= readb(map->base + map->translate(map, ofs));
46// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
46 return val; 47 return val;
47} 48}
48 49
50#if 0
51static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
52{
53 struct map_pci_info *map = (struct map_pci_info *)_map;
54 map_word val;
55 val.x[0] = readw(map->base + map->translate(map, ofs));
56// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
57 return val;
58}
59#endif
49static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs) 60static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
50{ 61{
51 struct map_pci_info *map = (struct map_pci_info *)_map; 62 struct map_pci_info *map = (struct map_pci_info *)_map;
52 map_word val; 63 map_word val;
53 val.x[0] = readl(map->base + map->translate(map, ofs)); 64 val.x[0] = readl(map->base + map->translate(map, ofs));
65// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
54 return val; 66 return val;
55} 67}
56 68
@@ -63,12 +75,22 @@ static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from
63static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs) 75static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
64{ 76{
65 struct map_pci_info *map = (struct map_pci_info *)_map; 77 struct map_pci_info *map = (struct map_pci_info *)_map;
78// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
66 writeb(val.x[0], map->base + map->translate(map, ofs)); 79 writeb(val.x[0], map->base + map->translate(map, ofs));
67} 80}
68 81
82#if 0
83static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
84{
85 struct map_pci_info *map = (struct map_pci_info *)_map;
86// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
87 writew(val.x[0], map->base + map->translate(map, ofs));
88}
89#endif
69static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs) 90static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
70{ 91{
71 struct map_pci_info *map = (struct map_pci_info *)_map; 92 struct map_pci_info *map = (struct map_pci_info *)_map;
93// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
72 writel(val.x[0], map->base + map->translate(map, ofs)); 94 writel(val.x[0], map->base + map->translate(map, ofs));
73} 95}
74 96
@@ -253,7 +275,8 @@ static struct pci_device_id mtd_pci_ids[] = {
253 * Generic code follows. 275 * Generic code follows.
254 */ 276 */
255 277
256static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 278static int __devinit
279mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
257{ 280{
258 struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data; 281 struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
259 struct map_pci_info *map = NULL; 282 struct map_pci_info *map = NULL;
@@ -307,7 +330,8 @@ out:
307 return err; 330 return err;
308} 331}
309 332
310static void mtd_pci_remove(struct pci_dev *dev) 333static void __devexit
334mtd_pci_remove(struct pci_dev *dev)
311{ 335{
312 struct mtd_info *mtd = pci_get_drvdata(dev); 336 struct mtd_info *mtd = pci_get_drvdata(dev);
313 struct map_pci_info *map = mtd->priv; 337 struct map_pci_info *map = mtd->priv;
@@ -324,13 +348,25 @@ static void mtd_pci_remove(struct pci_dev *dev)
324static struct pci_driver mtd_pci_driver = { 348static struct pci_driver mtd_pci_driver = {
325 .name = "MTD PCI", 349 .name = "MTD PCI",
326 .probe = mtd_pci_probe, 350 .probe = mtd_pci_probe,
327 .remove = mtd_pci_remove, 351 .remove = __devexit_p(mtd_pci_remove),
328 .id_table = mtd_pci_ids, 352 .id_table = mtd_pci_ids,
329}; 353};
330 354
331module_pci_driver(mtd_pci_driver); 355static int __init mtd_pci_maps_init(void)
356{
357 return pci_register_driver(&mtd_pci_driver);
358}
359
360static void __exit mtd_pci_maps_exit(void)
361{
362 pci_unregister_driver(&mtd_pci_driver);
363}
364
365module_init(mtd_pci_maps_init);
366module_exit(mtd_pci_maps_exit);
332 367
333MODULE_LICENSE("GPL"); 368MODULE_LICENSE("GPL");
334MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 369MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
335MODULE_DESCRIPTION("Generic PCI map driver"); 370MODULE_DESCRIPTION("Generic PCI map driver");
336MODULE_DEVICE_TABLE(pci, mtd_pci_ids); 371MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
372
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index a3cfad392ed..bbe168b65c2 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <asm/io.h> 16#include <asm/io.h>
17#include <asm/system.h>
17 18
18#include <pcmcia/cistpl.h> 19#include <pcmcia/cistpl.h>
19#include <pcmcia/ds.h> 20#include <pcmcia/ds.h>
@@ -21,6 +22,22 @@
21#include <linux/mtd/map.h> 22#include <linux/mtd/map.h>
22#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
23 24
25#ifdef CONFIG_MTD_DEBUG
26static int debug = CONFIG_MTD_DEBUG_VERBOSE;
27module_param(debug, int, 0);
28MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
29#undef DEBUG
30#define DEBUG(n, format, arg...) \
31 if (n <= debug) { \
32 printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
33 }
34
35#else
36#undef DEBUG
37#define DEBUG(n, arg...)
38static const int debug = 0;
39#endif
40
24#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) 41#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
25 42
26#define DRIVER_DESC "PCMCIA Flash memory card driver" 43#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -88,13 +105,13 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
88 int ret; 105 int ret;
89 106
90 if (!pcmcia_dev_present(dev->p_dev)) { 107 if (!pcmcia_dev_present(dev->p_dev)) {
91 pr_debug("device removed\n"); 108 DEBUG(1, "device removed");
92 return 0; 109 return 0;
93 } 110 }
94 111
95 offset = to & ~(dev->win_size-1); 112 offset = to & ~(dev->win_size-1);
96 if (offset != dev->offset) { 113 if (offset != dev->offset) {
97 pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n", 114 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
98 dev->offset, offset); 115 dev->offset, offset);
99 ret = pcmcia_map_mem_page(dev->p_dev, win, offset); 116 ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
100 if (ret != 0) 117 if (ret != 0)
@@ -115,7 +132,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
115 return d; 132 return d;
116 133
117 d.x[0] = readb(addr); 134 d.x[0] = readb(addr);
118 pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]); 135 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
119 return d; 136 return d;
120} 137}
121 138
@@ -130,7 +147,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
130 return d; 147 return d;
131 148
132 d.x[0] = readw(addr); 149 d.x[0] = readw(addr);
133 pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]); 150 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
134 return d; 151 return d;
135} 152}
136 153
@@ -140,7 +157,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
140 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 157 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
141 unsigned long win_size = dev->win_size; 158 unsigned long win_size = dev->win_size;
142 159
143 pr_debug("to = %p from = %lu len = %zd\n", to, from, len); 160 DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
144 while(len) { 161 while(len) {
145 int toread = win_size - (from & (win_size-1)); 162 int toread = win_size - (from & (win_size-1));
146 caddr_t addr; 163 caddr_t addr;
@@ -152,7 +169,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
152 if(!addr) 169 if(!addr)
153 return; 170 return;
154 171
155 pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread); 172 DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
156 memcpy_fromio(to, addr, toread); 173 memcpy_fromio(to, addr, toread);
157 len -= toread; 174 len -= toread;
158 to += toread; 175 to += toread;
@@ -168,7 +185,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
168 if(!addr) 185 if(!addr)
169 return; 186 return;
170 187
171 pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]); 188 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
172 writeb(d.x[0], addr); 189 writeb(d.x[0], addr);
173} 190}
174 191
@@ -179,7 +196,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
179 if(!addr) 196 if(!addr)
180 return; 197 return;
181 198
182 pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]); 199 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
183 writew(d.x[0], addr); 200 writew(d.x[0], addr);
184} 201}
185 202
@@ -189,7 +206,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
189 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 206 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
190 unsigned long win_size = dev->win_size; 207 unsigned long win_size = dev->win_size;
191 208
192 pr_debug("to = %lu from = %p len = %zd\n", to, from, len); 209 DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
193 while(len) { 210 while(len) {
194 int towrite = win_size - (to & (win_size-1)); 211 int towrite = win_size - (to & (win_size-1));
195 caddr_t addr; 212 caddr_t addr;
@@ -201,7 +218,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
201 if(!addr) 218 if(!addr)
202 return; 219 return;
203 220
204 pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite); 221 DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
205 memcpy_toio(addr, from, towrite); 222 memcpy_toio(addr, from, towrite);
206 len -= towrite; 223 len -= towrite;
207 to += towrite; 224 to += towrite;
@@ -223,7 +240,7 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
223 return d; 240 return d;
224 241
225 d.x[0] = readb(win_base + ofs); 242 d.x[0] = readb(win_base + ofs);
226 pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", 243 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
227 ofs, win_base + ofs, d.x[0]); 244 ofs, win_base + ofs, d.x[0]);
228 return d; 245 return d;
229} 246}
@@ -238,7 +255,7 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
238 return d; 255 return d;
239 256
240 d.x[0] = readw(win_base + ofs); 257 d.x[0] = readw(win_base + ofs);
241 pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", 258 DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
242 ofs, win_base + ofs, d.x[0]); 259 ofs, win_base + ofs, d.x[0]);
243 return d; 260 return d;
244} 261}
@@ -251,7 +268,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
251 if(DEV_REMOVED(map)) 268 if(DEV_REMOVED(map))
252 return; 269 return;
253 270
254 pr_debug("to = %p from = %lu len = %zd\n", to, from, len); 271 DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
255 memcpy_fromio(to, win_base + from, len); 272 memcpy_fromio(to, win_base + from, len);
256} 273}
257 274
@@ -263,7 +280,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
263 if(DEV_REMOVED(map)) 280 if(DEV_REMOVED(map))
264 return; 281 return;
265 282
266 pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", 283 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
267 adr, win_base + adr, d.x[0]); 284 adr, win_base + adr, d.x[0]);
268 writeb(d.x[0], win_base + adr); 285 writeb(d.x[0], win_base + adr);
269} 286}
@@ -276,7 +293,7 @@ static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
276 if(DEV_REMOVED(map)) 293 if(DEV_REMOVED(map))
277 return; 294 return;
278 295
279 pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", 296 DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
280 adr, win_base + adr, d.x[0]); 297 adr, win_base + adr, d.x[0]);
281 writew(d.x[0], win_base + adr); 298 writew(d.x[0], win_base + adr);
282} 299}
@@ -289,29 +306,18 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
289 if(DEV_REMOVED(map)) 306 if(DEV_REMOVED(map))
290 return; 307 return;
291 308
292 pr_debug("to = %lu from = %p len = %zd\n", to, from, len); 309 DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
293 memcpy_toio(win_base + to, from, len); 310 memcpy_toio(win_base + to, from, len);
294} 311}
295 312
296 313
297static DEFINE_SPINLOCK(pcmcia_vpp_lock);
298static int pcmcia_vpp_refcnt;
299static void pcmciamtd_set_vpp(struct map_info *map, int on) 314static void pcmciamtd_set_vpp(struct map_info *map, int on)
300{ 315{
301 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 316 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
302 struct pcmcia_device *link = dev->p_dev; 317 struct pcmcia_device *link = dev->p_dev;
303 unsigned long flags;
304 318
305 pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); 319 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
306 spin_lock_irqsave(&pcmcia_vpp_lock, flags); 320 pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
307 if (on) {
308 if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
309 pcmcia_fixup_vpp(link, dev->vpp);
310 } else {
311 if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
312 pcmcia_fixup_vpp(link, 0);
313 }
314 spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
315} 321}
316 322
317 323
@@ -319,7 +325,7 @@ static void pcmciamtd_release(struct pcmcia_device *link)
319{ 325{
320 struct pcmciamtd_dev *dev = link->priv; 326 struct pcmciamtd_dev *dev = link->priv;
321 327
322 pr_debug("link = 0x%p\n", link); 328 DEBUG(3, "link = 0x%p", link);
323 329
324 if (link->resource[2]->end) { 330 if (link->resource[2]->end) {
325 if(dev->win_base) { 331 if(dev->win_base) {
@@ -331,6 +337,7 @@ static void pcmciamtd_release(struct pcmcia_device *link)
331} 337}
332 338
333 339
340#ifdef CONFIG_MTD_DEBUG
334static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, 341static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
335 tuple_t *tuple, 342 tuple_t *tuple,
336 void *priv_data) 343 void *priv_data)
@@ -340,7 +347,7 @@ static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
340 if (!pcmcia_parse_tuple(tuple, &parse)) { 347 if (!pcmcia_parse_tuple(tuple, &parse)) {
341 cistpl_format_t *t = &parse.format; 348 cistpl_format_t *t = &parse.format;
342 (void)t; /* Shut up, gcc */ 349 (void)t; /* Shut up, gcc */
343 pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n", 350 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
344 t->type, t->edc, t->offset, t->length); 351 t->type, t->edc, t->offset, t->length);
345 } 352 }
346 return -ENOSPC; 353 return -ENOSPC;
@@ -356,11 +363,12 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
356 if (!pcmcia_parse_tuple(tuple, &parse)) { 363 if (!pcmcia_parse_tuple(tuple, &parse)) {
357 cistpl_jedec_t *t = &parse.jedec; 364 cistpl_jedec_t *t = &parse.jedec;
358 for (i = 0; i < t->nid; i++) 365 for (i = 0; i < t->nid; i++)
359 pr_debug("JEDEC: 0x%02x 0x%02x\n", 366 DEBUG(2, "JEDEC: 0x%02x 0x%02x",
360 t->id[i].mfr, t->id[i].info); 367 t->id[i].mfr, t->id[i].info);
361 } 368 }
362 return -ENOSPC; 369 return -ENOSPC;
363} 370}
371#endif
364 372
365static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, 373static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
366 tuple_t *tuple, 374 tuple_t *tuple,
@@ -374,14 +382,14 @@ static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
374 if (pcmcia_parse_tuple(tuple, &parse)) 382 if (pcmcia_parse_tuple(tuple, &parse))
375 return -EINVAL; 383 return -EINVAL;
376 384
377 pr_debug("Common memory:\n"); 385 DEBUG(2, "Common memory:");
378 dev->pcmcia_map.size = t->dev[0].size; 386 dev->pcmcia_map.size = t->dev[0].size;
379 /* from here on: DEBUG only */ 387 /* from here on: DEBUG only */
380 for (i = 0; i < t->ndev; i++) { 388 for (i = 0; i < t->ndev; i++) {
381 pr_debug("Region %d, type = %u\n", i, t->dev[i].type); 389 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
382 pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp); 390 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
383 pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed); 391 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
384 pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size); 392 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
385 } 393 }
386 return 0; 394 return 0;
387} 395}
@@ -401,12 +409,12 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
401 dev->pcmcia_map.bankwidth = t->geo[0].buswidth; 409 dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
402 /* from here on: DEBUG only */ 410 /* from here on: DEBUG only */
403 for (i = 0; i < t->ngeo; i++) { 411 for (i = 0; i < t->ngeo; i++) {
404 pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth); 412 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
405 pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block); 413 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
406 pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block); 414 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
407 pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block); 415 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
408 pr_debug("region: %d partition = %u\n", i, t->geo[i].partition); 416 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
409 pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave); 417 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
410 } 418 }
411 return 0; 419 return 0;
412} 420}
@@ -424,11 +432,13 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
424 if (p_dev->prod_id[i]) 432 if (p_dev->prod_id[i])
425 strcat(dev->mtd_name, p_dev->prod_id[i]); 433 strcat(dev->mtd_name, p_dev->prod_id[i]);
426 } 434 }
427 pr_debug("Found name: %s\n", dev->mtd_name); 435 DEBUG(2, "Found name: %s", dev->mtd_name);
428 } 436 }
429 437
438#ifdef CONFIG_MTD_DEBUG
430 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL); 439 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
431 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL); 440 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
441#endif
432 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev); 442 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
433 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev); 443 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
434 444
@@ -440,12 +450,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
440 450
441 if(force_size) { 451 if(force_size) {
442 dev->pcmcia_map.size = force_size << 20; 452 dev->pcmcia_map.size = force_size << 20;
443 pr_debug("size forced to %dM\n", force_size); 453 DEBUG(2, "size forced to %dM", force_size);
444 } 454 }
445 455
446 if(bankwidth) { 456 if(bankwidth) {
447 dev->pcmcia_map.bankwidth = bankwidth; 457 dev->pcmcia_map.bankwidth = bankwidth;
448 pr_debug("bankwidth forced to %d\n", bankwidth); 458 DEBUG(2, "bankwidth forced to %d", bankwidth);
449 } 459 }
450 460
451 dev->pcmcia_map.name = dev->mtd_name; 461 dev->pcmcia_map.name = dev->mtd_name;
@@ -454,7 +464,7 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev
454 *new_name = 1; 464 *new_name = 1;
455 } 465 }
456 466
457 pr_debug("Device: Size: %lu Width:%d Name: %s\n", 467 DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
458 dev->pcmcia_map.size, 468 dev->pcmcia_map.size,
459 dev->pcmcia_map.bankwidth << 3, dev->mtd_name); 469 dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
460} 470}
@@ -469,7 +479,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
469 static char *probes[] = { "jedec_probe", "cfi_probe" }; 479 static char *probes[] = { "jedec_probe", "cfi_probe" };
470 int new_name = 0; 480 int new_name = 0;
471 481
472 pr_debug("link=0x%p\n", link); 482 DEBUG(3, "link=0x%p", link);
473 483
474 card_settings(dev, link, &new_name); 484 card_settings(dev, link, &new_name);
475 485
@@ -502,11 +512,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
502 512
503 do { 513 do {
504 int ret; 514 int ret;
505 pr_debug("requesting window with size = %luKiB memspeed = %d\n", 515 DEBUG(2, "requesting window with size = %luKiB memspeed = %d",
506 (unsigned long) resource_size(link->resource[2]) >> 10, 516 (unsigned long) resource_size(link->resource[2]) >> 10,
507 mem_speed); 517 mem_speed);
508 ret = pcmcia_request_window(link, link->resource[2], mem_speed); 518 ret = pcmcia_request_window(link, link->resource[2], mem_speed);
509 pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size); 519 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
510 if(ret) { 520 if(ret) {
511 j++; 521 j++;
512 link->resource[2]->start = 0; 522 link->resource[2]->start = 0;
@@ -514,21 +524,21 @@ static int pcmciamtd_config(struct pcmcia_device *link)
514 force_size << 20 : MAX_PCMCIA_ADDR; 524 force_size << 20 : MAX_PCMCIA_ADDR;
515 link->resource[2]->end >>= j; 525 link->resource[2]->end >>= j;
516 } else { 526 } else {
517 pr_debug("Got window of size %luKiB\n", (unsigned long) 527 DEBUG(2, "Got window of size %luKiB", (unsigned long)
518 resource_size(link->resource[2]) >> 10); 528 resource_size(link->resource[2]) >> 10);
519 dev->win_size = resource_size(link->resource[2]); 529 dev->win_size = resource_size(link->resource[2]);
520 break; 530 break;
521 } 531 }
522 } while (link->resource[2]->end >= 0x1000); 532 } while (link->resource[2]->end >= 0x1000);
523 533
524 pr_debug("dev->win_size = %d\n", dev->win_size); 534 DEBUG(2, "dev->win_size = %d", dev->win_size);
525 535
526 if(!dev->win_size) { 536 if(!dev->win_size) {
527 dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); 537 dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
528 pcmciamtd_release(link); 538 pcmciamtd_release(link);
529 return -ENODEV; 539 return -ENODEV;
530 } 540 }
531 pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10); 541 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
532 542
533 /* Get write protect status */ 543 /* Get write protect status */
534 dev->win_base = ioremap(link->resource[2]->start, 544 dev->win_base = ioremap(link->resource[2]->start,
@@ -539,7 +549,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
539 pcmciamtd_release(link); 549 pcmciamtd_release(link);
540 return -ENODEV; 550 return -ENODEV;
541 } 551 }
542 pr_debug("mapped window dev = %p @ %pR, base = %p\n", 552 DEBUG(1, "mapped window dev = %p @ %pR, base = %p",
543 dev, link->resource[2], dev->win_base); 553 dev, link->resource[2], dev->win_base);
544 554
545 dev->offset = 0; 555 dev->offset = 0;
@@ -554,7 +564,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
554 } 564 }
555 565
556 link->config_index = 0; 566 link->config_index = 0;
557 pr_debug("Setting Configuration\n"); 567 DEBUG(2, "Setting Configuration");
558 ret = pcmcia_enable_device(link); 568 ret = pcmcia_enable_device(link);
559 if (ret != 0) { 569 if (ret != 0) {
560 if (dev->win_base) { 570 if (dev->win_base) {
@@ -570,17 +580,17 @@ static int pcmciamtd_config(struct pcmcia_device *link)
570 mtd = do_map_probe("map_rom", &dev->pcmcia_map); 580 mtd = do_map_probe("map_rom", &dev->pcmcia_map);
571 } else { 581 } else {
572 for(i = 0; i < ARRAY_SIZE(probes); i++) { 582 for(i = 0; i < ARRAY_SIZE(probes); i++) {
573 pr_debug("Trying %s\n", probes[i]); 583 DEBUG(1, "Trying %s", probes[i]);
574 mtd = do_map_probe(probes[i], &dev->pcmcia_map); 584 mtd = do_map_probe(probes[i], &dev->pcmcia_map);
575 if(mtd) 585 if(mtd)
576 break; 586 break;
577 587
578 pr_debug("FAILED: %s\n", probes[i]); 588 DEBUG(1, "FAILED: %s", probes[i]);
579 } 589 }
580 } 590 }
581 591
582 if(!mtd) { 592 if(!mtd) {
583 pr_debug("Can not find an MTD\n"); 593 DEBUG(1, "Can not find an MTD");
584 pcmciamtd_release(link); 594 pcmciamtd_release(link);
585 return -ENODEV; 595 return -ENODEV;
586 } 596 }
@@ -607,7 +617,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
607 /* If the memory found is fits completely into the mapped PCMCIA window, 617 /* If the memory found is fits completely into the mapped PCMCIA window,
608 use the faster non-remapping read/write functions */ 618 use the faster non-remapping read/write functions */
609 if(mtd->size <= dev->win_size) { 619 if(mtd->size <= dev->win_size) {
610 pr_debug("Using non remapping memory functions\n"); 620 DEBUG(1, "Using non remapping memory functions");
611 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base; 621 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
612 if (dev->pcmcia_map.bankwidth == 1) { 622 if (dev->pcmcia_map.bankwidth == 1) {
613 dev->pcmcia_map.read = pcmcia_read8; 623 dev->pcmcia_map.read = pcmcia_read8;
@@ -635,7 +645,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
635 645
636static int pcmciamtd_suspend(struct pcmcia_device *dev) 646static int pcmciamtd_suspend(struct pcmcia_device *dev)
637{ 647{
638 pr_debug("EVENT_PM_RESUME\n"); 648 DEBUG(2, "EVENT_PM_RESUME");
639 649
640 /* get_lock(link); */ 650 /* get_lock(link); */
641 651
@@ -644,7 +654,7 @@ static int pcmciamtd_suspend(struct pcmcia_device *dev)
644 654
645static int pcmciamtd_resume(struct pcmcia_device *dev) 655static int pcmciamtd_resume(struct pcmcia_device *dev)
646{ 656{
647 pr_debug("EVENT_PM_SUSPEND\n"); 657 DEBUG(2, "EVENT_PM_SUSPEND");
648 658
649 /* free_lock(link); */ 659 /* free_lock(link); */
650 660
@@ -656,7 +666,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
656{ 666{
657 struct pcmciamtd_dev *dev = link->priv; 667 struct pcmciamtd_dev *dev = link->priv;
658 668
659 pr_debug("link=0x%p\n", link); 669 DEBUG(3, "link=0x%p", link);
660 670
661 if(dev->mtd_info) { 671 if(dev->mtd_info) {
662 mtd_device_unregister(dev->mtd_info); 672 mtd_device_unregister(dev->mtd_info);
@@ -676,7 +686,7 @@ static int pcmciamtd_probe(struct pcmcia_device *link)
676 /* Create new memory card device */ 686 /* Create new memory card device */
677 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 687 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
678 if (!dev) return -ENOMEM; 688 if (!dev) return -ENOMEM;
679 pr_debug("dev=0x%p\n", dev); 689 DEBUG(1, "dev=0x%p", dev);
680 690
681 dev->p_dev = link; 691 dev->p_dev = link;
682 link->priv = dev; 692 link->priv = dev;
@@ -745,7 +755,7 @@ static int __init init_pcmciamtd(void)
745 755
746static void __exit exit_pcmciamtd(void) 756static void __exit exit_pcmciamtd(void)
747{ 757{
748 pr_debug(DRIVER_DESC " unloading"); 758 DEBUG(1, DRIVER_DESC " unloading");
749 pcmcia_unregister_driver(&pcmciamtd_driver); 759 pcmcia_unregister_driver(&pcmciamtd_driver);
750} 760}
751 761
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 21b0b713cac..f64cee4a3bf 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,8 +27,8 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30 spinlock_t vpp_lock; 30 int nr_parts;
31 int vpp_refcnt; 31 struct mtd_partition *parts;
32}; 32};
33 33
34static int physmap_flash_remove(struct platform_device *dev) 34static int physmap_flash_remove(struct platform_device *dev)
@@ -46,6 +46,8 @@ static int physmap_flash_remove(struct platform_device *dev)
46 46
47 if (info->cmtd) { 47 if (info->cmtd) {
48 mtd_device_unregister(info->cmtd); 48 mtd_device_unregister(info->cmtd);
49 if (info->nr_parts)
50 kfree(info->parts);
49 if (info->cmtd != info->mtd[0]) 51 if (info->cmtd != info->mtd[0])
50 mtd_concat_destroy(info->cmtd); 52 mtd_concat_destroy(info->cmtd);
51 } 53 }
@@ -65,26 +67,12 @@ static void physmap_set_vpp(struct map_info *map, int state)
65{ 67{
66 struct platform_device *pdev; 68 struct platform_device *pdev;
67 struct physmap_flash_data *physmap_data; 69 struct physmap_flash_data *physmap_data;
68 struct physmap_flash_info *info;
69 unsigned long flags;
70 70
71 pdev = (struct platform_device *)map->map_priv_1; 71 pdev = (struct platform_device *)map->map_priv_1;
72 physmap_data = pdev->dev.platform_data; 72 physmap_data = pdev->dev.platform_data;
73 73
74 if (!physmap_data->set_vpp) 74 if (physmap_data->set_vpp)
75 return; 75 physmap_data->set_vpp(pdev, state);
76
77 info = platform_get_drvdata(pdev);
78
79 spin_lock_irqsave(&info->vpp_lock, flags);
80 if (state) {
81 if (++info->vpp_refcnt == 1) /* first nested 'on' */
82 physmap_data->set_vpp(pdev, 1);
83 } else {
84 if (--info->vpp_refcnt == 0) /* last nested 'off' */
85 physmap_data->set_vpp(pdev, 0);
86 }
87 spin_unlock_irqrestore(&info->vpp_lock, flags);
88} 76}
89 77
90static const char *rom_probe_types[] = { 78static const char *rom_probe_types[] = {
@@ -101,7 +89,6 @@ static int physmap_flash_probe(struct platform_device *dev)
101 struct physmap_flash_data *physmap_data; 89 struct physmap_flash_data *physmap_data;
102 struct physmap_flash_info *info; 90 struct physmap_flash_info *info;
103 const char **probe_type; 91 const char **probe_type;
104 const char **part_types;
105 int err = 0; 92 int err = 0;
106 int i; 93 int i;
107 int devices_found = 0; 94 int devices_found = 0;
@@ -188,12 +175,23 @@ static int physmap_flash_probe(struct platform_device *dev)
188 if (err) 175 if (err)
189 goto err_out; 176 goto err_out;
190 177
191 spin_lock_init(&info->vpp_lock); 178 err = parse_mtd_partitions(info->cmtd, part_probe_types,
179 &info->parts, 0);
180 if (err > 0) {
181 mtd_device_register(info->cmtd, info->parts, err);
182 info->nr_parts = err;
183 return 0;
184 }
192 185
193 part_types = physmap_data->part_probe_types ? : part_probe_types; 186 if (physmap_data->nr_parts) {
187 printk(KERN_NOTICE "Using physmap partition information\n");
188 mtd_device_register(info->cmtd, physmap_data->parts,
189 physmap_data->nr_parts);
190 return 0;
191 }
192
193 mtd_device_register(info->cmtd, NULL, 0);
194 194
195 mtd_device_parse_register(info->cmtd, part_types, NULL,
196 physmap_data->parts, physmap_data->nr_parts);
197 return 0; 195 return 0;
198 196
199err_out: 197err_out:
@@ -208,8 +206,9 @@ static void physmap_flash_shutdown(struct platform_device *dev)
208 int i; 206 int i;
209 207
210 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) 208 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
211 if (mtd_suspend(info->mtd[i]) == 0) 209 if (info->mtd[i]->suspend && info->mtd[i]->resume)
212 mtd_resume(info->mtd[i]); 210 if (info->mtd[i]->suspend(info->mtd[i]) == 0)
211 info->mtd[i]->resume(info->mtd[i]);
213} 212}
214#else 213#else
215#define physmap_flash_shutdown NULL 214#define physmap_flash_shutdown NULL
@@ -246,6 +245,21 @@ static struct platform_device physmap_flash = {
246 .num_resources = 1, 245 .num_resources = 1,
247 .resource = &physmap_flash_resource, 246 .resource = &physmap_flash_resource,
248}; 247};
248
249void physmap_configure(unsigned long addr, unsigned long size,
250 int bankwidth, void (*set_vpp)(struct map_info *, int))
251{
252 physmap_flash_resource.start = addr;
253 physmap_flash_resource.end = addr + size - 1;
254 physmap_flash_data.width = bankwidth;
255 physmap_flash_data.set_vpp = set_vpp;
256}
257
258void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
259{
260 physmap_flash_data.nr_parts = num_parts;
261 physmap_flash_data.parts = parts;
262}
249#endif 263#endif
250 264
251static int __init physmap_init(void) 265static int __init physmap_init(void)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 67cc73c18dd..d251d1db129 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,10 +34,58 @@ struct of_flash_list {
34 34
35struct of_flash { 35struct of_flash {
36 struct mtd_info *cmtd; 36 struct mtd_info *cmtd;
37 struct mtd_partition *parts;
37 int list_size; /* number of elements in of_flash_list */ 38 int list_size; /* number of elements in of_flash_list */
38 struct of_flash_list list[0]; 39 struct of_flash_list list[0];
39}; 40};
40 41
42#define OF_FLASH_PARTS(info) ((info)->parts)
43static int parse_obsolete_partitions(struct platform_device *dev,
44 struct of_flash *info,
45 struct device_node *dp)
46{
47 int i, plen, nr_parts;
48 const struct {
49 __be32 offset, len;
50 } *part;
51 const char *names;
52
53 part = of_get_property(dp, "partitions", &plen);
54 if (!part)
55 return 0; /* No partitions found */
56
57 dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n");
58
59 nr_parts = plen / sizeof(part[0]);
60
61 info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL);
62 if (!info->parts)
63 return -ENOMEM;
64
65 names = of_get_property(dp, "partition-names", &plen);
66
67 for (i = 0; i < nr_parts; i++) {
68 info->parts[i].offset = be32_to_cpu(part->offset);
69 info->parts[i].size = be32_to_cpu(part->len) & ~1;
70 if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
71 info->parts[i].mask_flags = MTD_WRITEABLE;
72
73 if (names && (plen > 0)) {
74 int len = strlen(names) + 1;
75
76 info->parts[i].name = (char *)names;
77 plen -= len;
78 names += len;
79 } else {
80 info->parts[i].name = "unnamed";
81 }
82
83 part++;
84 }
85
86 return nr_parts;
87}
88
41static int of_flash_remove(struct platform_device *dev) 89static int of_flash_remove(struct platform_device *dev)
42{ 90{
43 struct of_flash *info; 91 struct of_flash *info;
@@ -53,8 +101,11 @@ static int of_flash_remove(struct platform_device *dev)
53 mtd_concat_destroy(info->cmtd); 101 mtd_concat_destroy(info->cmtd);
54 } 102 }
55 103
56 if (info->cmtd) 104 if (info->cmtd) {
105 if (OF_FLASH_PARTS(info))
106 kfree(OF_FLASH_PARTS(info));
57 mtd_device_unregister(info->cmtd); 107 mtd_device_unregister(info->cmtd);
108 }
58 109
59 for (i = 0; i < info->list_size; i++) { 110 for (i = 0; i < info->list_size; i++) {
60 if (info->list[i].mtd) 111 if (info->list[i].mtd)
@@ -77,8 +128,8 @@ static int of_flash_remove(struct platform_device *dev)
77/* Helper function to handle probing of the obsolete "direct-mapped" 128/* Helper function to handle probing of the obsolete "direct-mapped"
78 * compatible binding, which has an extra "probe-type" property 129 * compatible binding, which has an extra "probe-type" property
79 * describing the type of flash probe necessary. */ 130 * describing the type of flash probe necessary. */
80static struct mtd_info *obsolete_probe(struct platform_device *dev, 131static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
81 struct map_info *map) 132 struct map_info *map)
82{ 133{
83 struct device_node *dp = dev->dev.of_node; 134 struct device_node *dp = dev->dev.of_node;
84 const char *of_probe; 135 const char *of_probe;
@@ -114,9 +165,8 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev,
114 specifies the list of partition probers to use. If none is given then the 165 specifies the list of partition probers to use. If none is given then the
115 default is use. These take precedence over other device tree 166 default is use. These take precedence over other device tree
116 information. */ 167 information. */
117static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", 168static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
118 "ofpart", "ofoldpart", NULL }; 169static const char ** __devinit of_get_probes(struct device_node *dp)
119static const char **of_get_probes(struct device_node *dp)
120{ 170{
121 const char *cp; 171 const char *cp;
122 int cplen; 172 int cplen;
@@ -145,14 +195,14 @@ static const char **of_get_probes(struct device_node *dp)
145 return res; 195 return res;
146} 196}
147 197
148static void of_free_probes(const char **probes) 198static void __devinit of_free_probes(const char **probes)
149{ 199{
150 if (probes != part_probe_types_def) 200 if (probes != part_probe_types_def)
151 kfree(probes); 201 kfree(probes);
152} 202}
153 203
154static struct of_device_id of_flash_match[]; 204static struct of_device_id of_flash_match[];
155static int of_flash_probe(struct platform_device *dev) 205static int __devinit of_flash_probe(struct platform_device *dev)
156{ 206{
157 const char **part_probe_types; 207 const char **part_probe_types;
158 const struct of_device_id *match; 208 const struct of_device_id *match;
@@ -168,9 +218,6 @@ static int of_flash_probe(struct platform_device *dev)
168 int reg_tuple_size; 218 int reg_tuple_size;
169 struct mtd_info **mtd_list = NULL; 219 struct mtd_info **mtd_list = NULL;
170 resource_size_t res_size; 220 resource_size_t res_size;
171 struct mtd_part_parser_data ppdata;
172 bool map_indirect;
173 const char *mtd_name;
174 221
175 match = of_match_device(of_flash_match, &dev->dev); 222 match = of_match_device(of_flash_match, &dev->dev);
176 if (!match) 223 if (!match)
@@ -179,8 +226,6 @@ static int of_flash_probe(struct platform_device *dev)
179 226
180 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 227 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
181 228
182 of_property_read_string(dp, "linux,mtd-name", &mtd_name);
183
184 /* 229 /*
185 * Get number of "reg" tuples. Scan for MTD devices on area's 230 * Get number of "reg" tuples. Scan for MTD devices on area's
186 * described by each "reg" region. This makes it possible (including 231 * described by each "reg" region. This makes it possible (including
@@ -196,8 +241,6 @@ static int of_flash_probe(struct platform_device *dev)
196 } 241 }
197 count /= reg_tuple_size; 242 count /= reg_tuple_size;
198 243
199 map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
200
201 err = -ENOMEM; 244 err = -ENOMEM;
202 info = kzalloc(sizeof(struct of_flash) + 245 info = kzalloc(sizeof(struct of_flash) +
203 sizeof(struct of_flash_list) * count, GFP_KERNEL); 246 sizeof(struct of_flash_list) * count, GFP_KERNEL);
@@ -237,7 +280,7 @@ static int of_flash_probe(struct platform_device *dev)
237 goto err_out; 280 goto err_out;
238 } 281 }
239 282
240 info->list[i].map.name = mtd_name ?: dev_name(&dev->dev); 283 info->list[i].map.name = dev_name(&dev->dev);
241 info->list[i].map.phys = res.start; 284 info->list[i].map.phys = res.start;
242 info->list[i].map.size = res_size; 285 info->list[i].map.size = res_size;
243 info->list[i].map.bankwidth = be32_to_cpup(width); 286 info->list[i].map.bankwidth = be32_to_cpup(width);
@@ -253,17 +296,6 @@ static int of_flash_probe(struct platform_device *dev)
253 296
254 simple_map_init(&info->list[i].map); 297 simple_map_init(&info->list[i].map);
255 298
256 /*
257 * On some platforms (e.g. MPC5200) a direct 1:1 mapping
258 * may cause problems with JFFS2 usage, as the local bus (LPB)
259 * doesn't support unaligned accesses as implemented in the
260 * JFFS2 code via memcpy(). By setting NO_XIP, the
261 * flash will not be exposed directly to the MTD users
262 * (e.g. JFFS2) any more.
263 */
264 if (map_indirect)
265 info->list[i].map.phys = NO_XIP;
266
267 if (probe_type) { 299 if (probe_type) {
268 info->list[i].mtd = do_map_probe(probe_type, 300 info->list[i].mtd = do_map_probe(probe_type,
269 &info->list[i].map); 301 &info->list[i].map);
@@ -285,7 +317,6 @@ static int of_flash_probe(struct platform_device *dev)
285 } 317 }
286 318
287 err = 0; 319 err = 0;
288 info->cmtd = NULL;
289 if (info->list_size == 1) { 320 if (info->list_size == 1) {
290 info->cmtd = info->list[0].mtd; 321 info->cmtd = info->list[0].mtd;
291 } else if (info->list_size > 1) { 322 } else if (info->list_size > 1) {
@@ -294,19 +325,35 @@ static int of_flash_probe(struct platform_device *dev)
294 */ 325 */
295 info->cmtd = mtd_concat_create(mtd_list, info->list_size, 326 info->cmtd = mtd_concat_create(mtd_list, info->list_size,
296 dev_name(&dev->dev)); 327 dev_name(&dev->dev));
328 if (info->cmtd == NULL)
329 err = -ENXIO;
297 } 330 }
298 if (info->cmtd == NULL)
299 err = -ENXIO;
300
301 if (err) 331 if (err)
302 goto err_out; 332 goto err_out;
303 333
304 ppdata.of_node = dp;
305 part_probe_types = of_get_probes(dp); 334 part_probe_types = of_get_probes(dp);
306 mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata, 335 err = parse_mtd_partitions(info->cmtd, part_probe_types,
307 NULL, 0); 336 &info->parts, 0);
337 if (err < 0) {
338 of_free_probes(part_probe_types);
339 goto err_out;
340 }
308 of_free_probes(part_probe_types); 341 of_free_probes(part_probe_types);
309 342
343 if (err == 0) {
344 err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
345 if (err < 0)
346 goto err_out;
347 }
348
349 if (err == 0) {
350 err = parse_obsolete_partitions(dev, info, dp);
351 if (err < 0)
352 goto err_out;
353 }
354
355 mtd_device_register(info->cmtd, info->parts, err);
356
310 kfree(mtd_list); 357 kfree(mtd_list);
311 358
312 return 0; 359 return 0;
@@ -357,7 +404,18 @@ static struct platform_driver of_flash_driver = {
357 .remove = of_flash_remove, 404 .remove = of_flash_remove,
358}; 405};
359 406
360module_platform_driver(of_flash_driver); 407static int __init of_flash_init(void)
408{
409 return platform_driver_register(&of_flash_driver);
410}
411
412static void __exit of_flash_exit(void)
413{
414 platform_driver_unregister(&of_flash_driver);
415}
416
417module_init(of_flash_init);
418module_exit(of_flash_exit);
361 419
362MODULE_LICENSE("GPL"); 420MODULE_LICENSE("GPL");
363MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>"); 421MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index dc6df9abea0..65bd1cd4d62 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -58,7 +58,7 @@ static void pismo_set_vpp(struct platform_device *pdev, int on)
58 pismo->vpp(pismo->vpp_data, on); 58 pismo->vpp(pismo->vpp_data, on);
59} 59}
60 60
61static unsigned int pismo_width_to_bytes(unsigned int width) 61static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
62{ 62{
63 width &= 15; 63 width &= 15;
64 if (width > 2) 64 if (width > 2)
@@ -66,8 +66,8 @@ static unsigned int pismo_width_to_bytes(unsigned int width)
66 return 1 << width; 66 return 1 << width;
67} 67}
68 68
69static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr, 69static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
70 size_t size) 70 u8 addr, size_t size)
71{ 71{
72 int ret; 72 int ret;
73 struct i2c_msg msg[] = { 73 struct i2c_msg msg[] = {
@@ -88,9 +88,8 @@ static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
88 return ret == ARRAY_SIZE(msg) ? size : -EIO; 88 return ret == ARRAY_SIZE(msg) ? size : -EIO;
89} 89}
90 90
91static int pismo_add_device(struct pismo_data *pismo, int i, 91static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
92 struct pismo_mem *region, const char *name, 92 struct pismo_mem *region, const char *name, void *pdata, size_t psize)
93 void *pdata, size_t psize)
94{ 93{
95 struct platform_device *dev; 94 struct platform_device *dev;
96 struct resource res = { }; 95 struct resource res = { };
@@ -130,8 +129,8 @@ static int pismo_add_device(struct pismo_data *pismo, int i,
130 return ret; 129 return ret;
131} 130}
132 131
133static int pismo_add_nor(struct pismo_data *pismo, int i, 132static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
134 struct pismo_mem *region) 133 struct pismo_mem *region)
135{ 134{
136 struct physmap_flash_data data = { 135 struct physmap_flash_data data = {
137 .width = region->width, 136 .width = region->width,
@@ -144,8 +143,8 @@ static int pismo_add_nor(struct pismo_data *pismo, int i,
144 &data, sizeof(data)); 143 &data, sizeof(data));
145} 144}
146 145
147static int pismo_add_sram(struct pismo_data *pismo, int i, 146static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
148 struct pismo_mem *region) 147 struct pismo_mem *region)
149{ 148{
150 struct platdata_mtd_ram data = { 149 struct platdata_mtd_ram data = {
151 .bankwidth = region->width, 150 .bankwidth = region->width,
@@ -155,8 +154,8 @@ static int pismo_add_sram(struct pismo_data *pismo, int i,
155 &data, sizeof(data)); 154 &data, sizeof(data));
156} 155}
157 156
158static void pismo_add_one(struct pismo_data *pismo, int i, 157static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
159 const struct pismo_cs_block *cs, phys_addr_t base) 158 const struct pismo_cs_block *cs, phys_addr_t base)
160{ 159{
161 struct device *dev = &pismo->client->dev; 160 struct device *dev = &pismo->client->dev;
162 struct pismo_mem region; 161 struct pismo_mem region;
@@ -198,7 +197,7 @@ static void pismo_add_one(struct pismo_data *pismo, int i,
198 } 197 }
199} 198}
200 199
201static int pismo_remove(struct i2c_client *client) 200static int __devexit pismo_remove(struct i2c_client *client)
202{ 201{
203 struct pismo_data *pismo = i2c_get_clientdata(client); 202 struct pismo_data *pismo = i2c_get_clientdata(client);
204 int i; 203 int i;
@@ -211,8 +210,8 @@ static int pismo_remove(struct i2c_client *client)
211 return 0; 210 return 0;
212} 211}
213 212
214static int pismo_probe(struct i2c_client *client, 213static int __devinit pismo_probe(struct i2c_client *client,
215 const struct i2c_device_id *id) 214 const struct i2c_device_id *id)
216{ 215{
217 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 216 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
218 struct pismo_pdata *pdata = client->dev.platform_data; 217 struct pismo_pdata *pdata = client->dev.platform_data;
@@ -268,7 +267,7 @@ static struct i2c_driver pismo_driver = {
268 .owner = THIS_MODULE, 267 .owner = THIS_MODULE,
269 }, 268 },
270 .probe = pismo_probe, 269 .probe = pismo_probe,
271 .remove = pismo_remove, 270 .remove = __devexit_p(pismo_remove),
272 .id_table = pismo_id, 271 .id_table = pismo_id,
273}; 272};
274 273
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 2de66b062f0..9ca1eccba4b 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -44,6 +44,8 @@ struct platram_info {
44 struct device *dev; 44 struct device *dev;
45 struct mtd_info *mtd; 45 struct mtd_info *mtd;
46 struct map_info map; 46 struct map_info map;
47 struct mtd_partition *partitions;
48 bool free_partitions;
47 struct resource *area; 49 struct resource *area;
48 struct platdata_mtd_ram *pdata; 50 struct platdata_mtd_ram *pdata;
49}; 51};
@@ -93,6 +95,10 @@ static int platram_remove(struct platform_device *pdev)
93 95
94 if (info->mtd) { 96 if (info->mtd) {
95 mtd_device_unregister(info->mtd); 97 mtd_device_unregister(info->mtd);
98 if (info->partitions) {
99 if (info->free_partitions)
100 kfree(info->partitions);
101 }
96 map_destroy(info->mtd); 102 map_destroy(info->mtd);
97 } 103 }
98 104
@@ -219,23 +225,31 @@ static int platram_probe(struct platform_device *pdev)
219 225
220 platram_setrw(info, PLATRAM_RW); 226 platram_setrw(info, PLATRAM_RW);
221 227
222 /* check to see if there are any available partitions, or whether 228 /* check to see if there are any available partitions, or wether
223 * to add this device whole */ 229 * to add this device whole */
224 230
225 err = mtd_device_parse_register(info->mtd, pdata->probes, NULL, 231 if (!pdata->nr_partitions) {
226 pdata->partitions, 232 /* try to probe using the supplied probe type */
227 pdata->nr_partitions); 233 if (pdata->probes) {
234 err = parse_mtd_partitions(info->mtd, pdata->probes,
235 &info->partitions, 0);
236 info->free_partitions = 1;
237 if (err > 0)
238 err = mtd_device_register(info->mtd,
239 info->partitions, err);
240 }
241 }
242 /* use the static mapping */
243 else
244 err = mtd_device_register(info->mtd, pdata->partitions,
245 pdata->nr_partitions);
228 if (!err) 246 if (!err)
229 dev_info(&pdev->dev, "registered mtd device\n"); 247 dev_info(&pdev->dev, "registered mtd device\n");
230 248
231 if (pdata->nr_partitions) { 249 /* add the whole device. */
232 /* add the whole device. */ 250 err = mtd_device_register(info->mtd, NULL, 0);
233 err = mtd_device_register(info->mtd, NULL, 0); 251 if (err)
234 if (err) { 252 dev_err(&pdev->dev, "failed to register the entire device\n");
235 dev_err(&pdev->dev,
236 "failed to register the entire device\n");
237 }
238 }
239 253
240 return err; 254 return err;
241 255
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 43e3dbb976d..7ae137d4b99 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -41,6 +41,8 @@ static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
41} 41}
42 42
43struct pxa2xx_flash_info { 43struct pxa2xx_flash_info {
44 struct mtd_partition *parts;
45 int nr_parts;
44 struct mtd_info *mtd; 46 struct mtd_info *mtd;
45 struct map_info map; 47 struct map_info map;
46}; 48};
@@ -49,11 +51,13 @@ struct pxa2xx_flash_info {
49static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; 51static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
50 52
51 53
52static int pxa2xx_flash_probe(struct platform_device *pdev) 54static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
53{ 55{
54 struct flash_platform_data *flash = pdev->dev.platform_data; 56 struct flash_platform_data *flash = pdev->dev.platform_data;
55 struct pxa2xx_flash_info *info; 57 struct pxa2xx_flash_info *info;
58 struct mtd_partition *parts;
56 struct resource *res; 59 struct resource *res;
60 int ret = 0;
57 61
58 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 62 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!res) 63 if (!res)
@@ -67,6 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
67 info->map.bankwidth = flash->width; 71 info->map.bankwidth = flash->width;
68 info->map.phys = res->start; 72 info->map.phys = res->start;
69 info->map.size = resource_size(res); 73 info->map.size = resource_size(res);
74 info->parts = flash->parts;
75 info->nr_parts = flash->nr_parts;
70 76
71 info->map.virt = ioremap(info->map.phys, info->map.size); 77 info->map.virt = ioremap(info->map.phys, info->map.size);
72 if (!info->map.virt) { 78 if (!info->map.virt) {
@@ -98,14 +104,24 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
98 } 104 }
99 info->mtd->owner = THIS_MODULE; 105 info->mtd->owner = THIS_MODULE;
100 106
101 mtd_device_parse_register(info->mtd, probes, NULL, flash->parts, 107 ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
102 flash->nr_parts); 108
109 if (ret > 0) {
110 info->nr_parts = ret;
111 info->parts = parts;
112 }
113
114 if (!info->nr_parts)
115 printk("Registering %s as whole device\n",
116 info->map.name);
117
118 mtd_device_register(info->mtd, info->parts, info->nr_parts);
103 119
104 platform_set_drvdata(pdev, info); 120 platform_set_drvdata(pdev, info);
105 return 0; 121 return 0;
106} 122}
107 123
108static int pxa2xx_flash_remove(struct platform_device *dev) 124static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
109{ 125{
110 struct pxa2xx_flash_info *info = platform_get_drvdata(dev); 126 struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
111 127
@@ -117,6 +133,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
117 iounmap(info->map.virt); 133 iounmap(info->map.virt);
118 if (info->map.cached) 134 if (info->map.cached)
119 iounmap(info->map.cached); 135 iounmap(info->map.cached);
136 kfree(info->parts);
120 kfree(info); 137 kfree(info);
121 return 0; 138 return 0;
122} 139}
@@ -126,8 +143,8 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
126{ 143{
127 struct pxa2xx_flash_info *info = platform_get_drvdata(dev); 144 struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
128 145
129 if (info && mtd_suspend(info->mtd) == 0) 146 if (info && info->mtd->suspend(info->mtd) == 0)
130 mtd_resume(info->mtd); 147 info->mtd->resume(info->mtd);
131} 148}
132#else 149#else
133#define pxa2xx_flash_shutdown NULL 150#define pxa2xx_flash_shutdown NULL
@@ -139,11 +156,22 @@ static struct platform_driver pxa2xx_flash_driver = {
139 .owner = THIS_MODULE, 156 .owner = THIS_MODULE,
140 }, 157 },
141 .probe = pxa2xx_flash_probe, 158 .probe = pxa2xx_flash_probe,
142 .remove = pxa2xx_flash_remove, 159 .remove = __devexit_p(pxa2xx_flash_remove),
143 .shutdown = pxa2xx_flash_shutdown, 160 .shutdown = pxa2xx_flash_shutdown,
144}; 161};
145 162
146module_platform_driver(pxa2xx_flash_driver); 163static int __init init_pxa2xx_flash(void)
164{
165 return platform_driver_register(&pxa2xx_flash_driver);
166}
167
168static void __exit cleanup_pxa2xx_flash(void)
169{
170 platform_driver_unregister(&pxa2xx_flash_driver);
171}
172
173module_init(init_pxa2xx_flash);
174module_exit(cleanup_pxa2xx_flash);
147 175
148MODULE_LICENSE("GPL"); 176MODULE_LICENSE("GPL");
149MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>"); 177MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 49c3fe715ee..761fb459d2c 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,6 +25,8 @@
25struct rbtx4939_flash_info { 25struct rbtx4939_flash_info {
26 struct mtd_info *mtd; 26 struct mtd_info *mtd;
27 struct map_info map; 27 struct map_info map;
28 int nr_parts;
29 struct mtd_partition *parts;
28}; 30};
29 31
30static int rbtx4939_flash_remove(struct platform_device *dev) 32static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -39,6 +41,8 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
39 if (info->mtd) { 41 if (info->mtd) {
40 struct rbtx4939_flash_data *pdata = dev->dev.platform_data; 42 struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
41 43
44 if (info->nr_parts)
45 kfree(info->parts);
42 mtd_device_unregister(info->mtd); 46 mtd_device_unregister(info->mtd);
43 map_destroy(info->mtd); 47 map_destroy(info->mtd);
44 } 48 }
@@ -46,6 +50,7 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
46} 50}
47 51
48static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; 52static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
53static const char *part_probe_types[] = { "cmdlinepart", NULL };
49 54
50static int rbtx4939_flash_probe(struct platform_device *dev) 55static int rbtx4939_flash_probe(struct platform_device *dev)
51{ 56{
@@ -100,11 +105,24 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
100 goto err_out; 105 goto err_out;
101 } 106 }
102 info->mtd->owner = THIS_MODULE; 107 info->mtd->owner = THIS_MODULE;
103 err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
104 pdata->nr_parts);
105
106 if (err) 108 if (err)
107 goto err_out; 109 goto err_out;
110
111 err = parse_mtd_partitions(info->mtd, part_probe_types,
112 &info->parts, 0);
113 if (err > 0) {
114 mtd_device_register(info->mtd, info->parts, err);
115 info->nr_parts = err;
116 return 0;
117 }
118
119 if (pdata->nr_parts) {
120 pr_notice("Using rbtx4939 partition information\n");
121 mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
122 return 0;
123 }
124
125 mtd_device_register(info->mtd, NULL, 0);
108 return 0; 126 return 0;
109 127
110err_out: 128err_out:
@@ -117,8 +135,9 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
117{ 135{
118 struct rbtx4939_flash_info *info = platform_get_drvdata(dev); 136 struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
119 137
120 if (mtd_suspend(info->mtd) == 0) 138 if (info->mtd->suspend && info->mtd->resume)
121 mtd_resume(info->mtd); 139 if (info->mtd->suspend(info->mtd) == 0)
140 info->mtd->resume(info->mtd);
122} 141}
123#else 142#else
124#define rbtx4939_flash_shutdown NULL 143#define rbtx4939_flash_shutdown NULL
@@ -134,7 +153,18 @@ static struct platform_driver rbtx4939_flash_driver = {
134 }, 153 },
135}; 154};
136 155
137module_platform_driver(rbtx4939_flash_driver); 156static int __init rbtx4939_flash_init(void)
157{
158 return platform_driver_register(&rbtx4939_flash_driver);
159}
160
161static void __exit rbtx4939_flash_exit(void)
162{
163 platform_driver_unregister(&rbtx4939_flash_driver);
164}
165
166module_init(rbtx4939_flash_init);
167module_exit(rbtx4939_flash_exit);
138 168
139MODULE_LICENSE("GPL"); 169MODULE_LICENSE("GPL");
140MODULE_DESCRIPTION("RBTX4939 MTD map driver"); 170MODULE_DESCRIPTION("RBTX4939 MTD map driver");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index f694417cf7e..a9b5e0e5c4c 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -23,6 +23,106 @@
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <asm/mach/flash.h> 24#include <asm/mach/flash.h>
25 25
26#if 0
27/*
28 * This is here for documentation purposes only - until these people
29 * submit their machine types. It will be gone January 2005.
30 */
31static struct mtd_partition consus_partitions[] = {
32 {
33 .name = "Consus boot firmware",
34 .offset = 0,
35 .size = 0x00040000,
36 .mask_flags = MTD_WRITABLE, /* force read-only */
37 }, {
38 .name = "Consus kernel",
39 .offset = 0x00040000,
40 .size = 0x00100000,
41 .mask_flags = 0,
42 }, {
43 .name = "Consus disk",
44 .offset = 0x00140000,
45 /* The rest (up to 16M) for jffs. We could put 0 and
46 make it find the size automatically, but right now
47 i have 32 megs. jffs will use all 32 megs if given
48 the chance, and this leads to horrible problems
49 when you try to re-flash the image because blob
50 won't erase the whole partition. */
51 .size = 0x01000000 - 0x00140000,
52 .mask_flags = 0,
53 }, {
54 /* this disk is a secondary disk, which can be used as
55 needed, for simplicity, make it the size of the other
56 consus partition, although realistically it could be
57 the remainder of the disk (depending on the file
58 system used) */
59 .name = "Consus disk2",
60 .offset = 0x01000000,
61 .size = 0x01000000 - 0x00140000,
62 .mask_flags = 0,
63 }
64};
65
66/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
67static struct mtd_partition frodo_partitions[] =
68{
69 {
70 .name = "bootloader",
71 .size = 0x00040000,
72 .offset = 0x00000000,
73 .mask_flags = MTD_WRITEABLE
74 }, {
75 .name = "bootloader params",
76 .size = 0x00040000,
77 .offset = MTDPART_OFS_APPEND,
78 .mask_flags = MTD_WRITEABLE
79 }, {
80 .name = "kernel",
81 .size = 0x00100000,
82 .offset = MTDPART_OFS_APPEND,
83 .mask_flags = MTD_WRITEABLE
84 }, {
85 .name = "ramdisk",
86 .size = 0x00400000,
87 .offset = MTDPART_OFS_APPEND,
88 .mask_flags = MTD_WRITEABLE
89 }, {
90 .name = "file system",
91 .size = MTDPART_SIZ_FULL,
92 .offset = MTDPART_OFS_APPEND
93 }
94};
95
96static struct mtd_partition jornada56x_partitions[] = {
97 {
98 .name = "bootldr",
99 .size = 0x00040000,
100 .offset = 0,
101 .mask_flags = MTD_WRITEABLE,
102 }, {
103 .name = "rootfs",
104 .size = MTDPART_SIZ_FULL,
105 .offset = MTDPART_OFS_APPEND,
106 }
107};
108
109static void jornada56x_set_vpp(int vpp)
110{
111 if (vpp)
112 GPSR = GPIO_GPIO26;
113 else
114 GPCR = GPIO_GPIO26;
115 GPDR |= GPIO_GPIO26;
116}
117
118/*
119 * Machine Phys Size set_vpp
120 * Consus : SA1100_CS0_PHYS SZ_32M
121 * Frodo : SA1100_CS0_PHYS SZ_32M
122 * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp
123 */
124#endif
125
26struct sa_subdev_info { 126struct sa_subdev_info {
27 char name[16]; 127 char name[16];
28 struct map_info map; 128 struct map_info map;
@@ -31,27 +131,17 @@ struct sa_subdev_info {
31}; 131};
32 132
33struct sa_info { 133struct sa_info {
134 struct mtd_partition *parts;
34 struct mtd_info *mtd; 135 struct mtd_info *mtd;
35 int num_subdev; 136 int num_subdev;
137 unsigned int nr_parts;
36 struct sa_subdev_info subdev[0]; 138 struct sa_subdev_info subdev[0];
37}; 139};
38 140
39static DEFINE_SPINLOCK(sa1100_vpp_lock);
40static int sa1100_vpp_refcnt;
41static void sa1100_set_vpp(struct map_info *map, int on) 141static void sa1100_set_vpp(struct map_info *map, int on)
42{ 142{
43 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); 143 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
44 unsigned long flags; 144 subdev->plat->set_vpp(on);
45
46 spin_lock_irqsave(&sa1100_vpp_lock, flags);
47 if (on) {
48 if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
49 subdev->plat->set_vpp(1);
50 } else {
51 if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
52 subdev->plat->set_vpp(0);
53 }
54 spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
55} 145}
56 146
57static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) 147static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
@@ -141,6 +231,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
141 mtd_concat_destroy(info->mtd); 231 mtd_concat_destroy(info->mtd);
142 } 232 }
143 233
234 kfree(info->parts);
235
144 for (i = info->num_subdev - 1; i >= 0; i--) 236 for (i = info->num_subdev - 1; i >= 0; i--)
145 sa1100_destroy_subdev(&info->subdev[i]); 237 sa1100_destroy_subdev(&info->subdev[i]);
146 kfree(info); 238 kfree(info);
@@ -149,8 +241,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
149 plat->exit(); 241 plat->exit();
150} 242}
151 243
152static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, 244static struct sa_info *__devinit
153 struct flash_platform_data *plat) 245sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
154{ 246{
155 struct sa_info *info; 247 struct sa_info *info;
156 int nr, size, i, ret = 0; 248 int nr, size, i, ret = 0;
@@ -246,11 +338,13 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
246 338
247static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; 339static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
248 340
249static int sa1100_mtd_probe(struct platform_device *pdev) 341static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
250{ 342{
251 struct flash_platform_data *plat = pdev->dev.platform_data; 343 struct flash_platform_data *plat = pdev->dev.platform_data;
344 struct mtd_partition *parts;
345 const char *part_type = NULL;
252 struct sa_info *info; 346 struct sa_info *info;
253 int err; 347 int err, nr_parts = 0;
254 348
255 if (!plat) 349 if (!plat)
256 return -ENODEV; 350 return -ENODEV;
@@ -264,8 +358,26 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
264 /* 358 /*
265 * Partition selection stuff. 359 * Partition selection stuff.
266 */ 360 */
267 mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts, 361 nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
268 plat->nr_parts); 362 if (nr_parts > 0) {
363 info->parts = parts;
364 part_type = "dynamic";
365 } else {
366 parts = plat->parts;
367 nr_parts = plat->nr_parts;
368 part_type = "static";
369 }
370
371 if (nr_parts == 0)
372 printk(KERN_NOTICE "SA1100 flash: no partition info "
373 "available, registering whole flash\n");
374 else
375 printk(KERN_NOTICE "SA1100 flash: using %s partition "
376 "definition\n", part_type);
377
378 mtd_device_register(info->mtd, parts, nr_parts);
379
380 info->nr_parts = nr_parts;
269 381
270 platform_set_drvdata(pdev, info); 382 platform_set_drvdata(pdev, info);
271 err = 0; 383 err = 0;
@@ -285,16 +397,39 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
285 return 0; 397 return 0;
286} 398}
287 399
400#ifdef CONFIG_PM
401static void sa1100_mtd_shutdown(struct platform_device *dev)
402{
403 struct sa_info *info = platform_get_drvdata(dev);
404 if (info && info->mtd->suspend(info->mtd) == 0)
405 info->mtd->resume(info->mtd);
406}
407#else
408#define sa1100_mtd_shutdown NULL
409#endif
410
288static struct platform_driver sa1100_mtd_driver = { 411static struct platform_driver sa1100_mtd_driver = {
289 .probe = sa1100_mtd_probe, 412 .probe = sa1100_mtd_probe,
290 .remove = __exit_p(sa1100_mtd_remove), 413 .remove = __exit_p(sa1100_mtd_remove),
414 .shutdown = sa1100_mtd_shutdown,
291 .driver = { 415 .driver = {
292 .name = "sa1100-mtd", 416 .name = "sa1100-mtd",
293 .owner = THIS_MODULE, 417 .owner = THIS_MODULE,
294 }, 418 },
295}; 419};
296 420
297module_platform_driver(sa1100_mtd_driver); 421static int __init sa1100_mtd_init(void)
422{
423 return platform_driver_register(&sa1100_mtd_driver);
424}
425
426static void __exit sa1100_mtd_exit(void)
427{
428 platform_driver_unregister(&sa1100_mtd_driver);
429}
430
431module_init(sa1100_mtd_init);
432module_exit(sa1100_mtd_exit);
298 433
299MODULE_AUTHOR("Nicolas Pitre"); 434MODULE_AUTHOR("Nicolas Pitre");
300MODULE_DESCRIPTION("SA1100 CFI map driver"); 435MODULE_DESCRIPTION("SA1100 CFI map driver");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index c77b68c9412..d88c8426bb0 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -69,7 +69,8 @@ static struct map_info scb2_map = {
69}; 69};
70static int region_fail; 70static int region_fail;
71 71
72static int scb2_fixup_mtd(struct mtd_info *mtd) 72static int __devinit
73scb2_fixup_mtd(struct mtd_info *mtd)
73{ 74{
74 int i; 75 int i;
75 int done = 0; 76 int done = 0;
@@ -132,8 +133,8 @@ static int scb2_fixup_mtd(struct mtd_info *mtd)
132/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */ 133/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
133#define CSB5_FCR 0x41 134#define CSB5_FCR 0x41
134#define CSB5_FCR_DECODE_ALL 0x0e 135#define CSB5_FCR_DECODE_ALL 0x0e
135static int scb2_flash_probe(struct pci_dev *dev, 136static int __devinit
136 const struct pci_device_id *ent) 137scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
137{ 138{
138 u8 reg; 139 u8 reg;
139 140
@@ -196,13 +197,15 @@ static int scb2_flash_probe(struct pci_dev *dev,
196 return 0; 197 return 0;
197} 198}
198 199
199static void scb2_flash_remove(struct pci_dev *dev) 200static void __devexit
201scb2_flash_remove(struct pci_dev *dev)
200{ 202{
201 if (!scb2_mtd) 203 if (!scb2_mtd)
202 return; 204 return;
203 205
204 /* disable flash writes */ 206 /* disable flash writes */
205 mtd_lock(scb2_mtd, 0, scb2_mtd->size); 207 if (scb2_mtd->lock)
208 scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
206 209
207 mtd_device_unregister(scb2_mtd); 210 mtd_device_unregister(scb2_mtd);
208 map_destroy(scb2_mtd); 211 map_destroy(scb2_mtd);
@@ -229,10 +232,23 @@ static struct pci_driver scb2_flash_driver = {
229 .name = "Intel SCB2 BIOS Flash", 232 .name = "Intel SCB2 BIOS Flash",
230 .id_table = scb2_flash_pci_ids, 233 .id_table = scb2_flash_pci_ids,
231 .probe = scb2_flash_probe, 234 .probe = scb2_flash_probe,
232 .remove = scb2_flash_remove, 235 .remove = __devexit_p(scb2_flash_remove),
233}; 236};
234 237
235module_pci_driver(scb2_flash_driver); 238static int __init
239scb2_flash_init(void)
240{
241 return pci_register_driver(&scb2_flash_driver);
242}
243
244static void __exit
245scb2_flash_exit(void)
246{
247 pci_unregister_driver(&scb2_flash_driver);
248}
249
250module_init(scb2_flash_init);
251module_exit(scb2_flash_exit);
236 252
237MODULE_LICENSE("GPL"); 253MODULE_LICENSE("GPL");
238MODULE_AUTHOR("Tim Hockin <thockin@sun.com>"); 254MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 9d900ada670..cbf6bade935 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -19,6 +19,8 @@
19static struct mtd_info *flash_mtd; 19static struct mtd_info *flash_mtd;
20static struct mtd_info *eprom_mtd; 20static struct mtd_info *eprom_mtd;
21 21
22static struct mtd_partition *parsed_parts;
23
22struct map_info soleng_eprom_map = { 24struct map_info soleng_eprom_map = {
23 .name = "Solution Engine EPROM", 25 .name = "Solution Engine EPROM",
24 .size = 0x400000, 26 .size = 0x400000,
@@ -49,14 +51,12 @@ static struct mtd_partition superh_se_partitions[] = {
49 .size = MTDPART_SIZ_FULL, 51 .size = MTDPART_SIZ_FULL,
50 } 52 }
51}; 53};
52#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
53#else
54#define superh_se_partitions NULL
55#define NUM_PARTITIONS 0
56#endif /* CONFIG_MTD_SUPERH_RESERVE */ 54#endif /* CONFIG_MTD_SUPERH_RESERVE */
57 55
58static int __init init_soleng_maps(void) 56static int __init init_soleng_maps(void)
59{ 57{
58 int nr_parts = 0;
59
60 /* First probe at offset 0 */ 60 /* First probe at offset 0 */
61 soleng_flash_map.phys = 0; 61 soleng_flash_map.phys = 0;
62 soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0); 62 soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
@@ -92,8 +92,21 @@ static int __init init_soleng_maps(void)
92 mtd_device_register(eprom_mtd, NULL, 0); 92 mtd_device_register(eprom_mtd, NULL, 0);
93 } 93 }
94 94
95 mtd_device_parse_register(flash_mtd, probes, NULL, 95 nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
96 superh_se_partitions, NUM_PARTITIONS); 96
97#ifdef CONFIG_MTD_SUPERH_RESERVE
98 if (nr_parts <= 0) {
99 printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
100 CONFIG_MTD_SUPERH_RESERVE);
101 parsed_parts = superh_se_partitions;
102 nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts);
103 }
104#endif /* CONFIG_MTD_SUPERH_RESERVE */
105
106 if (nr_parts > 0)
107 mtd_device_register(flash_mtd, parsed_parts, nr_parts);
108 else
109 mtd_device_register(flash_mtd, NULL, 0);
97 110
98 return 0; 111 return 0;
99} 112}
@@ -105,7 +118,10 @@ static void __exit cleanup_soleng_maps(void)
105 map_destroy(eprom_mtd); 118 map_destroy(eprom_mtd);
106 } 119 }
107 120
108 mtd_device_unregister(flash_mtd); 121 if (parsed_parts)
122 mtd_device_unregister(flash_mtd);
123 else
124 mtd_device_unregister(flash_mtd);
109 map_destroy(flash_mtd); 125 map_destroy(flash_mtd);
110} 126}
111 127
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index d467f3b11c9..2d66234f57c 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
108 return 0; 108 return 0;
109} 109}
110 110
111static int uflash_probe(struct platform_device *op) 111static int __devinit uflash_probe(struct platform_device *op)
112{ 112{
113 struct device_node *dp = op->dev.of_node; 113 struct device_node *dp = op->dev.of_node;
114 114
@@ -121,7 +121,7 @@ static int uflash_probe(struct platform_device *op)
121 return uflash_devinit(op, dp); 121 return uflash_devinit(op, dp);
122} 122}
123 123
124static int uflash_remove(struct platform_device *op) 124static int __devexit uflash_remove(struct platform_device *op)
125{ 125{
126 struct uflash_dev *up = dev_get_drvdata(&op->dev); 126 struct uflash_dev *up = dev_get_drvdata(&op->dev);
127 127
@@ -155,7 +155,18 @@ static struct platform_driver uflash_driver = {
155 .of_match_table = uflash_match, 155 .of_match_table = uflash_match,
156 }, 156 },
157 .probe = uflash_probe, 157 .probe = uflash_probe,
158 .remove = uflash_remove, 158 .remove = __devexit_p(uflash_remove),
159}; 159};
160 160
161module_platform_driver(uflash_driver); 161static int __init uflash_init(void)
162{
163 return platform_driver_register(&uflash_driver);
164}
165
166static void __exit uflash_exit(void)
167{
168 platform_driver_unregister(&uflash_driver);
169}
170
171module_init(uflash_init);
172module_exit(uflash_exit);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 299bf88a6f4..6793074f3f4 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,13 +19,14 @@
19#include <linux/mtd/map.h> 19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sections.h>
23 22
24/****************************************************************************/ 23/****************************************************************************/
25 24
25extern char _ebss;
26
26struct map_info uclinux_ram_map = { 27struct map_info uclinux_ram_map = {
27 .name = "RAM", 28 .name = "RAM",
28 .phys = (unsigned long)__bss_stop, 29 .phys = (unsigned long)&_ebss,
29 .size = 0, 30 .size = 0,
30}; 31};
31 32
@@ -67,16 +68,10 @@ static int __init uclinux_mtd_init(void)
67 printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n", 68 printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
68 (int) mapp->phys, (int) mapp->size); 69 (int) mapp->phys, (int) mapp->size);
69 70
70 /* 71 mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
71 * The filesystem is guaranteed to be in direct mapped memory. It is
72 * directly following the kernels own bss region. Following the same
73 * mechanism used by architectures setting up traditional initrds we
74 * use phys_to_virt to get the virtual address of its start.
75 */
76 mapp->virt = phys_to_virt(mapp->phys);
77 72
78 if (mapp->virt == 0) { 73 if (mapp->virt == 0) {
79 printk("uclinux[mtd]: no virtual mapping?\n"); 74 printk("uclinux[mtd]: ioremap_nocache() failed\n");
80 return(-EIO); 75 return(-EIO);
81 } 76 }
82 77
@@ -85,11 +80,12 @@ static int __init uclinux_mtd_init(void)
85 mtd = do_map_probe("map_ram", mapp); 80 mtd = do_map_probe("map_ram", mapp);
86 if (!mtd) { 81 if (!mtd) {
87 printk("uclinux[mtd]: failed to find a mapping?\n"); 82 printk("uclinux[mtd]: failed to find a mapping?\n");
83 iounmap(mapp->virt);
88 return(-ENXIO); 84 return(-ENXIO);
89 } 85 }
90 86
91 mtd->owner = THIS_MODULE; 87 mtd->owner = THIS_MODULE;
92 mtd->_point = uclinux_point; 88 mtd->point = uclinux_point;
93 mtd->priv = mapp; 89 mtd->priv = mapp;
94 90
95 uclinux_ram_mtdinfo = mtd; 91 uclinux_ram_mtdinfo = mtd;
@@ -107,8 +103,10 @@ static void __exit uclinux_mtd_cleanup(void)
107 map_destroy(uclinux_ram_mtdinfo); 103 map_destroy(uclinux_ram_mtdinfo);
108 uclinux_ram_mtdinfo = NULL; 104 uclinux_ram_mtdinfo = NULL;
109 } 105 }
110 if (uclinux_ram_map.virt) 106 if (uclinux_ram_map.virt) {
107 iounmap((void *) uclinux_ram_map.virt);
111 uclinux_ram_map.virt = 0; 108 uclinux_ram_map.virt = 0;
109 }
112} 110}
113 111
114/****************************************************************************/ 112/****************************************************************************/
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 6b223cfe92b..3a04b078576 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -360,6 +360,9 @@ static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
360 int index = 0, retval, partition, leftover, numblocks; 360 int index = 0, retval, partition, leftover, numblocks;
361 unsigned char cx; 361 unsigned char cx;
362 362
363 if (len < 1)
364 return -EIO;
365
363 mpart = mtd->priv; 366 mpart = mtd->priv;
364 mdev = mpart->mdev; 367 mdev = mpart->mdev;
365 partition = mpart->partition; 368 partition = mpart->partition;
@@ -431,6 +434,11 @@ static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
431 partition = mpart->partition; 434 partition = mpart->partition;
432 card = maple_get_drvdata(mdev); 435 card = maple_get_drvdata(mdev);
433 436
437 /* simple sanity checks */
438 if (len < 1) {
439 error = -EIO;
440 goto failed;
441 }
434 numblocks = card->parts[partition].numblocks; 442 numblocks = card->parts[partition].numblocks;
435 if (to + len > numblocks * card->blocklen) 443 if (to + len > numblocks * card->blocklen)
436 len = numblocks * card->blocklen - to; 444 len = numblocks * card->blocklen - to;
@@ -536,9 +544,9 @@ static void vmu_queryblocks(struct mapleq *mq)
536 mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; 544 mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
537 mtd_cur->size = part_cur->numblocks * card->blocklen; 545 mtd_cur->size = part_cur->numblocks * card->blocklen;
538 mtd_cur->erasesize = card->blocklen; 546 mtd_cur->erasesize = card->blocklen;
539 mtd_cur->_write = vmu_flash_write; 547 mtd_cur->write = vmu_flash_write;
540 mtd_cur->_read = vmu_flash_read; 548 mtd_cur->read = vmu_flash_read;
541 mtd_cur->_sync = vmu_flash_sync; 549 mtd_cur->sync = vmu_flash_sync;
542 mtd_cur->writesize = card->blocklen; 550 mtd_cur->writesize = card->blocklen;
543 551
544 mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); 552 mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
@@ -596,7 +604,7 @@ fail_name:
596} 604}
597 605
598/* Handles very basic info about the flash, queries for details */ 606/* Handles very basic info about the flash, queries for details */
599static int vmu_connect(struct maple_device *mdev) 607static int __devinit vmu_connect(struct maple_device *mdev)
600{ 608{
601 unsigned long test_flash_data, basic_flash_data; 609 unsigned long test_flash_data, basic_flash_data;
602 int c, error; 610 int c, error;
@@ -690,7 +698,7 @@ fail_nomem:
690 return error; 698 return error;
691} 699}
692 700
693static void vmu_disconnect(struct maple_device *mdev) 701static void __devexit vmu_disconnect(struct maple_device *mdev)
694{ 702{
695 struct memcard *card; 703 struct memcard *card;
696 struct mdev_part *mpart; 704 struct mdev_part *mpart;
@@ -772,7 +780,7 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
772} 780}
773 781
774 782
775static int probe_maple_vmu(struct device *dev) 783static int __devinit probe_maple_vmu(struct device *dev)
776{ 784{
777 int error; 785 int error;
778 struct maple_device *mdev = to_maple_dev(dev); 786 struct maple_device *mdev = to_maple_dev(dev);
@@ -789,7 +797,7 @@ static int probe_maple_vmu(struct device *dev)
789 return 0; 797 return 0;
790} 798}
791 799
792static int remove_maple_vmu(struct device *dev) 800static int __devexit remove_maple_vmu(struct device *dev)
793{ 801{
794 struct maple_device *mdev = to_maple_dev(dev); 802 struct maple_device *mdev = to_maple_dev(dev);
795 803
@@ -802,7 +810,7 @@ static struct maple_driver vmu_flash_driver = {
802 .drv = { 810 .drv = {
803 .name = "Dreamcast_visual_memory", 811 .name = "Dreamcast_visual_memory",
804 .probe = probe_maple_vmu, 812 .probe = probe_maple_vmu,
805 .remove = remove_maple_vmu, 813 .remove = __devexit_p(remove_maple_vmu),
806 }, 814 },
807}; 815};
808 816
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 5ad39bb5ab4..bff8d4671ad 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,6 +32,7 @@
32#include <linux/hdreg.h> 32#include <linux/hdreg.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/kthread.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36 37
37#include "mtdcore.h" 38#include "mtdcore.h"
@@ -120,14 +121,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
120 121
121int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) 122int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
122{ 123{
124 if (kthread_should_stop())
125 return 1;
126
123 return dev->bg_stop; 127 return dev->bg_stop;
124} 128}
125EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); 129EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
126 130
127static void mtd_blktrans_work(struct work_struct *work) 131static int mtd_blktrans_thread(void *arg)
128{ 132{
129 struct mtd_blktrans_dev *dev = 133 struct mtd_blktrans_dev *dev = arg;
130 container_of(work, struct mtd_blktrans_dev, work);
131 struct mtd_blktrans_ops *tr = dev->tr; 134 struct mtd_blktrans_ops *tr = dev->tr;
132 struct request_queue *rq = dev->rq; 135 struct request_queue *rq = dev->rq;
133 struct request *req = NULL; 136 struct request *req = NULL;
@@ -135,7 +138,7 @@ static void mtd_blktrans_work(struct work_struct *work)
135 138
136 spin_lock_irq(rq->queue_lock); 139 spin_lock_irq(rq->queue_lock);
137 140
138 while (1) { 141 while (!kthread_should_stop()) {
139 int res; 142 int res;
140 143
141 dev->bg_stop = false; 144 dev->bg_stop = false;
@@ -153,7 +156,15 @@ static void mtd_blktrans_work(struct work_struct *work)
153 background_done = !dev->bg_stop; 156 background_done = !dev->bg_stop;
154 continue; 157 continue;
155 } 158 }
156 break; 159 set_current_state(TASK_INTERRUPTIBLE);
160
161 if (kthread_should_stop())
162 set_current_state(TASK_RUNNING);
163
164 spin_unlock_irq(rq->queue_lock);
165 schedule();
166 spin_lock_irq(rq->queue_lock);
167 continue;
157 } 168 }
158 169
159 spin_unlock_irq(rq->queue_lock); 170 spin_unlock_irq(rq->queue_lock);
@@ -174,6 +185,8 @@ static void mtd_blktrans_work(struct work_struct *work)
174 __blk_end_request_all(req, -EIO); 185 __blk_end_request_all(req, -EIO);
175 186
176 spin_unlock_irq(rq->queue_lock); 187 spin_unlock_irq(rq->queue_lock);
188
189 return 0;
177} 190}
178 191
179static void mtd_blktrans_request(struct request_queue *rq) 192static void mtd_blktrans_request(struct request_queue *rq)
@@ -186,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq)
186 if (!dev) 199 if (!dev)
187 while ((req = blk_fetch_request(rq)) != NULL) 200 while ((req = blk_fetch_request(rq)) != NULL)
188 __blk_end_request_all(req, -ENODEV); 201 __blk_end_request_all(req, -ENODEV);
189 else 202 else {
190 queue_work(dev->wq, &dev->work); 203 dev->bg_stop = true;
204 wake_up_process(dev->thread);
205 }
191} 206}
192 207
193static int blktrans_open(struct block_device *bdev, fmode_t mode) 208static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -218,7 +233,6 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
218 ret = __get_mtd_device(dev->mtd); 233 ret = __get_mtd_device(dev->mtd);
219 if (ret) 234 if (ret)
220 goto error_release; 235 goto error_release;
221 dev->file_mode = mode;
222 236
223unlock: 237unlock:
224 dev->open++; 238 dev->open++;
@@ -310,7 +324,7 @@ unlock:
310 return ret; 324 return ret;
311} 325}
312 326
313static const struct block_device_operations mtd_block_ops = { 327static const struct block_device_operations mtd_blktrans_ops = {
314 .owner = THIS_MODULE, 328 .owner = THIS_MODULE,
315 .open = blktrans_open, 329 .open = blktrans_open,
316 .release = blktrans_release, 330 .release = blktrans_release,
@@ -386,7 +400,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
386 gd->private_data = new; 400 gd->private_data = new;
387 gd->major = tr->major; 401 gd->major = tr->major;
388 gd->first_minor = (new->devnum) << tr->part_bits; 402 gd->first_minor = (new->devnum) << tr->part_bits;
389 gd->fops = &mtd_block_ops; 403 gd->fops = &mtd_blktrans_ops;
390 404
391 if (tr->part_bits) 405 if (tr->part_bits)
392 if (new->devnum < 26) 406 if (new->devnum < 26)
@@ -413,8 +427,6 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
413 new->rq->queuedata = new; 427 new->rq->queuedata = new;
414 blk_queue_logical_block_size(new->rq, tr->blksize); 428 blk_queue_logical_block_size(new->rq, tr->blksize);
415 429
416 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
417
418 if (tr->discard) { 430 if (tr->discard) {
419 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); 431 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
420 new->rq->limits.max_discard_sectors = UINT_MAX; 432 new->rq->limits.max_discard_sectors = UINT_MAX;
@@ -422,13 +434,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
422 434
423 gd->queue = new->rq; 435 gd->queue = new->rq;
424 436
425 /* Create processing workqueue */ 437 /* Create processing thread */
426 new->wq = alloc_workqueue("%s%d", 0, 0, 438 /* TODO: workqueue ? */
427 tr->name, new->mtd->index); 439 new->thread = kthread_run(mtd_blktrans_thread, new,
428 if (!new->wq) 440 "%s%d", tr->name, new->mtd->index);
441 if (IS_ERR(new->thread)) {
442 ret = PTR_ERR(new->thread);
429 goto error4; 443 goto error4;
430 INIT_WORK(&new->work, mtd_blktrans_work); 444 }
431
432 gd->driverfs_dev = &new->mtd->dev; 445 gd->driverfs_dev = &new->mtd->dev;
433 446
434 if (new->readonly) 447 if (new->readonly)
@@ -468,8 +481,9 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
468 /* Stop new requests to arrive */ 481 /* Stop new requests to arrive */
469 del_gendisk(old->disk); 482 del_gendisk(old->disk);
470 483
471 /* Stop workqueue. This will perform any pending request. */ 484
472 destroy_workqueue(old->wq); 485 /* Stop the thread */
486 kthread_stop(old->thread);
473 487
474 /* Kill current requests */ 488 /* Kill current requests */
475 spin_lock_irqsave(&old->queue_lock, flags); 489 spin_lock_irqsave(&old->queue_lock, flags);
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 6c6d80736fa..3326615ad66 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -44,7 +44,7 @@ struct mtdblk_dev {
44 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 44 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
45}; 45};
46 46
47static DEFINE_MUTEX(mtdblks_lock); 47static struct mutex mtdblks_lock;
48 48
49/* 49/*
50 * Cache stuff... 50 * Cache stuff...
@@ -85,7 +85,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
85 set_current_state(TASK_INTERRUPTIBLE); 85 set_current_state(TASK_INTERRUPTIBLE);
86 add_wait_queue(&wait_q, &wait); 86 add_wait_queue(&wait_q, &wait);
87 87
88 ret = mtd_erase(mtd, &erase); 88 ret = mtd->erase(mtd, &erase);
89 if (ret) { 89 if (ret) {
90 set_current_state(TASK_RUNNING); 90 set_current_state(TASK_RUNNING);
91 remove_wait_queue(&wait_q, &wait); 91 remove_wait_queue(&wait_q, &wait);
@@ -102,7 +102,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
102 * Next, write the data to flash. 102 * Next, write the data to flash.
103 */ 103 */
104 104
105 ret = mtd_write(mtd, pos, len, &retlen, buf); 105 ret = mtd->write(mtd, pos, len, &retlen, buf);
106 if (ret) 106 if (ret)
107 return ret; 107 return ret;
108 if (retlen != len) 108 if (retlen != len)
@@ -119,7 +119,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
119 if (mtdblk->cache_state != STATE_DIRTY) 119 if (mtdblk->cache_state != STATE_DIRTY)
120 return 0; 120 return 0;
121 121
122 pr_debug("mtdblock: writing cached data for \"%s\" " 122 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
123 "at 0x%lx, size 0x%x\n", mtd->name, 123 "at 0x%lx, size 0x%x\n", mtd->name,
124 mtdblk->cache_offset, mtdblk->cache_size); 124 mtdblk->cache_offset, mtdblk->cache_size);
125 125
@@ -148,11 +148,11 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
148 size_t retlen; 148 size_t retlen;
149 int ret; 149 int ret;
150 150
151 pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", 151 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
152 mtd->name, pos, len); 152 mtd->name, pos, len);
153 153
154 if (!sect_size) 154 if (!sect_size)
155 return mtd_write(mtd, pos, len, &retlen, buf); 155 return mtd->write(mtd, pos, len, &retlen, buf);
156 156
157 while (len > 0) { 157 while (len > 0) {
158 unsigned long sect_start = (pos/sect_size)*sect_size; 158 unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -184,8 +184,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
184 mtdblk->cache_offset != sect_start) { 184 mtdblk->cache_offset != sect_start) {
185 /* fill the cache with the current sector */ 185 /* fill the cache with the current sector */
186 mtdblk->cache_state = STATE_EMPTY; 186 mtdblk->cache_state = STATE_EMPTY;
187 ret = mtd_read(mtd, sect_start, sect_size, 187 ret = mtd->read(mtd, sect_start, sect_size,
188 &retlen, mtdblk->cache_data); 188 &retlen, mtdblk->cache_data);
189 if (ret) 189 if (ret)
190 return ret; 190 return ret;
191 if (retlen != sect_size) 191 if (retlen != sect_size)
@@ -218,11 +218,11 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
218 size_t retlen; 218 size_t retlen;
219 int ret; 219 int ret;
220 220
221 pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", 221 DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
222 mtd->name, pos, len); 222 mtd->name, pos, len);
223 223
224 if (!sect_size) 224 if (!sect_size)
225 return mtd_read(mtd, pos, len, &retlen, buf); 225 return mtd->read(mtd, pos, len, &retlen, buf);
226 226
227 while (len > 0) { 227 while (len > 0) {
228 unsigned long sect_start = (pos/sect_size)*sect_size; 228 unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -241,7 +241,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
241 mtdblk->cache_offset == sect_start) { 241 mtdblk->cache_offset == sect_start) {
242 memcpy (buf, mtdblk->cache_data + offset, size); 242 memcpy (buf, mtdblk->cache_data + offset, size);
243 } else { 243 } else {
244 ret = mtd_read(mtd, pos, size, &retlen, buf); 244 ret = mtd->read(mtd, pos, size, &retlen, buf);
245 if (ret) 245 if (ret)
246 return ret; 246 return ret;
247 if (retlen != size) 247 if (retlen != size)
@@ -283,7 +283,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
283{ 283{
284 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 284 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
285 285
286 pr_debug("mtdblock_open\n"); 286 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
287 287
288 mutex_lock(&mtdblks_lock); 288 mutex_lock(&mtdblks_lock);
289 if (mtdblk->count) { 289 if (mtdblk->count) {
@@ -303,7 +303,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
303 303
304 mutex_unlock(&mtdblks_lock); 304 mutex_unlock(&mtdblks_lock);
305 305
306 pr_debug("ok\n"); 306 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
307 307
308 return 0; 308 return 0;
309} 309}
@@ -312,7 +312,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
312{ 312{
313 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 313 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
314 314
315 pr_debug("mtdblock_release\n"); 315 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
316 316
317 mutex_lock(&mtdblks_lock); 317 mutex_lock(&mtdblks_lock);
318 318
@@ -321,18 +321,15 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
321 mutex_unlock(&mtdblk->cache_mutex); 321 mutex_unlock(&mtdblk->cache_mutex);
322 322
323 if (!--mtdblk->count) { 323 if (!--mtdblk->count) {
324 /* 324 /* It was the last usage. Free the cache */
325 * It was the last usage. Free the cache, but only sync if 325 if (mbd->mtd->sync)
326 * opened for writing. 326 mbd->mtd->sync(mbd->mtd);
327 */
328 if (mbd->file_mode & FMODE_WRITE)
329 mtd_sync(mbd->mtd);
330 vfree(mtdblk->cache_data); 327 vfree(mtdblk->cache_data);
331 } 328 }
332 329
333 mutex_unlock(&mtdblks_lock); 330 mutex_unlock(&mtdblks_lock);
334 331
335 pr_debug("ok\n"); 332 DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
336 333
337 return 0; 334 return 0;
338} 335}
@@ -344,7 +341,9 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
344 mutex_lock(&mtdblk->cache_mutex); 341 mutex_lock(&mtdblk->cache_mutex);
345 write_cached_data(mtdblk); 342 write_cached_data(mtdblk);
346 mutex_unlock(&mtdblk->cache_mutex); 343 mutex_unlock(&mtdblk->cache_mutex);
347 mtd_sync(dev->mtd); 344
345 if (dev->mtd->sync)
346 dev->mtd->sync(dev->mtd);
348 return 0; 347 return 0;
349} 348}
350 349
@@ -390,6 +389,8 @@ static struct mtd_blktrans_ops mtdblock_tr = {
390 389
391static int __init init_mtdblock(void) 390static int __init init_mtdblock(void)
392{ 391{
392 mutex_init(&mtdblks_lock);
393
393 return register_mtd_blktrans(&mtdblock_tr); 394 return register_mtd_blktrans(&mtdblock_tr);
394} 395}
395 396
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 92759a9d298..795a8c0a05b 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -23,14 +23,13 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
25#include <linux/mtd/blktrans.h> 25#include <linux/mtd/blktrans.h>
26#include <linux/module.h>
27 26
28static int mtdblock_readsect(struct mtd_blktrans_dev *dev, 27static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
29 unsigned long block, char *buf) 28 unsigned long block, char *buf)
30{ 29{
31 size_t retlen; 30 size_t retlen;
32 31
33 if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf)) 32 if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
34 return 1; 33 return 1;
35 return 0; 34 return 0;
36} 35}
@@ -40,7 +39,7 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
40{ 39{
41 size_t retlen; 40 size_t retlen;
42 41
43 if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf)) 42 if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
44 return 1; 43 return 1;
45 return 0; 44 return 0;
46} 45}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 82c06165d3d..49e20a49708 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -31,18 +31,19 @@
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/mount.h> 32#include <linux/mount.h>
33#include <linux/blkpg.h> 33#include <linux/blkpg.h>
34#include <linux/magic.h>
35#include <linux/mtd/mtd.h> 34#include <linux/mtd/mtd.h>
36#include <linux/mtd/partitions.h> 35#include <linux/mtd/partitions.h>
37#include <linux/mtd/map.h> 36#include <linux/mtd/map.h>
38 37
39#include <asm/uaccess.h> 38#include <asm/uaccess.h>
40 39
40#define MTD_INODE_FS_MAGIC 0x11307854
41static DEFINE_MUTEX(mtd_mutex); 41static DEFINE_MUTEX(mtd_mutex);
42static struct vfsmount *mtd_inode_mnt __read_mostly;
42 43
43/* 44/*
44 * Data structure to hold the pointer to the mtd device as well 45 * Data structure to hold the pointer to the mtd device as well
45 * as mode information of various use cases. 46 * as mode information ofr various use cases.
46 */ 47 */
47struct mtd_file_info { 48struct mtd_file_info {
48 struct mtd_info *mtd; 49 struct mtd_info *mtd;
@@ -50,7 +51,7 @@ struct mtd_file_info {
50 enum mtd_file_modes mode; 51 enum mtd_file_modes mode;
51}; 52};
52 53
53static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig) 54static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
54{ 55{
55 struct mtd_file_info *mfi = file->private_data; 56 struct mtd_file_info *mfi = file->private_data;
56 struct mtd_info *mtd = mfi->mtd; 57 struct mtd_info *mtd = mfi->mtd;
@@ -74,11 +75,9 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
74 return -EINVAL; 75 return -EINVAL;
75} 76}
76 77
77static int count;
78static struct vfsmount *mnt;
79static struct file_system_type mtd_inodefs_type;
80 78
81static int mtdchar_open(struct inode *inode, struct file *file) 79
80static int mtd_open(struct inode *inode, struct file *file)
82{ 81{
83 int minor = iminor(inode); 82 int minor = iminor(inode);
84 int devnum = minor >> 1; 83 int devnum = minor >> 1;
@@ -87,16 +86,12 @@ static int mtdchar_open(struct inode *inode, struct file *file)
87 struct mtd_file_info *mfi; 86 struct mtd_file_info *mfi;
88 struct inode *mtd_ino; 87 struct inode *mtd_ino;
89 88
90 pr_debug("MTD_open\n"); 89 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
91 90
92 /* You can't open the RO devices RW */ 91 /* You can't open the RO devices RW */
93 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 92 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
94 return -EACCES; 93 return -EACCES;
95 94
96 ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
97 if (ret)
98 return ret;
99
100 mutex_lock(&mtd_mutex); 95 mutex_lock(&mtd_mutex);
101 mtd = get_mtd_device(NULL, devnum); 96 mtd = get_mtd_device(NULL, devnum);
102 97
@@ -106,14 +101,16 @@ static int mtdchar_open(struct inode *inode, struct file *file)
106 } 101 }
107 102
108 if (mtd->type == MTD_ABSENT) { 103 if (mtd->type == MTD_ABSENT) {
104 put_mtd_device(mtd);
109 ret = -ENODEV; 105 ret = -ENODEV;
110 goto out1; 106 goto out;
111 } 107 }
112 108
113 mtd_ino = iget_locked(mnt->mnt_sb, devnum); 109 mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
114 if (!mtd_ino) { 110 if (!mtd_ino) {
111 put_mtd_device(mtd);
115 ret = -ENOMEM; 112 ret = -ENOMEM;
116 goto out1; 113 goto out;
117 } 114 }
118 if (mtd_ino->i_state & I_NEW) { 115 if (mtd_ino->i_state & I_NEW) {
119 mtd_ino->i_private = mtd; 116 mtd_ino->i_private = mtd;
@@ -125,53 +122,49 @@ static int mtdchar_open(struct inode *inode, struct file *file)
125 122
126 /* You can't open it RW if it's not a writeable device */ 123 /* You can't open it RW if it's not a writeable device */
127 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { 124 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
125 iput(mtd_ino);
126 put_mtd_device(mtd);
128 ret = -EACCES; 127 ret = -EACCES;
129 goto out2; 128 goto out;
130 } 129 }
131 130
132 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); 131 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
133 if (!mfi) { 132 if (!mfi) {
133 iput(mtd_ino);
134 put_mtd_device(mtd);
134 ret = -ENOMEM; 135 ret = -ENOMEM;
135 goto out2; 136 goto out;
136 } 137 }
137 mfi->ino = mtd_ino; 138 mfi->ino = mtd_ino;
138 mfi->mtd = mtd; 139 mfi->mtd = mtd;
139 file->private_data = mfi; 140 file->private_data = mfi;
140 mutex_unlock(&mtd_mutex);
141 return 0;
142 141
143out2:
144 iput(mtd_ino);
145out1:
146 put_mtd_device(mtd);
147out: 142out:
148 mutex_unlock(&mtd_mutex); 143 mutex_unlock(&mtd_mutex);
149 simple_release_fs(&mnt, &count);
150 return ret; 144 return ret;
151} /* mtdchar_open */ 145} /* mtd_open */
152 146
153/*====================================================================*/ 147/*====================================================================*/
154 148
155static int mtdchar_close(struct inode *inode, struct file *file) 149static int mtd_close(struct inode *inode, struct file *file)
156{ 150{
157 struct mtd_file_info *mfi = file->private_data; 151 struct mtd_file_info *mfi = file->private_data;
158 struct mtd_info *mtd = mfi->mtd; 152 struct mtd_info *mtd = mfi->mtd;
159 153
160 pr_debug("MTD_close\n"); 154 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
161 155
162 /* Only sync if opened RW */ 156 /* Only sync if opened RW */
163 if ((file->f_mode & FMODE_WRITE)) 157 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
164 mtd_sync(mtd); 158 mtd->sync(mtd);
165 159
166 iput(mfi->ino); 160 iput(mfi->ino);
167 161
168 put_mtd_device(mtd); 162 put_mtd_device(mtd);
169 file->private_data = NULL; 163 file->private_data = NULL;
170 kfree(mfi); 164 kfree(mfi);
171 simple_release_fs(&mnt, &count);
172 165
173 return 0; 166 return 0;
174} /* mtdchar_close */ 167} /* mtd_close */
175 168
176/* Back in June 2001, dwmw2 wrote: 169/* Back in June 2001, dwmw2 wrote:
177 * 170 *
@@ -191,19 +184,18 @@ static int mtdchar_close(struct inode *inode, struct file *file)
191 * alignment requirements are not met in the NAND subdriver. 184 * alignment requirements are not met in the NAND subdriver.
192 */ 185 */
193 186
194static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count, 187static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
195 loff_t *ppos)
196{ 188{
197 struct mtd_file_info *mfi = file->private_data; 189 struct mtd_file_info *mfi = file->private_data;
198 struct mtd_info *mtd = mfi->mtd; 190 struct mtd_info *mtd = mfi->mtd;
199 size_t retlen; 191 size_t retlen=0;
200 size_t total_retlen=0; 192 size_t total_retlen=0;
201 int ret=0; 193 int ret=0;
202 int len; 194 int len;
203 size_t size = count; 195 size_t size = count;
204 char *kbuf; 196 char *kbuf;
205 197
206 pr_debug("MTD_read\n"); 198 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
207 199
208 if (*ppos + count > mtd->size) 200 if (*ppos + count > mtd->size)
209 count = mtd->size - *ppos; 201 count = mtd->size - *ppos;
@@ -219,40 +211,38 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
219 len = min_t(size_t, count, size); 211 len = min_t(size_t, count, size);
220 212
221 switch (mfi->mode) { 213 switch (mfi->mode) {
222 case MTD_FILE_MODE_OTP_FACTORY: 214 case MTD_MODE_OTP_FACTORY:
223 ret = mtd_read_fact_prot_reg(mtd, *ppos, len, 215 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
224 &retlen, kbuf);
225 break; 216 break;
226 case MTD_FILE_MODE_OTP_USER: 217 case MTD_MODE_OTP_USER:
227 ret = mtd_read_user_prot_reg(mtd, *ppos, len, 218 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
228 &retlen, kbuf);
229 break; 219 break;
230 case MTD_FILE_MODE_RAW: 220 case MTD_MODE_RAW:
231 { 221 {
232 struct mtd_oob_ops ops; 222 struct mtd_oob_ops ops;
233 223
234 ops.mode = MTD_OPS_RAW; 224 ops.mode = MTD_OOB_RAW;
235 ops.datbuf = kbuf; 225 ops.datbuf = kbuf;
236 ops.oobbuf = NULL; 226 ops.oobbuf = NULL;
237 ops.len = len; 227 ops.len = len;
238 228
239 ret = mtd_read_oob(mtd, *ppos, &ops); 229 ret = mtd->read_oob(mtd, *ppos, &ops);
240 retlen = ops.retlen; 230 retlen = ops.retlen;
241 break; 231 break;
242 } 232 }
243 default: 233 default:
244 ret = mtd_read(mtd, *ppos, len, &retlen, kbuf); 234 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
245 } 235 }
246 /* Nand returns -EBADMSG on ECC errors, but it returns 236 /* Nand returns -EBADMSG on ecc errors, but it returns
247 * the data. For our userspace tools it is important 237 * the data. For our userspace tools it is important
248 * to dump areas with ECC errors! 238 * to dump areas with ecc errors !
249 * For kernel internal usage it also might return -EUCLEAN 239 * For kernel internal usage it also might return -EUCLEAN
250 * to signal the caller that a bitflip has occurred and has 240 * to signal the caller that a bitflip has occurred and has
251 * been corrected by the ECC algorithm. 241 * been corrected by the ECC algorithm.
252 * Userspace software which accesses NAND this way 242 * Userspace software which accesses NAND this way
253 * must be aware of the fact that it deals with NAND 243 * must be aware of the fact that it deals with NAND
254 */ 244 */
255 if (!ret || mtd_is_bitflip_or_eccerr(ret)) { 245 if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
256 *ppos += retlen; 246 *ppos += retlen;
257 if (copy_to_user(buf, kbuf, retlen)) { 247 if (copy_to_user(buf, kbuf, retlen)) {
258 kfree(kbuf); 248 kfree(kbuf);
@@ -275,10 +265,9 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
275 265
276 kfree(kbuf); 266 kfree(kbuf);
277 return total_retlen; 267 return total_retlen;
278} /* mtdchar_read */ 268} /* mtd_read */
279 269
280static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count, 270static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
281 loff_t *ppos)
282{ 271{
283 struct mtd_file_info *mfi = file->private_data; 272 struct mtd_file_info *mfi = file->private_data;
284 struct mtd_info *mtd = mfi->mtd; 273 struct mtd_info *mtd = mfi->mtd;
@@ -289,7 +278,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
289 int ret=0; 278 int ret=0;
290 int len; 279 int len;
291 280
292 pr_debug("MTD_write\n"); 281 DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
293 282
294 if (*ppos == mtd->size) 283 if (*ppos == mtd->size)
295 return -ENOSPC; 284 return -ENOSPC;
@@ -313,31 +302,34 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
313 } 302 }
314 303
315 switch (mfi->mode) { 304 switch (mfi->mode) {
316 case MTD_FILE_MODE_OTP_FACTORY: 305 case MTD_MODE_OTP_FACTORY:
317 ret = -EROFS; 306 ret = -EROFS;
318 break; 307 break;
319 case MTD_FILE_MODE_OTP_USER: 308 case MTD_MODE_OTP_USER:
320 ret = mtd_write_user_prot_reg(mtd, *ppos, len, 309 if (!mtd->write_user_prot_reg) {
321 &retlen, kbuf); 310 ret = -EOPNOTSUPP;
311 break;
312 }
313 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
322 break; 314 break;
323 315
324 case MTD_FILE_MODE_RAW: 316 case MTD_MODE_RAW:
325 { 317 {
326 struct mtd_oob_ops ops; 318 struct mtd_oob_ops ops;
327 319
328 ops.mode = MTD_OPS_RAW; 320 ops.mode = MTD_OOB_RAW;
329 ops.datbuf = kbuf; 321 ops.datbuf = kbuf;
330 ops.oobbuf = NULL; 322 ops.oobbuf = NULL;
331 ops.ooboffs = 0; 323 ops.ooboffs = 0;
332 ops.len = len; 324 ops.len = len;
333 325
334 ret = mtd_write_oob(mtd, *ppos, &ops); 326 ret = mtd->write_oob(mtd, *ppos, &ops);
335 retlen = ops.retlen; 327 retlen = ops.retlen;
336 break; 328 break;
337 } 329 }
338 330
339 default: 331 default:
340 ret = mtd_write(mtd, *ppos, len, &retlen, kbuf); 332 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
341 } 333 }
342 if (!ret) { 334 if (!ret) {
343 *ppos += retlen; 335 *ppos += retlen;
@@ -353,7 +345,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
353 345
354 kfree(kbuf); 346 kfree(kbuf);
355 return total_retlen; 347 return total_retlen;
356} /* mtdchar_write */ 348} /* mtd_write */
357 349
358/*====================================================================== 350/*======================================================================
359 351
@@ -369,22 +361,20 @@ static void mtdchar_erase_callback (struct erase_info *instr)
369static int otp_select_filemode(struct mtd_file_info *mfi, int mode) 361static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
370{ 362{
371 struct mtd_info *mtd = mfi->mtd; 363 struct mtd_info *mtd = mfi->mtd;
372 size_t retlen;
373 int ret = 0; 364 int ret = 0;
374 365
375 /*
376 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
377 * operations are supported.
378 */
379 if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
380 return -EOPNOTSUPP;
381
382 switch (mode) { 366 switch (mode) {
383 case MTD_OTP_FACTORY: 367 case MTD_OTP_FACTORY:
384 mfi->mode = MTD_FILE_MODE_OTP_FACTORY; 368 if (!mtd->read_fact_prot_reg)
369 ret = -EOPNOTSUPP;
370 else
371 mfi->mode = MTD_MODE_OTP_FACTORY;
385 break; 372 break;
386 case MTD_OTP_USER: 373 case MTD_OTP_USER:
387 mfi->mode = MTD_FILE_MODE_OTP_USER; 374 if (!mtd->read_fact_prot_reg)
375 ret = -EOPNOTSUPP;
376 else
377 mfi->mode = MTD_MODE_OTP_USER;
388 break; 378 break;
389 default: 379 default:
390 ret = -EINVAL; 380 ret = -EINVAL;
@@ -397,11 +387,10 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
397# define otp_select_filemode(f,m) -EOPNOTSUPP 387# define otp_select_filemode(f,m) -EOPNOTSUPP
398#endif 388#endif
399 389
400static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, 390static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
401 uint64_t start, uint32_t length, void __user *ptr, 391 uint64_t start, uint32_t length, void __user *ptr,
402 uint32_t __user *retp) 392 uint32_t __user *retp)
403{ 393{
404 struct mtd_file_info *mfi = file->private_data;
405 struct mtd_oob_ops ops; 394 struct mtd_oob_ops ops;
406 uint32_t retlen; 395 uint32_t retlen;
407 int ret = 0; 396 int ret = 0;
@@ -412,7 +401,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
412 if (length > 4096) 401 if (length > 4096)
413 return -EINVAL; 402 return -EINVAL;
414 403
415 if (!mtd->_write_oob) 404 if (!mtd->write_oob)
416 ret = -EOPNOTSUPP; 405 ret = -EOPNOTSUPP;
417 else 406 else
418 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; 407 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
@@ -421,10 +410,9 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
421 return ret; 410 return ret;
422 411
423 ops.ooblen = length; 412 ops.ooblen = length;
424 ops.ooboffs = start & (mtd->writesize - 1); 413 ops.ooboffs = start & (mtd->oobsize - 1);
425 ops.datbuf = NULL; 414 ops.datbuf = NULL;
426 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW : 415 ops.mode = MTD_OOB_PLACE;
427 MTD_OPS_PLACE_OOB;
428 416
429 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 417 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
430 return -EINVAL; 418 return -EINVAL;
@@ -433,8 +421,8 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
433 if (IS_ERR(ops.oobbuf)) 421 if (IS_ERR(ops.oobbuf))
434 return PTR_ERR(ops.oobbuf); 422 return PTR_ERR(ops.oobbuf);
435 423
436 start &= ~((uint64_t)mtd->writesize - 1); 424 start &= ~((uint64_t)mtd->oobsize - 1);
437 ret = mtd_write_oob(mtd, start, &ops); 425 ret = mtd->write_oob(mtd, start, &ops);
438 426
439 if (ops.oobretlen > 0xFFFFFFFFU) 427 if (ops.oobretlen > 0xFFFFFFFFU)
440 ret = -EOVERFLOW; 428 ret = -EOVERFLOW;
@@ -446,25 +434,27 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
446 return ret; 434 return ret;
447} 435}
448 436
449static int mtdchar_readoob(struct file *file, struct mtd_info *mtd, 437static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
450 uint64_t start, uint32_t length, void __user *ptr, 438 uint32_t length, void __user *ptr, uint32_t __user *retp)
451 uint32_t __user *retp)
452{ 439{
453 struct mtd_file_info *mfi = file->private_data;
454 struct mtd_oob_ops ops; 440 struct mtd_oob_ops ops;
455 int ret = 0; 441 int ret = 0;
456 442
457 if (length > 4096) 443 if (length > 4096)
458 return -EINVAL; 444 return -EINVAL;
459 445
460 if (!access_ok(VERIFY_WRITE, ptr, length)) 446 if (!mtd->read_oob)
461 return -EFAULT; 447 ret = -EOPNOTSUPP;
448 else
449 ret = access_ok(VERIFY_WRITE, ptr,
450 length) ? 0 : -EFAULT;
451 if (ret)
452 return ret;
462 453
463 ops.ooblen = length; 454 ops.ooblen = length;
464 ops.ooboffs = start & (mtd->writesize - 1); 455 ops.ooboffs = start & (mtd->oobsize - 1);
465 ops.datbuf = NULL; 456 ops.datbuf = NULL;
466 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW : 457 ops.mode = MTD_OOB_PLACE;
467 MTD_OPS_PLACE_OOB;
468 458
469 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 459 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
470 return -EINVAL; 460 return -EINVAL;
@@ -473,8 +463,8 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
473 if (!ops.oobbuf) 463 if (!ops.oobbuf)
474 return -ENOMEM; 464 return -ENOMEM;
475 465
476 start &= ~((uint64_t)mtd->writesize - 1); 466 start &= ~((uint64_t)mtd->oobsize - 1);
477 ret = mtd_read_oob(mtd, start, &ops); 467 ret = mtd->read_oob(mtd, start, &ops);
478 468
479 if (put_user(ops.oobretlen, retp)) 469 if (put_user(ops.oobretlen, retp))
480 ret = -EFAULT; 470 ret = -EFAULT;
@@ -483,29 +473,13 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
483 ret = -EFAULT; 473 ret = -EFAULT;
484 474
485 kfree(ops.oobbuf); 475 kfree(ops.oobbuf);
486
487 /*
488 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
489 * data. For our userspace tools it is important to dump areas
490 * with ECC errors!
491 * For kernel internal usage it also might return -EUCLEAN
492 * to signal the caller that a bitflip has occured and has
493 * been corrected by the ECC algorithm.
494 *
495 * Note: currently the standard NAND function, nand_read_oob_std,
496 * does not calculate ECC for the OOB area, so do not rely on
497 * this behavior unless you have replaced it with your own.
498 */
499 if (mtd_is_bitflip_or_eccerr(ret))
500 return 0;
501
502 return ret; 476 return ret;
503} 477}
504 478
505/* 479/*
506 * Copies (and truncates, if necessary) data from the larger struct, 480 * Copies (and truncates, if necessary) data from the larger struct,
507 * nand_ecclayout, to the smaller, deprecated layout struct, 481 * nand_ecclayout, to the smaller, deprecated layout struct,
508 * nand_ecclayout_user. This is necessary only to support the deprecated 482 * nand_ecclayout_user. This is necessary only to suppport the deprecated
509 * API ioctl ECCGETLAYOUT while allowing all new functionality to use 483 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
510 * nand_ecclayout flexibly (i.e. the struct may change size in new 484 * nand_ecclayout flexibly (i.e. the struct may change size in new
511 * releases without requiring major rewrites). 485 * releases without requiring major rewrites).
@@ -535,7 +509,7 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
535 return 0; 509 return 0;
536} 510}
537 511
538static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, 512static int mtd_blkpg_ioctl(struct mtd_info *mtd,
539 struct blkpg_ioctl_arg __user *arg) 513 struct blkpg_ioctl_arg __user *arg)
540{ 514{
541 struct blkpg_ioctl_arg a; 515 struct blkpg_ioctl_arg a;
@@ -571,56 +545,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
571 } 545 }
572} 546}
573 547
574static int mtdchar_write_ioctl(struct mtd_info *mtd, 548static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
575 struct mtd_write_req __user *argp)
576{
577 struct mtd_write_req req;
578 struct mtd_oob_ops ops;
579 void __user *usr_data, *usr_oob;
580 int ret;
581
582 if (copy_from_user(&req, argp, sizeof(req)) ||
583 !access_ok(VERIFY_READ, req.usr_data, req.len) ||
584 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
585 return -EFAULT;
586 if (!mtd->_write_oob)
587 return -EOPNOTSUPP;
588
589 ops.mode = req.mode;
590 ops.len = (size_t)req.len;
591 ops.ooblen = (size_t)req.ooblen;
592 ops.ooboffs = 0;
593
594 usr_data = (void __user *)(uintptr_t)req.usr_data;
595 usr_oob = (void __user *)(uintptr_t)req.usr_oob;
596
597 if (req.usr_data) {
598 ops.datbuf = memdup_user(usr_data, ops.len);
599 if (IS_ERR(ops.datbuf))
600 return PTR_ERR(ops.datbuf);
601 } else {
602 ops.datbuf = NULL;
603 }
604
605 if (req.usr_oob) {
606 ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
607 if (IS_ERR(ops.oobbuf)) {
608 kfree(ops.datbuf);
609 return PTR_ERR(ops.oobbuf);
610 }
611 } else {
612 ops.oobbuf = NULL;
613 }
614
615 ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
616
617 kfree(ops.datbuf);
618 kfree(ops.oobbuf);
619
620 return ret;
621}
622
623static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
624{ 549{
625 struct mtd_file_info *mfi = file->private_data; 550 struct mtd_file_info *mfi = file->private_data;
626 struct mtd_info *mtd = mfi->mtd; 551 struct mtd_info *mtd = mfi->mtd;
@@ -629,7 +554,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
629 u_long size; 554 u_long size;
630 struct mtd_info_user info; 555 struct mtd_info_user info;
631 556
632 pr_debug("MTD_ioctl\n"); 557 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
633 558
634 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; 559 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
635 if (cmd & IOC_IN) { 560 if (cmd & IOC_IN) {
@@ -677,8 +602,8 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
677 info.erasesize = mtd->erasesize; 602 info.erasesize = mtd->erasesize;
678 info.writesize = mtd->writesize; 603 info.writesize = mtd->writesize;
679 info.oobsize = mtd->oobsize; 604 info.oobsize = mtd->oobsize;
680 /* The below field is obsolete */ 605 /* The below fields are obsolete */
681 info.padding = 0; 606 info.ecctype = -1;
682 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) 607 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
683 return -EFAULT; 608 return -EFAULT;
684 break; 609 break;
@@ -734,7 +659,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
734 wq_head is no longer there when the 659 wq_head is no longer there when the
735 callback routine tries to wake us up. 660 callback routine tries to wake us up.
736 */ 661 */
737 ret = mtd_erase(mtd, erase); 662 ret = mtd->erase(mtd, erase);
738 if (!ret) { 663 if (!ret) {
739 set_current_state(TASK_UNINTERRUPTIBLE); 664 set_current_state(TASK_UNINTERRUPTIBLE);
740 add_wait_queue(&waitq, &wait); 665 add_wait_queue(&waitq, &wait);
@@ -760,7 +685,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
760 if (copy_from_user(&buf, argp, sizeof(buf))) 685 if (copy_from_user(&buf, argp, sizeof(buf)))
761 ret = -EFAULT; 686 ret = -EFAULT;
762 else 687 else
763 ret = mtdchar_writeoob(file, mtd, buf.start, buf.length, 688 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
764 buf.ptr, &buf_user->length); 689 buf.ptr, &buf_user->length);
765 break; 690 break;
766 } 691 }
@@ -774,7 +699,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
774 if (copy_from_user(&buf, argp, sizeof(buf))) 699 if (copy_from_user(&buf, argp, sizeof(buf)))
775 ret = -EFAULT; 700 ret = -EFAULT;
776 else 701 else
777 ret = mtdchar_readoob(file, mtd, buf.start, buf.length, 702 ret = mtd_do_readoob(mtd, buf.start, buf.length,
778 buf.ptr, &buf_user->start); 703 buf.ptr, &buf_user->start);
779 break; 704 break;
780 } 705 }
@@ -787,7 +712,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
787 if (copy_from_user(&buf, argp, sizeof(buf))) 712 if (copy_from_user(&buf, argp, sizeof(buf)))
788 ret = -EFAULT; 713 ret = -EFAULT;
789 else 714 else
790 ret = mtdchar_writeoob(file, mtd, buf.start, buf.length, 715 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
791 (void __user *)(uintptr_t)buf.usr_ptr, 716 (void __user *)(uintptr_t)buf.usr_ptr,
792 &buf_user->length); 717 &buf_user->length);
793 break; 718 break;
@@ -801,19 +726,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
801 if (copy_from_user(&buf, argp, sizeof(buf))) 726 if (copy_from_user(&buf, argp, sizeof(buf)))
802 ret = -EFAULT; 727 ret = -EFAULT;
803 else 728 else
804 ret = mtdchar_readoob(file, mtd, buf.start, buf.length, 729 ret = mtd_do_readoob(mtd, buf.start, buf.length,
805 (void __user *)(uintptr_t)buf.usr_ptr, 730 (void __user *)(uintptr_t)buf.usr_ptr,
806 &buf_user->length); 731 &buf_user->length);
807 break; 732 break;
808 } 733 }
809 734
810 case MEMWRITE:
811 {
812 ret = mtdchar_write_ioctl(mtd,
813 (struct mtd_write_req __user *)arg);
814 break;
815 }
816
817 case MEMLOCK: 735 case MEMLOCK:
818 { 736 {
819 struct erase_info_user einfo; 737 struct erase_info_user einfo;
@@ -821,7 +739,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
821 if (copy_from_user(&einfo, argp, sizeof(einfo))) 739 if (copy_from_user(&einfo, argp, sizeof(einfo)))
822 return -EFAULT; 740 return -EFAULT;
823 741
824 ret = mtd_lock(mtd, einfo.start, einfo.length); 742 if (!mtd->lock)
743 ret = -EOPNOTSUPP;
744 else
745 ret = mtd->lock(mtd, einfo.start, einfo.length);
825 break; 746 break;
826 } 747 }
827 748
@@ -832,7 +753,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
832 if (copy_from_user(&einfo, argp, sizeof(einfo))) 753 if (copy_from_user(&einfo, argp, sizeof(einfo)))
833 return -EFAULT; 754 return -EFAULT;
834 755
835 ret = mtd_unlock(mtd, einfo.start, einfo.length); 756 if (!mtd->unlock)
757 ret = -EOPNOTSUPP;
758 else
759 ret = mtd->unlock(mtd, einfo.start, einfo.length);
836 break; 760 break;
837 } 761 }
838 762
@@ -843,7 +767,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
843 if (copy_from_user(&einfo, argp, sizeof(einfo))) 767 if (copy_from_user(&einfo, argp, sizeof(einfo)))
844 return -EFAULT; 768 return -EFAULT;
845 769
846 ret = mtd_is_locked(mtd, einfo.start, einfo.length); 770 if (!mtd->is_locked)
771 ret = -EOPNOTSUPP;
772 else
773 ret = mtd->is_locked(mtd, einfo.start, einfo.length);
847 break; 774 break;
848 } 775 }
849 776
@@ -874,7 +801,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
874 801
875 if (copy_from_user(&offs, argp, sizeof(loff_t))) 802 if (copy_from_user(&offs, argp, sizeof(loff_t)))
876 return -EFAULT; 803 return -EFAULT;
877 return mtd_block_isbad(mtd, offs); 804 if (!mtd->block_isbad)
805 ret = -EOPNOTSUPP;
806 else
807 return mtd->block_isbad(mtd, offs);
878 break; 808 break;
879 } 809 }
880 810
@@ -884,7 +814,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
884 814
885 if (copy_from_user(&offs, argp, sizeof(loff_t))) 815 if (copy_from_user(&offs, argp, sizeof(loff_t)))
886 return -EFAULT; 816 return -EFAULT;
887 return mtd_block_markbad(mtd, offs); 817 if (!mtd->block_markbad)
818 ret = -EOPNOTSUPP;
819 else
820 return mtd->block_markbad(mtd, offs);
888 break; 821 break;
889 } 822 }
890 823
@@ -895,7 +828,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
895 if (copy_from_user(&mode, argp, sizeof(int))) 828 if (copy_from_user(&mode, argp, sizeof(int)))
896 return -EFAULT; 829 return -EFAULT;
897 830
898 mfi->mode = MTD_FILE_MODE_NORMAL; 831 mfi->mode = MTD_MODE_NORMAL;
899 832
900 ret = otp_select_filemode(mfi, mode); 833 ret = otp_select_filemode(mfi, mode);
901 834
@@ -909,15 +842,17 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
909 struct otp_info *buf = kmalloc(4096, GFP_KERNEL); 842 struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
910 if (!buf) 843 if (!buf)
911 return -ENOMEM; 844 return -ENOMEM;
845 ret = -EOPNOTSUPP;
912 switch (mfi->mode) { 846 switch (mfi->mode) {
913 case MTD_FILE_MODE_OTP_FACTORY: 847 case MTD_MODE_OTP_FACTORY:
914 ret = mtd_get_fact_prot_info(mtd, buf, 4096); 848 if (mtd->get_fact_prot_info)
849 ret = mtd->get_fact_prot_info(mtd, buf, 4096);
915 break; 850 break;
916 case MTD_FILE_MODE_OTP_USER: 851 case MTD_MODE_OTP_USER:
917 ret = mtd_get_user_prot_info(mtd, buf, 4096); 852 if (mtd->get_user_prot_info)
853 ret = mtd->get_user_prot_info(mtd, buf, 4096);
918 break; 854 break;
919 default: 855 default:
920 ret = -EINVAL;
921 break; 856 break;
922 } 857 }
923 if (ret >= 0) { 858 if (ret >= 0) {
@@ -937,16 +872,18 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
937 { 872 {
938 struct otp_info oinfo; 873 struct otp_info oinfo;
939 874
940 if (mfi->mode != MTD_FILE_MODE_OTP_USER) 875 if (mfi->mode != MTD_MODE_OTP_USER)
941 return -EINVAL; 876 return -EINVAL;
942 if (copy_from_user(&oinfo, argp, sizeof(oinfo))) 877 if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
943 return -EFAULT; 878 return -EFAULT;
944 ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length); 879 if (!mtd->lock_user_prot_reg)
880 return -EOPNOTSUPP;
881 ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
945 break; 882 break;
946 } 883 }
947#endif 884#endif
948 885
949 /* This ioctl is being deprecated - it truncates the ECC layout */ 886 /* This ioctl is being deprecated - it truncates the ecc layout */
950 case ECCGETLAYOUT: 887 case ECCGETLAYOUT:
951 { 888 {
952 struct nand_ecclayout_user *usrlay; 889 struct nand_ecclayout_user *usrlay;
@@ -979,17 +916,17 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
979 mfi->mode = 0; 916 mfi->mode = 0;
980 917
981 switch(arg) { 918 switch(arg) {
982 case MTD_FILE_MODE_OTP_FACTORY: 919 case MTD_MODE_OTP_FACTORY:
983 case MTD_FILE_MODE_OTP_USER: 920 case MTD_MODE_OTP_USER:
984 ret = otp_select_filemode(mfi, arg); 921 ret = otp_select_filemode(mfi, arg);
985 break; 922 break;
986 923
987 case MTD_FILE_MODE_RAW: 924 case MTD_MODE_RAW:
988 if (!mtd_has_oob(mtd)) 925 if (!mtd->read_oob || !mtd->write_oob)
989 return -EOPNOTSUPP; 926 return -EOPNOTSUPP;
990 mfi->mode = arg; 927 mfi->mode = arg;
991 928
992 case MTD_FILE_MODE_NORMAL: 929 case MTD_MODE_NORMAL:
993 break; 930 break;
994 default: 931 default:
995 ret = -EINVAL; 932 ret = -EINVAL;
@@ -1000,7 +937,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
1000 937
1001 case BLKPG: 938 case BLKPG:
1002 { 939 {
1003 ret = mtdchar_blkpg_ioctl(mtd, 940 ret = mtd_blkpg_ioctl(mtd,
1004 (struct blkpg_ioctl_arg __user *)arg); 941 (struct blkpg_ioctl_arg __user *)arg);
1005 break; 942 break;
1006 } 943 }
@@ -1019,12 +956,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
1019 return ret; 956 return ret;
1020} /* memory_ioctl */ 957} /* memory_ioctl */
1021 958
1022static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg) 959static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
1023{ 960{
1024 int ret; 961 int ret;
1025 962
1026 mutex_lock(&mtd_mutex); 963 mutex_lock(&mtd_mutex);
1027 ret = mtdchar_ioctl(file, cmd, arg); 964 ret = mtd_ioctl(file, cmd, arg);
1028 mutex_unlock(&mtd_mutex); 965 mutex_unlock(&mtd_mutex);
1029 966
1030 return ret; 967 return ret;
@@ -1041,7 +978,7 @@ struct mtd_oob_buf32 {
1041#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32) 978#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
1042#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32) 979#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
1043 980
1044static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, 981static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
1045 unsigned long arg) 982 unsigned long arg)
1046{ 983{
1047 struct mtd_file_info *mfi = file->private_data; 984 struct mtd_file_info *mfi = file->private_data;
@@ -1060,7 +997,7 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1060 if (copy_from_user(&buf, argp, sizeof(buf))) 997 if (copy_from_user(&buf, argp, sizeof(buf)))
1061 ret = -EFAULT; 998 ret = -EFAULT;
1062 else 999 else
1063 ret = mtdchar_writeoob(file, mtd, buf.start, 1000 ret = mtd_do_writeoob(file, mtd, buf.start,
1064 buf.length, compat_ptr(buf.ptr), 1001 buf.length, compat_ptr(buf.ptr),
1065 &buf_user->length); 1002 &buf_user->length);
1066 break; 1003 break;
@@ -1075,13 +1012,13 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1075 if (copy_from_user(&buf, argp, sizeof(buf))) 1012 if (copy_from_user(&buf, argp, sizeof(buf)))
1076 ret = -EFAULT; 1013 ret = -EFAULT;
1077 else 1014 else
1078 ret = mtdchar_readoob(file, mtd, buf.start, 1015 ret = mtd_do_readoob(mtd, buf.start,
1079 buf.length, compat_ptr(buf.ptr), 1016 buf.length, compat_ptr(buf.ptr),
1080 &buf_user->start); 1017 &buf_user->start);
1081 break; 1018 break;
1082 } 1019 }
1083 default: 1020 default:
1084 ret = mtdchar_ioctl(file, cmd, (unsigned long)argp); 1021 ret = mtd_ioctl(file, cmd, (unsigned long)argp);
1085 } 1022 }
1086 1023
1087 mutex_unlock(&mtd_mutex); 1024 mutex_unlock(&mtd_mutex);
@@ -1097,7 +1034,7 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1097 * mappings) 1034 * mappings)
1098 */ 1035 */
1099#ifndef CONFIG_MMU 1036#ifndef CONFIG_MMU
1100static unsigned long mtdchar_get_unmapped_area(struct file *file, 1037static unsigned long mtd_get_unmapped_area(struct file *file,
1101 unsigned long addr, 1038 unsigned long addr,
1102 unsigned long len, 1039 unsigned long len,
1103 unsigned long pgoff, 1040 unsigned long pgoff,
@@ -1105,88 +1042,52 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
1105{ 1042{
1106 struct mtd_file_info *mfi = file->private_data; 1043 struct mtd_file_info *mfi = file->private_data;
1107 struct mtd_info *mtd = mfi->mtd; 1044 struct mtd_info *mtd = mfi->mtd;
1108 unsigned long offset;
1109 int ret;
1110 1045
1111 if (addr != 0) 1046 if (mtd->get_unmapped_area) {
1112 return (unsigned long) -EINVAL; 1047 unsigned long offset;
1113 1048
1114 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) 1049 if (addr != 0)
1115 return (unsigned long) -EINVAL; 1050 return (unsigned long) -EINVAL;
1116 1051
1117 offset = pgoff << PAGE_SHIFT; 1052 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
1118 if (offset > mtd->size - len) 1053 return (unsigned long) -EINVAL;
1119 return (unsigned long) -EINVAL;
1120 1054
1121 ret = mtd_get_unmapped_area(mtd, len, offset, flags); 1055 offset = pgoff << PAGE_SHIFT;
1122 return ret == -EOPNOTSUPP ? -ENOSYS : ret; 1056 if (offset > mtd->size - len)
1123} 1057 return (unsigned long) -EINVAL;
1124#endif
1125 1058
1126static inline unsigned long get_vm_size(struct vm_area_struct *vma) 1059 return mtd->get_unmapped_area(mtd, len, offset, flags);
1127{ 1060 }
1128 return vma->vm_end - vma->vm_start;
1129}
1130 1061
1131static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) 1062 /* can't map directly */
1132{ 1063 return (unsigned long) -ENOSYS;
1133 return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
1134}
1135
1136/*
1137 * Set a new vm offset.
1138 *
1139 * Verify that the incoming offset really works as a page offset,
1140 * and that the offset and size fit in a resource_size_t.
1141 */
1142static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
1143{
1144 pgoff_t pgoff = off >> PAGE_SHIFT;
1145 if (off != (resource_size_t) pgoff << PAGE_SHIFT)
1146 return -EINVAL;
1147 if (off + get_vm_size(vma) - 1 < off)
1148 return -EINVAL;
1149 vma->vm_pgoff = pgoff;
1150 return 0;
1151} 1064}
1065#endif
1152 1066
1153/* 1067/*
1154 * set up a mapping for shared memory segments 1068 * set up a mapping for shared memory segments
1155 */ 1069 */
1156static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) 1070static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
1157{ 1071{
1158#ifdef CONFIG_MMU 1072#ifdef CONFIG_MMU
1159 struct mtd_file_info *mfi = file->private_data; 1073 struct mtd_file_info *mfi = file->private_data;
1160 struct mtd_info *mtd = mfi->mtd; 1074 struct mtd_info *mtd = mfi->mtd;
1161 struct map_info *map = mtd->priv; 1075 struct map_info *map = mtd->priv;
1162 resource_size_t start, off; 1076 unsigned long start;
1163 unsigned long len, vma_len; 1077 unsigned long off;
1164 1078 u32 len;
1165 /* This is broken because it assumes the MTD device is map-based 1079
1166 and that mtd->priv is a valid struct map_info. It should be 1080 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
1167 replaced with something that uses the mtd_get_unmapped_area() 1081 off = vma->vm_pgoff << PAGE_SHIFT;
1168 operation properly. */
1169 if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
1170 off = get_vm_offset(vma);
1171 start = map->phys; 1082 start = map->phys;
1172 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); 1083 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
1173 start &= PAGE_MASK; 1084 start &= PAGE_MASK;
1174 vma_len = get_vm_size(vma); 1085 if ((vma->vm_end - vma->vm_start + off) > len)
1175
1176 /* Overflow in off+len? */
1177 if (vma_len + off < off)
1178 return -EINVAL;
1179 /* Does it fit in the mapping? */
1180 if (vma_len + off > len)
1181 return -EINVAL; 1086 return -EINVAL;
1182 1087
1183 off += start; 1088 off += start;
1184 /* Did that overflow? */ 1089 vma->vm_pgoff = off >> PAGE_SHIFT;
1185 if (off < start) 1090 vma->vm_flags |= VM_IO | VM_RESERVED;
1186 return -EINVAL;
1187 if (set_vm_offset(vma, off) < 0)
1188 return -EINVAL;
1189 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1190 1091
1191#ifdef pgprot_noncached 1092#ifdef pgprot_noncached
1192 if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) 1093 if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
@@ -1207,30 +1108,25 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1207 1108
1208static const struct file_operations mtd_fops = { 1109static const struct file_operations mtd_fops = {
1209 .owner = THIS_MODULE, 1110 .owner = THIS_MODULE,
1210 .llseek = mtdchar_lseek, 1111 .llseek = mtd_lseek,
1211 .read = mtdchar_read, 1112 .read = mtd_read,
1212 .write = mtdchar_write, 1113 .write = mtd_write,
1213 .unlocked_ioctl = mtdchar_unlocked_ioctl, 1114 .unlocked_ioctl = mtd_unlocked_ioctl,
1214#ifdef CONFIG_COMPAT 1115#ifdef CONFIG_COMPAT
1215 .compat_ioctl = mtdchar_compat_ioctl, 1116 .compat_ioctl = mtd_compat_ioctl,
1216#endif 1117#endif
1217 .open = mtdchar_open, 1118 .open = mtd_open,
1218 .release = mtdchar_close, 1119 .release = mtd_close,
1219 .mmap = mtdchar_mmap, 1120 .mmap = mtd_mmap,
1220#ifndef CONFIG_MMU 1121#ifndef CONFIG_MMU
1221 .get_unmapped_area = mtdchar_get_unmapped_area, 1122 .get_unmapped_area = mtd_get_unmapped_area,
1222#endif 1123#endif
1223}; 1124};
1224 1125
1225static const struct super_operations mtd_ops = {
1226 .drop_inode = generic_delete_inode,
1227 .statfs = simple_statfs,
1228};
1229
1230static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, 1126static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
1231 int flags, const char *dev_name, void *data) 1127 int flags, const char *dev_name, void *data)
1232{ 1128{
1233 return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC); 1129 return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
1234} 1130}
1235 1131
1236static struct file_system_type mtd_inodefs_type = { 1132static struct file_system_type mtd_inodefs_type = {
@@ -1239,6 +1135,26 @@ static struct file_system_type mtd_inodefs_type = {
1239 .kill_sb = kill_anon_super, 1135 .kill_sb = kill_anon_super,
1240}; 1136};
1241 1137
1138static void mtdchar_notify_add(struct mtd_info *mtd)
1139{
1140}
1141
1142static void mtdchar_notify_remove(struct mtd_info *mtd)
1143{
1144 struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
1145
1146 if (mtd_ino) {
1147 /* Destroy the inode if it exists */
1148 mtd_ino->i_nlink = 0;
1149 iput(mtd_ino);
1150 }
1151}
1152
1153static struct mtd_notifier mtdchar_notifier = {
1154 .add = mtdchar_notify_add,
1155 .remove = mtdchar_notify_remove,
1156};
1157
1242static int __init init_mtdchar(void) 1158static int __init init_mtdchar(void)
1243{ 1159{
1244 int ret; 1160 int ret;
@@ -1256,8 +1172,19 @@ static int __init init_mtdchar(void)
1256 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); 1172 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
1257 goto err_unregister_chdev; 1173 goto err_unregister_chdev;
1258 } 1174 }
1175
1176 mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
1177 if (IS_ERR(mtd_inode_mnt)) {
1178 ret = PTR_ERR(mtd_inode_mnt);
1179 pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
1180 goto err_unregister_filesystem;
1181 }
1182 register_mtd_user(&mtdchar_notifier);
1183
1259 return ret; 1184 return ret;
1260 1185
1186err_unregister_filesystem:
1187 unregister_filesystem(&mtd_inodefs_type);
1261err_unregister_chdev: 1188err_unregister_chdev:
1262 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1189 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1263 return ret; 1190 return ret;
@@ -1265,6 +1192,8 @@ err_unregister_chdev:
1265 1192
1266static void __exit cleanup_mtdchar(void) 1193static void __exit cleanup_mtdchar(void)
1267{ 1194{
1195 unregister_mtd_user(&mtdchar_notifier);
1196 kern_unmount(mtd_inode_mnt);
1268 unregister_filesystem(&mtd_inodefs_type); 1197 unregister_filesystem(&mtd_inodefs_type);
1269 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1198 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1270} 1199}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index b9000563b9f..e601672a530 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,6 +72,8 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
72 int ret = 0, err; 72 int ret = 0, err;
73 int i; 73 int i;
74 74
75 *retlen = 0;
76
75 for (i = 0; i < concat->num_subdev; i++) { 77 for (i = 0; i < concat->num_subdev; i++) {
76 struct mtd_info *subdev = concat->subdev[i]; 78 struct mtd_info *subdev = concat->subdev[i];
77 size_t size, retsize; 79 size_t size, retsize;
@@ -89,14 +91,14 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
89 /* Entire transaction goes into this subdev */ 91 /* Entire transaction goes into this subdev */
90 size = len; 92 size = len;
91 93
92 err = mtd_read(subdev, from, size, &retsize, buf); 94 err = subdev->read(subdev, from, size, &retsize, buf);
93 95
94 /* Save information about bitflips! */ 96 /* Save information about bitflips! */
95 if (unlikely(err)) { 97 if (unlikely(err)) {
96 if (mtd_is_eccerr(err)) { 98 if (err == -EBADMSG) {
97 mtd->ecc_stats.failed++; 99 mtd->ecc_stats.failed++;
98 ret = err; 100 ret = err;
99 } else if (mtd_is_bitflip(err)) { 101 } else if (err == -EUCLEAN) {
100 mtd->ecc_stats.corrected++; 102 mtd->ecc_stats.corrected++;
101 /* Do not overwrite -EBADMSG !! */ 103 /* Do not overwrite -EBADMSG !! */
102 if (!ret) 104 if (!ret)
@@ -124,6 +126,11 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
124 int err = -EINVAL; 126 int err = -EINVAL;
125 int i; 127 int i;
126 128
129 if (!(mtd->flags & MTD_WRITEABLE))
130 return -EROFS;
131
132 *retlen = 0;
133
127 for (i = 0; i < concat->num_subdev; i++) { 134 for (i = 0; i < concat->num_subdev; i++) {
128 struct mtd_info *subdev = concat->subdev[i]; 135 struct mtd_info *subdev = concat->subdev[i];
129 size_t size, retsize; 136 size_t size, retsize;
@@ -138,7 +145,11 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
138 else 145 else
139 size = len; 146 size = len;
140 147
141 err = mtd_write(subdev, to, size, &retsize, buf); 148 if (!(subdev->flags & MTD_WRITEABLE))
149 err = -EROFS;
150 else
151 err = subdev->write(subdev, to, size, &retsize, buf);
152
142 if (err) 153 if (err)
143 break; 154 break;
144 155
@@ -165,10 +176,19 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
165 int i; 176 int i;
166 int err = -EINVAL; 177 int err = -EINVAL;
167 178
179 if (!(mtd->flags & MTD_WRITEABLE))
180 return -EROFS;
181
182 *retlen = 0;
183
168 /* Calculate total length of data */ 184 /* Calculate total length of data */
169 for (i = 0; i < count; i++) 185 for (i = 0; i < count; i++)
170 total_len += vecs[i].iov_len; 186 total_len += vecs[i].iov_len;
171 187
188 /* Do not allow write past end of device */
189 if ((to + total_len) > mtd->size)
190 return -EINVAL;
191
172 /* Check alignment */ 192 /* Check alignment */
173 if (mtd->writesize > 1) { 193 if (mtd->writesize > 1) {
174 uint64_t __to = to; 194 uint64_t __to = to;
@@ -204,8 +224,11 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
204 old_iov_len = vecs_copy[entry_high].iov_len; 224 old_iov_len = vecs_copy[entry_high].iov_len;
205 vecs_copy[entry_high].iov_len = size; 225 vecs_copy[entry_high].iov_len = size;
206 226
207 err = mtd_writev(subdev, &vecs_copy[entry_low], 227 if (!(subdev->flags & MTD_WRITEABLE))
208 entry_high - entry_low + 1, to, &retsize); 228 err = -EROFS;
229 else
230 err = subdev->writev(subdev, &vecs_copy[entry_low],
231 entry_high - entry_low + 1, to, &retsize);
209 232
210 vecs_copy[entry_high].iov_len = old_iov_len - size; 233 vecs_copy[entry_high].iov_len = old_iov_len - size;
211 vecs_copy[entry_high].iov_base += size; 234 vecs_copy[entry_high].iov_base += size;
@@ -250,16 +273,16 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
250 if (from + devops.len > subdev->size) 273 if (from + devops.len > subdev->size)
251 devops.len = subdev->size - from; 274 devops.len = subdev->size - from;
252 275
253 err = mtd_read_oob(subdev, from, &devops); 276 err = subdev->read_oob(subdev, from, &devops);
254 ops->retlen += devops.retlen; 277 ops->retlen += devops.retlen;
255 ops->oobretlen += devops.oobretlen; 278 ops->oobretlen += devops.oobretlen;
256 279
257 /* Save information about bitflips! */ 280 /* Save information about bitflips! */
258 if (unlikely(err)) { 281 if (unlikely(err)) {
259 if (mtd_is_eccerr(err)) { 282 if (err == -EBADMSG) {
260 mtd->ecc_stats.failed++; 283 mtd->ecc_stats.failed++;
261 ret = err; 284 ret = err;
262 } else if (mtd_is_bitflip(err)) { 285 } else if (err == -EUCLEAN) {
263 mtd->ecc_stats.corrected++; 286 mtd->ecc_stats.corrected++;
264 /* Do not overwrite -EBADMSG !! */ 287 /* Do not overwrite -EBADMSG !! */
265 if (!ret) 288 if (!ret)
@@ -310,7 +333,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
310 if (to + devops.len > subdev->size) 333 if (to + devops.len > subdev->size)
311 devops.len = subdev->size - to; 334 devops.len = subdev->size - to;
312 335
313 err = mtd_write_oob(subdev, to, &devops); 336 err = subdev->write_oob(subdev, to, &devops);
314 ops->retlen += devops.oobretlen; 337 ops->retlen += devops.oobretlen;
315 if (err) 338 if (err)
316 return err; 339 return err;
@@ -356,7 +379,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
356 * FIXME: Allow INTERRUPTIBLE. Which means 379 * FIXME: Allow INTERRUPTIBLE. Which means
357 * not having the wait_queue head on the stack. 380 * not having the wait_queue head on the stack.
358 */ 381 */
359 err = mtd_erase(mtd, erase); 382 err = mtd->erase(mtd, erase);
360 if (!err) { 383 if (!err) {
361 set_current_state(TASK_UNINTERRUPTIBLE); 384 set_current_state(TASK_UNINTERRUPTIBLE);
362 add_wait_queue(&waitq, &wait); 385 add_wait_queue(&waitq, &wait);
@@ -379,6 +402,15 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
379 uint64_t length, offset = 0; 402 uint64_t length, offset = 0;
380 struct erase_info *erase; 403 struct erase_info *erase;
381 404
405 if (!(mtd->flags & MTD_WRITEABLE))
406 return -EROFS;
407
408 if (instr->addr > concat->mtd.size)
409 return -EINVAL;
410
411 if (instr->len + instr->addr > concat->mtd.size)
412 return -EINVAL;
413
382 /* 414 /*
383 * Check for proper erase block alignment of the to-be-erased area. 415 * Check for proper erase block alignment of the to-be-erased area.
384 * It is easier to do this based on the super device's erase 416 * It is easier to do this based on the super device's erase
@@ -426,6 +458,8 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
426 return -EINVAL; 458 return -EINVAL;
427 } 459 }
428 460
461 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
462
429 /* make a local copy of instr to avoid modifying the caller's struct */ 463 /* make a local copy of instr to avoid modifying the caller's struct */
430 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); 464 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
431 465
@@ -464,6 +498,10 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
464 else 498 else
465 erase->len = length; 499 erase->len = length;
466 500
501 if (!(subdev->flags & MTD_WRITEABLE)) {
502 err = -EROFS;
503 break;
504 }
467 length -= erase->len; 505 length -= erase->len;
468 if ((err = concat_dev_erase(subdev, erase))) { 506 if ((err = concat_dev_erase(subdev, erase))) {
469 /* sanity check: should never happen since 507 /* sanity check: should never happen since
@@ -499,6 +537,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
499 struct mtd_concat *concat = CONCAT(mtd); 537 struct mtd_concat *concat = CONCAT(mtd);
500 int i, err = -EINVAL; 538 int i, err = -EINVAL;
501 539
540 if ((len + ofs) > mtd->size)
541 return -EINVAL;
542
502 for (i = 0; i < concat->num_subdev; i++) { 543 for (i = 0; i < concat->num_subdev; i++) {
503 struct mtd_info *subdev = concat->subdev[i]; 544 struct mtd_info *subdev = concat->subdev[i];
504 uint64_t size; 545 uint64_t size;
@@ -513,9 +554,12 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
513 else 554 else
514 size = len; 555 size = len;
515 556
516 err = mtd_lock(subdev, ofs, size); 557 if (subdev->lock) {
517 if (err) 558 err = subdev->lock(subdev, ofs, size);
518 break; 559 if (err)
560 break;
561 } else
562 err = -EOPNOTSUPP;
519 563
520 len -= size; 564 len -= size;
521 if (len == 0) 565 if (len == 0)
@@ -533,6 +577,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
533 struct mtd_concat *concat = CONCAT(mtd); 577 struct mtd_concat *concat = CONCAT(mtd);
534 int i, err = 0; 578 int i, err = 0;
535 579
580 if ((len + ofs) > mtd->size)
581 return -EINVAL;
582
536 for (i = 0; i < concat->num_subdev; i++) { 583 for (i = 0; i < concat->num_subdev; i++) {
537 struct mtd_info *subdev = concat->subdev[i]; 584 struct mtd_info *subdev = concat->subdev[i];
538 uint64_t size; 585 uint64_t size;
@@ -547,9 +594,12 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
547 else 594 else
548 size = len; 595 size = len;
549 596
550 err = mtd_unlock(subdev, ofs, size); 597 if (subdev->unlock) {
551 if (err) 598 err = subdev->unlock(subdev, ofs, size);
552 break; 599 if (err)
600 break;
601 } else
602 err = -EOPNOTSUPP;
553 603
554 len -= size; 604 len -= size;
555 if (len == 0) 605 if (len == 0)
@@ -569,7 +619,7 @@ static void concat_sync(struct mtd_info *mtd)
569 619
570 for (i = 0; i < concat->num_subdev; i++) { 620 for (i = 0; i < concat->num_subdev; i++) {
571 struct mtd_info *subdev = concat->subdev[i]; 621 struct mtd_info *subdev = concat->subdev[i];
572 mtd_sync(subdev); 622 subdev->sync(subdev);
573 } 623 }
574} 624}
575 625
@@ -580,7 +630,7 @@ static int concat_suspend(struct mtd_info *mtd)
580 630
581 for (i = 0; i < concat->num_subdev; i++) { 631 for (i = 0; i < concat->num_subdev; i++) {
582 struct mtd_info *subdev = concat->subdev[i]; 632 struct mtd_info *subdev = concat->subdev[i];
583 if ((rc = mtd_suspend(subdev)) < 0) 633 if ((rc = subdev->suspend(subdev)) < 0)
584 return rc; 634 return rc;
585 } 635 }
586 return rc; 636 return rc;
@@ -593,7 +643,7 @@ static void concat_resume(struct mtd_info *mtd)
593 643
594 for (i = 0; i < concat->num_subdev; i++) { 644 for (i = 0; i < concat->num_subdev; i++) {
595 struct mtd_info *subdev = concat->subdev[i]; 645 struct mtd_info *subdev = concat->subdev[i];
596 mtd_resume(subdev); 646 subdev->resume(subdev);
597 } 647 }
598} 648}
599 649
@@ -602,9 +652,12 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
602 struct mtd_concat *concat = CONCAT(mtd); 652 struct mtd_concat *concat = CONCAT(mtd);
603 int i, res = 0; 653 int i, res = 0;
604 654
605 if (!mtd_can_have_bb(concat->subdev[0])) 655 if (!concat->subdev[0]->block_isbad)
606 return res; 656 return res;
607 657
658 if (ofs > mtd->size)
659 return -EINVAL;
660
608 for (i = 0; i < concat->num_subdev; i++) { 661 for (i = 0; i < concat->num_subdev; i++) {
609 struct mtd_info *subdev = concat->subdev[i]; 662 struct mtd_info *subdev = concat->subdev[i];
610 663
@@ -613,7 +666,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
613 continue; 666 continue;
614 } 667 }
615 668
616 res = mtd_block_isbad(subdev, ofs); 669 res = subdev->block_isbad(subdev, ofs);
617 break; 670 break;
618 } 671 }
619 672
@@ -625,6 +678,12 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
625 struct mtd_concat *concat = CONCAT(mtd); 678 struct mtd_concat *concat = CONCAT(mtd);
626 int i, err = -EINVAL; 679 int i, err = -EINVAL;
627 680
681 if (!concat->subdev[0]->block_markbad)
682 return 0;
683
684 if (ofs > mtd->size)
685 return -EINVAL;
686
628 for (i = 0; i < concat->num_subdev; i++) { 687 for (i = 0; i < concat->num_subdev; i++) {
629 struct mtd_info *subdev = concat->subdev[i]; 688 struct mtd_info *subdev = concat->subdev[i];
630 689
@@ -633,7 +692,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
633 continue; 692 continue;
634 } 693 }
635 694
636 err = mtd_block_markbad(subdev, ofs); 695 err = subdev->block_markbad(subdev, ofs);
637 if (!err) 696 if (!err)
638 mtd->ecc_stats.badblocks++; 697 mtd->ecc_stats.badblocks++;
639 break; 698 break;
@@ -662,7 +721,15 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
662 continue; 721 continue;
663 } 722 }
664 723
665 return mtd_get_unmapped_area(subdev, len, offset, flags); 724 /* we've found the subdev over which the mapping will reside */
725 if (offset + len > subdev->size)
726 return (unsigned long) -EINVAL;
727
728 if (subdev->get_unmapped_area)
729 return subdev->get_unmapped_area(subdev, len, offset,
730 flags);
731
732 break;
666 } 733 }
667 734
668 return (unsigned long) -ENOSYS; 735 return (unsigned long) -ENOSYS;
@@ -703,7 +770,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
703 770
704 /* 771 /*
705 * Set up the new "super" device's MTD object structure, check for 772 * Set up the new "super" device's MTD object structure, check for
706 * incompatibilities between the subdevices. 773 * incompatibilites between the subdevices.
707 */ 774 */
708 concat->mtd.type = subdev[0]->type; 775 concat->mtd.type = subdev[0]->type;
709 concat->mtd.flags = subdev[0]->flags; 776 concat->mtd.flags = subdev[0]->flags;
@@ -719,16 +786,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
719 concat->mtd.subpage_sft = subdev[0]->subpage_sft; 786 concat->mtd.subpage_sft = subdev[0]->subpage_sft;
720 concat->mtd.oobsize = subdev[0]->oobsize; 787 concat->mtd.oobsize = subdev[0]->oobsize;
721 concat->mtd.oobavail = subdev[0]->oobavail; 788 concat->mtd.oobavail = subdev[0]->oobavail;
722 if (subdev[0]->_writev) 789 if (subdev[0]->writev)
723 concat->mtd._writev = concat_writev; 790 concat->mtd.writev = concat_writev;
724 if (subdev[0]->_read_oob) 791 if (subdev[0]->read_oob)
725 concat->mtd._read_oob = concat_read_oob; 792 concat->mtd.read_oob = concat_read_oob;
726 if (subdev[0]->_write_oob) 793 if (subdev[0]->write_oob)
727 concat->mtd._write_oob = concat_write_oob; 794 concat->mtd.write_oob = concat_write_oob;
728 if (subdev[0]->_block_isbad) 795 if (subdev[0]->block_isbad)
729 concat->mtd._block_isbad = concat_block_isbad; 796 concat->mtd.block_isbad = concat_block_isbad;
730 if (subdev[0]->_block_markbad) 797 if (subdev[0]->block_markbad)
731 concat->mtd._block_markbad = concat_block_markbad; 798 concat->mtd.block_markbad = concat_block_markbad;
732 799
733 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; 800 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
734 801
@@ -775,8 +842,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
775 if (concat->mtd.writesize != subdev[i]->writesize || 842 if (concat->mtd.writesize != subdev[i]->writesize ||
776 concat->mtd.subpage_sft != subdev[i]->subpage_sft || 843 concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
777 concat->mtd.oobsize != subdev[i]->oobsize || 844 concat->mtd.oobsize != subdev[i]->oobsize ||
778 !concat->mtd._read_oob != !subdev[i]->_read_oob || 845 !concat->mtd.read_oob != !subdev[i]->read_oob ||
779 !concat->mtd._write_oob != !subdev[i]->_write_oob) { 846 !concat->mtd.write_oob != !subdev[i]->write_oob) {
780 kfree(concat); 847 kfree(concat);
781 printk("Incompatible OOB or ECC data on \"%s\"\n", 848 printk("Incompatible OOB or ECC data on \"%s\"\n",
782 subdev[i]->name); 849 subdev[i]->name);
@@ -791,15 +858,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
791 concat->num_subdev = num_devs; 858 concat->num_subdev = num_devs;
792 concat->mtd.name = name; 859 concat->mtd.name = name;
793 860
794 concat->mtd._erase = concat_erase; 861 concat->mtd.erase = concat_erase;
795 concat->mtd._read = concat_read; 862 concat->mtd.read = concat_read;
796 concat->mtd._write = concat_write; 863 concat->mtd.write = concat_write;
797 concat->mtd._sync = concat_sync; 864 concat->mtd.sync = concat_sync;
798 concat->mtd._lock = concat_lock; 865 concat->mtd.lock = concat_lock;
799 concat->mtd._unlock = concat_unlock; 866 concat->mtd.unlock = concat_unlock;
800 concat->mtd._suspend = concat_suspend; 867 concat->mtd.suspend = concat_suspend;
801 concat->mtd._resume = concat_resume; 868 concat->mtd.resume = concat_resume;
802 concat->mtd._get_unmapped_area = concat_get_unmapped_area; 869 concat->mtd.get_unmapped_area = concat_get_unmapped_area;
803 870
804 /* 871 /*
805 * Combine the erase block size info of the subdevices: 872 * Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ec794a72975..c510aff289a 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,8 +107,7 @@ static LIST_HEAD(mtd_notifiers);
107 */ 107 */
108static void mtd_release(struct device *dev) 108static void mtd_release(struct device *dev)
109{ 109{
110 struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev); 110 dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
111 dev_t index = MTD_DEVT(mtd->index);
112 111
113 /* remove /dev/mtdXro node if needed */ 112 /* remove /dev/mtdXro node if needed */
114 if (index) 113 if (index)
@@ -117,24 +116,27 @@ static void mtd_release(struct device *dev)
117 116
118static int mtd_cls_suspend(struct device *dev, pm_message_t state) 117static int mtd_cls_suspend(struct device *dev, pm_message_t state)
119{ 118{
120 struct mtd_info *mtd = dev_get_drvdata(dev); 119 struct mtd_info *mtd = dev_to_mtd(dev);
121 120
122 return mtd ? mtd_suspend(mtd) : 0; 121 if (mtd && mtd->suspend)
122 return mtd->suspend(mtd);
123 else
124 return 0;
123} 125}
124 126
125static int mtd_cls_resume(struct device *dev) 127static int mtd_cls_resume(struct device *dev)
126{ 128{
127 struct mtd_info *mtd = dev_get_drvdata(dev); 129 struct mtd_info *mtd = dev_to_mtd(dev);
128 130
129 if (mtd) 131 if (mtd && mtd->resume)
130 mtd_resume(mtd); 132 mtd->resume(mtd);
131 return 0; 133 return 0;
132} 134}
133 135
134static ssize_t mtd_type_show(struct device *dev, 136static ssize_t mtd_type_show(struct device *dev,
135 struct device_attribute *attr, char *buf) 137 struct device_attribute *attr, char *buf)
136{ 138{
137 struct mtd_info *mtd = dev_get_drvdata(dev); 139 struct mtd_info *mtd = dev_to_mtd(dev);
138 char *type; 140 char *type;
139 141
140 switch (mtd->type) { 142 switch (mtd->type) {
@@ -170,7 +172,7 @@ static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
170static ssize_t mtd_flags_show(struct device *dev, 172static ssize_t mtd_flags_show(struct device *dev,
171 struct device_attribute *attr, char *buf) 173 struct device_attribute *attr, char *buf)
172{ 174{
173 struct mtd_info *mtd = dev_get_drvdata(dev); 175 struct mtd_info *mtd = dev_to_mtd(dev);
174 176
175 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags); 177 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
176 178
@@ -180,7 +182,7 @@ static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
180static ssize_t mtd_size_show(struct device *dev, 182static ssize_t mtd_size_show(struct device *dev,
181 struct device_attribute *attr, char *buf) 183 struct device_attribute *attr, char *buf)
182{ 184{
183 struct mtd_info *mtd = dev_get_drvdata(dev); 185 struct mtd_info *mtd = dev_to_mtd(dev);
184 186
185 return snprintf(buf, PAGE_SIZE, "%llu\n", 187 return snprintf(buf, PAGE_SIZE, "%llu\n",
186 (unsigned long long)mtd->size); 188 (unsigned long long)mtd->size);
@@ -191,7 +193,7 @@ static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
191static ssize_t mtd_erasesize_show(struct device *dev, 193static ssize_t mtd_erasesize_show(struct device *dev,
192 struct device_attribute *attr, char *buf) 194 struct device_attribute *attr, char *buf)
193{ 195{
194 struct mtd_info *mtd = dev_get_drvdata(dev); 196 struct mtd_info *mtd = dev_to_mtd(dev);
195 197
196 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize); 198 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
197 199
@@ -201,7 +203,7 @@ static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
201static ssize_t mtd_writesize_show(struct device *dev, 203static ssize_t mtd_writesize_show(struct device *dev,
202 struct device_attribute *attr, char *buf) 204 struct device_attribute *attr, char *buf)
203{ 205{
204 struct mtd_info *mtd = dev_get_drvdata(dev); 206 struct mtd_info *mtd = dev_to_mtd(dev);
205 207
206 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize); 208 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
207 209
@@ -211,7 +213,7 @@ static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
211static ssize_t mtd_subpagesize_show(struct device *dev, 213static ssize_t mtd_subpagesize_show(struct device *dev,
212 struct device_attribute *attr, char *buf) 214 struct device_attribute *attr, char *buf)
213{ 215{
214 struct mtd_info *mtd = dev_get_drvdata(dev); 216 struct mtd_info *mtd = dev_to_mtd(dev);
215 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; 217 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
216 218
217 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize); 219 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
@@ -222,7 +224,7 @@ static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
222static ssize_t mtd_oobsize_show(struct device *dev, 224static ssize_t mtd_oobsize_show(struct device *dev,
223 struct device_attribute *attr, char *buf) 225 struct device_attribute *attr, char *buf)
224{ 226{
225 struct mtd_info *mtd = dev_get_drvdata(dev); 227 struct mtd_info *mtd = dev_to_mtd(dev);
226 228
227 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize); 229 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
228 230
@@ -232,7 +234,7 @@ static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
232static ssize_t mtd_numeraseregions_show(struct device *dev, 234static ssize_t mtd_numeraseregions_show(struct device *dev,
233 struct device_attribute *attr, char *buf) 235 struct device_attribute *attr, char *buf)
234{ 236{
235 struct mtd_info *mtd = dev_get_drvdata(dev); 237 struct mtd_info *mtd = dev_to_mtd(dev);
236 238
237 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions); 239 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
238 240
@@ -243,50 +245,13 @@ static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
243static ssize_t mtd_name_show(struct device *dev, 245static ssize_t mtd_name_show(struct device *dev,
244 struct device_attribute *attr, char *buf) 246 struct device_attribute *attr, char *buf)
245{ 247{
246 struct mtd_info *mtd = dev_get_drvdata(dev); 248 struct mtd_info *mtd = dev_to_mtd(dev);
247 249
248 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name); 250 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
249 251
250} 252}
251static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); 253static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
252 254
253static ssize_t mtd_ecc_strength_show(struct device *dev,
254 struct device_attribute *attr, char *buf)
255{
256 struct mtd_info *mtd = dev_get_drvdata(dev);
257
258 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
259}
260static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
261
262static ssize_t mtd_bitflip_threshold_show(struct device *dev,
263 struct device_attribute *attr,
264 char *buf)
265{
266 struct mtd_info *mtd = dev_get_drvdata(dev);
267
268 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
269}
270
271static ssize_t mtd_bitflip_threshold_store(struct device *dev,
272 struct device_attribute *attr,
273 const char *buf, size_t count)
274{
275 struct mtd_info *mtd = dev_get_drvdata(dev);
276 unsigned int bitflip_threshold;
277 int retval;
278
279 retval = kstrtouint(buf, 0, &bitflip_threshold);
280 if (retval)
281 return retval;
282
283 mtd->bitflip_threshold = bitflip_threshold;
284 return count;
285}
286static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
287 mtd_bitflip_threshold_show,
288 mtd_bitflip_threshold_store);
289
290static struct attribute *mtd_attrs[] = { 255static struct attribute *mtd_attrs[] = {
291 &dev_attr_type.attr, 256 &dev_attr_type.attr,
292 &dev_attr_flags.attr, 257 &dev_attr_flags.attr,
@@ -297,8 +262,6 @@ static struct attribute *mtd_attrs[] = {
297 &dev_attr_oobsize.attr, 262 &dev_attr_oobsize.attr,
298 &dev_attr_numeraseregions.attr, 263 &dev_attr_numeraseregions.attr,
299 &dev_attr_name.attr, 264 &dev_attr_name.attr,
300 &dev_attr_ecc_strength.attr,
301 &dev_attr_bitflip_threshold.attr,
302 NULL, 265 NULL,
303}; 266};
304 267
@@ -361,10 +324,6 @@ int add_mtd_device(struct mtd_info *mtd)
361 mtd->index = i; 324 mtd->index = i;
362 mtd->usecount = 0; 325 mtd->usecount = 0;
363 326
364 /* default value if not set by driver */
365 if (mtd->bitflip_threshold == 0)
366 mtd->bitflip_threshold = mtd->ecc_strength;
367
368 if (is_power_of_2(mtd->erasesize)) 327 if (is_power_of_2(mtd->erasesize))
369 mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 328 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
370 else 329 else
@@ -379,9 +338,9 @@ int add_mtd_device(struct mtd_info *mtd)
379 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; 338 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
380 339
381 /* Some chips always power up locked. Unlock them now */ 340 /* Some chips always power up locked. Unlock them now */
382 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) { 341 if ((mtd->flags & MTD_WRITEABLE)
383 error = mtd_unlock(mtd, 0, mtd->size); 342 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
384 if (error && error != -EOPNOTSUPP) 343 if (mtd->unlock(mtd, 0, mtd->size))
385 printk(KERN_WARNING 344 printk(KERN_WARNING
386 "%s: unlock failed, writes may not work\n", 345 "%s: unlock failed, writes may not work\n",
387 mtd->name); 346 mtd->name);
@@ -403,7 +362,7 @@ int add_mtd_device(struct mtd_info *mtd)
403 MTD_DEVT(i) + 1, 362 MTD_DEVT(i) + 1,
404 NULL, "mtd%dro", i); 363 NULL, "mtd%dro", i);
405 364
406 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); 365 DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
407 /* No need to get a refcount on the module containing 366 /* No need to get a refcount on the module containing
408 the notifier, since we hold the mtd_table_mutex */ 367 the notifier, since we hold the mtd_table_mutex */
409 list_for_each_entry(not, &mtd_notifiers, list) 368 list_for_each_entry(not, &mtd_notifiers, list)
@@ -470,63 +429,27 @@ out_error:
470} 429}
471 430
472/** 431/**
473 * mtd_device_parse_register - parse partitions and register an MTD device. 432 * mtd_device_register - register an MTD device.
474 *
475 * @mtd: the MTD device to register
476 * @types: the list of MTD partition probes to try, see
477 * 'parse_mtd_partitions()' for more information
478 * @parser_data: MTD partition parser-specific data
479 * @parts: fallback partition information to register, if parsing fails;
480 * only valid if %nr_parts > %0
481 * @nr_parts: the number of partitions in parts, if zero then the full
482 * MTD device is registered if no partition info is found
483 * 433 *
484 * This function aggregates MTD partitions parsing (done by 434 * @master: the MTD device to register
485 * 'parse_mtd_partitions()') and MTD device and partitions registering. It 435 * @parts: the partitions to register - only valid if nr_parts > 0
486 * basically follows the most common pattern found in many MTD drivers: 436 * @nr_parts: the number of partitions in parts. If zero then the full MTD
437 * device is registered
487 * 438 *
488 * * It first tries to probe partitions on MTD device @mtd using parsers 439 * Register an MTD device with the system and optionally, a number of
489 * specified in @types (if @types is %NULL, then the default list of parsers 440 * partitions. If nr_parts is 0 then the whole device is registered, otherwise
490 * is used, see 'parse_mtd_partitions()' for more information). If none are 441 * only the partitions are registered. To register both the full device *and*
491 * found this functions tries to fallback to information specified in 442 * the partitions, call mtd_device_register() twice, once with nr_parts == 0
492 * @parts/@nr_parts. 443 * and once equal to the number of partitions.
493 * * If any partitioning info was found, this function registers the found
494 * partitions.
495 * * If no partitions were found this function just registers the MTD device
496 * @mtd and exits.
497 *
498 * Returns zero in case of success and a negative error code in case of failure.
499 */ 444 */
500int mtd_device_parse_register(struct mtd_info *mtd, const char **types, 445int mtd_device_register(struct mtd_info *master,
501 struct mtd_part_parser_data *parser_data, 446 const struct mtd_partition *parts,
502 const struct mtd_partition *parts, 447 int nr_parts)
503 int nr_parts)
504{ 448{
505 int err; 449 return parts ? add_mtd_partitions(master, parts, nr_parts) :
506 struct mtd_partition *real_parts; 450 add_mtd_device(master);
507
508 err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
509 if (err <= 0 && nr_parts && parts) {
510 real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
511 GFP_KERNEL);
512 if (!real_parts)
513 err = -ENOMEM;
514 else
515 err = nr_parts;
516 }
517
518 if (err > 0) {
519 err = add_mtd_partitions(mtd, real_parts, err);
520 kfree(real_parts);
521 } else if (err == 0) {
522 err = add_mtd_device(mtd);
523 if (err == 1)
524 err = -ENODEV;
525 }
526
527 return err;
528} 451}
529EXPORT_SYMBOL_GPL(mtd_device_parse_register); 452EXPORT_SYMBOL_GPL(mtd_device_register);
530 453
531/** 454/**
532 * mtd_device_unregister - unregister an existing MTD device. 455 * mtd_device_unregister - unregister an existing MTD device.
@@ -557,6 +480,7 @@ EXPORT_SYMBOL_GPL(mtd_device_unregister);
557 * or removal of MTD devices. Causes the 'add' callback to be immediately 480 * or removal of MTD devices. Causes the 'add' callback to be immediately
558 * invoked for each MTD device currently present in the system. 481 * invoked for each MTD device currently present in the system.
559 */ 482 */
483
560void register_mtd_user (struct mtd_notifier *new) 484void register_mtd_user (struct mtd_notifier *new)
561{ 485{
562 struct mtd_info *mtd; 486 struct mtd_info *mtd;
@@ -572,7 +496,6 @@ void register_mtd_user (struct mtd_notifier *new)
572 496
573 mutex_unlock(&mtd_table_mutex); 497 mutex_unlock(&mtd_table_mutex);
574} 498}
575EXPORT_SYMBOL_GPL(register_mtd_user);
576 499
577/** 500/**
578 * unregister_mtd_user - unregister a 'user' of MTD devices. 501 * unregister_mtd_user - unregister a 'user' of MTD devices.
@@ -583,6 +506,7 @@ EXPORT_SYMBOL_GPL(register_mtd_user);
583 * 'remove' callback to be immediately invoked for each MTD device 506 * 'remove' callback to be immediately invoked for each MTD device
584 * currently present in the system. 507 * currently present in the system.
585 */ 508 */
509
586int unregister_mtd_user (struct mtd_notifier *old) 510int unregister_mtd_user (struct mtd_notifier *old)
587{ 511{
588 struct mtd_info *mtd; 512 struct mtd_info *mtd;
@@ -598,7 +522,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
598 mutex_unlock(&mtd_table_mutex); 522 mutex_unlock(&mtd_table_mutex);
599 return 0; 523 return 0;
600} 524}
601EXPORT_SYMBOL_GPL(unregister_mtd_user); 525
602 526
603/** 527/**
604 * get_mtd_device - obtain a validated handle for an MTD device 528 * get_mtd_device - obtain a validated handle for an MTD device
@@ -611,6 +535,7 @@ EXPORT_SYMBOL_GPL(unregister_mtd_user);
611 * both, return the num'th driver only if its address matches. Return 535 * both, return the num'th driver only if its address matches. Return
612 * error code if not. 536 * error code if not.
613 */ 537 */
538
614struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) 539struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
615{ 540{
616 struct mtd_info *ret = NULL, *other; 541 struct mtd_info *ret = NULL, *other;
@@ -643,7 +568,6 @@ out:
643 mutex_unlock(&mtd_table_mutex); 568 mutex_unlock(&mtd_table_mutex);
644 return ret; 569 return ret;
645} 570}
646EXPORT_SYMBOL_GPL(get_mtd_device);
647 571
648 572
649int __get_mtd_device(struct mtd_info *mtd) 573int __get_mtd_device(struct mtd_info *mtd)
@@ -653,8 +577,8 @@ int __get_mtd_device(struct mtd_info *mtd)
653 if (!try_module_get(mtd->owner)) 577 if (!try_module_get(mtd->owner))
654 return -ENODEV; 578 return -ENODEV;
655 579
656 if (mtd->_get_device) { 580 if (mtd->get_device) {
657 err = mtd->_get_device(mtd); 581 err = mtd->get_device(mtd);
658 582
659 if (err) { 583 if (err) {
660 module_put(mtd->owner); 584 module_put(mtd->owner);
@@ -664,7 +588,6 @@ int __get_mtd_device(struct mtd_info *mtd)
664 mtd->usecount++; 588 mtd->usecount++;
665 return 0; 589 return 0;
666} 590}
667EXPORT_SYMBOL_GPL(__get_mtd_device);
668 591
669/** 592/**
670 * get_mtd_device_nm - obtain a validated handle for an MTD device by 593 * get_mtd_device_nm - obtain a validated handle for an MTD device by
@@ -674,6 +597,7 @@ EXPORT_SYMBOL_GPL(__get_mtd_device);
674 * This function returns MTD device description structure in case of 597 * This function returns MTD device description structure in case of
675 * success and an error code in case of failure. 598 * success and an error code in case of failure.
676 */ 599 */
600
677struct mtd_info *get_mtd_device_nm(const char *name) 601struct mtd_info *get_mtd_device_nm(const char *name)
678{ 602{
679 int err = -ENODEV; 603 int err = -ENODEV;
@@ -702,7 +626,6 @@ out_unlock:
702 mutex_unlock(&mtd_table_mutex); 626 mutex_unlock(&mtd_table_mutex);
703 return ERR_PTR(err); 627 return ERR_PTR(err);
704} 628}
705EXPORT_SYMBOL_GPL(get_mtd_device_nm);
706 629
707void put_mtd_device(struct mtd_info *mtd) 630void put_mtd_device(struct mtd_info *mtd)
708{ 631{
@@ -711,365 +634,50 @@ void put_mtd_device(struct mtd_info *mtd)
711 mutex_unlock(&mtd_table_mutex); 634 mutex_unlock(&mtd_table_mutex);
712 635
713} 636}
714EXPORT_SYMBOL_GPL(put_mtd_device);
715 637
716void __put_mtd_device(struct mtd_info *mtd) 638void __put_mtd_device(struct mtd_info *mtd)
717{ 639{
718 --mtd->usecount; 640 --mtd->usecount;
719 BUG_ON(mtd->usecount < 0); 641 BUG_ON(mtd->usecount < 0);
720 642
721 if (mtd->_put_device) 643 if (mtd->put_device)
722 mtd->_put_device(mtd); 644 mtd->put_device(mtd);
723 645
724 module_put(mtd->owner); 646 module_put(mtd->owner);
725} 647}
726EXPORT_SYMBOL_GPL(__put_mtd_device);
727
728/*
729 * Erase is an asynchronous operation. Device drivers are supposed
730 * to call instr->callback() whenever the operation completes, even
731 * if it completes with a failure.
732 * Callers are supposed to pass a callback function and wait for it
733 * to be called before writing to the block.
734 */
735int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
736{
737 if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
738 return -EINVAL;
739 if (!(mtd->flags & MTD_WRITEABLE))
740 return -EROFS;
741 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
742 if (!instr->len) {
743 instr->state = MTD_ERASE_DONE;
744 mtd_erase_callback(instr);
745 return 0;
746 }
747 return mtd->_erase(mtd, instr);
748}
749EXPORT_SYMBOL_GPL(mtd_erase);
750
751/*
752 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
753 */
754int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
755 void **virt, resource_size_t *phys)
756{
757 *retlen = 0;
758 *virt = NULL;
759 if (phys)
760 *phys = 0;
761 if (!mtd->_point)
762 return -EOPNOTSUPP;
763 if (from < 0 || from > mtd->size || len > mtd->size - from)
764 return -EINVAL;
765 if (!len)
766 return 0;
767 return mtd->_point(mtd, from, len, retlen, virt, phys);
768}
769EXPORT_SYMBOL_GPL(mtd_point);
770
771/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
772int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
773{
774 if (!mtd->_point)
775 return -EOPNOTSUPP;
776 if (from < 0 || from > mtd->size || len > mtd->size - from)
777 return -EINVAL;
778 if (!len)
779 return 0;
780 return mtd->_unpoint(mtd, from, len);
781}
782EXPORT_SYMBOL_GPL(mtd_unpoint);
783
784/*
785 * Allow NOMMU mmap() to directly map the device (if not NULL)
786 * - return the address to which the offset maps
787 * - return -ENOSYS to indicate refusal to do the mapping
788 */
789unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
790 unsigned long offset, unsigned long flags)
791{
792 if (!mtd->_get_unmapped_area)
793 return -EOPNOTSUPP;
794 if (offset > mtd->size || len > mtd->size - offset)
795 return -EINVAL;
796 return mtd->_get_unmapped_area(mtd, len, offset, flags);
797}
798EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
799
800int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
801 u_char *buf)
802{
803 int ret_code;
804 *retlen = 0;
805 if (from < 0 || from > mtd->size || len > mtd->size - from)
806 return -EINVAL;
807 if (!len)
808 return 0;
809
810 /*
811 * In the absence of an error, drivers return a non-negative integer
812 * representing the maximum number of bitflips that were corrected on
813 * any one ecc region (if applicable; zero otherwise).
814 */
815 ret_code = mtd->_read(mtd, from, len, retlen, buf);
816 if (unlikely(ret_code < 0))
817 return ret_code;
818 if (mtd->ecc_strength == 0)
819 return 0; /* device lacks ecc */
820 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
821}
822EXPORT_SYMBOL_GPL(mtd_read);
823
824int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
825 const u_char *buf)
826{
827 *retlen = 0;
828 if (to < 0 || to > mtd->size || len > mtd->size - to)
829 return -EINVAL;
830 if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
831 return -EROFS;
832 if (!len)
833 return 0;
834 return mtd->_write(mtd, to, len, retlen, buf);
835}
836EXPORT_SYMBOL_GPL(mtd_write);
837
838/*
839 * In blackbox flight recorder like scenarios we want to make successful writes
840 * in interrupt context. panic_write() is only intended to be called when its
841 * known the kernel is about to panic and we need the write to succeed. Since
842 * the kernel is not going to be running for much longer, this function can
843 * break locks and delay to ensure the write succeeds (but not sleep).
844 */
845int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
846 const u_char *buf)
847{
848 *retlen = 0;
849 if (!mtd->_panic_write)
850 return -EOPNOTSUPP;
851 if (to < 0 || to > mtd->size || len > mtd->size - to)
852 return -EINVAL;
853 if (!(mtd->flags & MTD_WRITEABLE))
854 return -EROFS;
855 if (!len)
856 return 0;
857 return mtd->_panic_write(mtd, to, len, retlen, buf);
858}
859EXPORT_SYMBOL_GPL(mtd_panic_write);
860 648
861int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) 649/* default_mtd_writev - default mtd writev method for MTD devices that
862{ 650 * don't implement their own
863 int ret_code;
864 ops->retlen = ops->oobretlen = 0;
865 if (!mtd->_read_oob)
866 return -EOPNOTSUPP;
867 /*
868 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
869 * similar to mtd->_read(), returning a non-negative integer
870 * representing max bitflips. In other cases, mtd->_read_oob() may
871 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
872 */
873 ret_code = mtd->_read_oob(mtd, from, ops);
874 if (unlikely(ret_code < 0))
875 return ret_code;
876 if (mtd->ecc_strength == 0)
877 return 0; /* device lacks ecc */
878 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
879}
880EXPORT_SYMBOL_GPL(mtd_read_oob);
881
882/*
883 * Method to access the protection register area, present in some flash
884 * devices. The user data is one time programmable but the factory data is read
885 * only.
886 */ 651 */
887int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
888 size_t len)
889{
890 if (!mtd->_get_fact_prot_info)
891 return -EOPNOTSUPP;
892 if (!len)
893 return 0;
894 return mtd->_get_fact_prot_info(mtd, buf, len);
895}
896EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
897
898int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
899 size_t *retlen, u_char *buf)
900{
901 *retlen = 0;
902 if (!mtd->_read_fact_prot_reg)
903 return -EOPNOTSUPP;
904 if (!len)
905 return 0;
906 return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
907}
908EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
909
910int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
911 size_t len)
912{
913 if (!mtd->_get_user_prot_info)
914 return -EOPNOTSUPP;
915 if (!len)
916 return 0;
917 return mtd->_get_user_prot_info(mtd, buf, len);
918}
919EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
920
921int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
922 size_t *retlen, u_char *buf)
923{
924 *retlen = 0;
925 if (!mtd->_read_user_prot_reg)
926 return -EOPNOTSUPP;
927 if (!len)
928 return 0;
929 return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
930}
931EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
932
933int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
934 size_t *retlen, u_char *buf)
935{
936 *retlen = 0;
937 if (!mtd->_write_user_prot_reg)
938 return -EOPNOTSUPP;
939 if (!len)
940 return 0;
941 return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
942}
943EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
944
945int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
946{
947 if (!mtd->_lock_user_prot_reg)
948 return -EOPNOTSUPP;
949 if (!len)
950 return 0;
951 return mtd->_lock_user_prot_reg(mtd, from, len);
952}
953EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
954
955/* Chip-supported device locking */
956int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
957{
958 if (!mtd->_lock)
959 return -EOPNOTSUPP;
960 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
961 return -EINVAL;
962 if (!len)
963 return 0;
964 return mtd->_lock(mtd, ofs, len);
965}
966EXPORT_SYMBOL_GPL(mtd_lock);
967 652
968int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 653int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
969{ 654 unsigned long count, loff_t to, size_t *retlen)
970 if (!mtd->_unlock)
971 return -EOPNOTSUPP;
972 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
973 return -EINVAL;
974 if (!len)
975 return 0;
976 return mtd->_unlock(mtd, ofs, len);
977}
978EXPORT_SYMBOL_GPL(mtd_unlock);
979
980int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
981{
982 if (!mtd->_is_locked)
983 return -EOPNOTSUPP;
984 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
985 return -EINVAL;
986 if (!len)
987 return 0;
988 return mtd->_is_locked(mtd, ofs, len);
989}
990EXPORT_SYMBOL_GPL(mtd_is_locked);
991
992int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
993{
994 if (!mtd->_block_isbad)
995 return 0;
996 if (ofs < 0 || ofs > mtd->size)
997 return -EINVAL;
998 return mtd->_block_isbad(mtd, ofs);
999}
1000EXPORT_SYMBOL_GPL(mtd_block_isbad);
1001
1002int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
1003{
1004 if (!mtd->_block_markbad)
1005 return -EOPNOTSUPP;
1006 if (ofs < 0 || ofs > mtd->size)
1007 return -EINVAL;
1008 if (!(mtd->flags & MTD_WRITEABLE))
1009 return -EROFS;
1010 return mtd->_block_markbad(mtd, ofs);
1011}
1012EXPORT_SYMBOL_GPL(mtd_block_markbad);
1013
1014/*
1015 * default_mtd_writev - the default writev method
1016 * @mtd: mtd device description object pointer
1017 * @vecs: the vectors to write
1018 * @count: count of vectors in @vecs
1019 * @to: the MTD device offset to write to
1020 * @retlen: on exit contains the count of bytes written to the MTD device.
1021 *
1022 * This function returns zero in case of success and a negative error code in
1023 * case of failure.
1024 */
1025static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1026 unsigned long count, loff_t to, size_t *retlen)
1027{ 655{
1028 unsigned long i; 656 unsigned long i;
1029 size_t totlen = 0, thislen; 657 size_t totlen = 0, thislen;
1030 int ret = 0; 658 int ret = 0;
1031 659
1032 for (i = 0; i < count; i++) { 660 if(!mtd->write) {
1033 if (!vecs[i].iov_len) 661 ret = -EROFS;
1034 continue; 662 } else {
1035 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, 663 for (i=0; i<count; i++) {
1036 vecs[i].iov_base); 664 if (!vecs[i].iov_len)
1037 totlen += thislen; 665 continue;
1038 if (ret || thislen != vecs[i].iov_len) 666 ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
1039 break; 667 totlen += thislen;
1040 to += vecs[i].iov_len; 668 if (ret || thislen != vecs[i].iov_len)
669 break;
670 to += vecs[i].iov_len;
671 }
1041 } 672 }
1042 *retlen = totlen; 673 if (retlen)
674 *retlen = totlen;
1043 return ret; 675 return ret;
1044} 676}
1045 677
1046/*
1047 * mtd_writev - the vector-based MTD write method
1048 * @mtd: mtd device description object pointer
1049 * @vecs: the vectors to write
1050 * @count: count of vectors in @vecs
1051 * @to: the MTD device offset to write to
1052 * @retlen: on exit contains the count of bytes written to the MTD device.
1053 *
1054 * This function returns zero in case of success and a negative error code in
1055 * case of failure.
1056 */
1057int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1058 unsigned long count, loff_t to, size_t *retlen)
1059{
1060 *retlen = 0;
1061 if (!(mtd->flags & MTD_WRITEABLE))
1062 return -EROFS;
1063 if (!mtd->_writev)
1064 return default_mtd_writev(mtd, vecs, count, to, retlen);
1065 return mtd->_writev(mtd, vecs, count, to, retlen);
1066}
1067EXPORT_SYMBOL_GPL(mtd_writev);
1068
1069/** 678/**
1070 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size 679 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
1071 * @mtd: mtd device description object pointer 680 * @size: A pointer to the ideal or maximum size of the allocation. Points
1072 * @size: a pointer to the ideal or maximum size of the allocation, points
1073 * to the actual allocation size on success. 681 * to the actual allocation size on success.
1074 * 682 *
1075 * This routine attempts to allocate a contiguous kernel buffer up to 683 * This routine attempts to allocate a contiguous kernel buffer up to
@@ -1114,6 +722,15 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
1114 */ 722 */
1115 return kmalloc(*size, GFP_KERNEL); 723 return kmalloc(*size, GFP_KERNEL);
1116} 724}
725
726EXPORT_SYMBOL_GPL(get_mtd_device);
727EXPORT_SYMBOL_GPL(get_mtd_device_nm);
728EXPORT_SYMBOL_GPL(__get_mtd_device);
729EXPORT_SYMBOL_GPL(put_mtd_device);
730EXPORT_SYMBOL_GPL(__put_mtd_device);
731EXPORT_SYMBOL_GPL(register_mtd_user);
732EXPORT_SYMBOL_GPL(unregister_mtd_user);
733EXPORT_SYMBOL_GPL(default_mtd_writev);
1117EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); 734EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
1118 735
1119#ifdef CONFIG_PROC_FS 736#ifdef CONFIG_PROC_FS
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 961a3840854..0ed6126b4c1 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -15,9 +15,6 @@ extern int del_mtd_device(struct mtd_info *mtd);
15extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, 15extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
16 int); 16 int);
17extern int del_mtd_partitions(struct mtd_info *); 17extern int del_mtd_partitions(struct mtd_info *);
18extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
19 struct mtd_partition **pparts,
20 struct mtd_part_parser_data *data);
21 18
22#define mtd_for_each_device(mtd) \ 19#define mtd_for_each_device(mtd) \
23 for ((mtd) = __mtd_next_device(0); \ 20 for ((mtd) = __mtd_next_device(0); \
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 97bb8f6304d..43130e8acea 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -112,7 +112,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
112 set_current_state(TASK_INTERRUPTIBLE); 112 set_current_state(TASK_INTERRUPTIBLE);
113 add_wait_queue(&wait_q, &wait); 113 add_wait_queue(&wait_q, &wait);
114 114
115 ret = mtd_erase(mtd, &erase); 115 ret = mtd->erase(mtd, &erase);
116 if (ret) { 116 if (ret) {
117 set_current_state(TASK_RUNNING); 117 set_current_state(TASK_RUNNING);
118 remove_wait_queue(&wait_q, &wait); 118 remove_wait_queue(&wait_q, &wait);
@@ -169,7 +169,14 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
169 cxt->nextpage = 0; 169 cxt->nextpage = 0;
170 } 170 }
171 171
172 while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) { 172 while (mtd->block_isbad) {
173 ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
174 if (!ret)
175 break;
176 if (ret < 0) {
177 printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
178 return;
179 }
173badblock: 180badblock:
174 printk(KERN_WARNING "mtdoops: bad block at %08lx\n", 181 printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
175 cxt->nextpage * record_size); 182 cxt->nextpage * record_size);
@@ -183,11 +190,6 @@ badblock:
183 } 190 }
184 } 191 }
185 192
186 if (ret < 0) {
187 printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
188 return;
189 }
190
191 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 193 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
192 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); 194 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
193 195
@@ -197,9 +199,9 @@ badblock:
197 return; 199 return;
198 } 200 }
199 201
200 if (ret == -EIO) { 202 if (mtd->block_markbad && ret == -EIO) {
201 ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); 203 ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
202 if (ret < 0 && ret != -EOPNOTSUPP) { 204 if (ret < 0) {
203 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); 205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
204 return; 206 return;
205 } 207 }
@@ -219,16 +221,12 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
219 hdr[0] = cxt->nextcount; 221 hdr[0] = cxt->nextcount;
220 hdr[1] = MTDOOPS_KERNMSG_MAGIC; 222 hdr[1] = MTDOOPS_KERNMSG_MAGIC;
221 223
222 if (panic) { 224 if (panic)
223 ret = mtd_panic_write(mtd, cxt->nextpage * record_size, 225 ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
224 record_size, &retlen, cxt->oops_buf); 226 record_size, &retlen, cxt->oops_buf);
225 if (ret == -EOPNOTSUPP) { 227 else
226 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); 228 ret = mtd->write(mtd, cxt->nextpage * record_size,
227 return; 229 record_size, &retlen, cxt->oops_buf);
228 }
229 } else
230 ret = mtd_write(mtd, cxt->nextpage * record_size,
231 record_size, &retlen, cxt->oops_buf);
232 230
233 if (retlen != record_size || ret < 0) 231 if (retlen != record_size || ret < 0)
234 printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n", 232 printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
@@ -255,14 +253,15 @@ static void find_next_position(struct mtdoops_context *cxt)
255 size_t retlen; 253 size_t retlen;
256 254
257 for (page = 0; page < cxt->oops_pages; page++) { 255 for (page = 0; page < cxt->oops_pages; page++) {
258 if (mtd_block_isbad(mtd, page * record_size)) 256 if (mtd->block_isbad &&
257 mtd->block_isbad(mtd, page * record_size))
259 continue; 258 continue;
260 /* Assume the page is used */ 259 /* Assume the page is used */
261 mark_page_used(cxt, page); 260 mark_page_used(cxt, page);
262 ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, 261 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
263 &retlen, (u_char *)&count[0]); 262 &retlen, (u_char *) &count[0]);
264 if (retlen != MTDOOPS_HEADER_SIZE || 263 if (retlen != MTDOOPS_HEADER_SIZE ||
265 (ret < 0 && !mtd_is_bitflip(ret))) { 264 (ret < 0 && ret != -EUCLEAN)) {
266 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", 265 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
267 page * record_size, retlen, 266 page * record_size, retlen,
268 MTDOOPS_HEADER_SIZE, ret); 267 MTDOOPS_HEADER_SIZE, ret);
@@ -271,7 +270,7 @@ static void find_next_position(struct mtdoops_context *cxt)
271 270
272 if (count[0] == 0xffffffff && count[1] == 0xffffffff) 271 if (count[0] == 0xffffffff && count[1] == 0xffffffff)
273 mark_page_unused(cxt, page); 272 mark_page_unused(cxt, page);
274 if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC) 273 if (count[0] == 0xffffffff)
275 continue; 274 continue;
276 if (maxcount == 0xffffffff) { 275 if (maxcount == 0xffffffff) {
277 maxcount = count[0]; 276 maxcount = count[0];
@@ -289,33 +288,55 @@ static void find_next_position(struct mtdoops_context *cxt)
289 } 288 }
290 } 289 }
291 if (maxcount == 0xffffffff) { 290 if (maxcount == 0xffffffff) {
292 cxt->nextpage = cxt->oops_pages - 1; 291 cxt->nextpage = 0;
293 cxt->nextcount = 0; 292 cxt->nextcount = 1;
294 } 293 schedule_work(&cxt->work_erase);
295 else { 294 return;
296 cxt->nextpage = maxpos;
297 cxt->nextcount = maxcount;
298 } 295 }
299 296
297 cxt->nextpage = maxpos;
298 cxt->nextcount = maxcount;
299
300 mtdoops_inc_counter(cxt); 300 mtdoops_inc_counter(cxt);
301} 301}
302 302
303static void mtdoops_do_dump(struct kmsg_dumper *dumper, 303static void mtdoops_do_dump(struct kmsg_dumper *dumper,
304 enum kmsg_dump_reason reason) 304 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
305 const char *s2, unsigned long l2)
305{ 306{
306 struct mtdoops_context *cxt = container_of(dumper, 307 struct mtdoops_context *cxt = container_of(dumper,
307 struct mtdoops_context, dump); 308 struct mtdoops_context, dump);
309 unsigned long s1_start, s2_start;
310 unsigned long l1_cpy, l2_cpy;
311 char *dst;
312
313 if (reason != KMSG_DUMP_OOPS &&
314 reason != KMSG_DUMP_PANIC &&
315 reason != KMSG_DUMP_KEXEC)
316 return;
308 317
309 /* Only dump oopses if dump_oops is set */ 318 /* Only dump oopses if dump_oops is set */
310 if (reason == KMSG_DUMP_OOPS && !dump_oops) 319 if (reason == KMSG_DUMP_OOPS && !dump_oops)
311 return; 320 return;
312 321
313 kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, 322 dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
314 record_size - MTDOOPS_HEADER_SIZE, NULL); 323 l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
324 l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
325
326 s2_start = l2 - l2_cpy;
327 s1_start = l1 - l1_cpy;
328
329 memcpy(dst, s1 + s1_start, l1_cpy);
330 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
315 331
316 /* Panics must be written immediately */ 332 /* Panics must be written immediately */
317 if (reason != KMSG_DUMP_OOPS) 333 if (reason != KMSG_DUMP_OOPS) {
318 mtdoops_write(cxt, 1); 334 if (!cxt->mtd->panic_write)
335 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
336 else
337 mtdoops_write(cxt, 1);
338 return;
339 }
319 340
320 /* For other cases, schedule work to write it "nicely" */ 341 /* For other cases, schedule work to write it "nicely" */
321 schedule_work(&cxt->work_write); 342 schedule_work(&cxt->work_write);
@@ -357,7 +378,6 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
357 return; 378 return;
358 } 379 }
359 380
360 cxt->dump.max_reason = KMSG_DUMP_OOPS;
361 cxt->dump.dump = mtdoops_do_dump; 381 cxt->dump.dump = mtdoops_do_dump;
362 err = kmsg_dump_register(&cxt->dump); 382 err = kmsg_dump_register(&cxt->dump);
363 if (err) { 383 if (err) {
@@ -384,8 +404,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
384 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); 404 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
385 405
386 cxt->mtd = NULL; 406 cxt->mtd = NULL;
387 flush_work(&cxt->work_erase); 407 flush_work_sync(&cxt->work_erase);
388 flush_work(&cxt->work_write); 408 flush_work_sync(&cxt->work_write);
389} 409}
390 410
391 411
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 70fa70a8318..630be3e7da0 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,14 +65,19 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
65 int res; 65 int res;
66 66
67 stats = part->master->ecc_stats; 67 stats = part->master->ecc_stats;
68 res = part->master->_read(part->master, from + part->offset, len, 68
69 retlen, buf); 69 if (from >= mtd->size)
70 if (unlikely(mtd_is_eccerr(res))) 70 len = 0;
71 mtd->ecc_stats.failed += 71 else if (from + len > mtd->size)
72 part->master->ecc_stats.failed - stats.failed; 72 len = mtd->size - from;
73 else 73 res = part->master->read(part->master, from + part->offset,
74 mtd->ecc_stats.corrected += 74 len, retlen, buf);
75 part->master->ecc_stats.corrected - stats.corrected; 75 if (unlikely(res)) {
76 if (res == -EUCLEAN)
77 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
78 if (res == -EBADMSG)
79 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
80 }
76 return res; 81 return res;
77} 82}
78 83
@@ -80,16 +85,19 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
80 size_t *retlen, void **virt, resource_size_t *phys) 85 size_t *retlen, void **virt, resource_size_t *phys)
81{ 86{
82 struct mtd_part *part = PART(mtd); 87 struct mtd_part *part = PART(mtd);
83 88 if (from >= mtd->size)
84 return part->master->_point(part->master, from + part->offset, len, 89 len = 0;
85 retlen, virt, phys); 90 else if (from + len > mtd->size)
91 len = mtd->size - from;
92 return part->master->point (part->master, from + part->offset,
93 len, retlen, virt, phys);
86} 94}
87 95
88static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 96static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
89{ 97{
90 struct mtd_part *part = PART(mtd); 98 struct mtd_part *part = PART(mtd);
91 99
92 return part->master->_unpoint(part->master, from + part->offset, len); 100 part->master->unpoint(part->master, from + part->offset, len);
93} 101}
94 102
95static unsigned long part_get_unmapped_area(struct mtd_info *mtd, 103static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -100,8 +108,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
100 struct mtd_part *part = PART(mtd); 108 struct mtd_part *part = PART(mtd);
101 109
102 offset += part->offset; 110 offset += part->offset;
103 return part->master->_get_unmapped_area(part->master, len, offset, 111 return part->master->get_unmapped_area(part->master, len, offset,
104 flags); 112 flags);
105} 113}
106 114
107static int part_read_oob(struct mtd_info *mtd, loff_t from, 115static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -122,7 +130,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
122 if (ops->oobbuf) { 130 if (ops->oobbuf) {
123 size_t len, pages; 131 size_t len, pages;
124 132
125 if (ops->mode == MTD_OPS_AUTO_OOB) 133 if (ops->mode == MTD_OOB_AUTO)
126 len = mtd->oobavail; 134 len = mtd->oobavail;
127 else 135 else
128 len = mtd->oobsize; 136 len = mtd->oobsize;
@@ -132,11 +140,11 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
132 return -EINVAL; 140 return -EINVAL;
133 } 141 }
134 142
135 res = part->master->_read_oob(part->master, from + part->offset, ops); 143 res = part->master->read_oob(part->master, from + part->offset, ops);
136 if (unlikely(res)) { 144 if (unlikely(res)) {
137 if (mtd_is_bitflip(res)) 145 if (res == -EUCLEAN)
138 mtd->ecc_stats.corrected++; 146 mtd->ecc_stats.corrected++;
139 if (mtd_is_eccerr(res)) 147 if (res == -EBADMSG)
140 mtd->ecc_stats.failed++; 148 mtd->ecc_stats.failed++;
141 } 149 }
142 return res; 150 return res;
@@ -146,46 +154,58 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
146 size_t len, size_t *retlen, u_char *buf) 154 size_t len, size_t *retlen, u_char *buf)
147{ 155{
148 struct mtd_part *part = PART(mtd); 156 struct mtd_part *part = PART(mtd);
149 return part->master->_read_user_prot_reg(part->master, from, len, 157 return part->master->read_user_prot_reg(part->master, from,
150 retlen, buf); 158 len, retlen, buf);
151} 159}
152 160
153static int part_get_user_prot_info(struct mtd_info *mtd, 161static int part_get_user_prot_info(struct mtd_info *mtd,
154 struct otp_info *buf, size_t len) 162 struct otp_info *buf, size_t len)
155{ 163{
156 struct mtd_part *part = PART(mtd); 164 struct mtd_part *part = PART(mtd);
157 return part->master->_get_user_prot_info(part->master, buf, len); 165 return part->master->get_user_prot_info(part->master, buf, len);
158} 166}
159 167
160static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 168static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
161 size_t len, size_t *retlen, u_char *buf) 169 size_t len, size_t *retlen, u_char *buf)
162{ 170{
163 struct mtd_part *part = PART(mtd); 171 struct mtd_part *part = PART(mtd);
164 return part->master->_read_fact_prot_reg(part->master, from, len, 172 return part->master->read_fact_prot_reg(part->master, from,
165 retlen, buf); 173 len, retlen, buf);
166} 174}
167 175
168static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, 176static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
169 size_t len) 177 size_t len)
170{ 178{
171 struct mtd_part *part = PART(mtd); 179 struct mtd_part *part = PART(mtd);
172 return part->master->_get_fact_prot_info(part->master, buf, len); 180 return part->master->get_fact_prot_info(part->master, buf, len);
173} 181}
174 182
175static int part_write(struct mtd_info *mtd, loff_t to, size_t len, 183static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
176 size_t *retlen, const u_char *buf) 184 size_t *retlen, const u_char *buf)
177{ 185{
178 struct mtd_part *part = PART(mtd); 186 struct mtd_part *part = PART(mtd);
179 return part->master->_write(part->master, to + part->offset, len, 187 if (!(mtd->flags & MTD_WRITEABLE))
180 retlen, buf); 188 return -EROFS;
189 if (to >= mtd->size)
190 len = 0;
191 else if (to + len > mtd->size)
192 len = mtd->size - to;
193 return part->master->write(part->master, to + part->offset,
194 len, retlen, buf);
181} 195}
182 196
183static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 197static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
184 size_t *retlen, const u_char *buf) 198 size_t *retlen, const u_char *buf)
185{ 199{
186 struct mtd_part *part = PART(mtd); 200 struct mtd_part *part = PART(mtd);
187 return part->master->_panic_write(part->master, to + part->offset, len, 201 if (!(mtd->flags & MTD_WRITEABLE))
188 retlen, buf); 202 return -EROFS;
203 if (to >= mtd->size)
204 len = 0;
205 else if (to + len > mtd->size)
206 len = mtd->size - to;
207 return part->master->panic_write(part->master, to + part->offset,
208 len, retlen, buf);
189} 209}
190 210
191static int part_write_oob(struct mtd_info *mtd, loff_t to, 211static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -193,43 +213,51 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
193{ 213{
194 struct mtd_part *part = PART(mtd); 214 struct mtd_part *part = PART(mtd);
195 215
216 if (!(mtd->flags & MTD_WRITEABLE))
217 return -EROFS;
218
196 if (to >= mtd->size) 219 if (to >= mtd->size)
197 return -EINVAL; 220 return -EINVAL;
198 if (ops->datbuf && to + ops->len > mtd->size) 221 if (ops->datbuf && to + ops->len > mtd->size)
199 return -EINVAL; 222 return -EINVAL;
200 return part->master->_write_oob(part->master, to + part->offset, ops); 223 return part->master->write_oob(part->master, to + part->offset, ops);
201} 224}
202 225
203static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 226static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
204 size_t len, size_t *retlen, u_char *buf) 227 size_t len, size_t *retlen, u_char *buf)
205{ 228{
206 struct mtd_part *part = PART(mtd); 229 struct mtd_part *part = PART(mtd);
207 return part->master->_write_user_prot_reg(part->master, from, len, 230 return part->master->write_user_prot_reg(part->master, from,
208 retlen, buf); 231 len, retlen, buf);
209} 232}
210 233
211static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 234static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
212 size_t len) 235 size_t len)
213{ 236{
214 struct mtd_part *part = PART(mtd); 237 struct mtd_part *part = PART(mtd);
215 return part->master->_lock_user_prot_reg(part->master, from, len); 238 return part->master->lock_user_prot_reg(part->master, from, len);
216} 239}
217 240
218static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, 241static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
219 unsigned long count, loff_t to, size_t *retlen) 242 unsigned long count, loff_t to, size_t *retlen)
220{ 243{
221 struct mtd_part *part = PART(mtd); 244 struct mtd_part *part = PART(mtd);
222 return part->master->_writev(part->master, vecs, count, 245 if (!(mtd->flags & MTD_WRITEABLE))
223 to + part->offset, retlen); 246 return -EROFS;
247 return part->master->writev(part->master, vecs, count,
248 to + part->offset, retlen);
224} 249}
225 250
226static int part_erase(struct mtd_info *mtd, struct erase_info *instr) 251static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
227{ 252{
228 struct mtd_part *part = PART(mtd); 253 struct mtd_part *part = PART(mtd);
229 int ret; 254 int ret;
230 255 if (!(mtd->flags & MTD_WRITEABLE))
256 return -EROFS;
257 if (instr->addr >= mtd->size)
258 return -EINVAL;
231 instr->addr += part->offset; 259 instr->addr += part->offset;
232 ret = part->master->_erase(part->master, instr); 260 ret = part->master->erase(part->master, instr);
233 if (ret) { 261 if (ret) {
234 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 262 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
235 instr->fail_addr -= part->offset; 263 instr->fail_addr -= part->offset;
@@ -240,7 +268,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
240 268
241void mtd_erase_callback(struct erase_info *instr) 269void mtd_erase_callback(struct erase_info *instr)
242{ 270{
243 if (instr->mtd->_erase == part_erase) { 271 if (instr->mtd->erase == part_erase) {
244 struct mtd_part *part = PART(instr->mtd); 272 struct mtd_part *part = PART(instr->mtd);
245 273
246 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 274 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -255,44 +283,52 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
255static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 283static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
256{ 284{
257 struct mtd_part *part = PART(mtd); 285 struct mtd_part *part = PART(mtd);
258 return part->master->_lock(part->master, ofs + part->offset, len); 286 if ((len + ofs) > mtd->size)
287 return -EINVAL;
288 return part->master->lock(part->master, ofs + part->offset, len);
259} 289}
260 290
261static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 291static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
262{ 292{
263 struct mtd_part *part = PART(mtd); 293 struct mtd_part *part = PART(mtd);
264 return part->master->_unlock(part->master, ofs + part->offset, len); 294 if ((len + ofs) > mtd->size)
295 return -EINVAL;
296 return part->master->unlock(part->master, ofs + part->offset, len);
265} 297}
266 298
267static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 299static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
268{ 300{
269 struct mtd_part *part = PART(mtd); 301 struct mtd_part *part = PART(mtd);
270 return part->master->_is_locked(part->master, ofs + part->offset, len); 302 if ((len + ofs) > mtd->size)
303 return -EINVAL;
304 return part->master->is_locked(part->master, ofs + part->offset, len);
271} 305}
272 306
273static void part_sync(struct mtd_info *mtd) 307static void part_sync(struct mtd_info *mtd)
274{ 308{
275 struct mtd_part *part = PART(mtd); 309 struct mtd_part *part = PART(mtd);
276 part->master->_sync(part->master); 310 part->master->sync(part->master);
277} 311}
278 312
279static int part_suspend(struct mtd_info *mtd) 313static int part_suspend(struct mtd_info *mtd)
280{ 314{
281 struct mtd_part *part = PART(mtd); 315 struct mtd_part *part = PART(mtd);
282 return part->master->_suspend(part->master); 316 return part->master->suspend(part->master);
283} 317}
284 318
285static void part_resume(struct mtd_info *mtd) 319static void part_resume(struct mtd_info *mtd)
286{ 320{
287 struct mtd_part *part = PART(mtd); 321 struct mtd_part *part = PART(mtd);
288 part->master->_resume(part->master); 322 part->master->resume(part->master);
289} 323}
290 324
291static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) 325static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
292{ 326{
293 struct mtd_part *part = PART(mtd); 327 struct mtd_part *part = PART(mtd);
328 if (ofs >= mtd->size)
329 return -EINVAL;
294 ofs += part->offset; 330 ofs += part->offset;
295 return part->master->_block_isbad(part->master, ofs); 331 return part->master->block_isbad(part->master, ofs);
296} 332}
297 333
298static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) 334static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -300,8 +336,12 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
300 struct mtd_part *part = PART(mtd); 336 struct mtd_part *part = PART(mtd);
301 int res; 337 int res;
302 338
339 if (!(mtd->flags & MTD_WRITEABLE))
340 return -EROFS;
341 if (ofs >= mtd->size)
342 return -EINVAL;
303 ofs += part->offset; 343 ofs += part->offset;
304 res = part->master->_block_markbad(part->master, ofs); 344 res = part->master->block_markbad(part->master, ofs);
305 if (!res) 345 if (!res)
306 mtd->ecc_stats.badblocks++; 346 mtd->ecc_stats.badblocks++;
307 return res; 347 return res;
@@ -376,55 +416,54 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
376 */ 416 */
377 slave->mtd.dev.parent = master->dev.parent; 417 slave->mtd.dev.parent = master->dev.parent;
378 418
379 slave->mtd._read = part_read; 419 slave->mtd.read = part_read;
380 slave->mtd._write = part_write; 420 slave->mtd.write = part_write;
381 421
382 if (master->_panic_write) 422 if (master->panic_write)
383 slave->mtd._panic_write = part_panic_write; 423 slave->mtd.panic_write = part_panic_write;
384 424
385 if (master->_point && master->_unpoint) { 425 if (master->point && master->unpoint) {
386 slave->mtd._point = part_point; 426 slave->mtd.point = part_point;
387 slave->mtd._unpoint = part_unpoint; 427 slave->mtd.unpoint = part_unpoint;
388 } 428 }
389 429
390 if (master->_get_unmapped_area) 430 if (master->get_unmapped_area)
391 slave->mtd._get_unmapped_area = part_get_unmapped_area; 431 slave->mtd.get_unmapped_area = part_get_unmapped_area;
392 if (master->_read_oob) 432 if (master->read_oob)
393 slave->mtd._read_oob = part_read_oob; 433 slave->mtd.read_oob = part_read_oob;
394 if (master->_write_oob) 434 if (master->write_oob)
395 slave->mtd._write_oob = part_write_oob; 435 slave->mtd.write_oob = part_write_oob;
396 if (master->_read_user_prot_reg) 436 if (master->read_user_prot_reg)
397 slave->mtd._read_user_prot_reg = part_read_user_prot_reg; 437 slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
398 if (master->_read_fact_prot_reg) 438 if (master->read_fact_prot_reg)
399 slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; 439 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
400 if (master->_write_user_prot_reg) 440 if (master->write_user_prot_reg)
401 slave->mtd._write_user_prot_reg = part_write_user_prot_reg; 441 slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
402 if (master->_lock_user_prot_reg) 442 if (master->lock_user_prot_reg)
403 slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; 443 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
404 if (master->_get_user_prot_info) 444 if (master->get_user_prot_info)
405 slave->mtd._get_user_prot_info = part_get_user_prot_info; 445 slave->mtd.get_user_prot_info = part_get_user_prot_info;
406 if (master->_get_fact_prot_info) 446 if (master->get_fact_prot_info)
407 slave->mtd._get_fact_prot_info = part_get_fact_prot_info; 447 slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
408 if (master->_sync) 448 if (master->sync)
409 slave->mtd._sync = part_sync; 449 slave->mtd.sync = part_sync;
410 if (!partno && !master->dev.class && master->_suspend && 450 if (!partno && !master->dev.class && master->suspend && master->resume) {
411 master->_resume) { 451 slave->mtd.suspend = part_suspend;
412 slave->mtd._suspend = part_suspend; 452 slave->mtd.resume = part_resume;
413 slave->mtd._resume = part_resume;
414 } 453 }
415 if (master->_writev) 454 if (master->writev)
416 slave->mtd._writev = part_writev; 455 slave->mtd.writev = part_writev;
417 if (master->_lock) 456 if (master->lock)
418 slave->mtd._lock = part_lock; 457 slave->mtd.lock = part_lock;
419 if (master->_unlock) 458 if (master->unlock)
420 slave->mtd._unlock = part_unlock; 459 slave->mtd.unlock = part_unlock;
421 if (master->_is_locked) 460 if (master->is_locked)
422 slave->mtd._is_locked = part_is_locked; 461 slave->mtd.is_locked = part_is_locked;
423 if (master->_block_isbad) 462 if (master->block_isbad)
424 slave->mtd._block_isbad = part_block_isbad; 463 slave->mtd.block_isbad = part_block_isbad;
425 if (master->_block_markbad) 464 if (master->block_markbad)
426 slave->mtd._block_markbad = part_block_markbad; 465 slave->mtd.block_markbad = part_block_markbad;
427 slave->mtd._erase = part_erase; 466 slave->mtd.erase = part_erase;
428 slave->master = master; 467 slave->master = master;
429 slave->offset = part->offset; 468 slave->offset = part->offset;
430 469
@@ -440,19 +479,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
440 (unsigned long long)cur_offset, (unsigned long long)slave->offset); 479 (unsigned long long)cur_offset, (unsigned long long)slave->offset);
441 } 480 }
442 } 481 }
443 if (slave->offset == MTDPART_OFS_RETAIN) {
444 slave->offset = cur_offset;
445 if (master->size - slave->offset >= slave->mtd.size) {
446 slave->mtd.size = master->size - slave->offset
447 - slave->mtd.size;
448 } else {
449 printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
450 part->name, master->size - slave->offset,
451 slave->mtd.size);
452 /* register to preserve ordering */
453 goto out_register;
454 }
455 }
456 if (slave->mtd.size == MTDPART_SIZ_FULL) 482 if (slave->mtd.size == MTDPART_SIZ_FULL)
457 slave->mtd.size = master->size - slave->offset; 483 slave->mtd.size = master->size - slave->offset;
458 484
@@ -516,14 +542,12 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
516 } 542 }
517 543
518 slave->mtd.ecclayout = master->ecclayout; 544 slave->mtd.ecclayout = master->ecclayout;
519 slave->mtd.ecc_strength = master->ecc_strength; 545 if (master->block_isbad) {
520 slave->mtd.bitflip_threshold = master->bitflip_threshold;
521
522 if (master->_block_isbad) {
523 uint64_t offs = 0; 546 uint64_t offs = 0;
524 547
525 while (offs < slave->mtd.size) { 548 while (offs < slave->mtd.size) {
526 if (mtd_block_isbad(master, offs + slave->offset)) 549 if (master->block_isbad(master,
550 offs + slave->offset))
527 slave->mtd.ecc_stats.badblocks++; 551 slave->mtd.ecc_stats.badblocks++;
528 offs += slave->mtd.erasesize; 552 offs += slave->mtd.erasesize;
529 } 553 }
@@ -669,8 +693,6 @@ static struct mtd_part_parser *get_partition_parser(const char *name)
669 return ret; 693 return ret;
670} 694}
671 695
672#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
673
674int register_mtd_parser(struct mtd_part_parser *p) 696int register_mtd_parser(struct mtd_part_parser *p)
675{ 697{
676 spin_lock(&part_parser_lock); 698 spin_lock(&part_parser_lock);
@@ -690,64 +712,30 @@ int deregister_mtd_parser(struct mtd_part_parser *p)
690} 712}
691EXPORT_SYMBOL_GPL(deregister_mtd_parser); 713EXPORT_SYMBOL_GPL(deregister_mtd_parser);
692 714
693/*
694 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
695 * are changing this array!
696 */
697static const char *default_mtd_part_types[] = {
698 "cmdlinepart",
699 "ofpart",
700 NULL
701};
702
703/**
704 * parse_mtd_partitions - parse MTD partitions
705 * @master: the master partition (describes whole MTD device)
706 * @types: names of partition parsers to try or %NULL
707 * @pparts: array of partitions found is returned here
708 * @data: MTD partition parser-specific data
709 *
710 * This function tries to find partition on MTD device @master. It uses MTD
711 * partition parsers, specified in @types. However, if @types is %NULL, then
712 * the default list of parsers is used. The default list contains only the
713 * "cmdlinepart" and "ofpart" parsers ATM.
714 * Note: If there are more then one parser in @types, the kernel only takes the
715 * partitions parsed out by the first parser.
716 *
717 * This function may return:
718 * o a negative error code in case of failure
719 * o zero if no partitions were found
720 * o a positive number of found partitions, in which case on exit @pparts will
721 * point to an array containing this number of &struct mtd_info objects.
722 */
723int parse_mtd_partitions(struct mtd_info *master, const char **types, 715int parse_mtd_partitions(struct mtd_info *master, const char **types,
724 struct mtd_partition **pparts, 716 struct mtd_partition **pparts, unsigned long origin)
725 struct mtd_part_parser_data *data)
726{ 717{
727 struct mtd_part_parser *parser; 718 struct mtd_part_parser *parser;
728 int ret = 0; 719 int ret = 0;
729 720
730 if (!types)
731 types = default_mtd_part_types;
732
733 for ( ; ret <= 0 && *types; types++) { 721 for ( ; ret <= 0 && *types; types++) {
734 parser = get_partition_parser(*types); 722 parser = get_partition_parser(*types);
735 if (!parser && !request_module("%s", *types)) 723 if (!parser && !request_module("%s", *types))
736 parser = get_partition_parser(*types); 724 parser = get_partition_parser(*types);
737 if (!parser) 725 if (!parser)
738 continue; 726 continue;
739 ret = (*parser->parse_fn)(master, pparts, data); 727 ret = (*parser->parse_fn)(master, pparts, origin);
740 put_partition_parser(parser);
741 if (ret > 0) { 728 if (ret > 0) {
742 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", 729 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
743 ret, parser->name, master->name); 730 ret, parser->name, master->name);
744 break;
745 } 731 }
732 put_partition_parser(parser);
746 } 733 }
747 return ret; 734 return ret;
748} 735}
736EXPORT_SYMBOL_GPL(parse_mtd_partitions);
749 737
750int mtd_is_partition(const struct mtd_info *mtd) 738int mtd_is_partition(struct mtd_info *mtd)
751{ 739{
752 struct mtd_part *part; 740 struct mtd_part *part;
753 int ispart = 0; 741 int ispart = 0;
@@ -763,13 +751,3 @@ int mtd_is_partition(const struct mtd_info *mtd)
763 return ispart; 751 return ispart;
764} 752}
765EXPORT_SYMBOL_GPL(mtd_is_partition); 753EXPORT_SYMBOL_GPL(mtd_is_partition);
766
767/* Returns the size of the entire flash chip */
768uint64_t mtd_get_device_size(const struct mtd_info *mtd)
769{
770 if (!mtd_is_partition(mtd))
771 return mtd->size;
772
773 return PART(mtd)->master->size;
774}
775EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 334da5f583c..16b02a1fc10 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -14,7 +14,6 @@
14 14
15#include <linux/mtd/super.h> 15#include <linux/mtd/super.h>
16#include <linux/namei.h> 16#include <linux/namei.h>
17#include <linux/export.h>
18#include <linux/ctype.h> 17#include <linux/ctype.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
20 19
@@ -27,12 +26,12 @@ static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
27 struct mtd_info *mtd = _mtd; 26 struct mtd_info *mtd = _mtd;
28 27
29 if (sb->s_mtd == mtd) { 28 if (sb->s_mtd == mtd) {
30 pr_debug("MTDSB: Match on device %d (\"%s\")\n", 29 DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n",
31 mtd->index, mtd->name); 30 mtd->index, mtd->name);
32 return 1; 31 return 1;
33 } 32 }
34 33
35 pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", 34 DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
36 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name); 35 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
37 return 0; 36 return 0;
38} 37}
@@ -63,7 +62,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
63 struct super_block *sb; 62 struct super_block *sb;
64 int ret; 63 int ret;
65 64
66 sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd); 65 sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd);
67 if (IS_ERR(sb)) 66 if (IS_ERR(sb))
68 goto out_error; 67 goto out_error;
69 68
@@ -71,9 +70,11 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
71 goto already_mounted; 70 goto already_mounted;
72 71
73 /* fresh new superblock */ 72 /* fresh new superblock */
74 pr_debug("MTDSB: New superblock for device %d (\"%s\")\n", 73 DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
75 mtd->index, mtd->name); 74 mtd->index, mtd->name);
76 75
76 sb->s_flags = flags;
77
77 ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); 78 ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
78 if (ret < 0) { 79 if (ret < 0) {
79 deactivate_locked_super(sb); 80 deactivate_locked_super(sb);
@@ -86,7 +87,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
86 87
87 /* new mountpoint for an already mounted superblock */ 88 /* new mountpoint for an already mounted superblock */
88already_mounted: 89already_mounted:
89 pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n", 90 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
90 mtd->index, mtd->name); 91 mtd->index, mtd->name);
91 put_mtd_device(mtd); 92 put_mtd_device(mtd);
92 return dget(sb->s_root); 93 return dget(sb->s_root);
@@ -107,7 +108,7 @@ static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
107 108
108 mtd = get_mtd_device(NULL, mtdnr); 109 mtd = get_mtd_device(NULL, mtdnr);
109 if (IS_ERR(mtd)) { 110 if (IS_ERR(mtd)) {
110 pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr); 111 DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
111 return ERR_CAST(mtd); 112 return ERR_CAST(mtd);
112 } 113 }
113 114
@@ -130,7 +131,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
130 if (!dev_name) 131 if (!dev_name)
131 return ERR_PTR(-EINVAL); 132 return ERR_PTR(-EINVAL);
132 133
133 pr_debug("MTDSB: dev_name \"%s\"\n", dev_name); 134 DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
134 135
135 /* the preferred way of mounting in future; especially when 136 /* the preferred way of mounting in future; especially when
136 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or 137 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
@@ -141,7 +142,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
141 struct mtd_info *mtd; 142 struct mtd_info *mtd;
142 143
143 /* mount by MTD device name */ 144 /* mount by MTD device name */
144 pr_debug("MTDSB: mtd:%%s, name \"%s\"\n", 145 DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
145 dev_name + 4); 146 dev_name + 4);
146 147
147 mtd = get_mtd_device_nm(dev_name + 4); 148 mtd = get_mtd_device_nm(dev_name + 4);
@@ -162,7 +163,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
162 mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); 163 mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
163 if (!*endptr) { 164 if (!*endptr) {
164 /* It was a valid number */ 165 /* It was a valid number */
165 pr_debug("MTDSB: mtd%%d, mtdnr %d\n", 166 DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
166 mtdnr); 167 mtdnr);
167 return mount_mtd_nr(fs_type, flags, 168 return mount_mtd_nr(fs_type, flags,
168 dev_name, data, 169 dev_name, data,
@@ -178,10 +179,10 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
178 bdev = lookup_bdev(dev_name); 179 bdev = lookup_bdev(dev_name);
179 if (IS_ERR(bdev)) { 180 if (IS_ERR(bdev)) {
180 ret = PTR_ERR(bdev); 181 ret = PTR_ERR(bdev);
181 pr_debug("MTDSB: lookup_bdev() returned %d\n", ret); 182 DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
182 return ERR_PTR(ret); 183 return ERR_PTR(ret);
183 } 184 }
184 pr_debug("MTDSB: lookup_bdev() returned 0\n"); 185 DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
185 186
186 ret = -EINVAL; 187 ret = -EINVAL;
187 188
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index c92f0f6bc13..fd788532761 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -86,7 +86,7 @@ struct swap_eb {
86 unsigned int flags; 86 unsigned int flags;
87 unsigned int active_count; 87 unsigned int active_count;
88 unsigned int erase_count; 88 unsigned int erase_count;
89 unsigned int pad; /* speeds up pointer decrement */ 89 unsigned int pad; /* speeds up pointer decremtnt */
90}; 90};
91 91
92#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ 92#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
@@ -274,12 +274,12 @@ static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
274 eb->root = NULL; 274 eb->root = NULL;
275 275
276 /* badblocks not supported */ 276 /* badblocks not supported */
277 if (!mtd_can_have_bb(d->mtd)) 277 if (!d->mtd->block_markbad)
278 return 1; 278 return 1;
279 279
280 offset = mtdswap_eb_offset(d, eb); 280 offset = mtdswap_eb_offset(d, eb);
281 dev_warn(d->dev, "Marking bad block at %08llx\n", offset); 281 dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
282 ret = mtd_block_markbad(d->mtd, offset); 282 ret = d->mtd->block_markbad(d->mtd, offset);
283 283
284 if (ret) { 284 if (ret) {
285 dev_warn(d->dev, "Mark block bad failed for block at %08llx " 285 dev_warn(d->dev, "Mark block bad failed for block at %08llx "
@@ -312,9 +312,9 @@ static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
312static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, 312static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
313 struct mtd_oob_ops *ops) 313 struct mtd_oob_ops *ops)
314{ 314{
315 int ret = mtd_read_oob(d->mtd, from, ops); 315 int ret = d->mtd->read_oob(d->mtd, from, ops);
316 316
317 if (mtd_is_bitflip(ret)) 317 if (ret == -EUCLEAN)
318 return ret; 318 return ret;
319 319
320 if (ret) { 320 if (ret) {
@@ -343,18 +343,18 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
343 offset = mtdswap_eb_offset(d, eb); 343 offset = mtdswap_eb_offset(d, eb);
344 344
345 /* Check first if the block is bad. */ 345 /* Check first if the block is bad. */
346 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset)) 346 if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
347 return MTDSWAP_SCANNED_BAD; 347 return MTDSWAP_SCANNED_BAD;
348 348
349 ops.ooblen = 2 * d->mtd->ecclayout->oobavail; 349 ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
350 ops.oobbuf = d->oob_buf; 350 ops.oobbuf = d->oob_buf;
351 ops.ooboffs = 0; 351 ops.ooboffs = 0;
352 ops.datbuf = NULL; 352 ops.datbuf = NULL;
353 ops.mode = MTD_OPS_AUTO_OOB; 353 ops.mode = MTD_OOB_AUTO;
354 354
355 ret = mtdswap_read_oob(d, offset, &ops); 355 ret = mtdswap_read_oob(d, offset, &ops);
356 356
357 if (ret && !mtd_is_bitflip(ret)) 357 if (ret && ret != -EUCLEAN)
358 return ret; 358 return ret;
359 359
360 data = (struct mtdswap_oobdata *)d->oob_buf; 360 data = (struct mtdswap_oobdata *)d->oob_buf;
@@ -363,7 +363,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
363 363
364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { 364 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
365 eb->erase_count = le32_to_cpu(data->count); 365 eb->erase_count = le32_to_cpu(data->count);
366 if (mtd_is_bitflip(ret)) 366 if (ret == -EUCLEAN)
367 ret = MTDSWAP_SCANNED_BITFLIP; 367 ret = MTDSWAP_SCANNED_BITFLIP;
368 else { 368 else {
369 if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) 369 if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
@@ -389,7 +389,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
389 389
390 ops.ooboffs = 0; 390 ops.ooboffs = 0;
391 ops.oobbuf = (uint8_t *)&n; 391 ops.oobbuf = (uint8_t *)&n;
392 ops.mode = MTD_OPS_AUTO_OOB; 392 ops.mode = MTD_OOB_AUTO;
393 ops.datbuf = NULL; 393 ops.datbuf = NULL;
394 394
395 if (marker == MTDSWAP_TYPE_CLEAN) { 395 if (marker == MTDSWAP_TYPE_CLEAN) {
@@ -403,12 +403,12 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
403 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; 403 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
404 } 404 }
405 405
406 ret = mtd_write_oob(d->mtd, offset, &ops); 406 ret = d->mtd->write_oob(d->mtd, offset , &ops);
407 407
408 if (ret) { 408 if (ret) {
409 dev_warn(d->dev, "Write OOB failed for block at %08llx " 409 dev_warn(d->dev, "Write OOB failed for block at %08llx "
410 "error %d\n", offset, ret); 410 "error %d\n", offset, ret);
411 if (ret == -EIO || mtd_is_eccerr(ret)) 411 if (ret == -EIO || ret == -EBADMSG)
412 mtdswap_handle_write_error(d, eb); 412 mtdswap_handle_write_error(d, eb);
413 return ret; 413 return ret;
414 } 414 }
@@ -567,7 +567,7 @@ retry:
567 erase.len = mtd->erasesize; 567 erase.len = mtd->erasesize;
568 erase.priv = (u_long)&wq; 568 erase.priv = (u_long)&wq;
569 569
570 ret = mtd_erase(mtd, &erase); 570 ret = mtd->erase(mtd, &erase);
571 if (ret) { 571 if (ret) {
572 if (retries++ < MTDSWAP_ERASE_RETRIES) { 572 if (retries++ < MTDSWAP_ERASE_RETRIES) {
573 dev_warn(d->dev, 573 dev_warn(d->dev,
@@ -628,7 +628,7 @@ static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
628 TREE_COUNT(d, CLEAN)--; 628 TREE_COUNT(d, CLEAN)--;
629 629
630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); 630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
631 } while (ret == -EIO || mtd_is_eccerr(ret)); 631 } while (ret == -EIO || ret == -EBADMSG);
632 632
633 if (ret) 633 if (ret)
634 return ret; 634 return ret;
@@ -678,7 +678,7 @@ retry:
678 ret = mtdswap_map_free_block(d, page, bp); 678 ret = mtdswap_map_free_block(d, page, bp);
679 eb = d->eb_data + (*bp / d->pages_per_eblk); 679 eb = d->eb_data + (*bp / d->pages_per_eblk);
680 680
681 if (ret == -EIO || mtd_is_eccerr(ret)) { 681 if (ret == -EIO || ret == -EBADMSG) {
682 d->curr_write = NULL; 682 d->curr_write = NULL;
683 eb->active_count--; 683 eb->active_count--;
684 d->revmap[*bp] = PAGE_UNDEF; 684 d->revmap[*bp] = PAGE_UNDEF;
@@ -689,8 +689,8 @@ retry:
689 return ret; 689 return ret;
690 690
691 writepos = (loff_t)*bp << PAGE_SHIFT; 691 writepos = (loff_t)*bp << PAGE_SHIFT;
692 ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf); 692 ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
693 if (ret == -EIO || mtd_is_eccerr(ret)) { 693 if (ret == -EIO || ret == -EBADMSG) {
694 d->curr_write_pos--; 694 d->curr_write_pos--;
695 eb->active_count--; 695 eb->active_count--;
696 d->revmap[*bp] = PAGE_UNDEF; 696 d->revmap[*bp] = PAGE_UNDEF;
@@ -736,9 +736,9 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
736 retries = 0; 736 retries = 0;
737 737
738retry: 738retry:
739 ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); 739 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
740 740
741 if (ret < 0 && !mtd_is_bitflip(ret)) { 741 if (ret < 0 && ret != -EUCLEAN) {
742 oldeb = d->eb_data + oldblock / d->pages_per_eblk; 742 oldeb = d->eb_data + oldblock / d->pages_per_eblk;
743 oldeb->flags |= EBLOCK_READERR; 743 oldeb->flags |= EBLOCK_READERR;
744 744
@@ -931,7 +931,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
931 struct mtd_oob_ops ops; 931 struct mtd_oob_ops ops;
932 int ret; 932 int ret;
933 933
934 ops.mode = MTD_OPS_AUTO_OOB; 934 ops.mode = MTD_OOB_AUTO;
935 ops.len = mtd->writesize; 935 ops.len = mtd->writesize;
936 ops.ooblen = mtd->ecclayout->oobavail; 936 ops.ooblen = mtd->ecclayout->oobavail;
937 ops.ooboffs = 0; 937 ops.ooboffs = 0;
@@ -946,7 +946,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
946 patt = mtdswap_test_patt(test + i); 946 patt = mtdswap_test_patt(test + i);
947 memset(d->page_buf, patt, mtd->writesize); 947 memset(d->page_buf, patt, mtd->writesize);
948 memset(d->oob_buf, patt, mtd->ecclayout->oobavail); 948 memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
949 ret = mtd_write_oob(mtd, pos, &ops); 949 ret = mtd->write_oob(mtd, pos, &ops);
950 if (ret) 950 if (ret)
951 goto error; 951 goto error;
952 952
@@ -955,7 +955,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
955 955
956 pos = base; 956 pos = base;
957 for (i = 0; i < mtd_pages; i++) { 957 for (i = 0; i < mtd_pages; i++) {
958 ret = mtd_read_oob(mtd, pos, &ops); 958 ret = mtd->read_oob(mtd, pos, &ops);
959 if (ret) 959 if (ret)
960 goto error; 960 goto error;
961 961
@@ -1016,7 +1016,7 @@ static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
1016 1016
1017 if (ret == 0) 1017 if (ret == 0)
1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); 1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
1019 else if (ret != -EIO && !mtd_is_eccerr(ret)) 1019 else if (ret != -EIO && ret != -EBADMSG)
1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); 1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
1021 1021
1022 return 0; 1022 return 0;
@@ -1047,7 +1047,8 @@ static int mtdswap_flush(struct mtd_blktrans_dev *dev)
1047{ 1047{
1048 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); 1048 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1049 1049
1050 mtd_sync(d->mtd); 1050 if (d->mtd->sync)
1051 d->mtd->sync(d->mtd);
1051 return 0; 1052 return 0;
1052} 1053}
1053 1054
@@ -1058,9 +1059,9 @@ static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
1058 1059
1059 badcnt = 0; 1060 badcnt = 0;
1060 1061
1061 if (mtd_can_have_bb(mtd)) 1062 if (mtd->block_isbad)
1062 for (offset = 0; offset < size; offset += mtd->erasesize) 1063 for (offset = 0; offset < size; offset += mtd->erasesize)
1063 if (mtd_block_isbad(mtd, offset)) 1064 if (mtd->block_isbad(mtd, offset))
1064 badcnt++; 1065 badcnt++;
1065 1066
1066 return badcnt; 1067 return badcnt;
@@ -1160,10 +1161,10 @@ static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
1160 retries = 0; 1161 retries = 0;
1161 1162
1162retry: 1163retry:
1163 ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf); 1164 ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
1164 1165
1165 d->mtd_read_count++; 1166 d->mtd_read_count++;
1166 if (mtd_is_bitflip(ret)) { 1167 if (ret == -EUCLEAN) {
1167 eb->flags |= EBLOCK_BITFLIP; 1168 eb->flags |= EBLOCK_BITFLIP;
1168 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); 1169 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
1169 ret = 0; 1170 ret = 0;
@@ -1373,10 +1374,11 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
1373 goto revmap_fail; 1374 goto revmap_fail;
1374 1375
1375 eblk_bytes = sizeof(struct swap_eb)*d->eblks; 1376 eblk_bytes = sizeof(struct swap_eb)*d->eblks;
1376 d->eb_data = vzalloc(eblk_bytes); 1377 d->eb_data = vmalloc(eblk_bytes);
1377 if (!d->eb_data) 1378 if (!d->eb_data)
1378 goto eb_data_fail; 1379 goto eb_data_fail;
1379 1380
1381 memset(d->eb_data, 0, eblk_bytes);
1380 for (i = 0; i < pages; i++) 1382 for (i = 0; i < pages; i++)
1381 d->page_data[i] = BLOCK_UNDEF; 1383 d->page_data[i] = BLOCK_UNDEF;
1382 1384
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5819eb57521..43173a335e4 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
1config MTD_NAND_IDS
2 tristate "Include chip ids for known NAND devices."
3 depends on MTD
4 help
5 Useful for NAND drivers that do not use the NAND subsystem but
6 still like to take advantage of the known chip information.
7
1config MTD_NAND_ECC 8config MTD_NAND_ECC
2 tristate 9 tristate
3 10
@@ -22,6 +29,15 @@ menuconfig MTD_NAND
22 29
23if MTD_NAND 30if MTD_NAND
24 31
32config MTD_NAND_VERIFY_WRITE
33 bool "Verify NAND page writes"
34 help
35 This adds an extra check when data is written to the flash. The
36 NAND flash device internally checks only bits transitioning
37 from 1 to 0. There is a rare possibility that even though the
38 device thinks the write was successful, a bit could have been
39 flipped accidentally due to device wear or something else.
40
25config MTD_NAND_BCH 41config MTD_NAND_BCH
26 tristate 42 tristate
27 select BCH 43 select BCH
@@ -49,31 +65,24 @@ config MTD_NAND_MUSEUM_IDS
49 NAND chips (page size 256 byte, erase size 4-8KiB). The IDs 65 NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
50 of these chips were reused by later, larger chips. 66 of these chips were reused by later, larger chips.
51 67
52config MTD_NAND_DENALI 68config MTD_NAND_AUTCPU12
53 tristate "Support Denali NAND controller" 69 tristate "SmartMediaCard on autronix autcpu12 board"
54 help 70 depends on ARCH_AUTCPU12
55 Enable support for the Denali NAND controller. This should be 71 help
56 combined with either the PCI or platform drivers to provide device 72 This enables the driver for the autronix autcpu12 board to
57 registration. 73 access the SmartMediaCard.
58 74
59config MTD_NAND_DENALI_PCI 75config MTD_NAND_DENALI
76 depends on PCI
60 tristate "Support Denali NAND controller on Intel Moorestown" 77 tristate "Support Denali NAND controller on Intel Moorestown"
61 depends on PCI && MTD_NAND_DENALI
62 help 78 help
63 Enable the driver for NAND flash on Intel Moorestown, using the 79 Enable the driver for NAND flash on Intel Moorestown, using the
64 Denali NAND controller core. 80 Denali NAND controller core.
65 81
66config MTD_NAND_DENALI_DT
67 tristate "Support Denali NAND controller as a DT device"
68 depends on HAVE_CLK && MTD_NAND_DENALI
69 help
70 Enable the driver for NAND flash on platforms using a Denali NAND
71 controller as a DT device.
72
73config MTD_NAND_DENALI_SCRATCH_REG_ADDR 82config MTD_NAND_DENALI_SCRATCH_REG_ADDR
74 hex "Denali NAND size scratch register address" 83 hex "Denali NAND size scratch register address"
75 default "0xFF108018" 84 default "0xFF108018"
76 depends on MTD_NAND_DENALI_PCI 85 depends on MTD_NAND_DENALI
77 help 86 help
78 Some platforms place the NAND chip size in a scratch register 87 Some platforms place the NAND chip size in a scratch register
79 because (some versions of) the driver aren't able to automatically 88 because (some versions of) the driver aren't able to automatically
@@ -81,9 +90,16 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
81 scratch register here to enable this feature. On Intel Moorestown 90 scratch register here to enable this feature. On Intel Moorestown
82 boards, the scratch register is at 0xFF108018. 91 boards, the scratch register is at 0xFF108018.
83 92
93config MTD_NAND_EDB7312
94 tristate "Support for Cirrus Logic EBD7312 evaluation board"
95 depends on ARCH_EDB7312
96 help
97 This enables the driver for the Cirrus Logic EBD7312 evaluation
98 board to access the onboard NAND Flash.
99
84config MTD_NAND_H1900 100config MTD_NAND_H1900
85 tristate "iPAQ H1900 flash" 101 tristate "iPAQ H1900 flash"
86 depends on ARCH_PXA && BROKEN 102 depends on ARCH_PXA
87 help 103 help
88 This enables the driver for the iPAQ h1900 flash. 104 This enables the driver for the iPAQ h1900 flash.
89 105
@@ -93,6 +109,12 @@ config MTD_NAND_GPIO
93 help 109 help
94 This enables a GPIO based NAND flash driver. 110 This enables a GPIO based NAND flash driver.
95 111
112config MTD_NAND_SPIA
113 tristate "NAND Flash device on SPIA board"
114 depends on ARCH_P720T
115 help
116 If you had to ask, you don't have one. Say 'N'.
117
96config MTD_NAND_AMS_DELTA 118config MTD_NAND_AMS_DELTA
97 tristate "NAND Flash device on Amstrad E3" 119 tristate "NAND Flash device on Amstrad E3"
98 depends on MACH_AMS_DELTA 120 depends on MACH_AMS_DELTA
@@ -101,51 +123,27 @@ config MTD_NAND_AMS_DELTA
101 Support for NAND flash on Amstrad E3 (Delta). 123 Support for NAND flash on Amstrad E3 (Delta).
102 124
103config MTD_NAND_OMAP2 125config MTD_NAND_OMAP2
104 tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4" 126 tristate "NAND Flash device on OMAP2 and OMAP3"
105 depends on ARCH_OMAP2PLUS 127 depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3)
106 help 128 help
107 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 129 Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
108 platforms.
109 130
110config MTD_NAND_OMAP_BCH 131config MTD_NAND_OMAP_PREFETCH
111 depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3 132 bool "GPMC prefetch support for NAND Flash device"
112 bool "Enable support for hardware BCH error correction" 133 depends on MTD_NAND_OMAP2
113 default n 134 default y
114 select BCH
115 select BCH_CONST_PARAMS
116 help
117 Support for hardware BCH error correction.
118
119choice
120 prompt "BCH error correction capability"
121 depends on MTD_NAND_OMAP_BCH
122
123config MTD_NAND_OMAP_BCH8
124 bool "8 bits / 512 bytes (recommended)"
125 help 135 help
126 Support correcting up to 8 bitflips per 512-byte block. 136 The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
127 This will use 13 bytes of spare area per 512 bytes of page data. 137 to improve the performance.
128 This is the recommended mode, as 4-bit mode does not work
129 on some OMAP3 revisions, due to a hardware bug.
130 138
131config MTD_NAND_OMAP_BCH4 139config MTD_NAND_OMAP_PREFETCH_DMA
132 bool "4 bits / 512 bytes" 140 depends on MTD_NAND_OMAP_PREFETCH
141 bool "DMA mode"
142 default n
133 help 143 help
134 Support correcting up to 4 bitflips per 512-byte block. 144 The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
135 This will use 7 bytes of spare area per 512 bytes of page data. 145 or in DMA interrupt mode.
136 Note that this mode does not work on some OMAP3 revisions, due to a 146 Say y for DMA mode or MPU mode will be used
137 hardware bug. Please check your OMAP datasheet before selecting this
138 mode.
139
140endchoice
141
142if MTD_NAND_OMAP_BCH
143config BCH_CONST_M
144 default 13
145config BCH_CONST_T
146 default 4 if MTD_NAND_OMAP_BCH4
147 default 8 if MTD_NAND_OMAP_BCH8
148endif
149 147
150config MTD_NAND_IDS 148config MTD_NAND_IDS
151 tristate 149 tristate
@@ -164,7 +162,7 @@ config MTD_NAND_RICOH
164 162
165config MTD_NAND_AU1550 163config MTD_NAND_AU1550
166 tristate "Au1550/1200 NAND support" 164 tristate "Au1550/1200 NAND support"
167 depends on MIPS_ALCHEMY 165 depends on SOC_AU1200 || SOC_AU1550
168 help 166 help
169 This enables the driver for the NAND flash controller on the 167 This enables the driver for the NAND flash controller on the
170 AMD/Alchemy 1550 SOC. 168 AMD/Alchemy 1550 SOC.
@@ -219,7 +217,7 @@ config MTD_NAND_PPCHAMELEONEVB
219 217
220config MTD_NAND_S3C2410 218config MTD_NAND_S3C2410
221 tristate "NAND Flash support for Samsung S3C SoCs" 219 tristate "NAND Flash support for Samsung S3C SoCs"
222 depends on ARCH_S3C24XX || ARCH_S3C64XX 220 depends on ARCH_S3C2410 || ARCH_S3C64XX
223 help 221 help
224 This enables the NAND flash controller on the S3C24xx and S3C64xx 222 This enables the NAND flash controller on the S3C24xx and S3C64xx
225 SoCs 223 SoCs
@@ -259,10 +257,25 @@ config MTD_NAND_S3C2410_CLKSTOP
259 when the is NAND chip selected or released, but will save 257 when the is NAND chip selected or released, but will save
260 approximately 5mA of power when there is nothing happening. 258 approximately 5mA of power when there is nothing happening.
261 259
260config MTD_NAND_BCM_UMI
261 tristate "NAND Flash support for BCM Reference Boards"
262 depends on ARCH_BCMRING
263 help
264 This enables the NAND flash controller on the BCM UMI block.
265
266 No board specific support is done by this driver, each board
267 must advertise a platform_device for the driver to attach.
268
269config MTD_NAND_BCM_UMI_HWCS
270 bool "BCM UMI NAND Hardware CS"
271 depends on MTD_NAND_BCM_UMI
272 help
273 Enable the use of the BCM UMI block's internal CS using NAND.
274 This should only be used if you know the external NAND CS can toggle.
275
262config MTD_NAND_DISKONCHIP 276config MTD_NAND_DISKONCHIP
263 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" 277 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
264 depends on EXPERIMENTAL 278 depends on EXPERIMENTAL
265 depends on HAS_IOMEM
266 select REED_SOLOMON 279 select REED_SOLOMON
267 select REED_SOLOMON_DEC16 280 select REED_SOLOMON_DEC16
268 help 281 help
@@ -330,26 +343,6 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
330 load time (assuming you build diskonchip as a module) with the module 343 load time (assuming you build diskonchip as a module) with the module
331 parameter "inftl_bbt_write=1". 344 parameter "inftl_bbt_write=1".
332 345
333config MTD_NAND_DOCG4
334 tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
335 depends on EXPERIMENTAL && HAS_IOMEM
336 select BCH
337 select BITREVERSE
338 help
339 Support for diskonchip G4 nand flash, found in various smartphones and
340 PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
341 Portege G900, Asus P526, and O2 XDA Zinc.
342
343 With this driver you will be able to use UBI and create a ubifs on the
344 device, so you may wish to consider enabling UBI and UBIFS as well.
345
346 These devices ship with the Mys/Sandisk SAFTL formatting, for which
347 there is currently no mtd parser, so you may want to use command line
348 partitioning to segregate write-protected blocks. On the Treo680, the
349 first five erase blocks (256KiB each) are write-protected, followed
350 by the block containing the saftl partition table. This is probably
351 typical.
352
353config MTD_NAND_SHARPSL 346config MTD_NAND_SHARPSL
354 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 347 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
355 depends on ARCH_PXA 348 depends on ARCH_PXA
@@ -382,35 +375,53 @@ config MTD_NAND_ATMEL
382 help 375 help
383 Enables support for NAND Flash / Smart Media Card interface 376 Enables support for NAND Flash / Smart Media Card interface
384 on Atmel AT91 and AVR32 processors. 377 on Atmel AT91 and AVR32 processors.
378choice
379 prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
380 depends on MTD_NAND_ATMEL
385 381
386config MTD_NAND_PXA3xx 382config MTD_NAND_ATMEL_ECC_HW
387 tristate "Support for NAND flash devices on PXA3xx" 383 bool "Hardware ECC"
388 depends on PXA3xx || ARCH_MMP 384 depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
389 help 385 help
390 This enables the driver for the NAND flash device found on 386 Use hardware ECC instead of software ECC when the chip
391 PXA3xx processors 387 supports it.
388
389 The hardware ECC controller is capable of single bit error
390 correction and 2-bit random detection per page.
391
392 NB : hardware and software ECC schemes are incompatible.
393 If you switch from one to another, you'll have to erase your
394 mtd partition.
392 395
393config MTD_NAND_SLC_LPC32XX 396 If unsure, say Y
394 tristate "NXP LPC32xx SLC Controller" 397
395 depends on ARCH_LPC32XX 398config MTD_NAND_ATMEL_ECC_SOFT
399 bool "Software ECC"
396 help 400 help
397 Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell 401 Use software ECC.
398 chips) NAND controller. This is the default for the PHYTEC 3250
399 reference board which contains a NAND256R3A2CZA6 chip.
400 402
401 Please check the actual NAND chip connected and its support 403 NB : hardware and software ECC schemes are incompatible.
402 by the SLC NAND controller. 404 If you switch from one to another, you'll have to erase your
405 mtd partition.
403 406
404config MTD_NAND_MLC_LPC32XX 407config MTD_NAND_ATMEL_ECC_NONE
405 tristate "NXP LPC32xx MLC Controller" 408 bool "No ECC (testing only, DANGEROUS)"
406 depends on ARCH_LPC32XX 409 depends on DEBUG_KERNEL
407 help 410 help
408 Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND 411 No ECC will be used.
409 controller. This is the default for the WORK92105 controller 412 It's not a good idea and it should be reserved for testing
410 board. 413 purpose only.
414
415 If unsure, say N
416
417endchoice
411 418
412 Please check the actual NAND chip connected and its support 419config MTD_NAND_PXA3xx
413 by the MLC NAND controller. 420 tristate "Support for NAND flash devices on PXA3xx"
421 depends on PXA3xx || ARCH_MMP
422 help
423 This enables the driver for the NAND flash device found on
424 PXA3xx processors
414 425
415config MTD_NAND_CM_X270 426config MTD_NAND_CM_X270
416 tristate "Support for NAND Flash on CM-X270 modules" 427 tristate "Support for NAND Flash on CM-X270 modules"
@@ -436,28 +447,8 @@ config MTD_NAND_NANDSIM
436 The simulator may simulate various NAND flash chips for the 447 The simulator may simulate various NAND flash chips for the
437 MTD nand layer. 448 MTD nand layer.
438 449
439config MTD_NAND_GPMI_NAND
440 tristate "GPMI NAND Flash Controller driver"
441 depends on MTD_NAND && MXS_DMA
442 help
443 Enables NAND Flash support for IMX23, IMX28 or IMX6.
444 The GPMI controller is very powerful, with the help of BCH
445 module, it can do the hardware ECC. The GPMI supports several
446 NAND flashs at the same time. The GPMI may conflicts with other
447 block, such as SD card. So pay attention to it when you enable
448 the GPMI.
449
450config MTD_NAND_BCM47XXNFLASH
451 tristate "Support for NAND flash on BCM4706 BCMA bus"
452 depends on BCMA_NFLASH
453 help
454 BCMA bus can have various flash memories attached, they are
455 registered by bcma as platform devices. This enables driver for
456 NAND flash memories. For now only BCM4706 is supported.
457
458config MTD_NAND_PLATFORM 450config MTD_NAND_PLATFORM
459 tristate "Support for generic platform NAND driver" 451 tristate "Support for generic platform NAND driver"
460 depends on HAS_IOMEM
461 help 452 help
462 This implements a generic NAND driver for on-SOC platform 453 This implements a generic NAND driver for on-SOC platform
463 devices. You will need to provide platform-specific functions 454 devices. You will need to provide platform-specific functions
@@ -489,16 +480,6 @@ config MTD_NAND_FSL_ELBC
489 Enabling this option will enable you to use this to control 480 Enabling this option will enable you to use this to control
490 external NAND devices. 481 external NAND devices.
491 482
492config MTD_NAND_FSL_IFC
493 tristate "NAND support for Freescale IFC controller"
494 depends on MTD_NAND && FSL_SOC
495 select FSL_IFC
496 help
497 Various Freescale chips e.g P1010, include a NAND Flash machine
498 with built-in hardware ECC capabilities.
499 Enabling this option will enable you to use this to control
500 external NAND devices.
501
502config MTD_NAND_FSL_UPM 483config MTD_NAND_FSL_UPM
503 tristate "Support for NAND on Freescale UPM" 484 tristate "Support for NAND on Freescale UPM"
504 depends on PPC_83xx || PPC_85xx 485 depends on PPC_83xx || PPC_85xx
@@ -516,11 +497,17 @@ config MTD_NAND_MPC5121_NFC
516 497
517config MTD_NAND_MXC 498config MTD_NAND_MXC
518 tristate "MXC NAND support" 499 tristate "MXC NAND support"
519 depends on ARCH_MXC 500 depends on IMX_HAVE_PLATFORM_MXC_NAND
520 help 501 help
521 This enables the driver for the NAND flash controller on the 502 This enables the driver for the NAND flash controller on the
522 MXC processors. 503 MXC processors.
523 504
505config MTD_NAND_NOMADIK
506 tristate "ST Nomadik 8815 NAND support"
507 depends on ARCH_NOMADIK
508 help
509 Driver for the NAND flash controller on the Nomadik, with ECC.
510
524config MTD_NAND_SH_FLCTL 511config MTD_NAND_SH_FLCTL
525 tristate "Support for NAND on Renesas SuperH FLCTL" 512 tristate "Support for NAND on Renesas SuperH FLCTL"
526 depends on SUPERH || ARCH_SHMOBILE 513 depends on SUPERH || ARCH_SHMOBILE
@@ -562,17 +549,9 @@ config MTD_NAND_JZ4740
562 549
563config MTD_NAND_FSMC 550config MTD_NAND_FSMC
564 tristate "Support for NAND on ST Micros FSMC" 551 tristate "Support for NAND on ST Micros FSMC"
565 depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300 552 depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
566 help 553 help
567 Enables support for NAND Flash chips on the ST Microelectronics 554 Enables support for NAND Flash chips on the ST Microelectronics
568 Flexible Static Memory Controller (FSMC) 555 Flexible Static Memory Controller (FSMC)
569 556
570config MTD_NAND_XWAY
571 tristate "Support for NAND on Lantiq XWAY SoC"
572 depends on LANTIQ && SOC_TYPE_XWAY
573 select MTD_NAND_PLATFORM
574 help
575 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
576 to the External Bus Unit (EBU).
577
578endif # MTD_NAND 557endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d76d9120569..5745d831168 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -9,17 +9,17 @@ obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
9obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o 9obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
10 10
11obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 11obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
12obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
12obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 13obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
14obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
13obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 15obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
14obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o 16obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
15obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
16obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 17obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
17obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o 18obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
18obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o 19obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o 20obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o 21obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o 22obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
22obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
23obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o 23obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
24obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 24obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
25obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 25obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
@@ -38,20 +38,16 @@ obj-$(CONFIG_MTD_ALAUDA) += alauda.o
38obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 38obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
39obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o 39obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
40obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 40obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
41obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
42obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o 41obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
43obj-$(CONFIG_MTD_NAND_SLC_LPC32XX) += lpc32xx_slc.o
44obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
45obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o 42obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
46obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 43obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
47obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 44obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
48obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 45obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
49obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o 46obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
47obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
48obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
50obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 49obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
51obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 50obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
52obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 51obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
53obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
54obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
55obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
56 52
57nand-objs := nand_base.o nand_bbt.o 53nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 60a0dfdb080..eb40ea829ab 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
414 } 414 }
415 err = 0; 415 err = 0;
416 if (corrected) 416 if (corrected)
417 err = 1; /* return max_bitflips per ecc step */ 417 err = -EUCLEAN;
418 if (uncorrected) 418 if (uncorrected)
419 err = -EBADMSG; 419 err = -EBADMSG;
420out: 420out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
446 } 446 }
447 err = 0; 447 err = 0;
448 if (corrected) 448 if (corrected)
449 err = 1; /* return max_bitflips per ecc step */ 449 err = -EUCLEAN;
450 if (uncorrected) 450 if (uncorrected)
451 err = -EBADMSG; 451 err = -EBADMSG;
452 return err; 452 return err;
@@ -585,13 +585,12 @@ static int alauda_init_media(struct alauda *al)
585 mtd->writesize = 1<<card->pageshift; 585 mtd->writesize = 1<<card->pageshift;
586 mtd->type = MTD_NANDFLASH; 586 mtd->type = MTD_NANDFLASH;
587 mtd->flags = MTD_CAP_NANDFLASH; 587 mtd->flags = MTD_CAP_NANDFLASH;
588 mtd->_read = alauda_read; 588 mtd->read = alauda_read;
589 mtd->_write = alauda_write; 589 mtd->write = alauda_write;
590 mtd->_erase = alauda_erase; 590 mtd->erase = alauda_erase;
591 mtd->_block_isbad = alauda_isbad; 591 mtd->block_isbad = alauda_isbad;
592 mtd->priv = al; 592 mtd->priv = al;
593 mtd->owner = THIS_MODULE; 593 mtd->owner = THIS_MODULE;
594 mtd->ecc_strength = 1;
595 594
596 err = mtd_device_register(mtd, NULL, 0); 595 err = mtd_device_register(mtd, NULL, 0);
597 if (err) { 596 if (err) {
@@ -718,6 +717,17 @@ static struct usb_driver alauda_driver = {
718 .id_table = alauda_table, 717 .id_table = alauda_table,
719}; 718};
720 719
721module_usb_driver(alauda_driver); 720static int __init alauda_init(void)
721{
722 return usb_register(&alauda_driver);
723}
724
725static void __exit alauda_exit(void)
726{
727 usb_deregister(&alauda_driver);
728}
729
730module_init(alauda_init);
731module_exit(alauda_exit);
722 732
723MODULE_LICENSE("GPL"); 733MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index f1d71cdc8aa..78017eb9318 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -23,21 +23,19 @@
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h> 24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/gpio.h>
27#include <linux/platform_data/gpio-omap.h>
28
29#include <asm/io.h> 26#include <asm/io.h>
30#include <asm/sizes.h>
31
32#include <mach/board-ams-delta.h>
33
34#include <mach/hardware.h> 27#include <mach/hardware.h>
28#include <asm/sizes.h>
29#include <mach/gpio.h>
30#include <plat/board-ams-delta.h>
35 31
36/* 32/*
37 * MTD structure for E3 (Delta) 33 * MTD structure for E3 (Delta)
38 */ 34 */
39static struct mtd_info *ams_delta_mtd = NULL; 35static struct mtd_info *ams_delta_mtd = NULL;
40 36
37#define NAND_MASK (AMS_DELTA_LATCH2_NAND_NRE | AMS_DELTA_LATCH2_NAND_NWE | AMS_DELTA_LATCH2_NAND_CLE | AMS_DELTA_LATCH2_NAND_ALE | AMS_DELTA_LATCH2_NAND_NCE | AMS_DELTA_LATCH2_NAND_NWP)
38
41/* 39/*
42 * Define partitions for flash devices 40 * Define partitions for flash devices
43 */ 41 */
@@ -70,9 +68,10 @@ static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
70 68
71 writew(0, io_base + OMAP_MPUIO_IO_CNTL); 69 writew(0, io_base + OMAP_MPUIO_IO_CNTL);
72 writew(byte, this->IO_ADDR_W); 70 writew(byte, this->IO_ADDR_W);
73 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0); 71 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
74 ndelay(40); 72 ndelay(40);
75 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1); 73 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
74 AMS_DELTA_LATCH2_NAND_NWE);
76} 75}
77 76
78static u_char ams_delta_read_byte(struct mtd_info *mtd) 77static u_char ams_delta_read_byte(struct mtd_info *mtd)
@@ -81,11 +80,12 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
81 struct nand_chip *this = mtd->priv; 80 struct nand_chip *this = mtd->priv;
82 void __iomem *io_base = this->priv; 81 void __iomem *io_base = this->priv;
83 82
84 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0); 83 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
85 ndelay(40); 84 ndelay(40);
86 writew(~0, io_base + OMAP_MPUIO_IO_CNTL); 85 writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
87 res = readw(this->IO_ADDR_R); 86 res = readw(this->IO_ADDR_R);
88 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1); 87 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
88 AMS_DELTA_LATCH2_NAND_NRE);
89 89
90 return res; 90 return res;
91} 91}
@@ -107,6 +107,18 @@ static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
107 buf[i] = ams_delta_read_byte(mtd); 107 buf[i] = ams_delta_read_byte(mtd);
108} 108}
109 109
110static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
111 int len)
112{
113 int i;
114
115 for (i=0; i<len; i++)
116 if (buf[i] != ams_delta_read_byte(mtd))
117 return -EFAULT;
118
119 return 0;
120}
121
110/* 122/*
111 * Command control function 123 * Command control function
112 * 124 *
@@ -120,12 +132,15 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
120{ 132{
121 133
122 if (ctrl & NAND_CTRL_CHANGE) { 134 if (ctrl & NAND_CTRL_CHANGE) {
123 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE, 135 unsigned long bits;
124 (ctrl & NAND_NCE) == 0); 136
125 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE, 137 bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
126 (ctrl & NAND_CLE) != 0); 138 bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
127 gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE, 139 bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
128 (ctrl & NAND_ALE) != 0); 140
141 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
142 AMS_DELTA_LATCH2_NAND_ALE |
143 AMS_DELTA_LATCH2_NAND_NCE, bits);
129 } 144 }
130 145
131 if (cmd != NAND_CMD_NONE) 146 if (cmd != NAND_CMD_NONE)
@@ -137,43 +152,10 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
137 return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB); 152 return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
138} 153}
139 154
140static const struct gpio _mandatory_gpio[] = {
141 {
142 .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE,
143 .flags = GPIOF_OUT_INIT_HIGH,
144 .label = "nand_nce",
145 },
146 {
147 .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE,
148 .flags = GPIOF_OUT_INIT_HIGH,
149 .label = "nand_nre",
150 },
151 {
152 .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP,
153 .flags = GPIOF_OUT_INIT_HIGH,
154 .label = "nand_nwp",
155 },
156 {
157 .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE,
158 .flags = GPIOF_OUT_INIT_HIGH,
159 .label = "nand_nwe",
160 },
161 {
162 .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE,
163 .flags = GPIOF_OUT_INIT_LOW,
164 .label = "nand_ale",
165 },
166 {
167 .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE,
168 .flags = GPIOF_OUT_INIT_LOW,
169 .label = "nand_cle",
170 },
171};
172
173/* 155/*
174 * Main initialization routine 156 * Main initialization routine
175 */ 157 */
176static int ams_delta_init(struct platform_device *pdev) 158static int __devinit ams_delta_init(struct platform_device *pdev)
177{ 159{
178 struct nand_chip *this; 160 struct nand_chip *this;
179 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 161 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -204,17 +186,18 @@ static int ams_delta_init(struct platform_device *pdev)
204 /* Link the private data with the MTD structure */ 186 /* Link the private data with the MTD structure */
205 ams_delta_mtd->priv = this; 187 ams_delta_mtd->priv = this;
206 188
207 /* 189 if (!request_mem_region(res->start, resource_size(res),
208 * Don't try to request the memory region from here, 190 dev_name(&pdev->dev))) {
209 * it should have been already requested from the 191 dev_err(&pdev->dev, "request_mem_region failed\n");
210 * gpio-omap driver and requesting it again would fail. 192 err = -EBUSY;
211 */ 193 goto out_free;
194 }
212 195
213 io_base = ioremap(res->start, resource_size(res)); 196 io_base = ioremap(res->start, resource_size(res));
214 if (io_base == NULL) { 197 if (io_base == NULL) {
215 dev_err(&pdev->dev, "ioremap failed\n"); 198 dev_err(&pdev->dev, "ioremap failed\n");
216 err = -EIO; 199 err = -EIO;
217 goto out_free; 200 goto out_release_io;
218 } 201 }
219 202
220 this->priv = io_base; 203 this->priv = io_base;
@@ -225,6 +208,7 @@ static int ams_delta_init(struct platform_device *pdev)
225 this->read_byte = ams_delta_read_byte; 208 this->read_byte = ams_delta_read_byte;
226 this->write_buf = ams_delta_write_buf; 209 this->write_buf = ams_delta_write_buf;
227 this->read_buf = ams_delta_read_buf; 210 this->read_buf = ams_delta_read_buf;
211 this->verify_buf = ams_delta_verify_buf;
228 this->cmd_ctrl = ams_delta_hwcontrol; 212 this->cmd_ctrl = ams_delta_hwcontrol;
229 if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) { 213 if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
230 this->dev_ready = ams_delta_nand_ready; 214 this->dev_ready = ams_delta_nand_ready;
@@ -239,9 +223,10 @@ static int ams_delta_init(struct platform_device *pdev)
239 platform_set_drvdata(pdev, io_base); 223 platform_set_drvdata(pdev, io_base);
240 224
241 /* Set chip enabled, but */ 225 /* Set chip enabled, but */
242 err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); 226 ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
243 if (err) 227 AMS_DELTA_LATCH2_NAND_NWE |
244 goto out_gpio; 228 AMS_DELTA_LATCH2_NAND_NCE |
229 AMS_DELTA_LATCH2_NAND_NWP);
245 230
246 /* Scan to find existence of the device */ 231 /* Scan to find existence of the device */
247 if (nand_scan(ams_delta_mtd, 1)) { 232 if (nand_scan(ams_delta_mtd, 1)) {
@@ -256,11 +241,10 @@ static int ams_delta_init(struct platform_device *pdev)
256 goto out; 241 goto out;
257 242
258 out_mtd: 243 out_mtd:
259 gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
260out_gpio:
261 platform_set_drvdata(pdev, NULL); 244 platform_set_drvdata(pdev, NULL);
262 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
263 iounmap(io_base); 245 iounmap(io_base);
246out_release_io:
247 release_mem_region(res->start, resource_size(res));
264out_free: 248out_free:
265 kfree(ams_delta_mtd); 249 kfree(ams_delta_mtd);
266 out: 250 out:
@@ -270,16 +254,16 @@ out_free:
270/* 254/*
271 * Clean up routine 255 * Clean up routine
272 */ 256 */
273static int ams_delta_cleanup(struct platform_device *pdev) 257static int __devexit ams_delta_cleanup(struct platform_device *pdev)
274{ 258{
275 void __iomem *io_base = platform_get_drvdata(pdev); 259 void __iomem *io_base = platform_get_drvdata(pdev);
260 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
276 261
277 /* Release resources, unregister device */ 262 /* Release resources, unregister device */
278 nand_release(ams_delta_mtd); 263 nand_release(ams_delta_mtd);
279 264
280 gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
281 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
282 iounmap(io_base); 265 iounmap(io_base);
266 release_mem_region(res->start, resource_size(res));
283 267
284 /* Free the MTD device structure */ 268 /* Free the MTD device structure */
285 kfree(ams_delta_mtd); 269 kfree(ams_delta_mtd);
@@ -289,14 +273,24 @@ static int ams_delta_cleanup(struct platform_device *pdev)
289 273
290static struct platform_driver ams_delta_nand_driver = { 274static struct platform_driver ams_delta_nand_driver = {
291 .probe = ams_delta_init, 275 .probe = ams_delta_init,
292 .remove = ams_delta_cleanup, 276 .remove = __devexit_p(ams_delta_cleanup),
293 .driver = { 277 .driver = {
294 .name = "ams-delta-nand", 278 .name = "ams-delta-nand",
295 .owner = THIS_MODULE, 279 .owner = THIS_MODULE,
296 }, 280 },
297}; 281};
298 282
299module_platform_driver(ams_delta_nand_driver); 283static int __init ams_delta_nand_init(void)
284{
285 return platform_driver_register(&ams_delta_nand_driver);
286}
287module_init(ams_delta_nand_init);
288
289static void __exit ams_delta_nand_exit(void)
290{
291 platform_driver_unregister(&ams_delta_nand_driver);
292}
293module_exit(ams_delta_nand_exit);
300 294
301MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
302MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>"); 296MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index c516a940808..55da20ccc7a 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,22 +1,20 @@
1/* 1/*
2 * Copyright © 2003 Rick Bronson 2 * Copyright (C) 2003 Rick Bronson
3 * 3 *
4 * Derived from drivers/mtd/nand/autcpu12.c 4 * Derived from drivers/mtd/nand/autcpu12.c
5 * Copyright © 2001 Thomas Gleixner (gleixner@autronix.de) 5 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
6 * 6 *
7 * Derived from drivers/mtd/spia.c 7 * Derived from drivers/mtd/spia.c
8 * Copyright © 2000 Steven J. Hill (sjhill@cotw.com) 8 * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
9 * 9 *
10 * 10 *
11 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263 11 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
12 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007 12 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
13 * 13 *
14 * Derived from Das U-Boot source code 14 * Derived from Das U-Boot source code
15 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c) 15 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
16 * © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas 16 * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
17 * 17 *
18 * Add Programmable Multibit ECC support for various AT91 SoC
19 * © Copyright 2012 ATMEL, Hong Xu
20 * 18 *
21 * This program is free software; you can redistribute it and/or modify 19 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the GNU General Public License version 2 as 20 * it under the terms of the GNU General Public License version 2 as
@@ -29,10 +27,6 @@
29#include <linux/module.h> 27#include <linux/module.h>
30#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
31#include <linux/platform_device.h> 29#include <linux/platform_device.h>
32#include <linux/of.h>
33#include <linux/of_device.h>
34#include <linux/of_gpio.h>
35#include <linux/of_mtd.h>
36#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
37#include <linux/mtd/nand.h> 31#include <linux/mtd/nand.h>
38#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
@@ -40,11 +34,22 @@
40#include <linux/dmaengine.h> 34#include <linux/dmaengine.h>
41#include <linux/gpio.h> 35#include <linux/gpio.h>
42#include <linux/io.h> 36#include <linux/io.h>
43#include <linux/platform_data/atmel.h>
44#include <linux/pinctrl/consumer.h>
45 37
38#include <mach/board.h>
46#include <mach/cpu.h> 39#include <mach/cpu.h>
47 40
41#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW
42#define hard_ecc 1
43#else
44#define hard_ecc 0
45#endif
46
47#ifdef CONFIG_MTD_NAND_ATMEL_ECC_NONE
48#define no_ecc 1
49#else
50#define no_ecc 0
51#endif
52
48static int use_dma = 1; 53static int use_dma = 1;
49module_param(use_dma, int, 0); 54module_param(use_dma, int, 0);
50 55
@@ -90,42 +95,14 @@ struct atmel_nand_host {
90 struct mtd_info mtd; 95 struct mtd_info mtd;
91 void __iomem *io_base; 96 void __iomem *io_base;
92 dma_addr_t io_phys; 97 dma_addr_t io_phys;
93 struct atmel_nand_data board; 98 struct atmel_nand_data *board;
94 struct device *dev; 99 struct device *dev;
95 void __iomem *ecc; 100 void __iomem *ecc;
96 101
97 struct completion comp; 102 struct completion comp;
98 struct dma_chan *dma_chan; 103 struct dma_chan *dma_chan;
99
100 bool has_pmecc;
101 u8 pmecc_corr_cap;
102 u16 pmecc_sector_size;
103 u32 pmecc_lookup_table_offset;
104
105 int pmecc_bytes_per_sector;
106 int pmecc_sector_number;
107 int pmecc_degree; /* Degree of remainders */
108 int pmecc_cw_len; /* Length of codeword */
109
110 void __iomem *pmerrloc_base;
111 void __iomem *pmecc_rom_base;
112
113 /* lookup table for alpha_to and index_of */
114 void __iomem *pmecc_alpha_to;
115 void __iomem *pmecc_index_of;
116
117 /* data for pmecc computation */
118 int16_t *pmecc_partial_syn;
119 int16_t *pmecc_si;
120 int16_t *pmecc_smu; /* Sigma table */
121 int16_t *pmecc_lmu; /* polynomal order */
122 int *pmecc_mu;
123 int *pmecc_dmu;
124 int *pmecc_delta;
125}; 104};
126 105
127static struct nand_ecclayout atmel_pmecc_oobinfo;
128
129static int cpu_has_dma(void) 106static int cpu_has_dma(void)
130{ 107{
131 return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); 108 return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
@@ -136,8 +113,8 @@ static int cpu_has_dma(void)
136 */ 113 */
137static void atmel_nand_enable(struct atmel_nand_host *host) 114static void atmel_nand_enable(struct atmel_nand_host *host)
138{ 115{
139 if (gpio_is_valid(host->board.enable_pin)) 116 if (host->board->enable_pin)
140 gpio_set_value(host->board.enable_pin, 0); 117 gpio_set_value(host->board->enable_pin, 0);
141} 118}
142 119
143/* 120/*
@@ -145,8 +122,8 @@ static void atmel_nand_enable(struct atmel_nand_host *host)
145 */ 122 */
146static void atmel_nand_disable(struct atmel_nand_host *host) 123static void atmel_nand_disable(struct atmel_nand_host *host)
147{ 124{
148 if (gpio_is_valid(host->board.enable_pin)) 125 if (host->board->enable_pin)
149 gpio_set_value(host->board.enable_pin, 1); 126 gpio_set_value(host->board->enable_pin, 1);
150} 127}
151 128
152/* 129/*
@@ -167,9 +144,9 @@ static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
167 return; 144 return;
168 145
169 if (ctrl & NAND_CLE) 146 if (ctrl & NAND_CLE)
170 writeb(cmd, host->io_base + (1 << host->board.cle)); 147 writeb(cmd, host->io_base + (1 << host->board->cle));
171 else 148 else
172 writeb(cmd, host->io_base + (1 << host->board.ale)); 149 writeb(cmd, host->io_base + (1 << host->board->ale));
173} 150}
174 151
175/* 152/*
@@ -180,8 +157,8 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
180 struct nand_chip *nand_chip = mtd->priv; 157 struct nand_chip *nand_chip = mtd->priv;
181 struct atmel_nand_host *host = nand_chip->priv; 158 struct atmel_nand_host *host = nand_chip->priv;
182 159
183 return gpio_get_value(host->board.rdy_pin) ^ 160 return gpio_get_value(host->board->rdy_pin) ^
184 !!host->board.rdy_pin_active_low; 161 !!host->board->rdy_pin_active_low;
185} 162}
186 163
187/* 164/*
@@ -296,7 +273,7 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
296 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) 273 if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
297 return; 274 return;
298 275
299 if (host->board.bus_width_16) 276 if (host->board->bus_width_16)
300 atmel_read_buf16(mtd, buf, len); 277 atmel_read_buf16(mtd, buf, len);
301 else 278 else
302 atmel_read_buf8(mtd, buf, len); 279 atmel_read_buf8(mtd, buf, len);
@@ -312,716 +289,13 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
312 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) 289 if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
313 return; 290 return;
314 291
315 if (host->board.bus_width_16) 292 if (host->board->bus_width_16)
316 atmel_write_buf16(mtd, buf, len); 293 atmel_write_buf16(mtd, buf, len);
317 else 294 else
318 atmel_write_buf8(mtd, buf, len); 295 atmel_write_buf8(mtd, buf, len);
319} 296}
320 297
321/* 298/*
322 * Return number of ecc bytes per sector according to sector size and
323 * correction capability
324 *
325 * Following table shows what at91 PMECC supported:
326 * Correction Capability Sector_512_bytes Sector_1024_bytes
327 * ===================== ================ =================
328 * 2-bits 4-bytes 4-bytes
329 * 4-bits 7-bytes 7-bytes
330 * 8-bits 13-bytes 14-bytes
331 * 12-bits 20-bytes 21-bytes
332 * 24-bits 39-bytes 42-bytes
333 */
334static int pmecc_get_ecc_bytes(int cap, int sector_size)
335{
336 int m = 12 + sector_size / 512;
337 return (m * cap + 7) / 8;
338}
339
340static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
341 int oobsize, int ecc_len)
342{
343 int i;
344
345 layout->eccbytes = ecc_len;
346
347 /* ECC will occupy the last ecc_len bytes continuously */
348 for (i = 0; i < ecc_len; i++)
349 layout->eccpos[i] = oobsize - ecc_len + i;
350
351 layout->oobfree[0].offset = 2;
352 layout->oobfree[0].length =
353 oobsize - ecc_len - layout->oobfree[0].offset;
354}
355
356static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
357{
358 int table_size;
359
360 table_size = host->pmecc_sector_size == 512 ?
361 PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
362
363 return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
364 table_size * sizeof(int16_t);
365}
366
367static void pmecc_data_free(struct atmel_nand_host *host)
368{
369 kfree(host->pmecc_partial_syn);
370 kfree(host->pmecc_si);
371 kfree(host->pmecc_lmu);
372 kfree(host->pmecc_smu);
373 kfree(host->pmecc_mu);
374 kfree(host->pmecc_dmu);
375 kfree(host->pmecc_delta);
376}
377
378static int pmecc_data_alloc(struct atmel_nand_host *host)
379{
380 const int cap = host->pmecc_corr_cap;
381
382 host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t),
383 GFP_KERNEL);
384 host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL);
385 host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL);
386 host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t),
387 GFP_KERNEL);
388 host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
389 host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
390 host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
391
392 if (host->pmecc_partial_syn &&
393 host->pmecc_si &&
394 host->pmecc_lmu &&
395 host->pmecc_smu &&
396 host->pmecc_mu &&
397 host->pmecc_dmu &&
398 host->pmecc_delta)
399 return 0;
400
401 /* error happened */
402 pmecc_data_free(host);
403 return -ENOMEM;
404}
405
406static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
407{
408 struct nand_chip *nand_chip = mtd->priv;
409 struct atmel_nand_host *host = nand_chip->priv;
410 int i;
411 uint32_t value;
412
413 /* Fill odd syndromes */
414 for (i = 0; i < host->pmecc_corr_cap; i++) {
415 value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
416 if (i & 1)
417 value >>= 16;
418 value &= 0xffff;
419 host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
420 }
421}
422
423static void pmecc_substitute(struct mtd_info *mtd)
424{
425 struct nand_chip *nand_chip = mtd->priv;
426 struct atmel_nand_host *host = nand_chip->priv;
427 int16_t __iomem *alpha_to = host->pmecc_alpha_to;
428 int16_t __iomem *index_of = host->pmecc_index_of;
429 int16_t *partial_syn = host->pmecc_partial_syn;
430 const int cap = host->pmecc_corr_cap;
431 int16_t *si;
432 int i, j;
433
434 /* si[] is a table that holds the current syndrome value,
435 * an element of that table belongs to the field
436 */
437 si = host->pmecc_si;
438
439 memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
440
441 /* Computation 2t syndromes based on S(x) */
442 /* Odd syndromes */
443 for (i = 1; i < 2 * cap; i += 2) {
444 for (j = 0; j < host->pmecc_degree; j++) {
445 if (partial_syn[i] & ((unsigned short)0x1 << j))
446 si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
447 }
448 }
449 /* Even syndrome = (Odd syndrome) ** 2 */
450 for (i = 2, j = 1; j <= cap; i = ++j << 1) {
451 if (si[j] == 0) {
452 si[i] = 0;
453 } else {
454 int16_t tmp;
455
456 tmp = readw_relaxed(index_of + si[j]);
457 tmp = (tmp * 2) % host->pmecc_cw_len;
458 si[i] = readw_relaxed(alpha_to + tmp);
459 }
460 }
461
462 return;
463}
464
465static void pmecc_get_sigma(struct mtd_info *mtd)
466{
467 struct nand_chip *nand_chip = mtd->priv;
468 struct atmel_nand_host *host = nand_chip->priv;
469
470 int16_t *lmu = host->pmecc_lmu;
471 int16_t *si = host->pmecc_si;
472 int *mu = host->pmecc_mu;
473 int *dmu = host->pmecc_dmu; /* Discrepancy */
474 int *delta = host->pmecc_delta; /* Delta order */
475 int cw_len = host->pmecc_cw_len;
476 const int16_t cap = host->pmecc_corr_cap;
477 const int num = 2 * cap + 1;
478 int16_t __iomem *index_of = host->pmecc_index_of;
479 int16_t __iomem *alpha_to = host->pmecc_alpha_to;
480 int i, j, k;
481 uint32_t dmu_0_count, tmp;
482 int16_t *smu = host->pmecc_smu;
483
484 /* index of largest delta */
485 int ro;
486 int largest;
487 int diff;
488
489 dmu_0_count = 0;
490
491 /* First Row */
492
493 /* Mu */
494 mu[0] = -1;
495
496 memset(smu, 0, sizeof(int16_t) * num);
497 smu[0] = 1;
498
499 /* discrepancy set to 1 */
500 dmu[0] = 1;
501 /* polynom order set to 0 */
502 lmu[0] = 0;
503 delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
504
505 /* Second Row */
506
507 /* Mu */
508 mu[1] = 0;
509 /* Sigma(x) set to 1 */
510 memset(&smu[num], 0, sizeof(int16_t) * num);
511 smu[num] = 1;
512
513 /* discrepancy set to S1 */
514 dmu[1] = si[1];
515
516 /* polynom order set to 0 */
517 lmu[1] = 0;
518
519 delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
520
521 /* Init the Sigma(x) last row */
522 memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
523
524 for (i = 1; i <= cap; i++) {
525 mu[i + 1] = i << 1;
526 /* Begin Computing Sigma (Mu+1) and L(mu) */
527 /* check if discrepancy is set to 0 */
528 if (dmu[i] == 0) {
529 dmu_0_count++;
530
531 tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
532 if ((cap - (lmu[i] >> 1) - 1) & 0x1)
533 tmp += 2;
534 else
535 tmp += 1;
536
537 if (dmu_0_count == tmp) {
538 for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
539 smu[(cap + 1) * num + j] =
540 smu[i * num + j];
541
542 lmu[cap + 1] = lmu[i];
543 return;
544 }
545
546 /* copy polynom */
547 for (j = 0; j <= lmu[i] >> 1; j++)
548 smu[(i + 1) * num + j] = smu[i * num + j];
549
550 /* copy previous polynom order to the next */
551 lmu[i + 1] = lmu[i];
552 } else {
553 ro = 0;
554 largest = -1;
555 /* find largest delta with dmu != 0 */
556 for (j = 0; j < i; j++) {
557 if ((dmu[j]) && (delta[j] > largest)) {
558 largest = delta[j];
559 ro = j;
560 }
561 }
562
563 /* compute difference */
564 diff = (mu[i] - mu[ro]);
565
566 /* Compute degree of the new smu polynomial */
567 if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
568 lmu[i + 1] = lmu[i];
569 else
570 lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
571
572 /* Init smu[i+1] with 0 */
573 for (k = 0; k < num; k++)
574 smu[(i + 1) * num + k] = 0;
575
576 /* Compute smu[i+1] */
577 for (k = 0; k <= lmu[ro] >> 1; k++) {
578 int16_t a, b, c;
579
580 if (!(smu[ro * num + k] && dmu[i]))
581 continue;
582 a = readw_relaxed(index_of + dmu[i]);
583 b = readw_relaxed(index_of + dmu[ro]);
584 c = readw_relaxed(index_of + smu[ro * num + k]);
585 tmp = a + (cw_len - b) + c;
586 a = readw_relaxed(alpha_to + tmp % cw_len);
587 smu[(i + 1) * num + (k + diff)] = a;
588 }
589
590 for (k = 0; k <= lmu[i] >> 1; k++)
591 smu[(i + 1) * num + k] ^= smu[i * num + k];
592 }
593
594 /* End Computing Sigma (Mu+1) and L(mu) */
595 /* In either case compute delta */
596 delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
597
598 /* Do not compute discrepancy for the last iteration */
599 if (i >= cap)
600 continue;
601
602 for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
603 tmp = 2 * (i - 1);
604 if (k == 0) {
605 dmu[i + 1] = si[tmp + 3];
606 } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
607 int16_t a, b, c;
608 a = readw_relaxed(index_of +
609 smu[(i + 1) * num + k]);
610 b = si[2 * (i - 1) + 3 - k];
611 c = readw_relaxed(index_of + b);
612 tmp = a + c;
613 tmp %= cw_len;
614 dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
615 dmu[i + 1];
616 }
617 }
618 }
619
620 return;
621}
622
623static int pmecc_err_location(struct mtd_info *mtd)
624{
625 struct nand_chip *nand_chip = mtd->priv;
626 struct atmel_nand_host *host = nand_chip->priv;
627 unsigned long end_time;
628 const int cap = host->pmecc_corr_cap;
629 const int num = 2 * cap + 1;
630 int sector_size = host->pmecc_sector_size;
631 int err_nbr = 0; /* number of error */
632 int roots_nbr; /* number of roots */
633 int i;
634 uint32_t val;
635 int16_t *smu = host->pmecc_smu;
636
637 pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
638
639 for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
640 pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
641 smu[(cap + 1) * num + i]);
642 err_nbr++;
643 }
644
645 val = (err_nbr - 1) << 16;
646 if (sector_size == 1024)
647 val |= 1;
648
649 pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
650 pmerrloc_writel(host->pmerrloc_base, ELEN,
651 sector_size * 8 + host->pmecc_degree * cap);
652
653 end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
654 while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
655 & PMERRLOC_CALC_DONE)) {
656 if (unlikely(time_after(jiffies, end_time))) {
657 dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
658 return -1;
659 }
660 cpu_relax();
661 }
662
663 roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
664 & PMERRLOC_ERR_NUM_MASK) >> 8;
665 /* Number of roots == degree of smu hence <= cap */
666 if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
667 return err_nbr - 1;
668
669 /* Number of roots does not match the degree of smu
670 * unable to correct error */
671 return -1;
672}
673
674static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
675 int sector_num, int extra_bytes, int err_nbr)
676{
677 struct nand_chip *nand_chip = mtd->priv;
678 struct atmel_nand_host *host = nand_chip->priv;
679 int i = 0;
680 int byte_pos, bit_pos, sector_size, pos;
681 uint32_t tmp;
682 uint8_t err_byte;
683
684 sector_size = host->pmecc_sector_size;
685
686 while (err_nbr) {
687 tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
688 byte_pos = tmp / 8;
689 bit_pos = tmp % 8;
690
691 if (byte_pos >= (sector_size + extra_bytes))
692 BUG(); /* should never happen */
693
694 if (byte_pos < sector_size) {
695 err_byte = *(buf + byte_pos);
696 *(buf + byte_pos) ^= (1 << bit_pos);
697
698 pos = sector_num * host->pmecc_sector_size + byte_pos;
699 dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
700 pos, bit_pos, err_byte, *(buf + byte_pos));
701 } else {
702 /* Bit flip in OOB area */
703 tmp = sector_num * host->pmecc_bytes_per_sector
704 + (byte_pos - sector_size);
705 err_byte = ecc[tmp];
706 ecc[tmp] ^= (1 << bit_pos);
707
708 pos = tmp + nand_chip->ecc.layout->eccpos[0];
709 dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
710 pos, bit_pos, err_byte, ecc[tmp]);
711 }
712
713 i++;
714 err_nbr--;
715 }
716
717 return;
718}
719
720static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
721 u8 *ecc)
722{
723 struct nand_chip *nand_chip = mtd->priv;
724 struct atmel_nand_host *host = nand_chip->priv;
725 int i, err_nbr, eccbytes;
726 uint8_t *buf_pos;
727 int total_err = 0;
728
729 eccbytes = nand_chip->ecc.bytes;
730 for (i = 0; i < eccbytes; i++)
731 if (ecc[i] != 0xff)
732 goto normal_check;
733 /* Erased page, return OK */
734 return 0;
735
736normal_check:
737 for (i = 0; i < host->pmecc_sector_number; i++) {
738 err_nbr = 0;
739 if (pmecc_stat & 0x1) {
740 buf_pos = buf + i * host->pmecc_sector_size;
741
742 pmecc_gen_syndrome(mtd, i);
743 pmecc_substitute(mtd);
744 pmecc_get_sigma(mtd);
745
746 err_nbr = pmecc_err_location(mtd);
747 if (err_nbr == -1) {
748 dev_err(host->dev, "PMECC: Too many errors\n");
749 mtd->ecc_stats.failed++;
750 return -EIO;
751 } else {
752 pmecc_correct_data(mtd, buf_pos, ecc, i,
753 host->pmecc_bytes_per_sector, err_nbr);
754 mtd->ecc_stats.corrected += err_nbr;
755 total_err += err_nbr;
756 }
757 }
758 pmecc_stat >>= 1;
759 }
760
761 return total_err;
762}
763
764static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
765 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
766{
767 struct atmel_nand_host *host = chip->priv;
768 int eccsize = chip->ecc.size;
769 uint8_t *oob = chip->oob_poi;
770 uint32_t *eccpos = chip->ecc.layout->eccpos;
771 uint32_t stat;
772 unsigned long end_time;
773 int bitflips = 0;
774
775 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
776 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
777 pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG)
778 & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
779
780 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
781 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
782
783 chip->read_buf(mtd, buf, eccsize);
784 chip->read_buf(mtd, oob, mtd->oobsize);
785
786 end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
787 while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
788 if (unlikely(time_after(jiffies, end_time))) {
789 dev_err(host->dev, "PMECC: Timeout to get error status.\n");
790 return -EIO;
791 }
792 cpu_relax();
793 }
794
795 stat = pmecc_readl_relaxed(host->ecc, ISR);
796 if (stat != 0) {
797 bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
798 if (bitflips < 0)
799 /* uncorrectable errors */
800 return 0;
801 }
802
803 return bitflips;
804}
805
806static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
807 struct nand_chip *chip, const uint8_t *buf, int oob_required)
808{
809 struct atmel_nand_host *host = chip->priv;
810 uint32_t *eccpos = chip->ecc.layout->eccpos;
811 int i, j;
812 unsigned long end_time;
813
814 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
815 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
816
817 pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) |
818 PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
819
820 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
821 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
822
823 chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
824
825 end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
826 while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
827 if (unlikely(time_after(jiffies, end_time))) {
828 dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
829 return -EIO;
830 }
831 cpu_relax();
832 }
833
834 for (i = 0; i < host->pmecc_sector_number; i++) {
835 for (j = 0; j < host->pmecc_bytes_per_sector; j++) {
836 int pos;
837
838 pos = i * host->pmecc_bytes_per_sector + j;
839 chip->oob_poi[eccpos[pos]] =
840 pmecc_readb_ecc_relaxed(host->ecc, i, j);
841 }
842 }
843 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
844
845 return 0;
846}
847
848static void atmel_pmecc_core_init(struct mtd_info *mtd)
849{
850 struct nand_chip *nand_chip = mtd->priv;
851 struct atmel_nand_host *host = nand_chip->priv;
852 uint32_t val = 0;
853 struct nand_ecclayout *ecc_layout;
854
855 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
856 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
857
858 switch (host->pmecc_corr_cap) {
859 case 2:
860 val = PMECC_CFG_BCH_ERR2;
861 break;
862 case 4:
863 val = PMECC_CFG_BCH_ERR4;
864 break;
865 case 8:
866 val = PMECC_CFG_BCH_ERR8;
867 break;
868 case 12:
869 val = PMECC_CFG_BCH_ERR12;
870 break;
871 case 24:
872 val = PMECC_CFG_BCH_ERR24;
873 break;
874 }
875
876 if (host->pmecc_sector_size == 512)
877 val |= PMECC_CFG_SECTOR512;
878 else if (host->pmecc_sector_size == 1024)
879 val |= PMECC_CFG_SECTOR1024;
880
881 switch (host->pmecc_sector_number) {
882 case 1:
883 val |= PMECC_CFG_PAGE_1SECTOR;
884 break;
885 case 2:
886 val |= PMECC_CFG_PAGE_2SECTORS;
887 break;
888 case 4:
889 val |= PMECC_CFG_PAGE_4SECTORS;
890 break;
891 case 8:
892 val |= PMECC_CFG_PAGE_8SECTORS;
893 break;
894 }
895
896 val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
897 | PMECC_CFG_AUTO_DISABLE);
898 pmecc_writel(host->ecc, CFG, val);
899
900 ecc_layout = nand_chip->ecc.layout;
901 pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
902 pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
903 pmecc_writel(host->ecc, EADDR,
904 ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
905 /* See datasheet about PMECC Clock Control Register */
906 pmecc_writel(host->ecc, CLK, 2);
907 pmecc_writel(host->ecc, IDR, 0xff);
908 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
909}
910
911static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
912 struct atmel_nand_host *host)
913{
914 struct mtd_info *mtd = &host->mtd;
915 struct nand_chip *nand_chip = &host->nand_chip;
916 struct resource *regs, *regs_pmerr, *regs_rom;
917 int cap, sector_size, err_no;
918
919 cap = host->pmecc_corr_cap;
920 sector_size = host->pmecc_sector_size;
921 dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
922 cap, sector_size);
923
924 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
925 if (!regs) {
926 dev_warn(host->dev,
927 "Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
928 nand_chip->ecc.mode = NAND_ECC_SOFT;
929 return 0;
930 }
931
932 host->ecc = ioremap(regs->start, resource_size(regs));
933 if (host->ecc == NULL) {
934 dev_err(host->dev, "ioremap failed\n");
935 err_no = -EIO;
936 goto err_pmecc_ioremap;
937 }
938
939 regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
940 regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
941 if (regs_pmerr && regs_rom) {
942 host->pmerrloc_base = ioremap(regs_pmerr->start,
943 resource_size(regs_pmerr));
944 host->pmecc_rom_base = ioremap(regs_rom->start,
945 resource_size(regs_rom));
946 }
947
948 if (!host->pmerrloc_base || !host->pmecc_rom_base) {
949 dev_err(host->dev,
950 "Can not get I/O resource for PMECC ERRLOC controller or ROM!\n");
951 err_no = -EIO;
952 goto err_pmloc_ioremap;
953 }
954
955 /* ECC is calculated for the whole page (1 step) */
956 nand_chip->ecc.size = mtd->writesize;
957
958 /* set ECC page size and oob layout */
959 switch (mtd->writesize) {
960 case 2048:
961 host->pmecc_degree = PMECC_GF_DIMENSION_13;
962 host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
963 host->pmecc_sector_number = mtd->writesize / sector_size;
964 host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
965 cap, sector_size);
966 host->pmecc_alpha_to = pmecc_get_alpha_to(host);
967 host->pmecc_index_of = host->pmecc_rom_base +
968 host->pmecc_lookup_table_offset;
969
970 nand_chip->ecc.steps = 1;
971 nand_chip->ecc.strength = cap;
972 nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
973 host->pmecc_sector_number;
974 if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
975 dev_err(host->dev, "No room for ECC bytes\n");
976 err_no = -EINVAL;
977 goto err_no_ecc_room;
978 }
979 pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
980 mtd->oobsize,
981 nand_chip->ecc.bytes);
982 nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
983 break;
984 case 512:
985 case 1024:
986 case 4096:
987 /* TODO */
988 dev_warn(host->dev,
989 "Unsupported page size for PMECC, use Software ECC\n");
990 default:
991 /* page size not handled by HW ECC */
992 /* switching back to soft ECC */
993 nand_chip->ecc.mode = NAND_ECC_SOFT;
994 return 0;
995 }
996
997 /* Allocate data for PMECC computation */
998 err_no = pmecc_data_alloc(host);
999 if (err_no) {
1000 dev_err(host->dev,
1001 "Cannot allocate memory for PMECC computation!\n");
1002 goto err_pmecc_data_alloc;
1003 }
1004
1005 nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
1006 nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
1007
1008 atmel_pmecc_core_init(mtd);
1009
1010 return 0;
1011
1012err_pmecc_data_alloc:
1013err_no_ecc_room:
1014err_pmloc_ioremap:
1015 iounmap(host->ecc);
1016 if (host->pmerrloc_base)
1017 iounmap(host->pmerrloc_base);
1018 if (host->pmecc_rom_base)
1019 iounmap(host->pmecc_rom_base);
1020err_pmecc_ioremap:
1021 return err_no;
1022}
1023
1024/*
1025 * Calculate HW ECC 299 * Calculate HW ECC
1026 * 300 *
1027 * function called after a write 301 * function called after a write
@@ -1058,10 +332,9 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
1058 * mtd: mtd info structure 332 * mtd: mtd info structure
1059 * chip: nand chip info structure 333 * chip: nand chip info structure
1060 * buf: buffer to store read data 334 * buf: buffer to store read data
1061 * oob_required: caller expects OOB data read to chip->oob_poi
1062 */ 335 */
1063static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, 336static int atmel_nand_read_page(struct mtd_info *mtd,
1064 uint8_t *buf, int oob_required, int page) 337 struct nand_chip *chip, uint8_t *buf, int page)
1065{ 338{
1066 int eccsize = chip->ecc.size; 339 int eccsize = chip->ecc.size;
1067 int eccbytes = chip->ecc.bytes; 340 int eccbytes = chip->ecc.bytes;
@@ -1070,7 +343,6 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1070 uint8_t *oob = chip->oob_poi; 343 uint8_t *oob = chip->oob_poi;
1071 uint8_t *ecc_pos; 344 uint8_t *ecc_pos;
1072 int stat; 345 int stat;
1073 unsigned int max_bitflips = 0;
1074 346
1075 /* 347 /*
1076 * Errata: ALE is incorrectly wired up to the ECC controller 348 * Errata: ALE is incorrectly wired up to the ECC controller
@@ -1107,12 +379,10 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1107 /* check if there's an error */ 379 /* check if there's an error */
1108 stat = chip->ecc.correct(mtd, p, oob, NULL); 380 stat = chip->ecc.correct(mtd, p, oob, NULL);
1109 381
1110 if (stat < 0) { 382 if (stat < 0)
1111 mtd->ecc_stats.failed++; 383 mtd->ecc_stats.failed++;
1112 } else { 384 else
1113 mtd->ecc_stats.corrected += stat; 385 mtd->ecc_stats.corrected += stat;
1114 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1115 }
1116 386
1117 /* get back to oob start (end of page) */ 387 /* get back to oob start (end of page) */
1118 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 388 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -1120,7 +390,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1120 /* read the oob */ 390 /* read the oob */
1121 chip->read_buf(mtd, oob, mtd->oobsize); 391 chip->read_buf(mtd, oob, mtd->oobsize);
1122 392
1123 return max_bitflips; 393 return 0;
1124} 394}
1125 395
1126/* 396/*
@@ -1211,161 +481,10 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
1211 } 481 }
1212} 482}
1213 483
1214#if defined(CONFIG_OF) 484#ifdef CONFIG_MTD_CMDLINE_PARTS
1215static int atmel_of_init_port(struct atmel_nand_host *host, 485static const char *part_probes[] = { "cmdlinepart", NULL };
1216 struct device_node *np)
1217{
1218 u32 val, table_offset;
1219 u32 offset[2];
1220 int ecc_mode;
1221 struct atmel_nand_data *board = &host->board;
1222 enum of_gpio_flags flags;
1223
1224 if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
1225 if (val >= 32) {
1226 dev_err(host->dev, "invalid addr-offset %u\n", val);
1227 return -EINVAL;
1228 }
1229 board->ale = val;
1230 }
1231
1232 if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
1233 if (val >= 32) {
1234 dev_err(host->dev, "invalid cmd-offset %u\n", val);
1235 return -EINVAL;
1236 }
1237 board->cle = val;
1238 }
1239
1240 ecc_mode = of_get_nand_ecc_mode(np);
1241
1242 board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
1243
1244 board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
1245
1246 if (of_get_nand_bus_width(np) == 16)
1247 board->bus_width_16 = 1;
1248
1249 board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
1250 board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
1251
1252 board->enable_pin = of_get_gpio(np, 1);
1253 board->det_pin = of_get_gpio(np, 2);
1254
1255 host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
1256
1257 if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
1258 return 0; /* Not using PMECC */
1259
1260 /* use PMECC, get correction capability, sector size and lookup
1261 * table offset.
1262 */
1263 if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
1264 dev_err(host->dev, "Cannot decide PMECC Capability\n");
1265 return -EINVAL;
1266 } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
1267 (val != 24)) {
1268 dev_err(host->dev,
1269 "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
1270 val);
1271 return -EINVAL;
1272 }
1273 host->pmecc_corr_cap = (u8)val;
1274
1275 if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
1276 dev_err(host->dev, "Cannot decide PMECC Sector Size\n");
1277 return -EINVAL;
1278 } else if ((val != 512) && (val != 1024)) {
1279 dev_err(host->dev,
1280 "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
1281 val);
1282 return -EINVAL;
1283 }
1284 host->pmecc_sector_size = (u16)val;
1285
1286 if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
1287 offset, 2) != 0) {
1288 dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
1289 return -EINVAL;
1290 }
1291 table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
1292
1293 if (!table_offset) {
1294 dev_err(host->dev, "Invalid PMECC lookup table offset\n");
1295 return -EINVAL;
1296 }
1297 host->pmecc_lookup_table_offset = table_offset;
1298
1299 return 0;
1300}
1301#else
1302static int atmel_of_init_port(struct atmel_nand_host *host,
1303 struct device_node *np)
1304{
1305 return -EINVAL;
1306}
1307#endif 486#endif
1308 487
1309static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
1310 struct atmel_nand_host *host)
1311{
1312 struct mtd_info *mtd = &host->mtd;
1313 struct nand_chip *nand_chip = &host->nand_chip;
1314 struct resource *regs;
1315
1316 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1317 if (!regs) {
1318 dev_err(host->dev,
1319 "Can't get I/O resource regs, use software ECC\n");
1320 nand_chip->ecc.mode = NAND_ECC_SOFT;
1321 return 0;
1322 }
1323
1324 host->ecc = ioremap(regs->start, resource_size(regs));
1325 if (host->ecc == NULL) {
1326 dev_err(host->dev, "ioremap failed\n");
1327 return -EIO;
1328 }
1329
1330 /* ECC is calculated for the whole page (1 step) */
1331 nand_chip->ecc.size = mtd->writesize;
1332
1333 /* set ECC page size and oob layout */
1334 switch (mtd->writesize) {
1335 case 512:
1336 nand_chip->ecc.layout = &atmel_oobinfo_small;
1337 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
1338 break;
1339 case 1024:
1340 nand_chip->ecc.layout = &atmel_oobinfo_large;
1341 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
1342 break;
1343 case 2048:
1344 nand_chip->ecc.layout = &atmel_oobinfo_large;
1345 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
1346 break;
1347 case 4096:
1348 nand_chip->ecc.layout = &atmel_oobinfo_large;
1349 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
1350 break;
1351 default:
1352 /* page size not handled by HW ECC */
1353 /* switching back to soft ECC */
1354 nand_chip->ecc.mode = NAND_ECC_SOFT;
1355 return 0;
1356 }
1357
1358 /* set up for HW ECC */
1359 nand_chip->ecc.calculate = atmel_nand_calculate;
1360 nand_chip->ecc.correct = atmel_nand_correct;
1361 nand_chip->ecc.hwctl = atmel_nand_hwctl;
1362 nand_chip->ecc.read_page = atmel_nand_read_page;
1363 nand_chip->ecc.bytes = 4;
1364 nand_chip->ecc.strength = 1;
1365
1366 return 0;
1367}
1368
1369/* 488/*
1370 * Probe for the NAND device. 489 * Probe for the NAND device.
1371 */ 490 */
@@ -1374,10 +493,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1374 struct atmel_nand_host *host; 493 struct atmel_nand_host *host;
1375 struct mtd_info *mtd; 494 struct mtd_info *mtd;
1376 struct nand_chip *nand_chip; 495 struct nand_chip *nand_chip;
496 struct resource *regs;
1377 struct resource *mem; 497 struct resource *mem;
1378 struct mtd_part_parser_data ppdata = {};
1379 int res; 498 int res;
1380 struct pinctrl *pinctrl; 499 struct mtd_partition *partitions = NULL;
500 int num_partitions = 0;
1381 501
1382 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 502 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1383 if (!mem) { 503 if (!mem) {
@@ -1403,15 +523,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1403 523
1404 mtd = &host->mtd; 524 mtd = &host->mtd;
1405 nand_chip = &host->nand_chip; 525 nand_chip = &host->nand_chip;
526 host->board = pdev->dev.platform_data;
1406 host->dev = &pdev->dev; 527 host->dev = &pdev->dev;
1407 if (pdev->dev.of_node) {
1408 res = atmel_of_init_port(host, pdev->dev.of_node);
1409 if (res)
1410 goto err_ecc_ioremap;
1411 } else {
1412 memcpy(&host->board, pdev->dev.platform_data,
1413 sizeof(struct atmel_nand_data));
1414 }
1415 528
1416 nand_chip->priv = host; /* link the private data structures */ 529 nand_chip->priv = host; /* link the private data structures */
1417 mtd->priv = nand_chip; 530 mtd->priv = nand_chip;
@@ -1422,55 +535,36 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1422 nand_chip->IO_ADDR_W = host->io_base; 535 nand_chip->IO_ADDR_W = host->io_base;
1423 nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl; 536 nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
1424 537
1425 pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 538 if (host->board->rdy_pin)
1426 if (IS_ERR(pinctrl)) {
1427 dev_err(host->dev, "Failed to request pinctrl\n");
1428 res = PTR_ERR(pinctrl);
1429 goto err_ecc_ioremap;
1430 }
1431
1432 if (gpio_is_valid(host->board.rdy_pin)) {
1433 res = gpio_request(host->board.rdy_pin, "nand_rdy");
1434 if (res < 0) {
1435 dev_err(&pdev->dev,
1436 "can't request rdy gpio %d\n",
1437 host->board.rdy_pin);
1438 goto err_ecc_ioremap;
1439 }
1440
1441 res = gpio_direction_input(host->board.rdy_pin);
1442 if (res < 0) {
1443 dev_err(&pdev->dev,
1444 "can't request input direction rdy gpio %d\n",
1445 host->board.rdy_pin);
1446 goto err_ecc_ioremap;
1447 }
1448
1449 nand_chip->dev_ready = atmel_nand_device_ready; 539 nand_chip->dev_ready = atmel_nand_device_ready;
1450 }
1451 540
1452 if (gpio_is_valid(host->board.enable_pin)) { 541 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1453 res = gpio_request(host->board.enable_pin, "nand_enable"); 542 if (!regs && hard_ecc) {
1454 if (res < 0) { 543 printk(KERN_ERR "atmel_nand: can't get I/O resource "
1455 dev_err(&pdev->dev, 544 "regs\nFalling back on software ECC\n");
1456 "can't request enable gpio %d\n", 545 }
1457 host->board.enable_pin); 546
1458 goto err_ecc_ioremap; 547 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
1459 } 548 if (no_ecc)
1460 549 nand_chip->ecc.mode = NAND_ECC_NONE;
1461 res = gpio_direction_output(host->board.enable_pin, 1); 550 if (hard_ecc && regs) {
1462 if (res < 0) { 551 host->ecc = ioremap(regs->start, resource_size(regs));
1463 dev_err(&pdev->dev, 552 if (host->ecc == NULL) {
1464 "can't request output direction enable gpio %d\n", 553 printk(KERN_ERR "atmel_nand: ioremap failed\n");
1465 host->board.enable_pin); 554 res = -EIO;
1466 goto err_ecc_ioremap; 555 goto err_ecc_ioremap;
1467 } 556 }
557 nand_chip->ecc.mode = NAND_ECC_HW;
558 nand_chip->ecc.calculate = atmel_nand_calculate;
559 nand_chip->ecc.correct = atmel_nand_correct;
560 nand_chip->ecc.hwctl = atmel_nand_hwctl;
561 nand_chip->ecc.read_page = atmel_nand_read_page;
562 nand_chip->ecc.bytes = 4;
1468 } 563 }
1469 564
1470 nand_chip->ecc.mode = host->board.ecc_mode;
1471 nand_chip->chip_delay = 20; /* 20us command delay time */ 565 nand_chip->chip_delay = 20; /* 20us command delay time */
1472 566
1473 if (host->board.bus_width_16) /* 16-bit bus width */ 567 if (host->board->bus_width_16) /* 16-bit bus width */
1474 nand_chip->options |= NAND_BUSWIDTH_16; 568 nand_chip->options |= NAND_BUSWIDTH_16;
1475 569
1476 nand_chip->read_buf = atmel_read_buf; 570 nand_chip->read_buf = atmel_read_buf;
@@ -1479,33 +573,17 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1479 platform_set_drvdata(pdev, host); 573 platform_set_drvdata(pdev, host);
1480 atmel_nand_enable(host); 574 atmel_nand_enable(host);
1481 575
1482 if (gpio_is_valid(host->board.det_pin)) { 576 if (host->board->det_pin) {
1483 res = gpio_request(host->board.det_pin, "nand_det"); 577 if (gpio_get_value(host->board->det_pin)) {
1484 if (res < 0) {
1485 dev_err(&pdev->dev,
1486 "can't request det gpio %d\n",
1487 host->board.det_pin);
1488 goto err_no_card;
1489 }
1490
1491 res = gpio_direction_input(host->board.det_pin);
1492 if (res < 0) {
1493 dev_err(&pdev->dev,
1494 "can't request input direction det gpio %d\n",
1495 host->board.det_pin);
1496 goto err_no_card;
1497 }
1498
1499 if (gpio_get_value(host->board.det_pin)) {
1500 printk(KERN_INFO "No SmartMedia card inserted.\n"); 578 printk(KERN_INFO "No SmartMedia card inserted.\n");
1501 res = -ENXIO; 579 res = -ENXIO;
1502 goto err_no_card; 580 goto err_no_card;
1503 } 581 }
1504 } 582 }
1505 583
1506 if (host->board.on_flash_bbt || on_flash_bbt) { 584 if (on_flash_bbt) {
1507 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); 585 printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
1508 nand_chip->bbt_options |= NAND_BBT_USE_FLASH; 586 nand_chip->options |= NAND_USE_FLASH_BBT;
1509 } 587 }
1510 588
1511 if (!cpu_has_dma()) 589 if (!cpu_has_dma())
@@ -1516,7 +594,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1516 594
1517 dma_cap_zero(mask); 595 dma_cap_zero(mask);
1518 dma_cap_set(DMA_MEMCPY, mask); 596 dma_cap_set(DMA_MEMCPY, mask);
1519 host->dma_chan = dma_request_channel(mask, NULL, NULL); 597 host->dma_chan = dma_request_channel(mask, 0, NULL);
1520 if (!host->dma_chan) { 598 if (!host->dma_chan) {
1521 dev_err(host->dev, "Failed to request DMA channel\n"); 599 dev_err(host->dev, "Failed to request DMA channel\n");
1522 use_dma = 0; 600 use_dma = 0;
@@ -1535,13 +613,40 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1535 } 613 }
1536 614
1537 if (nand_chip->ecc.mode == NAND_ECC_HW) { 615 if (nand_chip->ecc.mode == NAND_ECC_HW) {
1538 if (host->has_pmecc) 616 /* ECC is calculated for the whole page (1 step) */
1539 res = atmel_pmecc_nand_init_params(pdev, host); 617 nand_chip->ecc.size = mtd->writesize;
1540 else 618
1541 res = atmel_hw_nand_init_params(pdev, host); 619 /* set ECC page size and oob layout */
1542 620 switch (mtd->writesize) {
1543 if (res != 0) 621 case 512:
1544 goto err_hw_ecc; 622 nand_chip->ecc.layout = &atmel_oobinfo_small;
623 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
624 break;
625 case 1024:
626 nand_chip->ecc.layout = &atmel_oobinfo_large;
627 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
628 break;
629 case 2048:
630 nand_chip->ecc.layout = &atmel_oobinfo_large;
631 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
632 break;
633 case 4096:
634 nand_chip->ecc.layout = &atmel_oobinfo_large;
635 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
636 break;
637 default:
638 /* page size not handled by HW ECC */
639 /* switching back to soft ECC */
640 nand_chip->ecc.mode = NAND_ECC_SOFT;
641 nand_chip->ecc.calculate = NULL;
642 nand_chip->ecc.correct = NULL;
643 nand_chip->ecc.hwctl = NULL;
644 nand_chip->ecc.read_page = NULL;
645 nand_chip->ecc.postpad = 0;
646 nand_chip->ecc.prepad = 0;
647 nand_chip->ecc.bytes = 0;
648 break;
649 }
1545 } 650 }
1546 651
1547 /* second phase scan */ 652 /* second phase scan */
@@ -1550,31 +655,36 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
1550 goto err_scan_tail; 655 goto err_scan_tail;
1551 } 656 }
1552 657
658#ifdef CONFIG_MTD_CMDLINE_PARTS
1553 mtd->name = "atmel_nand"; 659 mtd->name = "atmel_nand";
1554 ppdata.of_node = pdev->dev.of_node; 660 num_partitions = parse_mtd_partitions(mtd, part_probes,
1555 res = mtd_device_parse_register(mtd, NULL, &ppdata, 661 &partitions, 0);
1556 host->board.parts, host->board.num_parts); 662#endif
663 if (num_partitions <= 0 && host->board->partition_info)
664 partitions = host->board->partition_info(mtd->size,
665 &num_partitions);
666
667 if ((!partitions) || (num_partitions == 0)) {
668 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
669 res = -ENXIO;
670 goto err_no_partitions;
671 }
672
673 res = mtd_device_register(mtd, partitions, num_partitions);
1557 if (!res) 674 if (!res)
1558 return res; 675 return res;
1559 676
677err_no_partitions:
678 nand_release(mtd);
1560err_scan_tail: 679err_scan_tail:
1561 if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
1562 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
1563 pmecc_data_free(host);
1564 }
1565 if (host->ecc)
1566 iounmap(host->ecc);
1567 if (host->pmerrloc_base)
1568 iounmap(host->pmerrloc_base);
1569 if (host->pmecc_rom_base)
1570 iounmap(host->pmecc_rom_base);
1571err_hw_ecc:
1572err_scan_ident: 680err_scan_ident:
1573err_no_card: 681err_no_card:
1574 atmel_nand_disable(host); 682 atmel_nand_disable(host);
1575 platform_set_drvdata(pdev, NULL); 683 platform_set_drvdata(pdev, NULL);
1576 if (host->dma_chan) 684 if (host->dma_chan)
1577 dma_release_channel(host->dma_chan); 685 dma_release_channel(host->dma_chan);
686 if (host->ecc)
687 iounmap(host->ecc);
1578err_ecc_ioremap: 688err_ecc_ioremap:
1579 iounmap(host->io_base); 689 iounmap(host->io_base);
1580err_nand_ioremap: 690err_nand_ioremap:
@@ -1594,28 +704,8 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
1594 704
1595 atmel_nand_disable(host); 705 atmel_nand_disable(host);
1596 706
1597 if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
1598 pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
1599 pmerrloc_writel(host->pmerrloc_base, ELDIS,
1600 PMERRLOC_DISABLE);
1601 pmecc_data_free(host);
1602 }
1603
1604 if (gpio_is_valid(host->board.det_pin))
1605 gpio_free(host->board.det_pin);
1606
1607 if (gpio_is_valid(host->board.enable_pin))
1608 gpio_free(host->board.enable_pin);
1609
1610 if (gpio_is_valid(host->board.rdy_pin))
1611 gpio_free(host->board.rdy_pin);
1612
1613 if (host->ecc) 707 if (host->ecc)
1614 iounmap(host->ecc); 708 iounmap(host->ecc);
1615 if (host->pmecc_rom_base)
1616 iounmap(host->pmecc_rom_base);
1617 if (host->pmerrloc_base)
1618 iounmap(host->pmerrloc_base);
1619 709
1620 if (host->dma_chan) 710 if (host->dma_chan)
1621 dma_release_channel(host->dma_chan); 711 dma_release_channel(host->dma_chan);
@@ -1626,21 +716,11 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
1626 return 0; 716 return 0;
1627} 717}
1628 718
1629#if defined(CONFIG_OF)
1630static const struct of_device_id atmel_nand_dt_ids[] = {
1631 { .compatible = "atmel,at91rm9200-nand" },
1632 { /* sentinel */ }
1633};
1634
1635MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
1636#endif
1637
1638static struct platform_driver atmel_nand_driver = { 719static struct platform_driver atmel_nand_driver = {
1639 .remove = __exit_p(atmel_nand_remove), 720 .remove = __exit_p(atmel_nand_remove),
1640 .driver = { 721 .driver = {
1641 .name = "atmel_nand", 722 .name = "atmel_nand",
1642 .owner = THIS_MODULE, 723 .owner = THIS_MODULE,
1643 .of_match_table = of_match_ptr(atmel_nand_dt_ids),
1644 }, 724 },
1645}; 725};
1646 726
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 8a1e9a68675..578c776e135 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -3,7 +3,7 @@
3 * Based on AT91SAM9260 datasheet revision B. 3 * Based on AT91SAM9260 datasheet revision B.
4 * 4 *
5 * Copyright (C) 2007 Andrew Victor 5 * Copyright (C) 2007 Andrew Victor
6 * Copyright (C) 2007 - 2012 Atmel Corporation. 6 * Copyright (C) 2007 Atmel Corporation.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -36,116 +36,4 @@
36#define ATMEL_ECC_NPR 0x10 /* NParity register */ 36#define ATMEL_ECC_NPR 0x10 /* NParity register */
37#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */ 37#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
38 38
39/* PMECC Register Definitions */
40#define ATMEL_PMECC_CFG 0x000 /* Configuration Register */
41#define PMECC_CFG_BCH_ERR2 (0 << 0)
42#define PMECC_CFG_BCH_ERR4 (1 << 0)
43#define PMECC_CFG_BCH_ERR8 (2 << 0)
44#define PMECC_CFG_BCH_ERR12 (3 << 0)
45#define PMECC_CFG_BCH_ERR24 (4 << 0)
46
47#define PMECC_CFG_SECTOR512 (0 << 4)
48#define PMECC_CFG_SECTOR1024 (1 << 4)
49
50#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
51#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
52#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
53#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
54
55#define PMECC_CFG_READ_OP (0 << 12)
56#define PMECC_CFG_WRITE_OP (1 << 12)
57
58#define PMECC_CFG_SPARE_ENABLE (1 << 16)
59#define PMECC_CFG_SPARE_DISABLE (0 << 16)
60
61#define PMECC_CFG_AUTO_ENABLE (1 << 20)
62#define PMECC_CFG_AUTO_DISABLE (0 << 20)
63
64#define ATMEL_PMECC_SAREA 0x004 /* Spare area size */
65#define ATMEL_PMECC_SADDR 0x008 /* PMECC starting address */
66#define ATMEL_PMECC_EADDR 0x00c /* PMECC ending address */
67#define ATMEL_PMECC_CLK 0x010 /* PMECC clock control */
68#define PMECC_CLK_133MHZ (2 << 0)
69
70#define ATMEL_PMECC_CTRL 0x014 /* PMECC control register */
71#define PMECC_CTRL_RST (1 << 0)
72#define PMECC_CTRL_DATA (1 << 1)
73#define PMECC_CTRL_USER (1 << 2)
74#define PMECC_CTRL_ENABLE (1 << 4)
75#define PMECC_CTRL_DISABLE (1 << 5)
76
77#define ATMEL_PMECC_SR 0x018 /* PMECC status register */
78#define PMECC_SR_BUSY (1 << 0)
79#define PMECC_SR_ENABLE (1 << 4)
80
81#define ATMEL_PMECC_IER 0x01c /* PMECC interrupt enable */
82#define PMECC_IER_ENABLE (1 << 0)
83#define ATMEL_PMECC_IDR 0x020 /* PMECC interrupt disable */
84#define PMECC_IER_DISABLE (1 << 0)
85#define ATMEL_PMECC_IMR 0x024 /* PMECC interrupt mask */
86#define PMECC_IER_MASK (1 << 0)
87#define ATMEL_PMECC_ISR 0x028 /* PMECC interrupt status */
88#define ATMEL_PMECC_ECCx 0x040 /* PMECC ECC x */
89#define ATMEL_PMECC_REMx 0x240 /* PMECC REM x */
90
91/* PMERRLOC Register Definitions */
92#define ATMEL_PMERRLOC_ELCFG 0x000 /* Error location config */
93#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
94#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
95#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
96
97#define ATMEL_PMERRLOC_ELPRIM 0x004 /* Error location primitive */
98#define ATMEL_PMERRLOC_ELEN 0x008 /* Error location enable */
99#define ATMEL_PMERRLOC_ELDIS 0x00c /* Error location disable */
100#define PMERRLOC_DISABLE (1 << 0)
101
102#define ATMEL_PMERRLOC_ELSR 0x010 /* Error location status */
103#define PMERRLOC_ELSR_BUSY (1 << 0)
104#define ATMEL_PMERRLOC_ELIER 0x014 /* Error location int enable */
105#define ATMEL_PMERRLOC_ELIDR 0x018 /* Error location int disable */
106#define ATMEL_PMERRLOC_ELIMR 0x01c /* Error location int mask */
107#define ATMEL_PMERRLOC_ELISR 0x020 /* Error location int status */
108#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
109#define PMERRLOC_CALC_DONE (1 << 0)
110#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
111#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
112
113/* Register access macros for PMECC */
114#define pmecc_readl_relaxed(addr, reg) \
115 readl_relaxed((addr) + ATMEL_PMECC_##reg)
116
117#define pmecc_writel(addr, reg, value) \
118 writel((value), (addr) + ATMEL_PMECC_##reg)
119
120#define pmecc_readb_ecc_relaxed(addr, sector, n) \
121 readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
122
123#define pmecc_readl_rem_relaxed(addr, sector, n) \
124 readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
125
126#define pmerrloc_readl_relaxed(addr, reg) \
127 readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
128
129#define pmerrloc_writel(addr, reg, value) \
130 writel((value), (addr) + ATMEL_PMERRLOC_##reg)
131
132#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
133 writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
134
135#define pmerrloc_readl_sigma_relaxed(addr, n) \
136 readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
137
138#define pmerrloc_readl_el_relaxed(addr, n) \
139 readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
140
141/* Galois field dimension */
142#define PMECC_GF_DIMENSION_13 13
143#define PMECC_GF_DIMENSION_14 14
144
145#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
146#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
147
148/* Time out value for reading PMECC status register */
149#define PMECC_MAX_TIMEOUT_MS 100
150
151#endif 39#endif
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 217459d02b2..e7767eef450 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -17,26 +17,38 @@
17#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h> 18#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
20#include <linux/platform_device.h>
21#include <asm/io.h> 20#include <asm/io.h>
22#include <asm/mach-au1x00/au1000.h>
23#include <asm/mach-au1x00/au1550nd.h>
24 21
22#include <asm/mach-au1x00/au1xxx.h>
23#include <asm/mach-db1x00/bcsr.h>
25 24
26struct au1550nd_ctx { 25/*
27 struct mtd_info info; 26 * MTD structure for NAND controller
28 struct nand_chip chip; 27 */
28static struct mtd_info *au1550_mtd = NULL;
29static void __iomem *p_nand;
30static int nand_width = 1; /* default x8 */
31static void (*au1550_write_byte)(struct mtd_info *, u_char);
29 32
30 int cs; 33/*
31 void __iomem *base; 34 * Define partitions for flash device
32 void (*write_byte)(struct mtd_info *, u_char); 35 */
36static const struct mtd_partition partition_info[] = {
37 {
38 .name = "NAND FS 0",
39 .offset = 0,
40 .size = 8 * 1024 * 1024},
41 {
42 .name = "NAND FS 1",
43 .offset = MTDPART_OFS_APPEND,
44 .size = MTDPART_SIZ_FULL}
33}; 45};
34 46
35/** 47/**
36 * au_read_byte - read one byte from the chip 48 * au_read_byte - read one byte from the chip
37 * @mtd: MTD device structure 49 * @mtd: MTD device structure
38 * 50 *
39 * read function for 8bit buswidth 51 * read function for 8bit buswith
40 */ 52 */
41static u_char au_read_byte(struct mtd_info *mtd) 53static u_char au_read_byte(struct mtd_info *mtd)
42{ 54{
@@ -51,7 +63,7 @@ static u_char au_read_byte(struct mtd_info *mtd)
51 * @mtd: MTD device structure 63 * @mtd: MTD device structure
52 * @byte: pointer to data byte to write 64 * @byte: pointer to data byte to write
53 * 65 *
54 * write function for 8it buswidth 66 * write function for 8it buswith
55 */ 67 */
56static void au_write_byte(struct mtd_info *mtd, u_char byte) 68static void au_write_byte(struct mtd_info *mtd, u_char byte)
57{ 69{
@@ -61,10 +73,11 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte)
61} 73}
62 74
63/** 75/**
64 * au_read_byte16 - read one byte endianness aware from the chip 76 * au_read_byte16 - read one byte endianess aware from the chip
65 * @mtd: MTD device structure 77 * @mtd: MTD device structure
66 * 78 *
67 * read function for 16bit buswidth with endianness conversion 79 * read function for 16bit buswith with
80 * endianess conversion
68 */ 81 */
69static u_char au_read_byte16(struct mtd_info *mtd) 82static u_char au_read_byte16(struct mtd_info *mtd)
70{ 83{
@@ -75,11 +88,12 @@ static u_char au_read_byte16(struct mtd_info *mtd)
75} 88}
76 89
77/** 90/**
78 * au_write_byte16 - write one byte endianness aware to the chip 91 * au_write_byte16 - write one byte endianess aware to the chip
79 * @mtd: MTD device structure 92 * @mtd: MTD device structure
80 * @byte: pointer to data byte to write 93 * @byte: pointer to data byte to write
81 * 94 *
82 * write function for 16bit buswidth with endianness conversion 95 * write function for 16bit buswith with
96 * endianess conversion
83 */ 97 */
84static void au_write_byte16(struct mtd_info *mtd, u_char byte) 98static void au_write_byte16(struct mtd_info *mtd, u_char byte)
85{ 99{
@@ -92,7 +106,8 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte)
92 * au_read_word - read one word from the chip 106 * au_read_word - read one word from the chip
93 * @mtd: MTD device structure 107 * @mtd: MTD device structure
94 * 108 *
95 * read function for 16bit buswidth without endianness conversion 109 * read function for 16bit buswith without
110 * endianess conversion
96 */ 111 */
97static u16 au_read_word(struct mtd_info *mtd) 112static u16 au_read_word(struct mtd_info *mtd)
98{ 113{
@@ -108,7 +123,7 @@ static u16 au_read_word(struct mtd_info *mtd)
108 * @buf: data buffer 123 * @buf: data buffer
109 * @len: number of bytes to write 124 * @len: number of bytes to write
110 * 125 *
111 * write function for 8bit buswidth 126 * write function for 8bit buswith
112 */ 127 */
113static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 128static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
114{ 129{
@@ -127,7 +142,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
127 * @buf: buffer to store date 142 * @buf: buffer to store date
128 * @len: number of bytes to read 143 * @len: number of bytes to read
129 * 144 *
130 * read function for 8bit buswidth 145 * read function for 8bit buswith
131 */ 146 */
132static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) 147static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
133{ 148{
@@ -141,12 +156,34 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
141} 156}
142 157
143/** 158/**
159 * au_verify_buf - Verify chip data against buffer
160 * @mtd: MTD device structure
161 * @buf: buffer containing the data to compare
162 * @len: number of bytes to compare
163 *
164 * verify function for 8bit buswith
165 */
166static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
167{
168 int i;
169 struct nand_chip *this = mtd->priv;
170
171 for (i = 0; i < len; i++) {
172 if (buf[i] != readb(this->IO_ADDR_R))
173 return -EFAULT;
174 au_sync();
175 }
176
177 return 0;
178}
179
180/**
144 * au_write_buf16 - write buffer to chip 181 * au_write_buf16 - write buffer to chip
145 * @mtd: MTD device structure 182 * @mtd: MTD device structure
146 * @buf: data buffer 183 * @buf: data buffer
147 * @len: number of bytes to write 184 * @len: number of bytes to write
148 * 185 *
149 * write function for 16bit buswidth 186 * write function for 16bit buswith
150 */ 187 */
151static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) 188static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
152{ 189{
@@ -168,7 +205,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
168 * @buf: buffer to store date 205 * @buf: buffer to store date
169 * @len: number of bytes to read 206 * @len: number of bytes to read
170 * 207 *
171 * read function for 16bit buswidth 208 * read function for 16bit buswith
172 */ 209 */
173static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 210static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
174{ 211{
@@ -183,6 +220,29 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
183 } 220 }
184} 221}
185 222
223/**
224 * au_verify_buf16 - Verify chip data against buffer
225 * @mtd: MTD device structure
226 * @buf: buffer containing the data to compare
227 * @len: number of bytes to compare
228 *
229 * verify function for 16bit buswith
230 */
231static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
232{
233 int i;
234 struct nand_chip *this = mtd->priv;
235 u16 *p = (u16 *) buf;
236 len >>= 1;
237
238 for (i = 0; i < len; i++) {
239 if (p[i] != readw(this->IO_ADDR_R))
240 return -EFAULT;
241 au_sync();
242 }
243 return 0;
244}
245
186/* Select the chip by setting nCE to low */ 246/* Select the chip by setting nCE to low */
187#define NAND_CTL_SETNCE 1 247#define NAND_CTL_SETNCE 1
188/* Deselect the chip by setting nCE to high */ 248/* Deselect the chip by setting nCE to high */
@@ -198,25 +258,24 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
198 258
199static void au1550_hwcontrol(struct mtd_info *mtd, int cmd) 259static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
200{ 260{
201 struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info); 261 register struct nand_chip *this = mtd->priv;
202 struct nand_chip *this = mtd->priv;
203 262
204 switch (cmd) { 263 switch (cmd) {
205 264
206 case NAND_CTL_SETCLE: 265 case NAND_CTL_SETCLE:
207 this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD; 266 this->IO_ADDR_W = p_nand + MEM_STNAND_CMD;
208 break; 267 break;
209 268
210 case NAND_CTL_CLRCLE: 269 case NAND_CTL_CLRCLE:
211 this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA; 270 this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
212 break; 271 break;
213 272
214 case NAND_CTL_SETALE: 273 case NAND_CTL_SETALE:
215 this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR; 274 this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR;
216 break; 275 break;
217 276
218 case NAND_CTL_CLRALE: 277 case NAND_CTL_CLRALE:
219 this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA; 278 this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
220 /* FIXME: Nobody knows why this is necessary, 279 /* FIXME: Nobody knows why this is necessary,
221 * but it works only that way */ 280 * but it works only that way */
222 udelay(1); 281 udelay(1);
@@ -224,7 +283,7 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
224 283
225 case NAND_CTL_SETNCE: 284 case NAND_CTL_SETNCE:
226 /* assert (force assert) chip enable */ 285 /* assert (force assert) chip enable */
227 au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL); 286 au_writel((1 << (4 + NAND_CS)), MEM_STNDCTL);
228 break; 287 break;
229 288
230 case NAND_CTL_CLRNCE: 289 case NAND_CTL_CLRNCE:
@@ -271,10 +330,9 @@ static void au1550_select_chip(struct mtd_info *mtd, int chip)
271 */ 330 */
272static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr) 331static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
273{ 332{
274 struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info); 333 register struct nand_chip *this = mtd->priv;
275 struct nand_chip *this = mtd->priv;
276 int ce_override = 0, i; 334 int ce_override = 0, i;
277 unsigned long flags = 0; 335 ulong flags;
278 336
279 /* Begin command latch cycle */ 337 /* Begin command latch cycle */
280 au1550_hwcontrol(mtd, NAND_CTL_SETCLE); 338 au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
@@ -295,9 +353,9 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
295 column -= 256; 353 column -= 256;
296 readcmd = NAND_CMD_READ1; 354 readcmd = NAND_CMD_READ1;
297 } 355 }
298 ctx->write_byte(mtd, readcmd); 356 au1550_write_byte(mtd, readcmd);
299 } 357 }
300 ctx->write_byte(mtd, command); 358 au1550_write_byte(mtd, command);
301 359
302 /* Set ALE and clear CLE to start address cycle */ 360 /* Set ALE and clear CLE to start address cycle */
303 au1550_hwcontrol(mtd, NAND_CTL_CLRCLE); 361 au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
@@ -310,10 +368,10 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
310 /* Adjust columns for 16 bit buswidth */ 368 /* Adjust columns for 16 bit buswidth */
311 if (this->options & NAND_BUSWIDTH_16) 369 if (this->options & NAND_BUSWIDTH_16)
312 column >>= 1; 370 column >>= 1;
313 ctx->write_byte(mtd, column); 371 au1550_write_byte(mtd, column);
314 } 372 }
315 if (page_addr != -1) { 373 if (page_addr != -1) {
316 ctx->write_byte(mtd, (u8)(page_addr & 0xff)); 374 au1550_write_byte(mtd, (u8)(page_addr & 0xff));
317 375
318 if (command == NAND_CMD_READ0 || 376 if (command == NAND_CMD_READ0 ||
319 command == NAND_CMD_READ1 || 377 command == NAND_CMD_READ1 ||
@@ -331,12 +389,11 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
331 au1550_hwcontrol(mtd, NAND_CTL_SETNCE); 389 au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
332 } 390 }
333 391
334 ctx->write_byte(mtd, (u8)(page_addr >> 8)); 392 au1550_write_byte(mtd, (u8)(page_addr >> 8));
335 393
336 /* One more address cycle for devices > 32MiB */ 394 /* One more address cycle for devices > 32MiB */
337 if (this->chipsize > (32 << 20)) 395 if (this->chipsize > (32 << 20))
338 ctx->write_byte(mtd, 396 au1550_write_byte(mtd, (u8)((page_addr >> 16) & 0x0f));
339 ((page_addr >> 16) & 0x0f));
340 } 397 }
341 /* Latch in address */ 398 /* Latch in address */
342 au1550_hwcontrol(mtd, NAND_CTL_CLRALE); 399 au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
@@ -382,79 +439,121 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
382 while(!this->dev_ready(mtd)); 439 while(!this->dev_ready(mtd));
383} 440}
384 441
385static int find_nand_cs(unsigned long nand_base)
386{
387 void __iomem *base =
388 (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
389 unsigned long addr, staddr, start, mask, end;
390 int i;
391 442
392 for (i = 0; i < 4; i++) { 443/*
393 addr = 0x1000 + (i * 0x10); /* CSx */ 444 * Main initialization routine
394 staddr = __raw_readl(base + addr + 0x08); /* STADDRx */ 445 */
395 /* figure out the decoded range of this CS */ 446static int __init au1xxx_nand_init(void)
396 start = (staddr << 4) & 0xfffc0000; 447{
397 mask = (staddr << 18) & 0xfffc0000; 448 struct nand_chip *this;
398 end = (start | (start - 1)) & ~(start ^ mask); 449 u16 boot_swapboot = 0; /* default value */
399 if ((nand_base >= start) && (nand_base < end)) 450 int retval;
400 return i; 451 u32 mem_staddr;
452 u32 nand_phys;
453
454 /* Allocate memory for MTD device structure and private data */
455 au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
456 if (!au1550_mtd) {
457 printk("Unable to allocate NAND MTD dev structure.\n");
458 return -ENOMEM;
401 } 459 }
402 460
403 return -ENODEV; 461 /* Get pointer to private data */
404} 462 this = (struct nand_chip *)(&au1550_mtd[1]);
405 463
406static int au1550nd_probe(struct platform_device *pdev) 464 /* Link the private data with the MTD structure */
407{ 465 au1550_mtd->priv = this;
408 struct au1550nd_platdata *pd; 466 au1550_mtd->owner = THIS_MODULE;
409 struct au1550nd_ctx *ctx;
410 struct nand_chip *this;
411 struct resource *r;
412 int ret, cs;
413 467
414 pd = pdev->dev.platform_data;
415 if (!pd) {
416 dev_err(&pdev->dev, "missing platform data\n");
417 return -ENODEV;
418 }
419 468
420 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 469 /* MEM_STNDCTL: disable ints, disable nand boot */
421 if (!ctx) { 470 au_writel(0, MEM_STNDCTL);
422 dev_err(&pdev->dev, "no memory for NAND context\n"); 471
423 return -ENOMEM; 472#ifdef CONFIG_MIPS_PB1550
424 } 473 /* set gpio206 high */
474 gpio_direction_input(206);
425 475
426 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 476 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
427 if (!r) { 477
428 dev_err(&pdev->dev, "no NAND memory resource\n"); 478 switch (boot_swapboot) {
429 ret = -ENODEV; 479 case 0:
430 goto out1; 480 case 2:
481 case 8:
482 case 0xC:
483 case 0xD:
484 /* x16 NAND Flash */
485 nand_width = 0;
486 break;
487 case 1:
488 case 9:
489 case 3:
490 case 0xE:
491 case 0xF:
492 /* x8 NAND Flash */
493 nand_width = 1;
494 break;
495 default:
496 printk("Pb1550 NAND: bad boot:swap\n");
497 retval = -EINVAL;
498 goto outmem;
431 } 499 }
432 if (request_mem_region(r->start, resource_size(r), "au1550-nand")) { 500#endif
433 dev_err(&pdev->dev, "cannot claim NAND memory area\n"); 501
434 ret = -ENOMEM; 502 /* Configure chip-select; normally done by boot code, e.g. YAMON */
435 goto out1; 503#ifdef NAND_STCFG
504 if (NAND_CS == 0) {
505 au_writel(NAND_STCFG, MEM_STCFG0);
506 au_writel(NAND_STTIME, MEM_STTIME0);
507 au_writel(NAND_STADDR, MEM_STADDR0);
436 } 508 }
437 509 if (NAND_CS == 1) {
438 ctx->base = ioremap_nocache(r->start, 0x1000); 510 au_writel(NAND_STCFG, MEM_STCFG1);
439 if (!ctx->base) { 511 au_writel(NAND_STTIME, MEM_STTIME1);
440 dev_err(&pdev->dev, "cannot remap NAND memory area\n"); 512 au_writel(NAND_STADDR, MEM_STADDR1);
441 ret = -ENODEV; 513 }
442 goto out2; 514 if (NAND_CS == 2) {
515 au_writel(NAND_STCFG, MEM_STCFG2);
516 au_writel(NAND_STTIME, MEM_STTIME2);
517 au_writel(NAND_STADDR, MEM_STADDR2);
518 }
519 if (NAND_CS == 3) {
520 au_writel(NAND_STCFG, MEM_STCFG3);
521 au_writel(NAND_STTIME, MEM_STTIME3);
522 au_writel(NAND_STADDR, MEM_STADDR3);
523 }
524#endif
525
526 /* Locate NAND chip-select in order to determine NAND phys address */
527 mem_staddr = 0x00000000;
528 if (((au_readl(MEM_STCFG0) & 0x7) == 0x5) && (NAND_CS == 0))
529 mem_staddr = au_readl(MEM_STADDR0);
530 else if (((au_readl(MEM_STCFG1) & 0x7) == 0x5) && (NAND_CS == 1))
531 mem_staddr = au_readl(MEM_STADDR1);
532 else if (((au_readl(MEM_STCFG2) & 0x7) == 0x5) && (NAND_CS == 2))
533 mem_staddr = au_readl(MEM_STADDR2);
534 else if (((au_readl(MEM_STCFG3) & 0x7) == 0x5) && (NAND_CS == 3))
535 mem_staddr = au_readl(MEM_STADDR3);
536
537 if (mem_staddr == 0x00000000) {
538 printk("Au1xxx NAND: ERROR WITH NAND CHIP-SELECT\n");
539 kfree(au1550_mtd);
540 return 1;
443 } 541 }
542 nand_phys = (mem_staddr << 4) & 0xFFFC0000;
444 543
445 this = &ctx->chip; 544 p_nand = ioremap(nand_phys, 0x1000);
446 ctx->info.priv = this;
447 ctx->info.owner = THIS_MODULE;
448 545
449 /* figure out which CS# r->start belongs to */ 546 /* make controller and MTD agree */
450 cs = find_nand_cs(r->start); 547 if (NAND_CS == 0)
451 if (cs < 0) { 548 nand_width = au_readl(MEM_STCFG0) & (1 << 22);
452 dev_err(&pdev->dev, "cannot detect NAND chipselect\n"); 549 if (NAND_CS == 1)
453 ret = -ENODEV; 550 nand_width = au_readl(MEM_STCFG1) & (1 << 22);
454 goto out3; 551 if (NAND_CS == 2)
455 } 552 nand_width = au_readl(MEM_STCFG2) & (1 << 22);
456 ctx->cs = cs; 553 if (NAND_CS == 3)
554 nand_width = au_readl(MEM_STCFG3) & (1 << 22);
457 555
556 /* Set address of hardware control function */
458 this->dev_ready = au1550_device_ready; 557 this->dev_ready = au1550_device_ready;
459 this->select_chip = au1550_select_chip; 558 this->select_chip = au1550_select_chip;
460 this->cmdfunc = au1550_command; 559 this->cmdfunc = au1550_command;
@@ -463,56 +562,56 @@ static int au1550nd_probe(struct platform_device *pdev)
463 this->chip_delay = 30; 562 this->chip_delay = 30;
464 this->ecc.mode = NAND_ECC_SOFT; 563 this->ecc.mode = NAND_ECC_SOFT;
465 564
466 if (pd->devwidth) 565 this->options = NAND_NO_AUTOINCR;
566
567 if (!nand_width)
467 this->options |= NAND_BUSWIDTH_16; 568 this->options |= NAND_BUSWIDTH_16;
468 569
469 this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte; 570 this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
470 ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte; 571 au1550_write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
471 this->read_word = au_read_word; 572 this->read_word = au_read_word;
472 this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; 573 this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
473 this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf; 574 this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
474 575 this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
475 ret = nand_scan(&ctx->info, 1); 576
476 if (ret) { 577 /* Scan to find existence of the device */
477 dev_err(&pdev->dev, "NAND scan failed with %d\n", ret); 578 if (nand_scan(au1550_mtd, 1)) {
478 goto out3; 579 retval = -ENXIO;
580 goto outio;
479 } 581 }
480 582
481 mtd_device_register(&ctx->info, pd->parts, pd->num_parts); 583 /* Register the partitions */
584 mtd_device_register(au1550_mtd, partition_info,
585 ARRAY_SIZE(partition_info));
482 586
483 return 0; 587 return 0;
484 588
485out3: 589 outio:
486 iounmap(ctx->base); 590 iounmap(p_nand);
487out2: 591
488 release_mem_region(r->start, resource_size(r)); 592 outmem:
489out1: 593 kfree(au1550_mtd);
490 kfree(ctx); 594 return retval;
491 return ret;
492} 595}
493 596
494static int au1550nd_remove(struct platform_device *pdev) 597module_init(au1xxx_nand_init);
598
599/*
600 * Clean up routine
601 */
602static void __exit au1550_cleanup(void)
495{ 603{
496 struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); 604 /* Release resources, unregister device */
497 struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 605 nand_release(au1550_mtd);
498 606
499 nand_release(&ctx->info); 607 /* Free the MTD device structure */
500 iounmap(ctx->base); 608 kfree(au1550_mtd);
501 release_mem_region(r->start, 0x1000);
502 kfree(ctx);
503 return 0;
504}
505 609
506static struct platform_driver au1550nd_driver = { 610 /* Unmap */
507 .driver = { 611 iounmap(p_nand);
508 .name = "au1550-nand", 612}
509 .owner = THIS_MODULE,
510 },
511 .probe = au1550nd_probe,
512 .remove = au1550nd_remove,
513};
514 613
515module_platform_driver(au1550nd_driver); 614module_exit(au1550_cleanup);
516 615
517MODULE_LICENSE("GPL"); 616MODULE_LICENSE("GPL");
518MODULE_AUTHOR("Embedded Edge, LLC"); 617MODULE_AUTHOR("Embedded Edge, LLC");
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
deleted file mode 100644
index f05b119e134..00000000000
--- a/drivers/mtd/nand/bcm47xxnflash/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
1bcm47xxnflash-y += main.o
2bcm47xxnflash-y += ops_bcm4706.o
3
4obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
deleted file mode 100644
index 0bdb2ce4da7..00000000000
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef __BCM47XXNFLASH_H
2#define __BCM47XXNFLASH_H
3
4#include <linux/mtd/mtd.h>
5#include <linux/mtd/nand.h>
6
7struct bcm47xxnflash {
8 struct bcma_drv_cc *cc;
9
10 struct nand_chip nand_chip;
11 struct mtd_info mtd;
12
13 unsigned curr_command;
14 int curr_page_addr;
15 int curr_column;
16
17 u8 id_data[8];
18};
19
20int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
21
22#endif /* BCM47XXNFLASH */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
deleted file mode 100644
index 8363a9a5fa3..00000000000
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * BCM47XX NAND flash driver
3 *
4 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/platform_device.h>
16#include <linux/bcma/bcma.h>
17
18#include "bcm47xxnflash.h"
19
20MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Rafał Miłecki");
23
24static const char *probes[] = { "bcm47xxpart", NULL };
25
26static int bcm47xxnflash_probe(struct platform_device *pdev)
27{
28 struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
29 struct bcm47xxnflash *b47n;
30 int err = 0;
31
32 b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
33 if (!b47n) {
34 err = -ENOMEM;
35 goto out;
36 }
37
38 b47n->nand_chip.priv = b47n;
39 b47n->mtd.owner = THIS_MODULE;
40 b47n->mtd.priv = &b47n->nand_chip; /* Required */
41 b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
42
43 if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
44 err = bcm47xxnflash_ops_bcm4706_init(b47n);
45 } else {
46 pr_err("Device not supported\n");
47 err = -ENOTSUPP;
48 }
49 if (err) {
50 pr_err("Initialization failed: %d\n", err);
51 goto err_init;
52 }
53
54 err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
55 if (err) {
56 pr_err("Failed to register MTD device: %d\n", err);
57 goto err_dev_reg;
58 }
59
60 return 0;
61
62err_dev_reg:
63err_init:
64 kfree(b47n);
65out:
66 return err;
67}
68
69static int bcm47xxnflash_remove(struct platform_device *pdev)
70{
71 struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
72
73 if (nflash->mtd)
74 mtd_device_unregister(nflash->mtd);
75
76 return 0;
77}
78
79static struct platform_driver bcm47xxnflash_driver = {
80 .remove = bcm47xxnflash_remove,
81 .driver = {
82 .name = "bcma_nflash",
83 .owner = THIS_MODULE,
84 },
85};
86
87static int __init bcm47xxnflash_init(void)
88{
89 int err;
90
91 /*
92 * Platform device "bcma_nflash" exists on SoCs and is registered very
93 * early, it won't be added during runtime (use platform_driver_probe).
94 */
95 err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
96 if (err)
97 pr_err("Failed to register serial flash driver: %d\n", err);
98
99 return err;
100}
101
102static void __exit bcm47xxnflash_exit(void)
103{
104 platform_driver_unregister(&bcm47xxnflash_driver);
105}
106
107module_init(bcm47xxnflash_init);
108module_exit(bcm47xxnflash_exit);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
deleted file mode 100644
index 86c9a79b89b..00000000000
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ /dev/null
@@ -1,413 +0,0 @@
1/*
2 * BCM47XX NAND flash driver
3 *
4 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/bcma/bcma.h>
16
17#include "bcm47xxnflash.h"
18
19/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
20 * shown 164 retries as maxiumum. */
21#define NFLASH_READY_RETRIES 1000
22
23#define NFLASH_SECTOR_SIZE 512
24
25#define NCTL_CMD0 0x00010000
26#define NCTL_CMD1W 0x00080000
27#define NCTL_READ 0x00100000
28#define NCTL_WRITE 0x00200000
29#define NCTL_SPECADDR 0x01000000
30#define NCTL_READY 0x04000000
31#define NCTL_ERR 0x08000000
32#define NCTL_CSA 0x40000000
33#define NCTL_START 0x80000000
34
35/**************************************************
36 * Various helpers
37 **************************************************/
38
39static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
40{
41 return ((ns * 1000 * clock) / 1000000) + 1;
42}
43
44static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
45{
46 int i = 0;
47
48 bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
49 for (i = 0; i < NFLASH_READY_RETRIES; i++) {
50 if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
51 i = 0;
52 break;
53 }
54 }
55 if (i) {
56 pr_err("NFLASH control command not ready!\n");
57 return -EBUSY;
58 }
59 return 0;
60}
61
62static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
63{
64 int i;
65
66 for (i = 0; i < NFLASH_READY_RETRIES; i++) {
67 if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
68 if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
69 BCMA_CC_NFLASH_CTL_ERR) {
70 pr_err("Error on polling\n");
71 return -EBUSY;
72 } else {
73 return 0;
74 }
75 }
76 }
77
78 pr_err("Polling timeout!\n");
79 return -EBUSY;
80}
81
82/**************************************************
83 * R/W
84 **************************************************/
85
86static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
87 int len)
88{
89 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
90 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
91
92 u32 ctlcode;
93 u32 *dest = (u32 *)buf;
94 int i;
95 int toread;
96
97 BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
98 /* Don't validate column using nand_chip->page_shift, it may be bigger
99 * when accessing OOB */
100
101 while (len) {
102 /* We can read maximum of 0x200 bytes at once */
103 toread = min(len, 0x200);
104
105 /* Set page and column */
106 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
107 b47n->curr_column);
108 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
109 b47n->curr_page_addr);
110
111 /* Prepare to read */
112 ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 |
113 NCTL_CMD0;
114 ctlcode |= NAND_CMD_READSTART << 8;
115 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
116 return;
117 if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
118 return;
119
120 /* Eventually read some data :) */
121 for (i = 0; i < toread; i += 4, dest++) {
122 ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
123 if (i == toread - 4) /* Last read goes without that */
124 ctlcode &= ~NCTL_CSA;
125 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
126 ctlcode))
127 return;
128 *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
129 }
130
131 b47n->curr_column += toread;
132 len -= toread;
133 }
134}
135
136static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
137 const uint8_t *buf, int len)
138{
139 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
140 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
141 struct bcma_drv_cc *cc = b47n->cc;
142
143 u32 ctlcode;
144 const u32 *data = (u32 *)buf;
145 int i;
146
147 BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
148 /* Don't validate column using nand_chip->page_shift, it may be bigger
149 * when accessing OOB */
150
151 for (i = 0; i < len; i += 4, data++) {
152 bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
153
154 ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
155 if (i == len - 4) /* Last read goes without that */
156 ctlcode &= ~NCTL_CSA;
157 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
158 pr_err("%s ctl_cmd didn't work!\n", __func__);
159 return;
160 }
161 }
162
163 b47n->curr_column += len;
164}
165
166/**************************************************
167 * NAND chip ops
168 **************************************************/
169
170/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
171static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
172 int chip)
173{
174 return;
175}
176
177/*
178 * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
179 * For example, reading chip id is performed in a non-standard way.
180 * Setting column and page is also handled differently, we use a special
181 * registers of ChipCommon core. Hacking cmd_ctrl to understand and convert
182 * standard commands would be much more complicated.
183 */
184static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
185 unsigned command, int column,
186 int page_addr)
187{
188 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
189 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
190 struct bcma_drv_cc *cc = b47n->cc;
191 u32 ctlcode;
192 int i;
193
194 if (column != -1)
195 b47n->curr_column = column;
196 if (page_addr != -1)
197 b47n->curr_page_addr = page_addr;
198
199 switch (command) {
200 case NAND_CMD_RESET:
201 pr_warn("Chip reset not implemented yet\n");
202 break;
203 case NAND_CMD_READID:
204 ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
205 ctlcode |= NAND_CMD_READID;
206 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
207 pr_err("READID error\n");
208 break;
209 }
210
211 /*
212 * Reading is specific, last one has to go without NCTL_CSA
213 * bit. We don't know how many reads NAND subsystem is going
214 * to perform, so cache everything.
215 */
216 for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
217 ctlcode = NCTL_CSA | NCTL_READ;
218 if (i == ARRAY_SIZE(b47n->id_data) - 1)
219 ctlcode &= ~NCTL_CSA;
220 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
221 ctlcode)) {
222 pr_err("READID error\n");
223 break;
224 }
225 b47n->id_data[i] =
226 bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
227 & 0xFF;
228 }
229
230 break;
231 case NAND_CMD_STATUS:
232 ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
233 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
234 pr_err("STATUS command error\n");
235 break;
236 case NAND_CMD_READ0:
237 break;
238 case NAND_CMD_READOOB:
239 if (page_addr != -1)
240 b47n->curr_column += mtd->writesize;
241 break;
242 case NAND_CMD_ERASE1:
243 bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
244 b47n->curr_page_addr);
245 ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 |
246 NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
247 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
248 pr_err("ERASE1 failed\n");
249 break;
250 case NAND_CMD_ERASE2:
251 break;
252 case NAND_CMD_SEQIN:
253 /* Set page and column */
254 bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
255 b47n->curr_column);
256 bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
257 b47n->curr_page_addr);
258
259 /* Prepare to write */
260 ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000;
261 ctlcode |= NAND_CMD_SEQIN;
262 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
263 pr_err("SEQIN failed\n");
264 break;
265 case NAND_CMD_PAGEPROG:
266 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 |
267 NAND_CMD_PAGEPROG))
268 pr_err("PAGEPROG failed\n");
269 if (bcm47xxnflash_ops_bcm4706_poll(cc))
270 pr_err("PAGEPROG not ready\n");
271 break;
272 default:
273 pr_err("Command 0x%X unsupported\n", command);
274 break;
275 }
276 b47n->curr_command = command;
277}
278
279static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
280{
281 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
282 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
283 struct bcma_drv_cc *cc = b47n->cc;
284 u32 tmp = 0;
285
286 switch (b47n->curr_command) {
287 case NAND_CMD_READID:
288 if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
289 pr_err("Requested invalid id_data: %d\n",
290 b47n->curr_column);
291 return 0;
292 }
293 return b47n->id_data[b47n->curr_column++];
294 case NAND_CMD_STATUS:
295 if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
296 return 0;
297 return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
298 case NAND_CMD_READOOB:
299 bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
300 return tmp & 0xFF;
301 }
302
303 pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
304 return 0;
305}
306
307static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
308 uint8_t *buf, int len)
309{
310 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
311 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
312
313 switch (b47n->curr_command) {
314 case NAND_CMD_READ0:
315 case NAND_CMD_READOOB:
316 bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
317 return;
318 }
319
320 pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
321}
322
323static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
324 const uint8_t *buf, int len)
325{
326 struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
327 struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
328
329 switch (b47n->curr_command) {
330 case NAND_CMD_SEQIN:
331 bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
332 return;
333 }
334
335 pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
336}
337
338/**************************************************
339 * Init
340 **************************************************/
341
342int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
343{
344 int err;
345 u32 freq;
346 u16 clock;
347 u8 w0, w1, w2, w3, w4;
348
349 unsigned long chipsize; /* MiB */
350 u8 tbits, col_bits, col_size, row_bits, row_bsize;
351 u32 val;
352
353 b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
354 b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
355 b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
356 b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
357 b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
358 b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
359 b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
360
361 /* Enable NAND flash access */
362 bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
363 BCMA_CC_4706_FLASHSCFG_NF1);
364
365 /* Configure wait counters */
366 if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
367 freq = 100000000;
368 } else {
369 freq = bcma_chipco_pll_read(b47n->cc, 4);
370 freq = (freq * 0xFFF) >> 3;
371 freq = (freq * 25000000) >> 3;
372 }
373 clock = freq / 1000000;
374 w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
375 w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
376 w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
377 w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
378 w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
379 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
380 (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
381
382 /* Scan NAND */
383 err = nand_scan(&b47n->mtd, 1);
384 if (err) {
385 pr_err("Could not scan NAND flash: %d\n", err);
386 goto exit;
387 }
388
389 /* Configure FLASH */
390 chipsize = b47n->nand_chip.chipsize >> 20;
391 tbits = ffs(chipsize); /* find first bit set */
392 if (!tbits || tbits != fls(chipsize)) {
393 pr_err("Invalid flash size: 0x%lX\n", chipsize);
394 err = -ENOTSUPP;
395 goto exit;
396 }
397 tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
398
399 col_bits = b47n->nand_chip.page_shift + 1;
400 col_size = (col_bits + 7) / 8;
401
402 row_bits = tbits - col_bits + 1;
403 row_bsize = (row_bits + 7) / 8;
404
405 val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
406 bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
407
408exit:
409 if (err)
410 bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
411 ~BCMA_CC_4706_FLASHSCFG_NF1);
412 return err;
413}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4271e948d1e..dd899cb5d36 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
558} 558}
559 559
560static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 560static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
561 uint8_t *buf, int oob_required, int page) 561 uint8_t *buf, int page)
562{ 562{
563 bf5xx_nand_read_buf(mtd, buf, mtd->writesize); 563 bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
564 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); 564 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -566,13 +566,11 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
566 return 0; 566 return 0;
567} 567}
568 568
569static int bf5xx_nand_write_page_raw(struct mtd_info *mtd, 569static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
570 struct nand_chip *chip, const uint8_t *buf, int oob_required) 570 const uint8_t *buf)
571{ 571{
572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); 573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
574
575 return 0;
576} 574}
577 575
578/* 576/*
@@ -658,7 +656,7 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
658/* 656/*
659 * Device management interface 657 * Device management interface
660 */ 658 */
661static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info) 659static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
662{ 660{
663 struct mtd_info *mtd = &info->mtd; 661 struct mtd_info *mtd = &info->mtd;
664 struct mtd_partition *parts = info->platform->partitions; 662 struct mtd_partition *parts = info->platform->partitions;
@@ -667,7 +665,7 @@ static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
667 return mtd_device_register(mtd, parts, nr); 665 return mtd_device_register(mtd, parts, nr);
668} 666}
669 667
670static int bf5xx_nand_remove(struct platform_device *pdev) 668static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
671{ 669{
672 struct bf5xx_nand_info *info = to_nand_info(pdev); 670 struct bf5xx_nand_info *info = to_nand_info(pdev);
673 671
@@ -704,11 +702,9 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
704 if (likely(mtd->writesize >= 512)) { 702 if (likely(mtd->writesize >= 512)) {
705 chip->ecc.size = 512; 703 chip->ecc.size = 512;
706 chip->ecc.bytes = 6; 704 chip->ecc.bytes = 6;
707 chip->ecc.strength = 2;
708 } else { 705 } else {
709 chip->ecc.size = 256; 706 chip->ecc.size = 256;
710 chip->ecc.bytes = 3; 707 chip->ecc.bytes = 3;
711 chip->ecc.strength = 1;
712 bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); 708 bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
713 SSYNC(); 709 SSYNC();
714 } 710 }
@@ -725,7 +721,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
725 * it can allocate all necessary resources then calls the 721 * it can allocate all necessary resources then calls the
726 * nand layer to look for devices 722 * nand layer to look for devices
727 */ 723 */
728static int bf5xx_nand_probe(struct platform_device *pdev) 724static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
729{ 725{
730 struct bf5xx_nand_platform *plat = to_nand_plat(pdev); 726 struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
731 struct bf5xx_nand_info *info = NULL; 727 struct bf5xx_nand_info *info = NULL;
@@ -865,7 +861,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
865/* driver device registration */ 861/* driver device registration */
866static struct platform_driver bf5xx_nand_driver = { 862static struct platform_driver bf5xx_nand_driver = {
867 .probe = bf5xx_nand_probe, 863 .probe = bf5xx_nand_probe,
868 .remove = bf5xx_nand_remove, 864 .remove = __devexit_p(bf5xx_nand_remove),
869 .suspend = bf5xx_nand_suspend, 865 .suspend = bf5xx_nand_suspend,
870 .resume = bf5xx_nand_resume, 866 .resume = bf5xx_nand_resume,
871 .driver = { 867 .driver = {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 010d6126653..87ebb4e5b0c 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -21,7 +21,6 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/module.h>
25#include <asm/io.h> 24#include <asm/io.h>
26 25
27#define CAFE_NAND_CTRL1 0x00 26#define CAFE_NAND_CTRL1 0x00
@@ -58,6 +57,7 @@
58 57
59struct cafe_priv { 58struct cafe_priv {
60 struct nand_chip nand; 59 struct nand_chip nand;
60 struct mtd_partition *parts;
61 struct pci_dev *pdev; 61 struct pci_dev *pdev;
62 void __iomem *mmio; 62 void __iomem *mmio;
63 struct rs_control *rs; 63 struct rs_control *rs;
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
102static int cafe_device_ready(struct mtd_info *mtd) 102static int cafe_device_ready(struct mtd_info *mtd)
103{ 103{
104 struct cafe_priv *cafe = mtd->priv; 104 struct cafe_priv *cafe = mtd->priv;
105 int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000); 105 int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
106 uint32_t irqs = cafe_readl(cafe, NAND_IRQ); 106 uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
107 107
108 cafe_writel(cafe, irqs, NAND_IRQ); 108 cafe_writel(cafe, irqs, NAND_IRQ);
@@ -364,27 +364,25 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
364 364
365/* Don't use -- use nand_read_oob_std for now */ 365/* Don't use -- use nand_read_oob_std for now */
366static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 366static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
367 int page) 367 int page, int sndcmd)
368{ 368{
369 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 369 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
370 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 370 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
371 return 0; 371 return 1;
372} 372}
373/** 373/**
374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read 374 * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read
375 * @mtd: mtd info structure 375 * @mtd: mtd info structure
376 * @chip: nand chip info structure 376 * @chip: nand chip info structure
377 * @buf: buffer to store read data 377 * @buf: buffer to store read data
378 * @oob_required: caller expects OOB data read to chip->oob_poi
379 * 378 *
380 * The hw generator calculates the error syndrome automatically. Therefore 379 * The hw generator calculates the error syndrome automatically. Therefor
381 * we need a special oob layout and handling. 380 * we need a special oob layout and handling.
382 */ 381 */
383static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, 382static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
384 uint8_t *buf, int oob_required, int page) 383 uint8_t *buf, int page)
385{ 384{
386 struct cafe_priv *cafe = mtd->priv; 385 struct cafe_priv *cafe = mtd->priv;
387 unsigned int max_bitflips = 0;
388 386
389 cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n", 387 cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
390 cafe_readl(cafe, NAND_ECC_RESULT), 388 cafe_readl(cafe, NAND_ECC_RESULT),
@@ -451,11 +449,10 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
451 } else { 449 } else {
452 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n); 450 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
453 mtd->ecc_stats.corrected += n; 451 mtd->ecc_stats.corrected += n;
454 max_bitflips = max_t(unsigned int, max_bitflips, n);
455 } 452 }
456 } 453 }
457 454
458 return max_bitflips; 455 return 0;
459} 456}
460 457
461static struct nand_ecclayout cafe_oobinfo_2048 = { 458static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -520,9 +517,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
520}; 517};
521 518
522 519
523static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd, 520static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
524 struct nand_chip *chip, 521 struct nand_chip *chip, const uint8_t *buf)
525 const uint8_t *buf, int oob_required)
526{ 522{
527 struct cafe_priv *cafe = mtd->priv; 523 struct cafe_priv *cafe = mtd->priv;
528 524
@@ -531,25 +527,19 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
531 527
532 /* Set up ECC autogeneration */ 528 /* Set up ECC autogeneration */
533 cafe->ctl2 |= (1<<30); 529 cafe->ctl2 |= (1<<30);
534
535 return 0;
536} 530}
537 531
538static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 532static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
539 const uint8_t *buf, int oob_required, int page, 533 const uint8_t *buf, int page, int cached, int raw)
540 int cached, int raw)
541{ 534{
542 int status; 535 int status;
543 536
544 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 537 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
545 538
546 if (unlikely(raw)) 539 if (unlikely(raw))
547 status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required); 540 chip->ecc.write_page_raw(mtd, chip, buf);
548 else 541 else
549 status = chip->ecc.write_page(mtd, chip, buf, oob_required); 542 chip->ecc.write_page(mtd, chip, buf);
550
551 if (status < 0)
552 return status;
553 543
554 /* 544 /*
555 * Cached progamming disabled for now, Not sure if its worth the 545 * Cached progamming disabled for now, Not sure if its worth the
@@ -576,6 +566,13 @@ static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
576 status = chip->waitfunc(mtd, chip); 566 status = chip->waitfunc(mtd, chip);
577 } 567 }
578 568
569#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
570 /* Send command to read back the data */
571 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
572
573 if (chip->verify_buf(mtd, buf, mtd->writesize))
574 return -EIO;
575#endif
579 return 0; 576 return 0;
580} 577}
581 578
@@ -585,7 +582,7 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
585} 582}
586 583
587/* F_2[X]/(X**6+X+1) */ 584/* F_2[X]/(X**6+X+1) */
588static unsigned short gf64_mul(u8 a, u8 b) 585static unsigned short __devinit gf64_mul(u8 a, u8 b)
589{ 586{
590 u8 c; 587 u8 c;
591 unsigned int i; 588 unsigned int i;
@@ -604,7 +601,7 @@ static unsigned short gf64_mul(u8 a, u8 b)
604} 601}
605 602
606/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */ 603/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
607static u16 gf4096_mul(u16 a, u16 b) 604static u16 __devinit gf4096_mul(u16 a, u16 b)
608{ 605{
609 u8 ah, al, bh, bl, ch, cl; 606 u8 ah, al, bh, bl, ch, cl;
610 607
@@ -619,20 +616,22 @@ static u16 gf4096_mul(u16 a, u16 b)
619 return (ch << 6) ^ cl; 616 return (ch << 6) ^ cl;
620} 617}
621 618
622static int cafe_mul(int x) 619static int __devinit cafe_mul(int x)
623{ 620{
624 if (x == 0) 621 if (x == 0)
625 return 1; 622 return 1;
626 return gf4096_mul(x, 0xe01); 623 return gf4096_mul(x, 0xe01);
627} 624}
628 625
629static int cafe_nand_probe(struct pci_dev *pdev, 626static int __devinit cafe_nand_probe(struct pci_dev *pdev,
630 const struct pci_device_id *ent) 627 const struct pci_device_id *ent)
631{ 628{
632 struct mtd_info *mtd; 629 struct mtd_info *mtd;
633 struct cafe_priv *cafe; 630 struct cafe_priv *cafe;
634 uint32_t ctrl; 631 uint32_t ctrl;
635 int err = 0; 632 int err = 0;
633 struct mtd_partition *parts;
634 int nr_parts;
636 635
637 /* Very old versions shared the same PCI ident for all three 636 /* Very old versions shared the same PCI ident for all three
638 functions on the chip. Verify the class too... */ 637 functions on the chip. Verify the class too... */
@@ -687,8 +686,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
687 cafe->nand.chip_delay = 0; 686 cafe->nand.chip_delay = 0;
688 687
689 /* Enable the following for a flash based bad block table */ 688 /* Enable the following for a flash based bad block table */
690 cafe->nand.bbt_options = NAND_BBT_USE_FLASH; 689 cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
691 cafe->nand.options = NAND_OWN_BUFFERS;
692 690
693 if (skipbbt) { 691 if (skipbbt) {
694 cafe->nand.options |= NAND_SKIP_BBTSCAN; 692 cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -786,7 +784,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
786 cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 784 cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
787 cafe->nand.ecc.size = mtd->writesize; 785 cafe->nand.ecc.size = mtd->writesize;
788 cafe->nand.ecc.bytes = 14; 786 cafe->nand.ecc.bytes = 14;
789 cafe->nand.ecc.strength = 4;
790 cafe->nand.ecc.hwctl = (void *)cafe_nand_bug; 787 cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
791 cafe->nand.ecc.calculate = (void *)cafe_nand_bug; 788 cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
792 cafe->nand.ecc.correct = (void *)cafe_nand_bug; 789 cafe->nand.ecc.correct = (void *)cafe_nand_bug;
@@ -802,9 +799,18 @@ static int cafe_nand_probe(struct pci_dev *pdev,
802 799
803 pci_set_drvdata(pdev, mtd); 800 pci_set_drvdata(pdev, mtd);
804 801
805 mtd->name = "cafe_nand"; 802 /* We register the whole device first, separate from the partitions */
806 mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0); 803 mtd_device_register(mtd, NULL, 0);
807 804
805#ifdef CONFIG_MTD_CMDLINE_PARTS
806 mtd->name = "cafe_nand";
807#endif
808 nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
809 if (nr_parts > 0) {
810 cafe->parts = parts;
811 dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
812 mtd_device_register(mtd, parts, nr_parts);
813 }
808 goto out; 814 goto out;
809 815
810 out_irq: 816 out_irq:
@@ -821,7 +827,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
821 return err; 827 return err;
822} 828}
823 829
824static void cafe_nand_remove(struct pci_dev *pdev) 830static void __devexit cafe_nand_remove(struct pci_dev *pdev)
825{ 831{
826 struct mtd_info *mtd = pci_get_drvdata(pdev); 832 struct mtd_info *mtd = pci_get_drvdata(pdev);
827 struct cafe_priv *cafe = mtd->priv; 833 struct cafe_priv *cafe = mtd->priv;
@@ -887,11 +893,21 @@ static struct pci_driver cafe_nand_pci_driver = {
887 .name = "CAFÉ NAND", 893 .name = "CAFÉ NAND",
888 .id_table = cafe_nand_tbl, 894 .id_table = cafe_nand_tbl,
889 .probe = cafe_nand_probe, 895 .probe = cafe_nand_probe,
890 .remove = cafe_nand_remove, 896 .remove = __devexit_p(cafe_nand_remove),
891 .resume = cafe_nand_resume, 897 .resume = cafe_nand_resume,
892}; 898};
893 899
894module_pci_driver(cafe_nand_pci_driver); 900static int __init cafe_nand_init(void)
901{
902 return pci_register_driver(&cafe_nand_pci_driver);
903}
904
905static void __exit cafe_nand_exit(void)
906{
907 pci_unregister_driver(&cafe_nand_pci_driver);
908}
909module_init(cafe_nand_init);
910module_exit(cafe_nand_exit);
895 911
896MODULE_LICENSE("GPL"); 912MODULE_LICENSE("GPL");
897MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 913MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 39b2ef84881..6fc043a30d1 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -22,7 +22,6 @@
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/module.h>
26 25
27#include <asm/io.h> 26#include <asm/io.h>
28#include <asm/irq.h> 27#include <asm/irq.h>
@@ -51,6 +50,8 @@ static struct mtd_partition partition_info[] = {
51}; 50};
52#define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) 51#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
53 52
53const char *part_probes[] = { "cmdlinepart", NULL };
54
54static u_char cmx270_read_byte(struct mtd_info *mtd) 55static u_char cmx270_read_byte(struct mtd_info *mtd)
55{ 56{
56 struct nand_chip *this = mtd->priv; 57 struct nand_chip *this = mtd->priv;
@@ -76,6 +77,18 @@ static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
76 *buf++ = readl(this->IO_ADDR_R) >> 16; 77 *buf++ = readl(this->IO_ADDR_R) >> 16;
77} 78}
78 79
80static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
81{
82 int i;
83 struct nand_chip *this = mtd->priv;
84
85 for (i=0; i<len; i++)
86 if (buf[i] != (u_char)(readl(this->IO_ADDR_R) >> 16))
87 return -EFAULT;
88
89 return 0;
90}
91
79static inline void nand_cs_on(void) 92static inline void nand_cs_on(void)
80{ 93{
81 gpio_set_value(GPIO_NAND_CS, 0); 94 gpio_set_value(GPIO_NAND_CS, 0);
@@ -138,6 +151,9 @@ static int cmx270_device_ready(struct mtd_info *mtd)
138static int __init cmx270_init(void) 151static int __init cmx270_init(void)
139{ 152{
140 struct nand_chip *this; 153 struct nand_chip *this;
154 const char *part_type;
155 struct mtd_partition *mtd_parts;
156 int mtd_parts_nb = 0;
141 int ret; 157 int ret;
142 158
143 if (!(machine_is_armcore() && cpu_is_pxa27x())) 159 if (!(machine_is_armcore() && cpu_is_pxa27x()))
@@ -197,6 +213,7 @@ static int __init cmx270_init(void)
197 this->read_byte = cmx270_read_byte; 213 this->read_byte = cmx270_read_byte;
198 this->read_buf = cmx270_read_buf; 214 this->read_buf = cmx270_read_buf;
199 this->write_buf = cmx270_write_buf; 215 this->write_buf = cmx270_write_buf;
216 this->verify_buf = cmx270_verify_buf;
200 217
201 /* Scan to find existence of the device */ 218 /* Scan to find existence of the device */
202 if (nand_scan (cmx270_nand_mtd, 1)) { 219 if (nand_scan (cmx270_nand_mtd, 1)) {
@@ -205,9 +222,23 @@ static int __init cmx270_init(void)
205 goto err_scan; 222 goto err_scan;
206 } 223 }
207 224
225#ifdef CONFIG_MTD_CMDLINE_PARTS
226 mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
227 &mtd_parts, 0);
228 if (mtd_parts_nb > 0)
229 part_type = "command line";
230 else
231 mtd_parts_nb = 0;
232#endif
233 if (!mtd_parts_nb) {
234 mtd_parts = partition_info;
235 mtd_parts_nb = NUM_PARTITIONS;
236 part_type = "static";
237 }
238
208 /* Register the partitions */ 239 /* Register the partitions */
209 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL, 240 pr_notice("Using %s partition definition\n", part_type);
210 partition_info, NUM_PARTITIONS); 241 ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
211 if (ret) 242 if (ret)
212 goto err_scan; 243 goto err_scan;
213 244
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 2cdeab8bebc..f59ad1f2d5d 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -237,10 +237,9 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
237 this->ecc.hwctl = cs_enable_hwecc; 237 this->ecc.hwctl = cs_enable_hwecc;
238 this->ecc.calculate = cs_calculate_ecc; 238 this->ecc.calculate = cs_calculate_ecc;
239 this->ecc.correct = nand_correct_data; 239 this->ecc.correct = nand_correct_data;
240 this->ecc.strength = 1;
241 240
242 /* Enable the following for a flash based bad block table */ 241 /* Enable the following for a flash based bad block table */
243 this->bbt_options = NAND_BBT_USE_FLASH; 242 this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
244 243
245 /* Scan to find existence of the device */ 244 /* Scan to find existence of the device */
246 if (nand_scan(new_mtd, 1)) { 245 if (nand_scan(new_mtd, 1)) {
@@ -278,11 +277,15 @@ static int is_geode(void)
278 return 0; 277 return 0;
279} 278}
280 279
280static const char *part_probes[] = { "cmdlinepart", NULL };
281
281static int __init cs553x_init(void) 282static int __init cs553x_init(void)
282{ 283{
283 int err = -ENXIO; 284 int err = -ENXIO;
284 int i; 285 int i;
285 uint64_t val; 286 uint64_t val;
287 int mtd_parts_nb = 0;
288 struct mtd_partition *mtd_parts = NULL;
286 289
287 /* If the CPU isn't a Geode GX or LX, abort */ 290 /* If the CPU isn't a Geode GX or LX, abort */
288 if (!is_geode()) 291 if (!is_geode())
@@ -312,9 +315,13 @@ static int __init cs553x_init(void)
312 do mtdconcat etc. if we want to. */ 315 do mtdconcat etc. if we want to. */
313 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 316 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
314 if (cs553x_mtd[i]) { 317 if (cs553x_mtd[i]) {
318
315 /* If any devices registered, return success. Else the last error. */ 319 /* If any devices registered, return success. Else the last error. */
316 mtd_device_parse_register(cs553x_mtd[i], NULL, NULL, 320 mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
317 NULL, 0); 321 if (mtd_parts_nb > 0)
322 printk(KERN_NOTICE "Using command line partition definition\n");
323 mtd_device_register(cs553x_mtd[i], mtd_parts,
324 mtd_parts_nb);
318 err = 0; 325 err = 0;
319 } 326 }
320 } 327 }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 3502606f648..1f34951ae1a 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -33,10 +33,9 @@
33#include <linux/mtd/nand.h> 33#include <linux/mtd/nand.h>
34#include <linux/mtd/partitions.h> 34#include <linux/mtd/partitions.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/of_device.h>
37 36
38#include <linux/platform_data/mtd-davinci.h> 37#include <mach/nand.h>
39#include <linux/platform_data/mtd-davinci-aemif.h> 38#include <mach/aemif.h>
40 39
41/* 40/*
42 * This is a device driver for the NAND flash controller found on the 41 * This is a device driver for the NAND flash controller found on the
@@ -58,6 +57,7 @@ struct davinci_nand_info {
58 57
59 struct device *dev; 58 struct device *dev;
60 struct clk *clk; 59 struct clk *clk;
60 bool partitioned;
61 61
62 bool is_readmode; 62 bool is_readmode;
63 63
@@ -519,75 +519,9 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
519 }, 519 },
520}; 520};
521 521
522#if defined(CONFIG_OF)
523static const struct of_device_id davinci_nand_of_match[] = {
524 {.compatible = "ti,davinci-nand", },
525 {},
526}
527MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
528
529static struct davinci_nand_pdata
530 *nand_davinci_get_pdata(struct platform_device *pdev)
531{
532 if (!pdev->dev.platform_data && pdev->dev.of_node) {
533 struct davinci_nand_pdata *pdata;
534 const char *mode;
535 u32 prop;
536 int len;
537
538 pdata = devm_kzalloc(&pdev->dev,
539 sizeof(struct davinci_nand_pdata),
540 GFP_KERNEL);
541 pdev->dev.platform_data = pdata;
542 if (!pdata)
543 return NULL;
544 if (!of_property_read_u32(pdev->dev.of_node,
545 "ti,davinci-chipselect", &prop))
546 pdev->id = prop;
547 if (!of_property_read_u32(pdev->dev.of_node,
548 "ti,davinci-mask-ale", &prop))
549 pdata->mask_ale = prop;
550 if (!of_property_read_u32(pdev->dev.of_node,
551 "ti,davinci-mask-cle", &prop))
552 pdata->mask_cle = prop;
553 if (!of_property_read_u32(pdev->dev.of_node,
554 "ti,davinci-mask-chipsel", &prop))
555 pdata->mask_chipsel = prop;
556 if (!of_property_read_string(pdev->dev.of_node,
557 "ti,davinci-ecc-mode", &mode)) {
558 if (!strncmp("none", mode, 4))
559 pdata->ecc_mode = NAND_ECC_NONE;
560 if (!strncmp("soft", mode, 4))
561 pdata->ecc_mode = NAND_ECC_SOFT;
562 if (!strncmp("hw", mode, 2))
563 pdata->ecc_mode = NAND_ECC_HW;
564 }
565 if (!of_property_read_u32(pdev->dev.of_node,
566 "ti,davinci-ecc-bits", &prop))
567 pdata->ecc_bits = prop;
568 if (!of_property_read_u32(pdev->dev.of_node,
569 "ti,davinci-nand-buswidth", &prop))
570 if (prop == 16)
571 pdata->options |= NAND_BUSWIDTH_16;
572 if (of_find_property(pdev->dev.of_node,
573 "ti,davinci-nand-use-bbt", &len))
574 pdata->bbt_options = NAND_BBT_USE_FLASH;
575 }
576
577 return pdev->dev.platform_data;
578}
579#else
580#define davinci_nand_of_match NULL
581static struct davinci_nand_pdata
582 *nand_davinci_get_pdata(struct platform_device *pdev)
583{
584 return pdev->dev.platform_data;
585}
586#endif
587
588static int __init nand_davinci_probe(struct platform_device *pdev) 522static int __init nand_davinci_probe(struct platform_device *pdev)
589{ 523{
590 struct davinci_nand_pdata *pdata; 524 struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
591 struct davinci_nand_info *info; 525 struct davinci_nand_info *info;
592 struct resource *res1; 526 struct resource *res1;
593 struct resource *res2; 527 struct resource *res2;
@@ -596,8 +530,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
596 int ret; 530 int ret;
597 uint32_t val; 531 uint32_t val;
598 nand_ecc_modes_t ecc_mode; 532 nand_ecc_modes_t ecc_mode;
533 struct mtd_partition *mtd_parts = NULL;
534 int mtd_parts_nb = 0;
599 535
600 pdata = nand_davinci_get_pdata(pdev);
601 /* insist on board-specific configuration */ 536 /* insist on board-specific configuration */
602 if (!pdata) 537 if (!pdata)
603 return -ENODEV; 538 return -ENODEV;
@@ -646,9 +581,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
646 info->chip.chip_delay = 0; 581 info->chip.chip_delay = 0;
647 info->chip.select_chip = nand_davinci_select_chip; 582 info->chip.select_chip = nand_davinci_select_chip;
648 583
649 /* options such as NAND_BBT_USE_FLASH */ 584 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */
650 info->chip.bbt_options = pdata->bbt_options;
651 /* options such as 16-bit widths */
652 info->chip.options = pdata->options; 585 info->chip.options = pdata->options;
653 info->chip.bbt_td = pdata->bbt_td; 586 info->chip.bbt_td = pdata->bbt_td;
654 info->chip.bbt_md = pdata->bbt_md; 587 info->chip.bbt_md = pdata->bbt_md;
@@ -709,7 +642,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
709 info->chip.ecc.bytes = 3; 642 info->chip.ecc.bytes = 3;
710 } 643 }
711 info->chip.ecc.size = 512; 644 info->chip.ecc.size = 512;
712 info->chip.ecc.strength = pdata->ecc_bits;
713 break; 645 break;
714 default: 646 default:
715 ret = -EINVAL; 647 ret = -EINVAL;
@@ -724,7 +656,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
724 goto err_clk; 656 goto err_clk;
725 } 657 }
726 658
727 ret = clk_prepare_enable(info->clk); 659 ret = clk_enable(info->clk);
728 if (ret < 0) { 660 if (ret < 0) {
729 dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", 661 dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
730 ret); 662 ret);
@@ -744,9 +676,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
744 676
745 davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val); 677 davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
746 678
747 ret = 0; 679 ret = davinci_aemif_setup_timing(info->timing, info->base,
748 if (info->timing)
749 ret = davinci_aemif_setup_timing(info->timing, info->base,
750 info->core_chipsel); 680 info->core_chipsel);
751 if (ret < 0) { 681 if (ret < 0) {
752 dev_dbg(&pdev->dev, "NAND timing values setup fail\n"); 682 dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
@@ -821,16 +751,34 @@ syndrome_done:
821 if (ret < 0) 751 if (ret < 0)
822 goto err_scan; 752 goto err_scan;
823 753
824 if (pdata->parts) 754 if (mtd_has_cmdlinepart()) {
825 ret = mtd_device_parse_register(&info->mtd, NULL, NULL, 755 static const char *probes[] __initconst = {
826 pdata->parts, pdata->nr_parts); 756 "cmdlinepart", NULL
827 else { 757 };
828 struct mtd_part_parser_data ppdata; 758
759 mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
760 &mtd_parts, 0);
761 }
762
763 if (mtd_parts_nb <= 0) {
764 mtd_parts = pdata->parts;
765 mtd_parts_nb = pdata->nr_parts;
766 }
829 767
830 ppdata.of_node = pdev->dev.of_node; 768 /* Register any partitions */
831 ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata, 769 if (mtd_parts_nb > 0) {
832 NULL, 0); 770 ret = mtd_device_register(&info->mtd, mtd_parts,
771 mtd_parts_nb);
772 if (ret == 0)
773 info->partitioned = true;
833 } 774 }
775
776 /* If there's no partition info, just package the whole chip
777 * as a single MTD device.
778 */
779 if (!info->partitioned)
780 ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
781
834 if (ret < 0) 782 if (ret < 0)
835 goto err_scan; 783 goto err_scan;
836 784
@@ -842,7 +790,7 @@ syndrome_done:
842 790
843err_scan: 791err_scan:
844err_timing: 792err_timing:
845 clk_disable_unprepare(info->clk); 793 clk_disable(info->clk);
846 794
847err_clk_enable: 795err_clk_enable:
848 clk_put(info->clk); 796 clk_put(info->clk);
@@ -868,6 +816,9 @@ err_nomem:
868static int __exit nand_davinci_remove(struct platform_device *pdev) 816static int __exit nand_davinci_remove(struct platform_device *pdev)
869{ 817{
870 struct davinci_nand_info *info = platform_get_drvdata(pdev); 818 struct davinci_nand_info *info = platform_get_drvdata(pdev);
819 int status;
820
821 status = mtd_device_unregister(&info->mtd);
871 822
872 spin_lock_irq(&davinci_nand_lock); 823 spin_lock_irq(&davinci_nand_lock);
873 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) 824 if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
@@ -879,7 +830,7 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
879 830
880 nand_release(&info->mtd); 831 nand_release(&info->mtd);
881 832
882 clk_disable_unprepare(info->clk); 833 clk_disable(info->clk);
883 clk_put(info->clk); 834 clk_put(info->clk);
884 835
885 kfree(info); 836 kfree(info);
@@ -891,8 +842,6 @@ static struct platform_driver nand_davinci_driver = {
891 .remove = __exit_p(nand_davinci_remove), 842 .remove = __exit_p(nand_davinci_remove),
892 .driver = { 843 .driver = {
893 .name = "davinci_nand", 844 .name = "davinci_nand",
894 .owner = THIS_MODULE,
895 .of_match_table = davinci_nand_of_match,
896 }, 845 },
897}; 846};
898MODULE_ALIAS("platform:davinci_nand"); 847MODULE_ALIAS("platform:davinci_nand");
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0c8bb6bf842..d5276218945 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -16,12 +16,14 @@
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * 17 *
18 */ 18 */
19
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
22#include <linux/wait.h> 23#include <linux/wait.h>
23#include <linux/mutex.h> 24#include <linux/mutex.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/pci.h>
25#include <linux/mtd/mtd.h> 27#include <linux/mtd/mtd.h>
26#include <linux/module.h> 28#include <linux/module.h>
27 29
@@ -87,6 +89,13 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
87 * format the bank into the proper bits for the controller */ 89 * format the bank into the proper bits for the controller */
88#define BANK(x) ((x) << 24) 90#define BANK(x) ((x) << 24)
89 91
92/* List of platforms this NAND controller has be integrated into */
93static const struct pci_device_id denali_pci_ids[] = {
94 { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
95 { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
96 { /* end: all zeroes */ }
97};
98
90/* forward declarations */ 99/* forward declarations */
91static void clear_interrupts(struct denali_nand_info *denali); 100static void clear_interrupts(struct denali_nand_info *denali);
92static uint32_t wait_for_irq(struct denali_nand_info *denali, 101static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -690,7 +699,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
690 699
691 if (comp_res == 0) { 700 if (comp_res == 0) {
692 /* timeout */ 701 /* timeout */
693 pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n", 702 printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
694 intr_status, irq_mask); 703 intr_status, irq_mask);
695 704
696 intr_status = 0; 705 intr_status = 0;
@@ -915,10 +924,9 @@ bool is_erased(uint8_t *buf, int len)
915#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 924#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
916 925
917static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, 926static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
918 uint32_t irq_status, unsigned int *max_bitflips) 927 uint32_t irq_status)
919{ 928{
920 bool check_erased_page = false; 929 bool check_erased_page = false;
921 unsigned int bitflips = 0;
922 930
923 if (irq_status & INTR_STATUS__ECC_ERR) { 931 if (irq_status & INTR_STATUS__ECC_ERR) {
924 /* read the ECC errors. we'll ignore them for now */ 932 /* read the ECC errors. we'll ignore them for now */
@@ -957,7 +965,6 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
957 /* correct the ECC error */ 965 /* correct the ECC error */
958 buf[offset] ^= err_correction_value; 966 buf[offset] ^= err_correction_value;
959 denali->mtd.ecc_stats.corrected++; 967 denali->mtd.ecc_stats.corrected++;
960 bitflips++;
961 } 968 }
962 } else { 969 } else {
963 /* if the error is not correctable, need to 970 /* if the error is not correctable, need to
@@ -977,7 +984,6 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
977 clear_interrupts(denali); 984 clear_interrupts(denali);
978 denali_set_intr_modes(denali, true); 985 denali_set_intr_modes(denali, true);
979 } 986 }
980 *max_bitflips = bitflips;
981 return check_erased_page; 987 return check_erased_page;
982} 988}
983 989
@@ -1019,7 +1025,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
1019 1025
1020/* writes a page. user specifies type, and this function handles the 1026/* writes a page. user specifies type, and this function handles the
1021 * configuration details. */ 1027 * configuration details. */
1022static int write_page(struct mtd_info *mtd, struct nand_chip *chip, 1028static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1023 const uint8_t *buf, bool raw_xfer) 1029 const uint8_t *buf, bool raw_xfer)
1024{ 1030{
1025 struct denali_nand_info *denali = mtd_to_denali(mtd); 1031 struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1069,8 +1075,6 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
1069 1075
1070 denali_enable_dma(denali, false); 1076 denali_enable_dma(denali, false);
1071 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE); 1077 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1072
1073 return 0;
1074} 1078}
1075 1079
1076/* NAND core entry points */ 1080/* NAND core entry points */
@@ -1079,24 +1083,24 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
1079 * writing a page with ECC or without is similar, all the work is done 1083 * writing a page with ECC or without is similar, all the work is done
1080 * by write_page above. 1084 * by write_page above.
1081 * */ 1085 * */
1082static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1086static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1083 const uint8_t *buf, int oob_required) 1087 const uint8_t *buf)
1084{ 1088{
1085 /* for regular page writes, we let HW handle all the ECC 1089 /* for regular page writes, we let HW handle all the ECC
1086 * data written to the device. */ 1090 * data written to the device. */
1087 return write_page(mtd, chip, buf, false); 1091 write_page(mtd, chip, buf, false);
1088} 1092}
1089 1093
1090/* This is the callback that the NAND core calls to write a page without ECC. 1094/* This is the callback that the NAND core calls to write a page without ECC.
1091 * raw access is similar to ECC page writes, so all the work is done in the 1095 * raw access is similar to ECC page writes, so all the work is done in the
1092 * write_page() function above. 1096 * write_page() function above.
1093 */ 1097 */
1094static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1098static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1095 const uint8_t *buf, int oob_required) 1099 const uint8_t *buf)
1096{ 1100{
1097 /* for raw page writes, we want to disable ECC and simply write 1101 /* for raw page writes, we want to disable ECC and simply write
1098 whatever data is in the buffer. */ 1102 whatever data is in the buffer. */
1099 return write_page(mtd, chip, buf, true); 1103 write_page(mtd, chip, buf, true);
1100} 1104}
1101 1105
1102static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 1106static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1106,17 +1110,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1106} 1110}
1107 1111
1108static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1112static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1109 int page) 1113 int page, int sndcmd)
1110{ 1114{
1111 read_oob_data(mtd, chip->oob_poi, page); 1115 read_oob_data(mtd, chip->oob_poi, page);
1112 1116
1113 return 0; 1117 return 0; /* notify NAND core to send command to
1118 NAND device. */
1114} 1119}
1115 1120
1116static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 1121static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1117 uint8_t *buf, int oob_required, int page) 1122 uint8_t *buf, int page)
1118{ 1123{
1119 unsigned int max_bitflips;
1120 struct denali_nand_info *denali = mtd_to_denali(mtd); 1124 struct denali_nand_info *denali = mtd_to_denali(mtd);
1121 1125
1122 dma_addr_t addr = denali->buf.dma_buf; 1126 dma_addr_t addr = denali->buf.dma_buf;
@@ -1149,7 +1153,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1149 1153
1150 memcpy(buf, denali->buf.buf, mtd->writesize); 1154 memcpy(buf, denali->buf.buf, mtd->writesize);
1151 1155
1152 check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips); 1156 check_erased_page = handle_ecc(denali, buf, irq_status);
1153 denali_enable_dma(denali, false); 1157 denali_enable_dma(denali, false);
1154 1158
1155 if (check_erased_page) { 1159 if (check_erased_page) {
@@ -1163,11 +1167,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1163 denali->mtd.ecc_stats.failed++; 1167 denali->mtd.ecc_stats.failed++;
1164 } 1168 }
1165 } 1169 }
1166 return max_bitflips; 1170 return 0;
1167} 1171}
1168 1172
1169static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1173static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1170 uint8_t *buf, int oob_required, int page) 1174 uint8_t *buf, int page)
1171{ 1175{
1172 struct denali_nand_info *denali = mtd_to_denali(mtd); 1176 struct denali_nand_info *denali = mtd_to_denali(mtd);
1173 1177
@@ -1296,7 +1300,8 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1296 /* TODO: Read OOB data */ 1300 /* TODO: Read OOB data */
1297 break; 1301 break;
1298 default: 1302 default:
1299 pr_err(": unsupported command received 0x%x\n", cmd); 1303 printk(KERN_ERR ": unsupported command"
1304 " received 0x%x\n", cmd);
1300 break; 1305 break;
1301 } 1306 }
1302} 1307}
@@ -1341,7 +1346,6 @@ static void denali_hw_init(struct denali_nand_info *denali)
1341 * */ 1346 * */
1342 denali->bbtskipbytes = ioread32(denali->flash_reg + 1347 denali->bbtskipbytes = ioread32(denali->flash_reg +
1343 SPARE_AREA_SKIP_BYTES); 1348 SPARE_AREA_SKIP_BYTES);
1344 detect_max_banks(denali);
1345 denali_nand_reset(denali); 1349 denali_nand_reset(denali);
1346 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); 1350 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1347 iowrite32(CHIP_EN_DONT_CARE__FLAG, 1351 iowrite32(CHIP_EN_DONT_CARE__FLAG,
@@ -1352,6 +1356,7 @@ static void denali_hw_init(struct denali_nand_info *denali)
1352 /* Should set value for these registers when init */ 1356 /* Should set value for these registers when init */
1353 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1357 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1354 iowrite32(1, denali->flash_reg + ECC_ENABLE); 1358 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1359 detect_max_banks(denali);
1355 denali_nand_timing_set(denali); 1360 denali_nand_timing_set(denali);
1356 denali_irq_init(denali); 1361 denali_irq_init(denali);
1357} 1362}
@@ -1415,48 +1420,107 @@ void denali_drv_init(struct denali_nand_info *denali)
1415 denali->irq_status = 0; 1420 denali->irq_status = 0;
1416} 1421}
1417 1422
1418int denali_init(struct denali_nand_info *denali) 1423/* driver entry point */
1424static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1419{ 1425{
1420 int ret; 1426 int ret = -ENODEV;
1427 resource_size_t csr_base, mem_base;
1428 unsigned long csr_len, mem_len;
1429 struct denali_nand_info *denali;
1421 1430
1422 if (denali->platform == INTEL_CE4100) { 1431 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
1432 if (!denali)
1433 return -ENOMEM;
1434
1435 ret = pci_enable_device(dev);
1436 if (ret) {
1437 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
1438 goto failed_alloc_memery;
1439 }
1440
1441 if (id->driver_data == INTEL_CE4100) {
1423 /* Due to a silicon limitation, we can only support 1442 /* Due to a silicon limitation, we can only support
1424 * ONFI timing mode 1 and below. 1443 * ONFI timing mode 1 and below.
1425 */ 1444 */
1426 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { 1445 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
1427 pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n"); 1446 printk(KERN_ERR "Intel CE4100 only supports"
1428 return -EINVAL; 1447 " ONFI timing mode 1 or below\n");
1448 ret = -EINVAL;
1449 goto failed_enable_dev;
1450 }
1451 denali->platform = INTEL_CE4100;
1452 mem_base = pci_resource_start(dev, 0);
1453 mem_len = pci_resource_len(dev, 1);
1454 csr_base = pci_resource_start(dev, 1);
1455 csr_len = pci_resource_len(dev, 1);
1456 } else {
1457 denali->platform = INTEL_MRST;
1458 csr_base = pci_resource_start(dev, 0);
1459 csr_len = pci_resource_len(dev, 0);
1460 mem_base = pci_resource_start(dev, 1);
1461 mem_len = pci_resource_len(dev, 1);
1462 if (!mem_len) {
1463 mem_base = csr_base + csr_len;
1464 mem_len = csr_len;
1429 } 1465 }
1430 } 1466 }
1431 1467
1432 /* Is 32-bit DMA supported? */ 1468 /* Is 32-bit DMA supported? */
1433 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); 1469 ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
1434 if (ret) { 1470 if (ret) {
1435 pr_err("Spectra: no usable DMA configuration\n"); 1471 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1436 return ret; 1472 goto failed_enable_dev;
1437 } 1473 }
1438 denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf, 1474 denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
1439 DENALI_BUF_SIZE, 1475 DENALI_BUF_SIZE,
1440 DMA_BIDIRECTIONAL); 1476 DMA_BIDIRECTIONAL);
1441 1477
1442 if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) { 1478 if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
1443 dev_err(denali->dev, "Spectra: failed to map DMA buffer\n"); 1479 dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
1444 return -EIO; 1480 goto failed_enable_dev;
1445 } 1481 }
1446 denali->mtd.dev.parent = denali->dev; 1482
1483 pci_set_master(dev);
1484 denali->dev = &dev->dev;
1485 denali->mtd.dev.parent = &dev->dev;
1486
1487 ret = pci_request_regions(dev, DENALI_NAND_NAME);
1488 if (ret) {
1489 printk(KERN_ERR "Spectra: Unable to request memory regions\n");
1490 goto failed_dma_map;
1491 }
1492
1493 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
1494 if (!denali->flash_reg) {
1495 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
1496 ret = -ENOMEM;
1497 goto failed_req_regions;
1498 }
1499
1500 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
1501 if (!denali->flash_mem) {
1502 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
1503 ret = -ENOMEM;
1504 goto failed_remap_reg;
1505 }
1506
1447 denali_hw_init(denali); 1507 denali_hw_init(denali);
1448 denali_drv_init(denali); 1508 denali_drv_init(denali);
1449 1509
1450 /* denali_isr register is done after all the hardware 1510 /* denali_isr register is done after all the hardware
1451 * initilization is finished*/ 1511 * initilization is finished*/
1452 if (request_irq(denali->irq, denali_isr, IRQF_SHARED, 1512 if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
1453 DENALI_NAND_NAME, denali)) { 1513 DENALI_NAND_NAME, denali)) {
1454 pr_err("Spectra: Unable to allocate IRQ\n"); 1514 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
1455 return -ENODEV; 1515 ret = -ENODEV;
1516 goto failed_remap_mem;
1456 } 1517 }
1457 1518
1458 /* now that our ISR is registered, we can enable interrupts */ 1519 /* now that our ISR is registered, we can enable interrupts */
1459 denali_set_intr_modes(denali, true); 1520 denali_set_intr_modes(denali, true);
1521
1522 pci_set_drvdata(dev, denali);
1523
1460 denali->mtd.name = "denali-nand"; 1524 denali->mtd.name = "denali-nand";
1461 denali->mtd.owner = THIS_MODULE; 1525 denali->mtd.owner = THIS_MODULE;
1462 denali->mtd.priv = &denali->nand; 1526 denali->mtd.priv = &denali->nand;
@@ -1480,7 +1544,8 @@ int denali_init(struct denali_nand_info *denali)
1480 */ 1544 */
1481 if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) { 1545 if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
1482 ret = -ENODEV; 1546 ret = -ENODEV;
1483 pr_err("Spectra: device size not supported by this version of MTD."); 1547 printk(KERN_ERR "Spectra: device size not supported by this "
1548 "version of MTD.");
1484 goto failed_req_irq; 1549 goto failed_req_irq;
1485 } 1550 }
1486 1551
@@ -1512,8 +1577,7 @@ int denali_init(struct denali_nand_info *denali)
1512 denali->nand.bbt_md = &bbt_mirror_descr; 1577 denali->nand.bbt_md = &bbt_mirror_descr;
1513 1578
1514 /* skip the scan for now until we have OOB read and write support */ 1579 /* skip the scan for now until we have OOB read and write support */
1515 denali->nand.bbt_options |= NAND_BBT_USE_FLASH; 1580 denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
1516 denali->nand.options |= NAND_SKIP_BBTSCAN;
1517 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1581 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
1518 1582
1519 /* Denali Controller only support 15bit and 8bit ECC in MRST, 1583 /* Denali Controller only support 15bit and 8bit ECC in MRST,
@@ -1525,25 +1589,22 @@ int denali_init(struct denali_nand_info *denali)
1525 ECC_15BITS * (denali->mtd.writesize / 1589 ECC_15BITS * (denali->mtd.writesize /
1526 ECC_SECTOR_SIZE)))) { 1590 ECC_SECTOR_SIZE)))) {
1527 /* if MLC OOB size is large enough, use 15bit ECC*/ 1591 /* if MLC OOB size is large enough, use 15bit ECC*/
1528 denali->nand.ecc.strength = 15;
1529 denali->nand.ecc.layout = &nand_15bit_oob; 1592 denali->nand.ecc.layout = &nand_15bit_oob;
1530 denali->nand.ecc.bytes = ECC_15BITS; 1593 denali->nand.ecc.bytes = ECC_15BITS;
1531 iowrite32(15, denali->flash_reg + ECC_CORRECTION); 1594 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
1532 } else if (denali->mtd.oobsize < (denali->bbtskipbytes + 1595 } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
1533 ECC_8BITS * (denali->mtd.writesize / 1596 ECC_8BITS * (denali->mtd.writesize /
1534 ECC_SECTOR_SIZE))) { 1597 ECC_SECTOR_SIZE))) {
1535 pr_err("Your NAND chip OOB is not large enough to \ 1598 printk(KERN_ERR "Your NAND chip OOB is not large enough to"
1536 contain 8bit ECC correction codes"); 1599 " contain 8bit ECC correction codes");
1537 goto failed_req_irq; 1600 goto failed_req_irq;
1538 } else { 1601 } else {
1539 denali->nand.ecc.strength = 8;
1540 denali->nand.ecc.layout = &nand_8bit_oob; 1602 denali->nand.ecc.layout = &nand_8bit_oob;
1541 denali->nand.ecc.bytes = ECC_8BITS; 1603 denali->nand.ecc.bytes = ECC_8BITS;
1542 iowrite32(8, denali->flash_reg + ECC_CORRECTION); 1604 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1543 } 1605 }
1544 1606
1545 denali->nand.ecc.bytes *= denali->devnum; 1607 denali->nand.ecc.bytes *= denali->devnum;
1546 denali->nand.ecc.strength *= denali->devnum;
1547 denali->nand.ecc.layout->eccbytes *= 1608 denali->nand.ecc.layout->eccbytes *=
1548 denali->mtd.writesize / ECC_SECTOR_SIZE; 1609 denali->mtd.writesize / ECC_SECTOR_SIZE;
1549 denali->nand.ecc.layout->oobfree[0].offset = 1610 denali->nand.ecc.layout->oobfree[0].offset =
@@ -1585,24 +1646,70 @@ int denali_init(struct denali_nand_info *denali)
1585 1646
1586 ret = mtd_device_register(&denali->mtd, NULL, 0); 1647 ret = mtd_device_register(&denali->mtd, NULL, 0);
1587 if (ret) { 1648 if (ret) {
1588 dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n", 1649 dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
1589 ret); 1650 ret);
1590 goto failed_req_irq; 1651 goto failed_req_irq;
1591 } 1652 }
1592 return 0; 1653 return 0;
1593 1654
1594failed_req_irq: 1655failed_req_irq:
1595 denali_irq_cleanup(denali->irq, denali); 1656 denali_irq_cleanup(dev->irq, denali);
1596 1657failed_remap_mem:
1658 iounmap(denali->flash_mem);
1659failed_remap_reg:
1660 iounmap(denali->flash_reg);
1661failed_req_regions:
1662 pci_release_regions(dev);
1663failed_dma_map:
1664 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1665 DMA_BIDIRECTIONAL);
1666failed_enable_dev:
1667 pci_disable_device(dev);
1668failed_alloc_memery:
1669 kfree(denali);
1597 return ret; 1670 return ret;
1598} 1671}
1599EXPORT_SYMBOL(denali_init);
1600 1672
1601/* driver exit point */ 1673/* driver exit point */
1602void denali_remove(struct denali_nand_info *denali) 1674static void denali_pci_remove(struct pci_dev *dev)
1603{ 1675{
1604 denali_irq_cleanup(denali->irq, denali); 1676 struct denali_nand_info *denali = pci_get_drvdata(dev);
1605 dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 1677
1606 DMA_BIDIRECTIONAL); 1678 nand_release(&denali->mtd);
1679 mtd_device_unregister(&denali->mtd);
1680
1681 denali_irq_cleanup(dev->irq, denali);
1682
1683 iounmap(denali->flash_reg);
1684 iounmap(denali->flash_mem);
1685 pci_release_regions(dev);
1686 pci_disable_device(dev);
1687 dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
1688 DMA_BIDIRECTIONAL);
1689 pci_set_drvdata(dev, NULL);
1690 kfree(denali);
1607} 1691}
1608EXPORT_SYMBOL(denali_remove); 1692
1693MODULE_DEVICE_TABLE(pci, denali_pci_ids);
1694
1695static struct pci_driver denali_pci_driver = {
1696 .name = DENALI_NAND_NAME,
1697 .id_table = denali_pci_ids,
1698 .probe = denali_pci_probe,
1699 .remove = denali_pci_remove,
1700};
1701
1702static int __devinit denali_init(void)
1703{
1704 printk(KERN_INFO "Spectra MTD driver\n");
1705 return pci_register_driver(&denali_pci_driver);
1706}
1707
1708/* Free memory */
1709static void __devexit denali_exit(void)
1710{
1711 pci_unregister_driver(&denali_pci_driver);
1712}
1713
1714module_init(denali_init);
1715module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index cec5712862c..fabb9d56b39 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -466,7 +466,6 @@ struct nand_buf {
466 466
467#define INTEL_CE4100 1 467#define INTEL_CE4100 1
468#define INTEL_MRST 2 468#define INTEL_MRST 2
469#define DT 3
470 469
471struct denali_nand_info { 470struct denali_nand_info {
472 struct mtd_info mtd; 471 struct mtd_info mtd;
@@ -488,7 +487,6 @@ struct denali_nand_info {
488 uint32_t irq_status; 487 uint32_t irq_status;
489 int irq_debug_array[32]; 488 int irq_debug_array[32];
490 int idx; 489 int idx;
491 int irq;
492 490
493 uint32_t devnum; /* represent how many nands connected */ 491 uint32_t devnum; /* represent how many nands connected */
494 uint32_t fwblks; /* represent how many blocks FW used */ 492 uint32_t fwblks; /* represent how many blocks FW used */
@@ -498,7 +496,4 @@ struct denali_nand_info {
498 uint32_t max_banks; 496 uint32_t max_banks;
499}; 497};
500 498
501extern int denali_init(struct denali_nand_info *denali);
502extern void denali_remove(struct denali_nand_info *denali);
503
504#endif /*_LLD_NAND_*/ 499#endif /*_LLD_NAND_*/
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
deleted file mode 100644
index 546f8cb5688..00000000000
--- a/drivers/mtd/nand/denali_dt.c
+++ /dev/null
@@ -1,167 +0,0 @@
1/*
2 * NAND Flash Controller Device Driver for DT
3 *
4 * Copyright © 2011, Picochip.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/ioport.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/slab.h>
25
26#include "denali.h"
27
28struct denali_dt {
29 struct denali_nand_info denali;
30 struct clk *clk;
31};
32
33static void __iomem *request_and_map(struct device *dev,
34 const struct resource *res)
35{
36 void __iomem *ptr;
37
38 if (!devm_request_mem_region(dev, res->start, resource_size(res),
39 "denali-dt")) {
40 dev_err(dev, "unable to request %s\n", res->name);
41 return NULL;
42 }
43
44 ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
45 if (!res)
46 dev_err(dev, "ioremap_nocache of %s failed!", res->name);
47
48 return ptr;
49}
50
51static const struct of_device_id denali_nand_dt_ids[] = {
52 { .compatible = "denali,denali-nand-dt" },
53 { /* sentinel */ }
54 };
55
56MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
57
58static u64 denali_dma_mask;
59
60static int denali_dt_probe(struct platform_device *ofdev)
61{
62 struct resource *denali_reg, *nand_data;
63 struct denali_dt *dt;
64 struct denali_nand_info *denali;
65 int ret;
66 const struct of_device_id *of_id;
67
68 of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
69 if (of_id) {
70 ofdev->id_entry = of_id->data;
71 } else {
72 pr_err("Failed to find the right device id.\n");
73 return -ENOMEM;
74 }
75
76 dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
77 if (!dt)
78 return -ENOMEM;
79 denali = &dt->denali;
80
81 denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
82 nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
83 if (!denali_reg || !nand_data) {
84 dev_err(&ofdev->dev, "resources not completely defined\n");
85 return -EINVAL;
86 }
87
88 denali->platform = DT;
89 denali->dev = &ofdev->dev;
90 denali->irq = platform_get_irq(ofdev, 0);
91 if (denali->irq < 0) {
92 dev_err(&ofdev->dev, "no irq defined\n");
93 return -ENXIO;
94 }
95
96 denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
97 if (!denali->flash_reg)
98 return -ENOMEM;
99
100 denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
101 if (!denali->flash_mem)
102 return -ENOMEM;
103
104 if (!of_property_read_u32(ofdev->dev.of_node,
105 "dma-mask", (u32 *)&denali_dma_mask)) {
106 denali->dev->dma_mask = &denali_dma_mask;
107 } else {
108 denali->dev->dma_mask = NULL;
109 }
110
111 dt->clk = clk_get(&ofdev->dev, NULL);
112 if (IS_ERR(dt->clk)) {
113 dev_err(&ofdev->dev, "no clk available\n");
114 return PTR_ERR(dt->clk);
115 }
116 clk_prepare_enable(dt->clk);
117
118 ret = denali_init(denali);
119 if (ret)
120 goto out_disable_clk;
121
122 platform_set_drvdata(ofdev, dt);
123 return 0;
124
125out_disable_clk:
126 clk_disable_unprepare(dt->clk);
127 clk_put(dt->clk);
128
129 return ret;
130}
131
132static int denali_dt_remove(struct platform_device *ofdev)
133{
134 struct denali_dt *dt = platform_get_drvdata(ofdev);
135
136 denali_remove(&dt->denali);
137 clk_disable(dt->clk);
138 clk_put(dt->clk);
139
140 return 0;
141}
142
143static struct platform_driver denali_dt_driver = {
144 .probe = denali_dt_probe,
145 .remove = denali_dt_remove,
146 .driver = {
147 .name = "denali-nand-dt",
148 .owner = THIS_MODULE,
149 .of_match_table = of_match_ptr(denali_nand_dt_ids),
150 },
151};
152
153static int __init denali_init_dt(void)
154{
155 return platform_driver_register(&denali_dt_driver);
156}
157module_init(denali_init_dt);
158
159static void __exit denali_exit_dt(void)
160{
161 platform_driver_unregister(&denali_dt_driver);
162}
163module_exit(denali_exit_dt);
164
165MODULE_LICENSE("GPL");
166MODULE_AUTHOR("Jamie Iles");
167MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
deleted file mode 100644
index e3e46623b2b..00000000000
--- a/drivers/mtd/nand/denali_pci.c
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/slab.h>
18
19#include "denali.h"
20
21#define DENALI_NAND_NAME "denali-nand-pci"
22
23/* List of platforms this NAND controller has be integrated into */
24static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
25 { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
26 { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
27 { /* end: all zeroes */ }
28};
29MODULE_DEVICE_TABLE(pci, denali_pci_ids);
30
31static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
32{
33 int ret = -ENODEV;
34 resource_size_t csr_base, mem_base;
35 unsigned long csr_len, mem_len;
36 struct denali_nand_info *denali;
37
38 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
39 if (!denali)
40 return -ENOMEM;
41
42 ret = pci_enable_device(dev);
43 if (ret) {
44 pr_err("Spectra: pci_enable_device failed.\n");
45 goto failed_alloc_memery;
46 }
47
48 if (id->driver_data == INTEL_CE4100) {
49 denali->platform = INTEL_CE4100;
50 mem_base = pci_resource_start(dev, 0);
51 mem_len = pci_resource_len(dev, 1);
52 csr_base = pci_resource_start(dev, 1);
53 csr_len = pci_resource_len(dev, 1);
54 } else {
55 denali->platform = INTEL_MRST;
56 csr_base = pci_resource_start(dev, 0);
57 csr_len = pci_resource_len(dev, 0);
58 mem_base = pci_resource_start(dev, 1);
59 mem_len = pci_resource_len(dev, 1);
60 if (!mem_len) {
61 mem_base = csr_base + csr_len;
62 mem_len = csr_len;
63 }
64 }
65
66 pci_set_master(dev);
67 denali->dev = &dev->dev;
68 denali->irq = dev->irq;
69
70 ret = pci_request_regions(dev, DENALI_NAND_NAME);
71 if (ret) {
72 pr_err("Spectra: Unable to request memory regions\n");
73 goto failed_enable_dev;
74 }
75
76 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
77 if (!denali->flash_reg) {
78 pr_err("Spectra: Unable to remap memory region\n");
79 ret = -ENOMEM;
80 goto failed_req_regions;
81 }
82
83 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
84 if (!denali->flash_mem) {
85 pr_err("Spectra: ioremap_nocache failed!");
86 ret = -ENOMEM;
87 goto failed_remap_reg;
88 }
89
90 ret = denali_init(denali);
91 if (ret)
92 goto failed_remap_mem;
93
94 pci_set_drvdata(dev, denali);
95
96 return 0;
97
98failed_remap_mem:
99 iounmap(denali->flash_mem);
100failed_remap_reg:
101 iounmap(denali->flash_reg);
102failed_req_regions:
103 pci_release_regions(dev);
104failed_enable_dev:
105 pci_disable_device(dev);
106failed_alloc_memery:
107 kfree(denali);
108
109 return ret;
110}
111
112/* driver exit point */
113static void denali_pci_remove(struct pci_dev *dev)
114{
115 struct denali_nand_info *denali = pci_get_drvdata(dev);
116
117 denali_remove(denali);
118 iounmap(denali->flash_reg);
119 iounmap(denali->flash_mem);
120 pci_release_regions(dev);
121 pci_disable_device(dev);
122 pci_set_drvdata(dev, NULL);
123 kfree(denali);
124}
125
126static struct pci_driver denali_pci_driver = {
127 .name = DENALI_NAND_NAME,
128 .id_table = denali_pci_ids,
129 .probe = denali_pci_probe,
130 .remove = denali_pci_remove,
131};
132
133static int denali_init_pci(void)
134{
135 pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
136 return pci_register_driver(&denali_pci_driver);
137}
138module_init(denali_init_pci);
139
140static void denali_exit_pci(void)
141{
142 pci_unregister_driver(&denali_pci_driver);
143}
144module_exit(denali_exit_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 81fa5784f98..7837728d02f 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -31,7 +31,6 @@
31#include <linux/mtd/doc2000.h> 31#include <linux/mtd/doc2000.h>
32#include <linux/mtd/partitions.h> 32#include <linux/mtd/partitions.h>
33#include <linux/mtd/inftl.h> 33#include <linux/mtd/inftl.h>
34#include <linux/module.h>
35 34
36/* Where to look for the devices? */ 35/* Where to look for the devices? */
37#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 36#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
@@ -53,6 +52,8 @@ static unsigned long __initdata doc_locations[] = {
53 0xe0000, 0xe2000, 0xe4000, 0xe6000, 52 0xe0000, 0xe2000, 0xe4000, 0xe6000,
54 0xe8000, 0xea000, 0xec000, 0xee000, 53 0xe8000, 0xea000, 0xec000, 0xee000,
55#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 54#endif /* CONFIG_MTD_DOCPROBE_HIGH */
55#else
56#warning Unknown architecture for DiskOnChip. No default probe locations defined
56#endif 57#endif
57 0xffffffff }; 58 0xffffffff };
58 59
@@ -131,7 +132,7 @@ static struct rs_control *rs_decoder;
131 132
132/* 133/*
133 * The HW decoder in the DoC ASIC's provides us a error syndrome, 134 * The HW decoder in the DoC ASIC's provides us a error syndrome,
134 * which we must convert to a standard syndrome usable by the generic 135 * which we must convert to a standard syndrom usable by the generic
135 * Reed-Solomon library code. 136 * Reed-Solomon library code.
136 * 137 *
137 * Fabrice Bellard figured this out in the old docecc code. I added 138 * Fabrice Bellard figured this out in the old docecc code. I added
@@ -152,7 +153,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
152 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); 153 ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
153 parity = ecc[1]; 154 parity = ecc[1];
154 155
155 /* Initialize the syndrome buffer */ 156 /* Initialize the syndrom buffer */
156 for (i = 0; i < NROOTS; i++) 157 for (i = 0; i < NROOTS; i++)
157 s[i] = ds[0]; 158 s[i] = ds[0];
158 /* 159 /*
@@ -374,6 +375,19 @@ static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
374 } 375 }
375} 376}
376 377
378static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
379{
380 struct nand_chip *this = mtd->priv;
381 struct doc_priv *doc = this->priv;
382 void __iomem *docptr = doc->virtadr;
383 int i;
384
385 for (i = 0; i < len; i++)
386 if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
387 return -EFAULT;
388 return 0;
389}
390
377static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) 391static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
378{ 392{
379 struct nand_chip *this = mtd->priv; 393 struct nand_chip *this = mtd->priv;
@@ -511,6 +525,26 @@ static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
511 buf[i] = ReadDOC(docptr, LastDataRead); 525 buf[i] = ReadDOC(docptr, LastDataRead);
512} 526}
513 527
528static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
529{
530 struct nand_chip *this = mtd->priv;
531 struct doc_priv *doc = this->priv;
532 void __iomem *docptr = doc->virtadr;
533 int i;
534
535 /* Start read pipeline */
536 ReadDOC(docptr, ReadPipeInit);
537
538 for (i = 0; i < len - 1; i++)
539 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
540 ReadDOC(docptr, LastDataRead);
541 return i;
542 }
543 if (buf[i] != ReadDOC(docptr, LastDataRead))
544 return i;
545 return 0;
546}
547
514static u_char doc2001plus_read_byte(struct mtd_info *mtd) 548static u_char doc2001plus_read_byte(struct mtd_info *mtd)
515{ 549{
516 struct nand_chip *this = mtd->priv; 550 struct nand_chip *this = mtd->priv;
@@ -575,6 +609,33 @@ static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
575 printk("\n"); 609 printk("\n");
576} 610}
577 611
612static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
613{
614 struct nand_chip *this = mtd->priv;
615 struct doc_priv *doc = this->priv;
616 void __iomem *docptr = doc->virtadr;
617 int i;
618
619 if (debug)
620 printk("verifybuf of %d bytes: ", len);
621
622 /* Start read pipeline */
623 ReadDOC(docptr, Mplus_ReadPipeInit);
624 ReadDOC(docptr, Mplus_ReadPipeInit);
625
626 for (i = 0; i < len - 2; i++)
627 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
628 ReadDOC(docptr, Mplus_LastDataRead);
629 ReadDOC(docptr, Mplus_LastDataRead);
630 return i;
631 }
632 if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
633 return len - 2;
634 if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
635 return len - 1;
636 return 0;
637}
638
578static void doc2001plus_select_chip(struct mtd_info *mtd, int chip) 639static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
579{ 640{
580 struct nand_chip *this = mtd->priv; 641 struct nand_chip *this = mtd->priv;
@@ -970,7 +1031,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
970 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); 1031 WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
971 else 1032 else
972 WriteDOC(DOC_ECC_DIS, docptr, ECCConf); 1033 WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
973 if (no_ecc_failures && mtd_is_eccerr(ret)) { 1034 if (no_ecc_failures && (ret == -EBADMSG)) {
974 printk(KERN_ERR "suppressing ECC failure\n"); 1035 printk(KERN_ERR "suppressing ECC failure\n");
975 ret = 0; 1036 ret = 0;
976 } 1037 }
@@ -1010,7 +1071,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
1010 size_t retlen; 1071 size_t retlen;
1011 1072
1012 for (offs = 0; offs < mtd->size; offs += mtd->erasesize) { 1073 for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
1013 ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf); 1074 ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
1014 if (retlen != mtd->writesize) 1075 if (retlen != mtd->writesize)
1015 continue; 1076 continue;
1016 if (ret) { 1077 if (ret) {
@@ -1035,7 +1096,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
1035 /* Only one mediaheader was found. We want buf to contain a 1096 /* Only one mediaheader was found. We want buf to contain a
1036 mediaheader on return, so we'll have to re-read the one we found. */ 1097 mediaheader on return, so we'll have to re-read the one we found. */
1037 offs = doc->mh0_page << this->page_shift; 1098 offs = doc->mh0_page << this->page_shift;
1038 ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf); 1099 ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
1039 if (retlen != mtd->writesize) { 1100 if (retlen != mtd->writesize) {
1040 /* Insanity. Give up. */ 1101 /* Insanity. Give up. */
1041 printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n"); 1102 printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
@@ -1370,6 +1431,7 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
1370 this->read_byte = doc2000_read_byte; 1431 this->read_byte = doc2000_read_byte;
1371 this->write_buf = doc2000_writebuf; 1432 this->write_buf = doc2000_writebuf;
1372 this->read_buf = doc2000_readbuf; 1433 this->read_buf = doc2000_readbuf;
1434 this->verify_buf = doc2000_verifybuf;
1373 this->scan_bbt = nftl_scan_bbt; 1435 this->scan_bbt = nftl_scan_bbt;
1374 1436
1375 doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; 1437 doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1386,6 +1448,7 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
1386 this->read_byte = doc2001_read_byte; 1448 this->read_byte = doc2001_read_byte;
1387 this->write_buf = doc2001_writebuf; 1449 this->write_buf = doc2001_writebuf;
1388 this->read_buf = doc2001_readbuf; 1450 this->read_buf = doc2001_readbuf;
1451 this->verify_buf = doc2001_verifybuf;
1389 1452
1390 ReadDOC(doc->virtadr, ChipID); 1453 ReadDOC(doc->virtadr, ChipID);
1391 ReadDOC(doc->virtadr, ChipID); 1454 ReadDOC(doc->virtadr, ChipID);
@@ -1416,6 +1479,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
1416 this->read_byte = doc2001plus_read_byte; 1479 this->read_byte = doc2001plus_read_byte;
1417 this->write_buf = doc2001plus_writebuf; 1480 this->write_buf = doc2001plus_writebuf;
1418 this->read_buf = doc2001plus_readbuf; 1481 this->read_buf = doc2001plus_readbuf;
1482 this->verify_buf = doc2001plus_verifybuf;
1419 this->scan_bbt = inftl_scan_bbt; 1483 this->scan_bbt = inftl_scan_bbt;
1420 this->cmd_ctrl = NULL; 1484 this->cmd_ctrl = NULL;
1421 this->select_chip = doc2001plus_select_chip; 1485 this->select_chip = doc2001plus_select_chip;
@@ -1588,8 +1652,7 @@ static int __init doc_probe(unsigned long physadr)
1588 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1652 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1589 nand->ecc.size = 512; 1653 nand->ecc.size = 512;
1590 nand->ecc.bytes = 6; 1654 nand->ecc.bytes = 6;
1591 nand->ecc.strength = 2; 1655 nand->options = NAND_USE_FLASH_BBT;
1592 nand->bbt_options = NAND_BBT_USE_FLASH;
1593 1656
1594 doc->physadr = physadr; 1657 doc->physadr = physadr;
1595 doc->virtadr = virtadr; 1658 doc->virtadr = virtadr;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
deleted file mode 100644
index 18fa4489e52..00000000000
--- a/drivers/mtd/nand/docg4.c
+++ /dev/null
@@ -1,1415 +0,0 @@
1/*
2 * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
3 *
4 * mtd nand driver for M-Systems DiskOnChip G4
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
12 * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
13 * Should work on these as well. Let me know!
14 *
15 * TODO:
16 *
17 * Mechanism for management of password-protected areas
18 *
19 * Hamming ecc when reading oob only
20 *
21 * According to the M-Sys documentation, this device is also available in a
22 * "dual-die" configuration having a 256MB capacity, but no mechanism for
23 * detecting this variant is documented. Currently this driver assumes 128MB
24 * capacity.
25 *
26 * Support for multiple cascaded devices ("floors"). Not sure which gadgets
27 * contain multiple G4s in a cascaded configuration, if any.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/string.h>
35#include <linux/sched.h>
36#include <linux/delay.h>
37#include <linux/module.h>
38#include <linux/export.h>
39#include <linux/platform_device.h>
40#include <linux/io.h>
41#include <linux/bitops.h>
42#include <linux/mtd/partitions.h>
43#include <linux/mtd/mtd.h>
44#include <linux/mtd/nand.h>
45#include <linux/bch.h>
46#include <linux/bitrev.h>
47
48/*
49 * In "reliable mode" consecutive 2k pages are used in parallel (in some
50 * fashion) to store the same data. The data can be read back from the
51 * even-numbered pages in the normal manner; odd-numbered pages will appear to
52 * contain junk. Systems that boot from the docg4 typically write the secondary
53 * program loader (SPL) code in this mode. The SPL is loaded by the initial
54 * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
55 * to the reset vector address). This module parameter enables you to use this
56 * driver to write the SPL. When in this mode, no more than 2k of data can be
57 * written at a time, because the addresses do not increment in the normal
58 * manner, and the starting offset must be within an even-numbered 2k region;
59 * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
60 * 0x1a00, ... Reliable mode is a special case and should not be used unless
61 * you know what you're doing.
62 */
63static bool reliable_mode;
64module_param(reliable_mode, bool, 0);
65MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
66
67/*
68 * You'll want to ignore badblocks if you're reading a partition that contains
69 * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
70 * it does not use mtd nand's method for marking bad blocks (using oob area).
71 * This will also skip the check of the "page written" flag.
72 */
73static bool ignore_badblocks;
74module_param(ignore_badblocks, bool, 0);
75MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
76
77struct docg4_priv {
78 struct mtd_info *mtd;
79 struct device *dev;
80 void __iomem *virtadr;
81 int status;
82 struct {
83 unsigned int command;
84 int column;
85 int page;
86 } last_command;
87 uint8_t oob_buf[16];
88 uint8_t ecc_buf[7];
89 int oob_page;
90 struct bch_control *bch;
91};
92
93/*
94 * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
95 * shared with other diskonchip devices (P3, G3 at least).
96 *
97 * Functions with names prefixed with docg4_ are mtd / nand interface functions
98 * (though they may also be called internally). All others are internal.
99 */
100
101#define DOC_IOSPACE_DATA 0x0800
102
103/* register offsets */
104#define DOC_CHIPID 0x1000
105#define DOC_DEVICESELECT 0x100a
106#define DOC_ASICMODE 0x100c
107#define DOC_DATAEND 0x101e
108#define DOC_NOP 0x103e
109
110#define DOC_FLASHSEQUENCE 0x1032
111#define DOC_FLASHCOMMAND 0x1034
112#define DOC_FLASHADDRESS 0x1036
113#define DOC_FLASHCONTROL 0x1038
114#define DOC_ECCCONF0 0x1040
115#define DOC_ECCCONF1 0x1042
116#define DOC_HAMMINGPARITY 0x1046
117#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
118
119#define DOC_ASICMODECONFIRM 0x1072
120#define DOC_CHIPID_INV 0x1074
121#define DOC_POWERMODE 0x107c
122
123#define DOCG4_MYSTERY_REG 0x1050
124
125/* apparently used only to write oob bytes 6 and 7 */
126#define DOCG4_OOB_6_7 0x1052
127
128/* DOC_FLASHSEQUENCE register commands */
129#define DOC_SEQ_RESET 0x00
130#define DOCG4_SEQ_PAGE_READ 0x03
131#define DOCG4_SEQ_FLUSH 0x29
132#define DOCG4_SEQ_PAGEWRITE 0x16
133#define DOCG4_SEQ_PAGEPROG 0x1e
134#define DOCG4_SEQ_BLOCKERASE 0x24
135#define DOCG4_SEQ_SETMODE 0x45
136
137/* DOC_FLASHCOMMAND register commands */
138#define DOCG4_CMD_PAGE_READ 0x00
139#define DOC_CMD_ERASECYCLE2 0xd0
140#define DOCG4_CMD_FLUSH 0x70
141#define DOCG4_CMD_READ2 0x30
142#define DOC_CMD_PROG_BLOCK_ADDR 0x60
143#define DOCG4_CMD_PAGEWRITE 0x80
144#define DOC_CMD_PROG_CYCLE2 0x10
145#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
146#define DOC_CMD_RELIABLE_MODE 0x22
147#define DOC_CMD_RESET 0xff
148
149/* DOC_POWERMODE register bits */
150#define DOC_POWERDOWN_READY 0x80
151
152/* DOC_FLASHCONTROL register bits */
153#define DOC_CTRL_CE 0x10
154#define DOC_CTRL_UNKNOWN 0x40
155#define DOC_CTRL_FLASHREADY 0x01
156
157/* DOC_ECCCONF0 register bits */
158#define DOC_ECCCONF0_READ_MODE 0x8000
159#define DOC_ECCCONF0_UNKNOWN 0x2000
160#define DOC_ECCCONF0_ECC_ENABLE 0x1000
161#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
162
163/* DOC_ECCCONF1 register bits */
164#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
165#define DOC_ECCCONF1_ECC_ENABLE 0x07
166#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
167
168/* DOC_ASICMODE register bits */
169#define DOC_ASICMODE_RESET 0x00
170#define DOC_ASICMODE_NORMAL 0x01
171#define DOC_ASICMODE_POWERDOWN 0x02
172#define DOC_ASICMODE_MDWREN 0x04
173#define DOC_ASICMODE_BDETCT_RESET 0x08
174#define DOC_ASICMODE_RSTIN_RESET 0x10
175#define DOC_ASICMODE_RAM_WE 0x20
176
177/* good status values read after read/write/erase operations */
178#define DOCG4_PROGSTATUS_GOOD 0x51
179#define DOCG4_PROGSTATUS_GOOD_2 0xe0
180
181/*
182 * On read operations (page and oob-only), the first byte read from I/O reg is a
183 * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
184 * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
185 */
186#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
187
188/* anatomy of the device */
189#define DOCG4_CHIP_SIZE 0x8000000
190#define DOCG4_PAGE_SIZE 0x200
191#define DOCG4_PAGES_PER_BLOCK 0x200
192#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
193#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
194#define DOCG4_OOB_SIZE 0x10
195#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
196#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
197#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
198
199/* all but the last byte is included in ecc calculation */
200#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
201
202#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
203
204/* expected values from the ID registers */
205#define DOCG4_IDREG1_VALUE 0x0400
206#define DOCG4_IDREG2_VALUE 0xfbff
207
208/* primitive polynomial used to build the Galois field used by hw ecc gen */
209#define DOCG4_PRIMITIVE_POLY 0x4443
210
211#define DOCG4_M 14 /* Galois field is of order 2^14 */
212#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
213
214#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
215#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
216
217/*
218 * Bytes 0, 1 are used as badblock marker.
219 * Bytes 2 - 6 are available to the user.
220 * Byte 7 is hamming ecc for first 7 oob bytes only.
221 * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
222 * Byte 15 (the last) is used by the driver as a "page written" flag.
223 */
224static struct nand_ecclayout docg4_oobinfo = {
225 .eccbytes = 9,
226 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
227 .oobavail = 5,
228 .oobfree = { {.offset = 2, .length = 5} }
229};
230
231/*
232 * The device has a nop register which M-Sys claims is for the purpose of
233 * inserting precise delays. But beware; at least some operations fail if the
234 * nop writes are replaced with a generic delay!
235 */
236static inline void write_nop(void __iomem *docptr)
237{
238 writew(0, docptr + DOC_NOP);
239}
240
241static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
242{
243 int i;
244 struct nand_chip *nand = mtd->priv;
245 uint16_t *p = (uint16_t *) buf;
246 len >>= 1;
247
248 for (i = 0; i < len; i++)
249 p[i] = readw(nand->IO_ADDR_R);
250}
251
252static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
253{
254 int i;
255 struct nand_chip *nand = mtd->priv;
256 uint16_t *p = (uint16_t *) buf;
257 len >>= 1;
258
259 for (i = 0; i < len; i++)
260 writew(p[i], nand->IO_ADDR_W);
261}
262
263static int poll_status(struct docg4_priv *doc)
264{
265 /*
266 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
267 * register. Operations known to take a long time (e.g., block erase)
268 * should sleep for a while before calling this.
269 */
270
271 uint16_t flash_status;
272 unsigned int timeo;
273 void __iomem *docptr = doc->virtadr;
274
275 dev_dbg(doc->dev, "%s...\n", __func__);
276
277 /* hardware quirk requires reading twice initially */
278 flash_status = readw(docptr + DOC_FLASHCONTROL);
279
280 timeo = 1000;
281 do {
282 cpu_relax();
283 flash_status = readb(docptr + DOC_FLASHCONTROL);
284 } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
285
286
287 if (!timeo) {
288 dev_err(doc->dev, "%s: timed out!\n", __func__);
289 return NAND_STATUS_FAIL;
290 }
291
292 if (unlikely(timeo < 50))
293 dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
294 __func__, timeo);
295
296 return 0;
297}
298
299
300static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
301{
302
303 struct docg4_priv *doc = nand->priv;
304 int status = NAND_STATUS_WP; /* inverse logic?? */
305 dev_dbg(doc->dev, "%s...\n", __func__);
306
307 /* report any previously unreported error */
308 if (doc->status) {
309 status |= doc->status;
310 doc->status = 0;
311 return status;
312 }
313
314 status |= poll_status(doc);
315 return status;
316}
317
318static void docg4_select_chip(struct mtd_info *mtd, int chip)
319{
320 /*
321 * Select among multiple cascaded chips ("floors"). Multiple floors are
322 * not yet supported, so the only valid non-negative value is 0.
323 */
324 struct nand_chip *nand = mtd->priv;
325 struct docg4_priv *doc = nand->priv;
326 void __iomem *docptr = doc->virtadr;
327
328 dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
329
330 if (chip < 0)
331 return; /* deselected */
332
333 if (chip > 0)
334 dev_warn(doc->dev, "multiple floors currently unsupported\n");
335
336 writew(0, docptr + DOC_DEVICESELECT);
337}
338
339static void reset(struct mtd_info *mtd)
340{
341 /* full device reset */
342
343 struct nand_chip *nand = mtd->priv;
344 struct docg4_priv *doc = nand->priv;
345 void __iomem *docptr = doc->virtadr;
346
347 writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
348 docptr + DOC_ASICMODE);
349 writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
350 docptr + DOC_ASICMODECONFIRM);
351 write_nop(docptr);
352
353 writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
354 docptr + DOC_ASICMODE);
355 writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
356 docptr + DOC_ASICMODECONFIRM);
357
358 writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
359
360 poll_status(doc);
361}
362
363static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
364{
365 /* read the 7 hw-generated ecc bytes */
366
367 int i;
368 for (i = 0; i < 7; i++) { /* hw quirk; read twice */
369 ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
370 ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
371 }
372}
373
static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	/*
	 * Called after a page read when hardware reports bitflips.
	 * Up to four bitflips can be corrected.
	 *
	 * Returns the number of bits corrected, 0 if the page turned out to
	 * be blank (no correction needed), or -EBADMSG if the errors are
	 * uncorrectable.
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	int i, numerrs, errpos[4];
	/* ecc signature the hardware produces when reading an all-0xff page */
	const uint8_t blank_read_hwecc[8] = {
		0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };

	read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */

	/* check if read error is due to a blank page */
	if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
		return 0;	/* yes */

	/* skip additional check of "written flag" if ignore_badblocks */
	if (ignore_badblocks == false) {

		/*
		 * If the hw ecc bytes are not those of a blank page, there's
		 * still a chance that the page is blank, but was read with
		 * errors.  Check the "written flag" in last oob byte, which
		 * is set to zero when a page is written.  If more than half
		 * the bits are set, assume a blank page.  Unfortunately, the
		 * bit flips(s) are not reported in stats.
		 */

		if (nand->oob_poi[15]) {
			int bit, numsetbits = 0;
			unsigned long written_flag = nand->oob_poi[15];
			/* count the set bits of the written flag byte */
			for_each_set_bit(bit, &written_flag, 8)
				numsetbits++;
			if (numsetbits > 4) { /* assume blank */
				dev_warn(doc->dev,
					 "error(s) in blank page "
					 "at offset %08x\n",
					 page * DOCG4_PAGE_SIZE);
				return 0;
			}
		}
	}

	/*
	 * The hardware ecc unit produces oob_ecc ^ calc_ecc.  The kernel's bch
	 * algorithm is used to decode this.  However the hw operates on page
	 * data in a bit order that is the reverse of that of the bch alg,
	 * requiring that the bits be reversed on the result.  Thanks to Ivan
	 * Djelic for his analysis!
	 */
	for (i = 0; i < 7; i++)
		doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);

	/* decode; error bit positions are returned in errpos[] */
	numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
			     doc->ecc_buf, NULL, errpos);

	if (numerrs == -EBADMSG) {
		dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
			 page * DOCG4_PAGE_SIZE);
		return -EBADMSG;
	}

	BUG_ON(numerrs < 0);	/* -EINVAL, or anything other than -EBADMSG */

	/* undo last step in BCH alg (modulo mirroring not needed) */
	for (i = 0; i < numerrs; i++)
		errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));

	/* fix the errors */
	for (i = 0; i < numerrs; i++) {

		/* ignore if error within oob ecc bytes */
		if (errpos[i] > DOCG4_USERDATA_LEN * 8)
			continue;

		/* if error within oob area preceding ecc bytes... */
		if (errpos[i] > DOCG4_PAGE_SIZE * 8)
			change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
				   (unsigned long *)nand->oob_poi);

		else    /* error in page data */
			change_bit(errpos[i], (unsigned long *)buf);
	}

	dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
		   numerrs, page * DOCG4_PAGE_SIZE);

	return numerrs;
}
467
468static uint8_t docg4_read_byte(struct mtd_info *mtd)
469{
470 struct nand_chip *nand = mtd->priv;
471 struct docg4_priv *doc = nand->priv;
472
473 dev_dbg(doc->dev, "%s\n", __func__);
474
475 if (doc->last_command.command == NAND_CMD_STATUS) {
476 int status;
477
478 /*
479 * Previous nand command was status request, so nand
480 * infrastructure code expects to read the status here. If an
481 * error occurred in a previous operation, report it.
482 */
483 doc->last_command.command = 0;
484
485 if (doc->status) {
486 status = doc->status;
487 doc->status = 0;
488 }
489
490 /* why is NAND_STATUS_WP inverse logic?? */
491 else
492 status = NAND_STATUS_WP | NAND_STATUS_READY;
493
494 return status;
495 }
496
497 dev_warn(doc->dev, "unexpectd call to read_byte()\n");
498
499 return 0;
500}
501
502static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
503{
504 /* write the four address bytes packed in docg4_addr to the device */
505
506 void __iomem *docptr = doc->virtadr;
507 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
508 docg4_addr >>= 8;
509 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
510 docg4_addr >>= 8;
511 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
512 docg4_addr >>= 8;
513 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
514}
515
static int read_progstatus(struct docg4_priv *doc)
{
	/*
	 * This apparently checks the status of programming.  Done after an
	 * erasure, and after page data is written.  On error, the status is
	 * saved (in doc->status as NAND_STATUS_FAIL), to be later retrieved
	 * by the nand infrastructure code via docg4_read_byte().
	 *
	 * Returns 0 on success, -EIO if any of the three status values is
	 * not the expected "good" value.
	 */
	void __iomem *docptr = doc->virtadr;

	/* status is read from the I/O reg; two reads, then a third from
	 * another register -- read order matters */
	uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
	uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
	uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);

	dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
	      __func__, status1, status2, status3);

	if (status1 != DOCG4_PROGSTATUS_GOOD
	    || status2 != DOCG4_PROGSTATUS_GOOD_2
	    || status3 != DOCG4_PROGSTATUS_GOOD_2) {
		doc->status = NAND_STATUS_FAIL;
		dev_warn(doc->dev, "read_progstatus failed: "
			 "%02x, %02x, %02x\n", status1, status2, status3);
		return -EIO;
	}
	return 0;
}
543
static int pageprog(struct mtd_info *mtd)
{
	/*
	 * Final step in writing a page.  Writes the contents of its
	 * internal buffer out to the flash array, or some such.
	 *
	 * Returns 0 on success, or the error from read_progstatus() if the
	 * device reported a programming failure.
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	int retval = 0;

	dev_dbg(doc->dev, "docg4: %s\n", __func__);

	/* start the program cycle */
	writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
	writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);
	write_nop(docptr);

	/* Just busy-wait; usleep_range() slows things down noticeably. */
	poll_status(doc);

	/* flush, then read back the programming status */
	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);

	retval = read_progstatus(doc);
	writew(0, docptr + DOC_DATAEND);
	write_nop(docptr);
	poll_status(doc);
	write_nop(docptr);

	return retval;
}
583
static void sequence_reset(struct mtd_info *mtd)
{
	/* common starting sequence for all operations (read, write, erase) */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;

	writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
	writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
	writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);
	write_nop(docptr);
	poll_status(doc);
	write_nop(docptr);
}
600
static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
{
	/*
	 * First step in reading a page: issue the page-read sequence and
	 * the packed device address, then wait for the device to be ready.
	 * The data itself is transferred later by read_page().
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;

	dev_dbg(doc->dev,
	      "docg4: %s: g4 page %08x\n", __func__, docg4_addr);

	sequence_reset(mtd);

	writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
	writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);

	write_addr(doc, docg4_addr);

	write_nop(docptr);
	writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);
	write_nop(docptr);

	poll_status(doc);
}
627
static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
{
	/*
	 * First step in writing a page: issue the page-write sequence and
	 * the packed device address.  If the reliable_mode module parameter
	 * is set, the device is first switched to reliable mode.
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;

	dev_dbg(doc->dev,
	      "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
	sequence_reset(mtd);

	if (unlikely(reliable_mode)) {
		/* set-mode sequence precedes the page-write sequence */
		writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
		writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
		writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
		write_nop(docptr);
	}

	writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
	writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);
	write_addr(doc, docg4_addr);
	write_nop(docptr);
	write_nop(docptr);
	poll_status(doc);
}
655
static uint32_t mtd_to_docg4_address(int page, int column)
{
	/*
	 * Pack an mtd (page, column) address into the 32-bit format the
	 * device expects.
	 *
	 * Some background on G4 addressing... The hardware addresses 2K
	 * pages, but each 2K page contains four 512-byte "sub-pages", each
	 * with its own oob data that is read/written immediately after the
	 * 512 bytes of page data.  Since a sub-page is always addressed
	 * individually, we tell the mtd nand infrastructure that pages are
	 * 512 bytes, and translate its addresses here.
	 *
	 * The packed address holds the 2K page number in the upper 16 bits.
	 * The lower 16 bits are an index into that page counted in 16-bit
	 * half-words, inclusive of the oob data preceding each sub-page,
	 * so consecutive sub-pages are 0x108 half-words apart.  E.g., the
	 * device address of the start of mtd page 0x201 is 0x00800108.
	 */
	const int hw_page = page / 4;				/* 2K page */
	const int half_word_index = (page % 4) * 0x108 + column / 2;

	return (hw_page << 16) | half_word_index;		/* pack */
}
691
static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
			  int page_addr)
{
	/*
	 * Handle standard nand commands issued by the nand infrastructure.
	 * Only RESET, READ0, STATUS, SEQIN and PAGEPROG are expected; other
	 * commands are logged and ignored.
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);

	dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
	      __func__, command, page_addr, column);

	/*
	 * Save the command and its arguments.  This enables emulation of
	 * standard flash devices, and also some optimizations.
	 */
	doc->last_command.command = command;
	doc->last_command.column = column;
	doc->last_command.page = page_addr;

	switch (command) {

	case NAND_CMD_RESET:
		reset(mtd);
		break;

	case NAND_CMD_READ0:
		read_page_prologue(mtd, g4_addr);
		break;

	case NAND_CMD_STATUS:
		/* next call to read_byte() will expect a status */
		break;

	case NAND_CMD_SEQIN:
		if (unlikely(reliable_mode)) {
			uint16_t g4_page = g4_addr >> 16;

			/* writes to odd-numbered 2k pages are invalid */
			if (g4_page & 0x01)
				dev_warn(doc->dev,
					 "invalid reliable mode address\n");
		}

		write_page_prologue(mtd, g4_addr);

		/* hack for deferred write of oob bytes (see docg4_write_oob) */
		if (doc->oob_page == page_addr)
			memcpy(nand->oob_poi, doc->oob_buf, 16);
		break;

	case NAND_CMD_PAGEPROG:
		pageprog(mtd);
		break;

	/* we don't expect these, based on review of nand_base.c */
	case NAND_CMD_READOOB:
	case NAND_CMD_READID:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
		dev_warn(doc->dev, "docg4_command: "
			 "unexpected nand command 0x%x\n", command);
		break;

	}
}
758
static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
		     uint8_t *buf, int page, bool use_ecc)
{
	/*
	 * Common implementation of the raw and ecc page-read methods.  The
	 * read must already have been set up by read_page_prologue(); this
	 * transfers page data and oob out of the device, optionally running
	 * hardware ecc correction.
	 *
	 * Returns the number of corrected bitflips (0 if none, or if the
	 * errors were uncorrectable -- failures are reported via
	 * mtd->ecc_stats instead), or -EIO on a bad device status.
	 */
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t status, edc_err, *buf16;
	int bits_corrected = 0;

	dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);

	writew(DOC_ECCCONF0_READ_MODE |
	       DOC_ECCCONF0_ECC_ENABLE |
	       DOC_ECCCONF0_UNKNOWN |
	       DOCG4_BCH_SIZE,
	       docptr + DOC_ECCCONF0);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);

	/* the 1st byte from the I/O reg is a status; the rest is page data */
	status = readw(docptr + DOC_IOSPACE_DATA);
	if (status & DOCG4_READ_ERROR) {
		dev_err(doc->dev,
			"docg4_read_page: bad status: 0x%02x\n", status);
		writew(0, docptr + DOC_DATAEND);
		return -EIO;
	}

	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);

	docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */

	/* this device always reads oob after page data */
	/* first 14 oob bytes read from I/O reg */
	docg4_read_buf(mtd, nand->oob_poi, 14);

	/* last 2 read from another reg */
	buf16 = (uint16_t *)(nand->oob_poi + 14);
	*buf16 = readw(docptr + DOCG4_MYSTERY_REG);

	write_nop(docptr);

	if (likely(use_ecc == true)) {

		/* read the register that tells us if bitflip(s) detected */
		edc_err = readw(docptr + DOC_ECCCONF1);
		edc_err = readw(docptr + DOC_ECCCONF1);	/* hw quirk: 2nd read */
		dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);

		/* If bitflips are reported, attempt to correct with ecc */
		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
			bits_corrected = correct_data(mtd, buf, page);
			if (bits_corrected == -EBADMSG)
				mtd->ecc_stats.failed++;
			else
				mtd->ecc_stats.corrected += bits_corrected;
		}
	}

	writew(0, docptr + DOC_DATAEND);
	if (bits_corrected == -EBADMSG)	  /* uncorrectable errors */
		return 0;	/* failure already recorded in ecc_stats */
	return bits_corrected;
}
825
826
static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
			       uint8_t *buf, int oob_required, int page)
{
	/* raw read method: page read without ecc correction */
	return read_page(mtd, nand, buf, page, false);
}
832
static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
			   uint8_t *buf, int oob_required, int page)
{
	/* ecc read method: page read with hardware ecc correction */
	return read_page(mtd, nand, buf, page, true);
}
838
static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
			  int page)
{
	/*
	 * Read the 16 oob bytes of a page into nand->oob_poi.  A read is
	 * set up at the oob offset (nand->ecc.size) of the page, then the
	 * bytes are drained from the I/O register.
	 *
	 * Returns 0 on success, -EIO on a bad device status.
	 */
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t status;

	dev_dbg(doc->dev, "%s: page %x\n", __func__, page);

	docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);

	writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);

	/* the 1st byte from the I/O reg is a status; the rest is oob data */
	status = readw(docptr + DOC_IOSPACE_DATA);
	if (status & DOCG4_READ_ERROR) {
		dev_warn(doc->dev,
			 "docg4_read_oob failed: status = 0x%02x\n", status);
		return -EIO;
	}

	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);

	docg4_read_buf(mtd, nand->oob_poi, 16);

	/* terminate the read */
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	writew(0, docptr + DOC_DATAEND);
	write_nop(docptr);

	return 0;
}
877
static void docg4_erase_block(struct mtd_info *mtd, int page)
{
	/*
	 * Erase the block containing the given (mtd 512-byte) page.  Any
	 * programming error is recorded by read_progstatus() in doc->status
	 * for later retrieval by the nand infrastructure.
	 */
	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t g4_page;

	dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);

	sequence_reset(mtd);

	writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
	writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);

	/* only 2 bytes of address are written to specify erase block */
	g4_page = (uint16_t)(page / 4);	/* to g4's 2k page addressing */
	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
	g4_page >>= 8;
	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
	write_nop(docptr);

	/* start the erasure */
	writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);
	write_nop(docptr);

	usleep_range(500, 1000); /* erasure is long; take a snooze */
	poll_status(doc);

	/* flush and read back the programming status */
	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);

	read_progstatus(doc);

	writew(0, docptr + DOC_DATAEND);
	write_nop(docptr);
	poll_status(doc);
	write_nop(docptr);
}
923
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
		       const uint8_t *buf, bool use_ecc)
{
	/*
	 * Common implementation of the raw and ecc page-write methods.
	 * Writes page data and oob to the device's internal buffer; the
	 * actual flash programming happens later in pageprog().  When
	 * use_ecc is true the hw-generated hamming and bch ecc bytes are
	 * written to the oob; otherwise the caller-supplied oob bytes are
	 * used.  Always returns 0.
	 */
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint8_t ecc_buf[8];

	dev_dbg(doc->dev, "%s...\n", __func__);

	writew(DOC_ECCCONF0_ECC_ENABLE |
	       DOC_ECCCONF0_UNKNOWN |
	       DOCG4_BCH_SIZE,
	       docptr + DOC_ECCCONF0);
	write_nop(docptr);

	/* write the page data */
	docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);

	/* oob bytes 0 through 5 are written to I/O reg */
	docg4_write_buf16(mtd, nand->oob_poi, 6);

	/* oob byte 6 written to a separate reg */
	writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);

	write_nop(docptr);
	write_nop(docptr);

	/* write hw-generated ecc bytes to oob */
	if (likely(use_ecc == true)) {
		/* oob byte 7 is hamming code */
		uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
		hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
		writew(hamming, docptr + DOCG4_OOB_6_7);
		write_nop(docptr);

		/* read the 7 bch bytes from ecc regs */
		read_hw_ecc(docptr, ecc_buf);
		ecc_buf[7] = 0;		/* clear the "page written" flag */
	}

	/* write user-supplied bytes to oob */
	else {
		writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
		write_nop(docptr);
		memcpy(ecc_buf, &nand->oob_poi[8], 8);
	}

	docg4_write_buf16(mtd, ecc_buf, 8);
	write_nop(docptr);
	write_nop(docptr);
	writew(0, docptr + DOC_DATAEND);
	write_nop(docptr);

	return 0;
}
979
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
				const uint8_t *buf, int oob_required)
{
	/* raw write method: page write with caller-supplied oob/ecc bytes */
	return write_page(mtd, nand, buf, false);
}
985
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
			     const uint8_t *buf, int oob_required)
{
	/* ecc write method: page write with hw-generated ecc bytes */
	return write_page(mtd, nand, buf, true);
}
991
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
			   int page)
{
	/*
	 * Writing oob-only is not really supported, because MLC nand must write
	 * oob bytes at the same time as page data.  Nonetheless, we save the
	 * oob buffer contents here, and then write it along with the page data
	 * if the same page is subsequently written.  This allows user space
	 * utilities that write the oob data prior to the page data to work
	 * (e.g., nandwrite).  The disadvantage is that, if the intention was to
	 * write oob only, the operation is quietly ignored.  Also, oob can get
	 * corrupted if two concurrent processes are running nandwrite.
	 */

	/* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
	struct docg4_priv *doc = nand->priv;
	doc->oob_page = page;
	memcpy(doc->oob_buf, nand->oob_poi, 16);
	return 0;
}
1012
static int __init read_factory_bbt(struct mtd_info *mtd)
{
	/*
	 * The device contains a read-only factory bad block table.  Read it and
	 * update the memory-based bbt accordingly.
	 *
	 * Returns 0 on success (including when the factory bbt could not be
	 * read cleanly -- the warning is logged but not treated as fatal),
	 * or -ENOMEM if the temporary page buffer cannot be allocated.
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
	uint8_t *buf;
	int i, block;
	/* snapshot so we can detect ecc failures caused by this read */
	__u32 eccfailed_stats = mtd->ecc_stats.failed;

	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	read_page_prologue(mtd, g4_addr);
	docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);

	/*
	 * If no memory-based bbt was created, exit.  This will happen if module
	 * parameter ignore_badblocks is set.  Then why even call this function?
	 * For an unknown reason, block erase always fails if it's the first
	 * operation after device power-up.  The above read ensures it never is.
	 * Ugly, I know.
	 */
	if (nand->bbt == NULL)	/* no memory-based bbt */
		goto exit;

	if (mtd->ecc_stats.failed > eccfailed_stats) {
		/*
		 * Whoops, an ecc failure occurred reading the factory bbt.
		 * It is stored redundantly, so we get another chance.
		 */
		eccfailed_stats = mtd->ecc_stats.failed;
		docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
		if (mtd->ecc_stats.failed > eccfailed_stats) {
			dev_warn(doc->dev,
				 "The factory bbt could not be read!\n");
			goto exit;
		}
	}

	/*
	 * Parse factory bbt and update memory-based bbt.  Factory bbt format is
	 * simple: one bit per block, block numbers increase left to right (msb
	 * to lsb).  Bit clear means bad block.
	 */
	for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
		int bitnum;
		unsigned long bits = ~buf[i];	/* invert: set bit == bad */
		for_each_set_bit(bitnum, &bits, 8) {
			int badblock = block + 7 - bitnum;
			/* two bbt bits per block; 0x03 marks factory-bad */
			nand->bbt[badblock / 4] |=
				0x03 << ((badblock % 4) * 2);
			mtd->ecc_stats.badblocks++;
			dev_notice(doc->dev, "factory-marked bad block: %d\n",
				   badblock);
		}
	}
 exit:
	kfree(buf);
	return 0;
}
1079
static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	/*
	 * Mark a block as bad.  Bad blocks are marked in the oob area of the
	 * first page of the block.  The default scan_bbt() in the nand
	 * infrastructure code works fine for building the memory-based bbt
	 * during initialization, as does the nand infrastructure function that
	 * checks if a block is bad by reading the bbt.  This function replaces
	 * the nand default because writes to oob-only are not supported.
	 *
	 * Returns 0 on success, -ENOMEM on allocation failure, or the error
	 * from pageprog() if the page write fails.
	 */

	int ret, i;
	uint8_t *buf;
	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	struct nand_bbt_descr *bbtd = nand->badblock_pattern;
	int block = (int)(ofs >> nand->bbt_erase_shift);
	int page = (int)(ofs >> nand->page_shift);
	uint32_t g4_addr = mtd_to_docg4_address(page, 0);

	dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);

	if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
		dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
			 __func__, ofs);

	/* allocate blank buffer for page data */
	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* update bbt in memory; 0x01 marks the block worn/bad */
	nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);

	/* write bit-wise negation of pattern to oob buffer */
	memset(nand->oob_poi, 0xff, mtd->oobsize);
	for (i = 0; i < bbtd->len; i++)
		nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];

	/* write first page of block */
	write_page_prologue(mtd, g4_addr);
	docg4_write_page(mtd, nand, buf, 1);
	ret = pageprog(mtd);
	if (!ret)
		mtd->ecc_stats.badblocks++;

	kfree(buf);

	return ret;
}
1130
static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	/* only called when module_param ignore_badblocks is set; reports
	 * every block as good */
	return 0;
}
1136
1137static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
1138{
1139 /*
1140 * Put the device into "deep power-down" mode. Note that CE# must be
1141 * deasserted for this to take effect. The xscale, e.g., can be
1142 * configured to float this signal when the processor enters power-down,
1143 * and a suitable pull-up ensures its deassertion.
1144 */
1145
1146 int i;
1147 uint8_t pwr_down;
1148 struct docg4_priv *doc = platform_get_drvdata(pdev);
1149 void __iomem *docptr = doc->virtadr;
1150
1151 dev_dbg(doc->dev, "%s...\n", __func__);
1152
1153 /* poll the register that tells us we're ready to go to sleep */
1154 for (i = 0; i < 10; i++) {
1155 pwr_down = readb(docptr + DOC_POWERMODE);
1156 if (pwr_down & DOC_POWERDOWN_READY)
1157 break;
1158 usleep_range(1000, 4000);
1159 }
1160
1161 if (pwr_down & DOC_POWERDOWN_READY) {
1162 dev_err(doc->dev, "suspend failed; "
1163 "timeout polling DOC_POWERDOWN_READY\n");
1164 return -EIO;
1165 }
1166
1167 writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
1168 docptr + DOC_ASICMODE);
1169 writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
1170 docptr + DOC_ASICMODECONFIRM);
1171
1172 write_nop(docptr);
1173
1174 return 0;
1175}
1176
1177static int docg4_resume(struct platform_device *pdev)
1178{
1179
1180 /*
1181 * Exit power-down. Twelve consecutive reads of the address below
1182 * accomplishes this, assuming CE# has been asserted.
1183 */
1184
1185 struct docg4_priv *doc = platform_get_drvdata(pdev);
1186 void __iomem *docptr = doc->virtadr;
1187 int i;
1188
1189 dev_dbg(doc->dev, "%s...\n", __func__);
1190
1191 for (i = 0; i < 12; i++)
1192 readb(docptr + 0x1fff);
1193
1194 return 0;
1195}
1196
static void __init init_mtd_structs(struct mtd_info *mtd)
{
	/* initialize mtd and nand data structures */

	/*
	 * Note that some of the following initializations are not usually
	 * required within a nand driver because they are performed by the nand
	 * infrastructure code as part of nand_scan().  In this case they need
	 * to be initialized here because we skip call to nand_scan_ident() (the
	 * first half of nand_scan()).  The call to nand_scan_ident() is skipped
	 * because for this device the chip id is not read in the manner of a
	 * standard nand device.  Unfortunately, nand_scan_ident() does other
	 * things as well, such as call nand_set_defaults().
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;

	/* geometry and device parameters */
	mtd->size = DOCG4_CHIP_SIZE;
	mtd->name = "Msys_Diskonchip_G4";
	mtd->writesize = DOCG4_PAGE_SIZE;
	mtd->erasesize = DOCG4_BLOCK_SIZE;
	mtd->oobsize = DOCG4_OOB_SIZE;
	nand->chipsize = DOCG4_CHIP_SIZE;
	nand->chip_shift = DOCG4_CHIP_SHIFT;
	nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
	nand->chip_delay = 20;
	nand->page_shift = DOCG4_PAGE_SHIFT;
	nand->pagemask = 0x3ffff;
	nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
	nand->badblockbits = 8;
	/* ecc configuration: hw bch with syndrome layout */
	nand->ecc.layout = &docg4_oobinfo;
	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
	nand->ecc.size = DOCG4_PAGE_SIZE;
	nand->ecc.prepad = 8;
	nand->ecc.bytes	= 8;
	nand->ecc.strength = DOCG4_T;
	nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
	nand->controller = &nand->hwcontrol;
	spin_lock_init(&nand->controller->lock);
	init_waitqueue_head(&nand->controller->wq);

	/* methods */
	nand->cmdfunc = docg4_command;
	nand->waitfunc = docg4_wait;
	nand->select_chip = docg4_select_chip;
	nand->read_byte = docg4_read_byte;
	nand->block_markbad = docg4_block_markbad;
	nand->read_buf = docg4_read_buf;
	nand->write_buf = docg4_write_buf16;
	nand->scan_bbt = nand_default_bbt;
	nand->erase_cmd = docg4_erase_block;
	nand->ecc.read_page = docg4_read_page;
	nand->ecc.write_page = docg4_write_page;
	nand->ecc.read_page_raw = docg4_read_page_raw;
	nand->ecc.write_page_raw = docg4_write_page_raw;
	nand->ecc.read_oob = docg4_read_oob;
	nand->ecc.write_oob = docg4_write_oob;

	/*
	 * The way the nand infrastructure code is written, a memory-based bbt
	 * is not created if NAND_SKIP_BBTSCAN is set.  With no memory bbt,
	 * nand->block_bad() is used.  So when ignoring bad blocks, we skip the
	 * scan and define a dummy block_bad() which always returns 0.
	 */
	if (ignore_badblocks) {
		nand->options |= NAND_SKIP_BBTSCAN;
		nand->block_bad = docg4_block_neverbad;
	}

}
1269
static int __init read_id_reg(struct mtd_info *mtd)
{
	/*
	 * Detect the G4 chip by reading its id registers.  Each id read is
	 * followed by a read of the "mystery" register, and it is the latter
	 * value that is compared -- presumably another hardware access quirk.
	 *
	 * Returns 0 if a G4 is present, -ENODEV otherwise.
	 */
	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t id1, id2;

	/* check for presence of g4 chip by reading id registers */
	id1 = readw(docptr + DOC_CHIPID);
	id1 = readw(docptr + DOCG4_MYSTERY_REG);
	id2 = readw(docptr + DOC_CHIPID_INV);
	id2 = readw(docptr + DOCG4_MYSTERY_REG);

	if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
		dev_info(doc->dev,
			 "NAND device: 128MiB Diskonchip G4 detected\n");
		return 0;
	}

	return -ENODEV;
}
1291
/* partition parsers tried, in order, when registering the mtd device */
static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
1293
static int __init probe_docg4(struct platform_device *pdev)
{
	/*
	 * Platform probe: map the device's io memory, allocate the mtd/nand/
	 * private structures as a single block, initialize them, verify the
	 * chip id, and register the mtd device with partition parsing.
	 *
	 * Returns 0 on success; -ENODEV/-EIO/-ENOMEM/-EINVAL or the error
	 * from the nand/mtd registration calls on failure.
	 */
	struct mtd_info *mtd;
	struct nand_chip *nand;
	void __iomem *virtadr;
	struct docg4_priv *doc;
	int len, retval;
	struct resource *r;
	struct device *dev = &pdev->dev;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(dev, "no io memory resource defined!\n");
		return -ENODEV;
	}

	virtadr = ioremap(r->start, resource_size(r));
	if (!virtadr) {
		dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
		return -EIO;
	}

	/* mtd_info, nand_chip and docg4_priv in one contiguous allocation */
	len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
		sizeof(struct docg4_priv);
	mtd = kzalloc(len, GFP_KERNEL);
	if (mtd == NULL) {
		retval = -ENOMEM;
		goto fail;
	}
	nand = (struct nand_chip *) (mtd + 1);
	doc = (struct docg4_priv *) (nand + 1);
	mtd->priv = nand;
	nand->priv = doc;
	mtd->owner = THIS_MODULE;
	doc->virtadr = virtadr;
	doc->dev = dev;

	init_mtd_structs(mtd);

	/* initialize kernel bch algorithm */
	doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
	if (doc->bch == NULL) {
		retval = -EINVAL;
		goto fail;
	}

	platform_set_drvdata(pdev, doc);

	reset(mtd);
	retval = read_id_reg(mtd);
	if (retval == -ENODEV) {
		dev_warn(dev, "No diskonchip G4 device found.\n");
		goto fail;
	}

	retval = nand_scan_tail(mtd);
	if (retval)
		goto fail;

	retval = read_factory_bbt(mtd);
	if (retval)
		goto fail;

	retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
	if (retval)
		goto fail;

	doc->mtd = mtd;
	return 0;

 fail:
	iounmap(virtadr);
	if (mtd) {
		/* re-declarations avoid compiler warning */
		struct nand_chip *nand = mtd->priv;
		struct docg4_priv *doc = nand->priv;
		/*
		 * NOTE(review): nand_release() runs here even on failures
		 * that occur before nand_scan_tail() -- confirm that is safe
		 * for this kernel version.
		 */
		nand_release(mtd); /* deletes partitions and mtd devices */
		platform_set_drvdata(pdev, NULL);
		free_bch(doc->bch);
		kfree(mtd);
	}

	return retval;
}
1378
static int __exit cleanup_docg4(struct platform_device *pdev)
{
	/* platform remove: undo everything probe_docg4() set up */
	struct docg4_priv *doc = platform_get_drvdata(pdev);
	nand_release(doc->mtd);		/* deletes partitions and mtd devices */
	platform_set_drvdata(pdev, NULL);
	free_bch(doc->bch);
	kfree(doc->mtd);		/* frees nand and doc too (one alloc) */
	iounmap(doc->virtadr);
	return 0;
}
1389
/* platform driver; probe is passed separately via platform_driver_probe() */
static struct platform_driver docg4_driver = {
	.driver		= {
		.name	= "docg4",
		.owner	= THIS_MODULE,
	},
	.suspend	= docg4_suspend,
	.resume		= docg4_resume,
	.remove		= __exit_p(cleanup_docg4),
};
1399
static int __init docg4_init(void)
{
	/* module init: register driver and probe for the device */
	return platform_driver_probe(&docg4_driver, probe_docg4);
}
1404
static void __exit docg4_exit(void)
{
	/* module exit: unregister driver (remove callback does the cleanup) */
	platform_driver_unregister(&docg4_driver);
}
1409
1410module_init(docg4_init);
1411module_exit(docg4_exit);
1412
1413MODULE_LICENSE("GPL");
1414MODULE_AUTHOR("Mike Dunn");
1415MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 20657209a47..33d8aad8bba 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,7 +75,7 @@ struct fsl_elbc_fcm_ctrl {
75 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */
76 unsigned int oob; /* Non zero if operating on OOB data */ 76 unsigned int oob; /* Non zero if operating on OOB data */
77 unsigned int counter; /* counter for the initializations */ 77 unsigned int counter; /* counter for the initializations */
78 unsigned int max_bitflips; /* Saved during READ0 cmd */ 78 char *oob_poi; /* Place to write ECC after read back */
79}; 79};
80 80
81/* These map to the positions used by the FCM hardware ECC generator */ 81/* These map to the positions used by the FCM hardware ECC generator */
@@ -109,6 +109,20 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
109}; 109};
110 110
111/* 111/*
112 * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
113 * 1, so we have to adjust bad block pattern. This pattern should be used for
114 * x8 chips only. So far hardware does not support x16 chips anyway.
115 */
116static u8 scan_ff_pattern[] = { 0xff, };
117
118static struct nand_bbt_descr largepage_memorybased = {
119 .options = 0,
120 .offs = 0,
121 .len = 1,
122 .pattern = scan_ff_pattern,
123};
124
125/*
112 * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt, 126 * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
113 * interfere with ECC positions, that's why we implement our own descriptors. 127 * interfere with ECC positions, that's why we implement our own descriptors.
114 * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0. 128 * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
@@ -153,22 +167,15 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
153 167
154 elbc_fcm_ctrl->page = page_addr; 168 elbc_fcm_ctrl->page = page_addr;
155 169
170 out_be32(&lbc->fbar,
171 page_addr >> (chip->phys_erase_shift - chip->page_shift));
172
156 if (priv->page_size) { 173 if (priv->page_size) {
157 /*
158 * large page size chip : FPAR[PI] save the lowest 6 bits,
159 * FBAR[BLK] save the other bits.
160 */
161 out_be32(&lbc->fbar, page_addr >> 6);
162 out_be32(&lbc->fpar, 174 out_be32(&lbc->fpar,
163 ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) | 175 ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
164 (oob ? FPAR_LP_MS : 0) | column); 176 (oob ? FPAR_LP_MS : 0) | column);
165 buf_num = (page_addr & 1) << 2; 177 buf_num = (page_addr & 1) << 2;
166 } else { 178 } else {
167 /*
168 * small page size chip : FPAR[PI] save the lowest 5 bits,
169 * FBAR[BLK] save the other bits.
170 */
171 out_be32(&lbc->fbar, page_addr >> 5);
172 out_be32(&lbc->fpar, 179 out_be32(&lbc->fpar,
173 ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) | 180 ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
174 (oob ? FPAR_SP_MS : 0) | column); 181 (oob ? FPAR_SP_MS : 0) | column);
@@ -237,32 +244,6 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
237 return -EIO; 244 return -EIO;
238 } 245 }
239 246
240 if (chip->ecc.mode != NAND_ECC_HW)
241 return 0;
242
243 elbc_fcm_ctrl->max_bitflips = 0;
244
245 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
246 uint32_t lteccr = in_be32(&lbc->lteccr);
247 /*
248 * if command was a full page read and the ELBC
249 * has the LTECCR register, then bits 12-15 (ppc order) of
250 * LTECCR indicates which 512 byte sub-pages had fixed errors.
251 * bits 28-31 are uncorrectable errors, marked elsewhere.
252 * for small page nand only 1 bit is used.
253 * if the ELBC doesn't have the lteccr register it reads 0
254 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
255 * count the number of sub-pages with bitflips and update
256 * ecc_stats.corrected accordingly.
257 */
258 if (lteccr & 0x000F000F)
259 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
260 if (lteccr & 0x000F0000) {
261 mtd->ecc_stats.corrected++;
262 elbc_fcm_ctrl->max_bitflips = 1;
263 }
264 }
265
266 return 0; 247 return 0;
267} 248}
268 249
@@ -350,22 +331,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
350 fsl_elbc_run_command(mtd); 331 fsl_elbc_run_command(mtd);
351 return; 332 return;
352 333
334 /* READID must read all 5 possible bytes while CEB is active */
353 case NAND_CMD_READID: 335 case NAND_CMD_READID:
354 case NAND_CMD_PARAM: 336 dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
355 dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
356 337
357 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | 338 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
358 (FIR_OP_UA << FIR_OP1_SHIFT) | 339 (FIR_OP_UA << FIR_OP1_SHIFT) |
359 (FIR_OP_RBW << FIR_OP2_SHIFT)); 340 (FIR_OP_RBW << FIR_OP2_SHIFT));
360 out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT); 341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
361 /* 342 /* nand_get_flash_type() reads 8 bytes of entire ID string */
362 * although currently it's 8 bytes for READID, we always read 343 out_be32(&lbc->fbcr, 8);
363 * the maximum 256 bytes(for PARAM) 344 elbc_fcm_ctrl->read_bytes = 8;
364 */
365 out_be32(&lbc->fbcr, 256);
366 elbc_fcm_ctrl->read_bytes = 256;
367 elbc_fcm_ctrl->use_mdr = 1; 345 elbc_fcm_ctrl->use_mdr = 1;
368 elbc_fcm_ctrl->mdr = column; 346 elbc_fcm_ctrl->mdr = 0;
347
369 set_addr(mtd, 0, 0, 0); 348 set_addr(mtd, 0, 0, 0);
370 fsl_elbc_run_command(mtd); 349 fsl_elbc_run_command(mtd);
371 return; 350 return;
@@ -410,17 +389,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
410 page_addr, column); 389 page_addr, column);
411 390
412 elbc_fcm_ctrl->column = column; 391 elbc_fcm_ctrl->column = column;
392 elbc_fcm_ctrl->oob = 0;
413 elbc_fcm_ctrl->use_mdr = 1; 393 elbc_fcm_ctrl->use_mdr = 1;
414 394
415 if (column >= mtd->writesize) {
416 /* OOB area */
417 column -= mtd->writesize;
418 elbc_fcm_ctrl->oob = 1;
419 } else {
420 WARN_ON(column != 0);
421 elbc_fcm_ctrl->oob = 0;
422 }
423
424 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | 395 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
425 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) | 396 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
426 (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT); 397 (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
@@ -445,12 +416,16 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
445 (FIR_OP_CW1 << FIR_OP6_SHIFT) | 416 (FIR_OP_CW1 << FIR_OP6_SHIFT) |
446 (FIR_OP_RS << FIR_OP7_SHIFT)); 417 (FIR_OP_RS << FIR_OP7_SHIFT));
447 418
448 if (elbc_fcm_ctrl->oob) 419 if (column >= mtd->writesize) {
449 /* OOB area --> READOOB */ 420 /* OOB area --> READOOB */
421 column -= mtd->writesize;
450 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; 422 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
451 else 423 elbc_fcm_ctrl->oob = 1;
424 } else {
425 WARN_ON(column != 0);
452 /* First 256 bytes --> READ0 */ 426 /* First 256 bytes --> READ0 */
453 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 427 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
428 }
454 } 429 }
455 430
456 out_be32(&lbc->fcr, fcr); 431 out_be32(&lbc->fcr, fcr);
@@ -460,6 +435,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
460 435
461 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 436 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
462 case NAND_CMD_PAGEPROG: { 437 case NAND_CMD_PAGEPROG: {
438 int full_page;
463 dev_vdbg(priv->dev, 439 dev_vdbg(priv->dev,
464 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 440 "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
465 "writing %d bytes.\n", elbc_fcm_ctrl->index); 441 "writing %d bytes.\n", elbc_fcm_ctrl->index);
@@ -469,13 +445,34 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
469 * write so the HW generates the ECC. 445 * write so the HW generates the ECC.
470 */ 446 */
471 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || 447 if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
472 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) 448 elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) {
473 out_be32(&lbc->fbcr, 449 out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
474 elbc_fcm_ctrl->index - elbc_fcm_ctrl->column); 450 full_page = 0;
475 else 451 } else {
476 out_be32(&lbc->fbcr, 0); 452 out_be32(&lbc->fbcr, 0);
453 full_page = 1;
454 }
477 455
478 fsl_elbc_run_command(mtd); 456 fsl_elbc_run_command(mtd);
457
458 /* Read back the page in order to fill in the ECC for the
459 * caller. Is this really needed?
460 */
461 if (full_page && elbc_fcm_ctrl->oob_poi) {
462 out_be32(&lbc->fbcr, 3);
463 set_addr(mtd, 6, page_addr, 1);
464
465 elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
466
467 fsl_elbc_do_read(chip, 1);
468 fsl_elbc_run_command(mtd);
469
470 memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
471 &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
472 elbc_fcm_ctrl->index += 3;
473 }
474
475 elbc_fcm_ctrl->oob_poi = NULL;
479 return; 476 return;
480 } 477 }
481 478
@@ -600,6 +597,41 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
600 len, avail); 597 len, avail);
601} 598}
602 599
600/*
601 * Verify buffer against the FCM Controller Data Buffer
602 */
603static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
604{
605 struct nand_chip *chip = mtd->priv;
606 struct fsl_elbc_mtd *priv = chip->priv;
607 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
608 int i;
609
610 if (len < 0) {
611 dev_err(priv->dev, "write_buf of %d bytes", len);
612 return -EINVAL;
613 }
614
615 if ((unsigned int)len >
616 elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index) {
617 dev_err(priv->dev,
618 "verify_buf beyond end of buffer "
619 "(%d requested, %u available)\n",
620 len, elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
621
622 elbc_fcm_ctrl->index = elbc_fcm_ctrl->read_bytes;
623 return -EINVAL;
624 }
625
626 for (i = 0; i < len; i++)
627 if (in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index + i])
628 != buf[i])
629 break;
630
631 elbc_fcm_ctrl->index += len;
632 return i == len && elbc_fcm_ctrl->status == LTESR_CC ? 0 : -EIO;
633}
634
603/* This function is called after Program and Erase Operations to 635/* This function is called after Program and Erase Operations to
604 * check for success or failure. 636 * check for success or failure.
605 */ 637 */
@@ -632,7 +664,9 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
632 if (chip->pagemask & 0xff000000) 664 if (chip->pagemask & 0xff000000)
633 al++; 665 al++;
634 666
635 priv->fmr |= al << FMR_AL_SHIFT; 667 /* add to ECCM mode set in fsl_elbc_init */
668 priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
669 (al << FMR_AL_SHIFT);
636 670
637 dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n", 671 dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
638 chip->numchips); 672 chip->numchips);
@@ -685,6 +719,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
685 chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 719 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
686 &fsl_elbc_oob_lp_eccm1 : 720 &fsl_elbc_oob_lp_eccm1 :
687 &fsl_elbc_oob_lp_eccm0; 721 &fsl_elbc_oob_lp_eccm0;
722 chip->badblock_pattern = &largepage_memorybased;
688 } 723 }
689 } else { 724 } else {
690 dev_err(priv->dev, 725 dev_err(priv->dev,
@@ -696,33 +731,34 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
696 return 0; 731 return 0;
697} 732}
698 733
699static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip, 734static int fsl_elbc_read_page(struct mtd_info *mtd,
700 uint8_t *buf, int oob_required, int page) 735 struct nand_chip *chip,
736 uint8_t *buf,
737 int page)
701{ 738{
702 struct fsl_elbc_mtd *priv = chip->priv;
703 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
704 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
705
706 fsl_elbc_read_buf(mtd, buf, mtd->writesize); 739 fsl_elbc_read_buf(mtd, buf, mtd->writesize);
707 if (oob_required) 740 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
708 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
709 741
710 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) 742 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
711 mtd->ecc_stats.failed++; 743 mtd->ecc_stats.failed++;
712 744
713 return elbc_fcm_ctrl->max_bitflips; 745 return 0;
714} 746}
715 747
716/* ECC will be calculated automatically, and errors will be detected in 748/* ECC will be calculated automatically, and errors will be detected in
717 * waitfunc. 749 * waitfunc.
718 */ 750 */
719static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, 751static void fsl_elbc_write_page(struct mtd_info *mtd,
720 const uint8_t *buf, int oob_required) 752 struct nand_chip *chip,
753 const uint8_t *buf)
721{ 754{
755 struct fsl_elbc_mtd *priv = chip->priv;
756 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
757
722 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 758 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
723 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 759 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
724 760
725 return 0; 761 elbc_fcm_ctrl->oob_poi = chip->oob_poi;
726} 762}
727 763
728static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 764static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -738,16 +774,15 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
738 priv->mtd.priv = chip; 774 priv->mtd.priv = chip;
739 priv->mtd.owner = THIS_MODULE; 775 priv->mtd.owner = THIS_MODULE;
740 776
741 /* set timeout to maximum */ 777 /* Set the ECCM according to the settings in bootloader.*/
742 priv->fmr = 15 << FMR_CWTO_SHIFT; 778 priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
743 if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
744 priv->fmr |= FMR_ECCM;
745 779
746 /* fill in nand_chip structure */ 780 /* fill in nand_chip structure */
747 /* set up function call table */ 781 /* set up function call table */
748 chip->read_byte = fsl_elbc_read_byte; 782 chip->read_byte = fsl_elbc_read_byte;
749 chip->write_buf = fsl_elbc_write_buf; 783 chip->write_buf = fsl_elbc_write_buf;
750 chip->read_buf = fsl_elbc_read_buf; 784 chip->read_buf = fsl_elbc_read_buf;
785 chip->verify_buf = fsl_elbc_verify_buf;
751 chip->select_chip = fsl_elbc_select_chip; 786 chip->select_chip = fsl_elbc_select_chip;
752 chip->cmdfunc = fsl_elbc_cmdfunc; 787 chip->cmdfunc = fsl_elbc_cmdfunc;
753 chip->waitfunc = fsl_elbc_wait; 788 chip->waitfunc = fsl_elbc_wait;
@@ -756,7 +791,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
756 chip->bbt_md = &bbt_mirror_descr; 791 chip->bbt_md = &bbt_mirror_descr;
757 792
758 /* set up nand options */ 793 /* set up nand options */
759 chip->bbt_options = NAND_BBT_USE_FLASH; 794 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
795 NAND_USE_FLASH_BBT;
760 796
761 chip->controller = &elbc_fcm_ctrl->controller; 797 chip->controller = &elbc_fcm_ctrl->controller;
762 chip->priv = priv; 798 chip->priv = priv;
@@ -773,7 +809,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
773 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; 809 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
774 chip->ecc.size = 512; 810 chip->ecc.size = 512;
775 chip->ecc.bytes = 3; 811 chip->ecc.bytes = 3;
776 chip->ecc.strength = 1;
777 } else { 812 } else {
778 /* otherwise fall back to default software ECC */ 813 /* otherwise fall back to default software ECC */
779 chip->ecc.mode = NAND_ECC_SOFT; 814 chip->ecc.mode = NAND_ECC_SOFT;
@@ -794,26 +829,26 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
794 829
795 elbc_fcm_ctrl->chips[priv->bank] = NULL; 830 elbc_fcm_ctrl->chips[priv->bank] = NULL;
796 kfree(priv); 831 kfree(priv);
832 kfree(elbc_fcm_ctrl);
797 return 0; 833 return 0;
798} 834}
799 835
800static DEFINE_MUTEX(fsl_elbc_nand_mutex); 836static DEFINE_MUTEX(fsl_elbc_nand_mutex);
801 837
802static int fsl_elbc_nand_probe(struct platform_device *pdev) 838static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
803{ 839{
804 struct fsl_lbc_regs __iomem *lbc; 840 struct fsl_lbc_regs __iomem *lbc;
805 struct fsl_elbc_mtd *priv; 841 struct fsl_elbc_mtd *priv;
806 struct resource res; 842 struct resource res;
807 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; 843 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
808 static const char *part_probe_types[] 844 static const char *part_probe_types[]
809 = { "cmdlinepart", "RedBoot", "ofpart", NULL }; 845 = { "cmdlinepart", "RedBoot", NULL };
846 struct mtd_partition *parts;
810 int ret; 847 int ret;
811 int bank; 848 int bank;
812 struct device *dev; 849 struct device *dev;
813 struct device_node *node = pdev->dev.of_node; 850 struct device_node *node = pdev->dev.of_node;
814 struct mtd_part_parser_data ppdata;
815 851
816 ppdata.of_node = pdev->dev.of_node;
817 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) 852 if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
818 return -ENODEV; 853 return -ENODEV;
819 lbc = fsl_lbc_ctrl_dev->regs; 854 lbc = fsl_lbc_ctrl_dev->regs;
@@ -866,8 +901,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
866 elbc_fcm_ctrl->chips[bank] = priv; 901 elbc_fcm_ctrl->chips[bank] = priv;
867 priv->bank = bank; 902 priv->bank = bank;
868 priv->ctrl = fsl_lbc_ctrl_dev; 903 priv->ctrl = fsl_lbc_ctrl_dev;
869 priv->dev = &pdev->dev; 904 priv->dev = dev;
870 dev_set_drvdata(priv->dev, priv);
871 905
872 priv->vbase = ioremap(res.start, resource_size(&res)); 906 priv->vbase = ioremap(res.start, resource_size(&res));
873 if (!priv->vbase) { 907 if (!priv->vbase) {
@@ -900,8 +934,17 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
900 934
901 /* First look for RedBoot table or partitions on the command 935 /* First look for RedBoot table or partitions on the command
902 * line, these take precedence over device tree information */ 936 * line, these take precedence over device tree information */
903 mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata, 937 ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
904 NULL, 0); 938 if (ret < 0)
939 goto err;
940
941 if (ret == 0) {
942 ret = of_mtd_parse_partitions(priv->dev, node, &parts);
943 if (ret < 0)
944 goto err;
945 }
946
947 mtd_device_register(&priv->mtd, parts, ret);
905 948
906 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", 949 printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
907 (unsigned long long)res.start, priv->bank); 950 (unsigned long long)res.start, priv->bank);
@@ -914,10 +957,11 @@ err:
914 957
915static int fsl_elbc_nand_remove(struct platform_device *pdev) 958static int fsl_elbc_nand_remove(struct platform_device *pdev)
916{ 959{
960 int i;
917 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; 961 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
918 struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev); 962 for (i = 0; i < MAX_BANKS; i++)
919 963 if (elbc_fcm_ctrl->chips[i])
920 fsl_elbc_chip_remove(priv); 964 fsl_elbc_chip_remove(elbc_fcm_ctrl->chips[i]);
921 965
922 mutex_lock(&fsl_elbc_nand_mutex); 966 mutex_lock(&fsl_elbc_nand_mutex);
923 elbc_fcm_ctrl->counter--; 967 elbc_fcm_ctrl->counter--;
@@ -946,7 +990,18 @@ static struct platform_driver fsl_elbc_nand_driver = {
946 .remove = fsl_elbc_nand_remove, 990 .remove = fsl_elbc_nand_remove,
947}; 991};
948 992
949module_platform_driver(fsl_elbc_nand_driver); 993static int __init fsl_elbc_nand_init(void)
994{
995 return platform_driver_register(&fsl_elbc_nand_driver);
996}
997
998static void __exit fsl_elbc_nand_exit(void)
999{
1000 platform_driver_unregister(&fsl_elbc_nand_driver);
1001}
1002
1003module_init(fsl_elbc_nand_init);
1004module_exit(fsl_elbc_nand_exit);
950 1005
951MODULE_LICENSE("GPL"); 1006MODULE_LICENSE("GPL");
952MODULE_AUTHOR("Freescale"); 1007MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
deleted file mode 100644
index ad6222627fe..00000000000
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ /dev/null
@@ -1,1103 +0,0 @@
1/*
2 * Freescale Integrated Flash Controller NAND driver
3 *
4 * Copyright 2011-2012 Freescale Semiconductor, Inc
5 *
6 * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/partitions.h>
31#include <linux/mtd/nand_ecc.h>
32#include <asm/fsl_ifc.h>
33
34#define FSL_IFC_V1_1_0 0x01010000
35#define ERR_BYTE 0xFF /* Value returned for read
36 bytes when read failed */
37#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
38 for IFC NAND Machine */
39
40struct fsl_ifc_ctrl;
41
42/* mtd information per set */
43struct fsl_ifc_mtd {
44 struct mtd_info mtd;
45 struct nand_chip chip;
46 struct fsl_ifc_ctrl *ctrl;
47
48 struct device *dev;
49 int bank; /* Chip select bank number */
50 unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
51 u8 __iomem *vbase; /* Chip select base virtual address */
52};
53
54/* overview of the fsl ifc controller */
55struct fsl_ifc_nand_ctrl {
56 struct nand_hw_control controller;
57 struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
58
59 u8 __iomem *addr; /* Address of assigned IFC buffer */
60 unsigned int page; /* Last page written to / read from */
61 unsigned int read_bytes;/* Number of bytes read during command */
62 unsigned int column; /* Saved column from SEQIN */
63 unsigned int index; /* Pointer to next byte to 'read' */
64 unsigned int oob; /* Non zero if operating on OOB data */
65 unsigned int eccread; /* Non zero for a full-page ECC read */
66 unsigned int counter; /* counter for the initializations */
67 unsigned int max_bitflips; /* Saved during READ0 cmd */
68};
69
70static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
71
72/* 512-byte page with 4-bit ECC, 8-bit */
73static struct nand_ecclayout oob_512_8bit_ecc4 = {
74 .eccbytes = 8,
75 .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
76 .oobfree = { {0, 5}, {6, 2} },
77};
78
79/* 512-byte page with 4-bit ECC, 16-bit */
80static struct nand_ecclayout oob_512_16bit_ecc4 = {
81 .eccbytes = 8,
82 .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
83 .oobfree = { {2, 6}, },
84};
85
86/* 2048-byte page size with 4-bit ECC */
87static struct nand_ecclayout oob_2048_ecc4 = {
88 .eccbytes = 32,
89 .eccpos = {
90 8, 9, 10, 11, 12, 13, 14, 15,
91 16, 17, 18, 19, 20, 21, 22, 23,
92 24, 25, 26, 27, 28, 29, 30, 31,
93 32, 33, 34, 35, 36, 37, 38, 39,
94 },
95 .oobfree = { {2, 6}, {40, 24} },
96};
97
98/* 4096-byte page size with 4-bit ECC */
99static struct nand_ecclayout oob_4096_ecc4 = {
100 .eccbytes = 64,
101 .eccpos = {
102 8, 9, 10, 11, 12, 13, 14, 15,
103 16, 17, 18, 19, 20, 21, 22, 23,
104 24, 25, 26, 27, 28, 29, 30, 31,
105 32, 33, 34, 35, 36, 37, 38, 39,
106 40, 41, 42, 43, 44, 45, 46, 47,
107 48, 49, 50, 51, 52, 53, 54, 55,
108 56, 57, 58, 59, 60, 61, 62, 63,
109 64, 65, 66, 67, 68, 69, 70, 71,
110 },
111 .oobfree = { {2, 6}, {72, 56} },
112};
113
114/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
115static struct nand_ecclayout oob_4096_ecc8 = {
116 .eccbytes = 128,
117 .eccpos = {
118 8, 9, 10, 11, 12, 13, 14, 15,
119 16, 17, 18, 19, 20, 21, 22, 23,
120 24, 25, 26, 27, 28, 29, 30, 31,
121 32, 33, 34, 35, 36, 37, 38, 39,
122 40, 41, 42, 43, 44, 45, 46, 47,
123 48, 49, 50, 51, 52, 53, 54, 55,
124 56, 57, 58, 59, 60, 61, 62, 63,
125 64, 65, 66, 67, 68, 69, 70, 71,
126 72, 73, 74, 75, 76, 77, 78, 79,
127 80, 81, 82, 83, 84, 85, 86, 87,
128 88, 89, 90, 91, 92, 93, 94, 95,
129 96, 97, 98, 99, 100, 101, 102, 103,
130 104, 105, 106, 107, 108, 109, 110, 111,
131 112, 113, 114, 115, 116, 117, 118, 119,
132 120, 121, 122, 123, 124, 125, 126, 127,
133 128, 129, 130, 131, 132, 133, 134, 135,
134 },
135 .oobfree = { {2, 6}, {136, 82} },
136};
137
138
139/*
140 * Generic flash bbt descriptors
141 */
142static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
143static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
144
145static struct nand_bbt_descr bbt_main_descr = {
146 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
147 NAND_BBT_2BIT | NAND_BBT_VERSION,
148 .offs = 2, /* 0 on 8-bit small page */
149 .len = 4,
150 .veroffs = 6,
151 .maxblocks = 4,
152 .pattern = bbt_pattern,
153};
154
155static struct nand_bbt_descr bbt_mirror_descr = {
156 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
157 NAND_BBT_2BIT | NAND_BBT_VERSION,
158 .offs = 2, /* 0 on 8-bit small page */
159 .len = 4,
160 .veroffs = 6,
161 .maxblocks = 4,
162 .pattern = mirror_pattern,
163};
164
165/*
166 * Set up the IFC hardware block and page address fields, and the ifc nand
167 * structure addr field to point to the correct IFC buffer in memory
168 */
169static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
170{
171 struct nand_chip *chip = mtd->priv;
172 struct fsl_ifc_mtd *priv = chip->priv;
173 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
174 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
175 int buf_num;
176
177 ifc_nand_ctrl->page = page_addr;
178 /* Program ROW0/COL0 */
179 out_be32(&ifc->ifc_nand.row0, page_addr);
180 out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
181
182 buf_num = page_addr & priv->bufnum_mask;
183
184 ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
185 ifc_nand_ctrl->index = column;
186
187 /* for OOB data point to the second half of the buffer */
188 if (oob)
189 ifc_nand_ctrl->index += mtd->writesize;
190}
191
192static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
193{
194 struct nand_chip *chip = mtd->priv;
195 struct fsl_ifc_mtd *priv = chip->priv;
196 u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
197 u32 __iomem *mainarea = (u32 __iomem *)addr;
198 u8 __iomem *oob = addr + mtd->writesize;
199 int i;
200
201 for (i = 0; i < mtd->writesize / 4; i++) {
202 if (__raw_readl(&mainarea[i]) != 0xffffffff)
203 return 0;
204 }
205
206 for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
207 int pos = chip->ecc.layout->eccpos[i];
208
209 if (__raw_readb(&oob[pos]) != 0xff)
210 return 0;
211 }
212
213 return 1;
214}
215
216/* returns nonzero if entire page is blank */
217static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
218 u32 *eccstat, unsigned int bufnum)
219{
220 u32 reg = eccstat[bufnum / 4];
221 int errors;
222
223 errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
224
225 return errors;
226}
227
228/*
229 * execute IFC NAND command and wait for it to complete
230 */
231static void fsl_ifc_run_command(struct mtd_info *mtd)
232{
233 struct nand_chip *chip = mtd->priv;
234 struct fsl_ifc_mtd *priv = chip->priv;
235 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
236 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
237 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
238 u32 eccstat[4];
239 int i;
240
241 /* set the chip select for NAND Transaction */
242 out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
243
244 dev_vdbg(priv->dev,
245 "%s: fir0=%08x fcr0=%08x\n",
246 __func__,
247 in_be32(&ifc->ifc_nand.nand_fir0),
248 in_be32(&ifc->ifc_nand.nand_fcr0));
249
250 ctrl->nand_stat = 0;
251
252 /* start read/write seq */
253 out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
254
255 /* wait for command complete flag or timeout */
256 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
257 IFC_TIMEOUT_MSECS * HZ/1000);
258
259 /* ctrl->nand_stat will be updated from IRQ context */
260 if (!ctrl->nand_stat)
261 dev_err(priv->dev, "Controller is not responding\n");
262 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
263 dev_err(priv->dev, "NAND Flash Timeout Error\n");
264 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
265 dev_err(priv->dev, "NAND Flash Write Protect Error\n");
266
267 nctrl->max_bitflips = 0;
268
269 if (nctrl->eccread) {
270 int errors;
271 int bufnum = nctrl->page & priv->bufnum_mask;
272 int sector = bufnum * chip->ecc.steps;
273 int sector_end = sector + chip->ecc.steps - 1;
274
275 for (i = sector / 4; i <= sector_end / 4; i++)
276 eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
277
278 for (i = sector; i <= sector_end; i++) {
279 errors = check_read_ecc(mtd, ctrl, eccstat, i);
280
281 if (errors == 15) {
282 /*
283 * Uncorrectable error.
284 * OK only if the whole page is blank.
285 *
286 * We disable ECCER reporting due to...
287 * erratum IFC-A002770 -- so report it now if we
288 * see an uncorrectable error in ECCSTAT.
289 */
290 if (!is_blank(mtd, bufnum))
291 ctrl->nand_stat |=
292 IFC_NAND_EVTER_STAT_ECCER;
293 break;
294 }
295
296 mtd->ecc_stats.corrected += errors;
297 nctrl->max_bitflips = max_t(unsigned int,
298 nctrl->max_bitflips,
299 errors);
300 }
301
302 nctrl->eccread = 0;
303 }
304}
305
306static void fsl_ifc_do_read(struct nand_chip *chip,
307 int oob,
308 struct mtd_info *mtd)
309{
310 struct fsl_ifc_mtd *priv = chip->priv;
311 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
312 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
313
314 /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
315 if (mtd->writesize > 512) {
316 out_be32(&ifc->ifc_nand.nand_fir0,
317 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
318 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
319 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
320 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
321 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
322 out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
323
324 out_be32(&ifc->ifc_nand.nand_fcr0,
325 (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
326 (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
327 } else {
328 out_be32(&ifc->ifc_nand.nand_fir0,
329 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
330 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
331 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
332 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
333 out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
334
335 if (oob)
336 out_be32(&ifc->ifc_nand.nand_fcr0,
337 NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
338 else
339 out_be32(&ifc->ifc_nand.nand_fcr0,
340 NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
341 }
342}
343
/*
 * cmdfunc: send commands to the IFC NAND machine.
 *
 * Translates the generic MTD NAND command set into IFC flash instruction
 * (FIR) and command (FCR) register sequences and kicks them off via
 * fsl_ifc_run_command().  Read commands pull whole pages into the
 * controller buffer; ifc_nand_ctrl->index/read_bytes track the software
 * cursor into that buffer for the read_byte/read_buf callbacks.
 */
static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
			     int column, int page_addr) {
	struct nand_chip *chip = mtd->priv;
	struct fsl_ifc_mtd *priv = chip->priv;
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;

	/* clear the read buffer */
	ifc_nand_ctrl->read_bytes = 0;
	/* PAGEPROG continues a SEQIN transfer, so keep the write cursor */
	if (command != NAND_CMD_PAGEPROG)
		ifc_nand_ctrl->index = 0;

	switch (command) {
	/* READ0 read the entire buffer to use hardware ECC. */
	case NAND_CMD_READ0:
		/* fbcr = 0 means transfer the full page + OOB */
		out_be32(&ifc->ifc_nand.nand_fbcr, 0);
		set_addr(mtd, 0, page_addr, 0);

		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
		/* start the software cursor at the requested column */
		ifc_nand_ctrl->index += column;

		if (chip->ecc.mode == NAND_ECC_HW)
			ifc_nand_ctrl->eccread = 1;

		fsl_ifc_do_read(chip, 0, mtd);
		fsl_ifc_run_command(mtd);
		return;

	/* READOOB reads only the OOB because no ECC is performed. */
	case NAND_CMD_READOOB:
		out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
		set_addr(mtd, column, page_addr, 1);

		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;

		fsl_ifc_do_read(chip, 1, mtd);
		fsl_ifc_run_command(mtd);

		return;

	case NAND_CMD_READID:
	case NAND_CMD_PARAM: {
		/* PARAM needs the longer read-with-delay timing op */
		int timing = IFC_FIR_OP_RB;
		if (command == NAND_CMD_PARAM)
			timing = IFC_FIR_OP_RBCD;

		out_be32(&ifc->ifc_nand.nand_fir0,
			 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
			 (timing << IFC_NAND_FIR0_OP2_SHIFT));
		out_be32(&ifc->ifc_nand.nand_fcr0,
			 command << IFC_NAND_FCR0_CMD0_SHIFT);
		/* the ID/parameter address byte goes in row3 */
		out_be32(&ifc->ifc_nand.row3, column);

		/*
		 * although currently it's 8 bytes for READID, we always read
		 * the maximum 256 bytes(for PARAM)
		 */
		out_be32(&ifc->ifc_nand.nand_fbcr, 256);
		ifc_nand_ctrl->read_bytes = 256;

		set_addr(mtd, 0, 0, 0);
		fsl_ifc_run_command(mtd);
		return;
	}

	/* ERASE1 stores the block and page address */
	case NAND_CMD_ERASE1:
		set_addr(mtd, 0, page_addr, 0);
		return;

	/* ERASE2 uses the block and page address from ERASE1 */
	case NAND_CMD_ERASE2:
		out_be32(&ifc->ifc_nand.nand_fir0,
			 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
			 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));

		out_be32(&ifc->ifc_nand.nand_fcr0,
			 (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
			 (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));

		out_be32(&ifc->ifc_nand.nand_fbcr, 0);
		ifc_nand_ctrl->read_bytes = 0;
		fsl_ifc_run_command(mtd);
		return;

	/* SEQIN sets up the addr buffer and all registers except the length */
	case NAND_CMD_SEQIN: {
		u32 nand_fcr0;
		ifc_nand_ctrl->column = column;
		ifc_nand_ctrl->oob = 0;

		if (mtd->writesize > 512) {
			/* large-page program: SEQIN + data + PAGEPROG */
			nand_fcr0 =
				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);

			out_be32(&ifc->ifc_nand.nand_fir0,
				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
				 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
				 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
		} else {
			/*
			 * small-page program: a pointer command (READ0 or
			 * READOOB, chosen below) precedes SEQIN
			 */
			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
					IFC_NAND_FCR0_CMD1_SHIFT) |
				    (NAND_CMD_SEQIN <<
					IFC_NAND_FCR0_CMD2_SHIFT));

			out_be32(&ifc->ifc_nand.nand_fir0,
				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
				 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
				 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
				 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
			out_be32(&ifc->ifc_nand.nand_fir1,
				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));

			if (column >= mtd->writesize)
				nand_fcr0 |=
				NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
			else
				nand_fcr0 |=
				NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
		}

		if (column >= mtd->writesize) {
			/* OOB area --> READOOB */
			column -= mtd->writesize;
			ifc_nand_ctrl->oob = 1;
		}
		out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
		return;
	}

	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
	case NAND_CMD_PAGEPROG: {
		if (ifc_nand_ctrl->oob) {
			/* OOB-only write: transfer just the bytes staged */
			out_be32(&ifc->ifc_nand.nand_fbcr,
				 ifc_nand_ctrl->index - ifc_nand_ctrl->column);
		} else {
			/* fbcr = 0 means program the full page */
			out_be32(&ifc->ifc_nand.nand_fbcr, 0);
		}

		fsl_ifc_run_command(mtd);
		return;
	}

	case NAND_CMD_STATUS:
		out_be32(&ifc->ifc_nand.nand_fir0,
			 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
			 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
		out_be32(&ifc->ifc_nand.nand_fcr0,
			 NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
		out_be32(&ifc->ifc_nand.nand_fbcr, 1);
		set_addr(mtd, 0, 0, 0);
		ifc_nand_ctrl->read_bytes = 1;

		fsl_ifc_run_command(mtd);

		/*
		 * The chip always seems to report that it is
		 * write-protected, even when it is not.
		 */
		setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
		return;

	case NAND_CMD_RESET:
		out_be32(&ifc->ifc_nand.nand_fir0,
			 IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
		out_be32(&ifc->ifc_nand.nand_fcr0,
			 NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
		fsl_ifc_run_command(mtd);
		return;

	default:
		dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
			__func__, command);
	}
}
527
/* Intentionally a no-op chip-select callback. */
static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
{
	/* The hardware does not seem to support multiple
	 * chips per bank.
	 */
}
534
535/*
536 * Write buf to the IFC NAND Controller Data Buffer
537 */
538static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
539{
540 struct nand_chip *chip = mtd->priv;
541 struct fsl_ifc_mtd *priv = chip->priv;
542 unsigned int bufsize = mtd->writesize + mtd->oobsize;
543
544 if (len <= 0) {
545 dev_err(priv->dev, "%s: len %d bytes", __func__, len);
546 return;
547 }
548
549 if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
550 dev_err(priv->dev,
551 "%s: beyond end of buffer (%d requested, %u available)\n",
552 __func__, len, bufsize - ifc_nand_ctrl->index);
553 len = bufsize - ifc_nand_ctrl->index;
554 }
555
556 memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len);
557 ifc_nand_ctrl->index += len;
558}
559
560/*
561 * Read a byte from either the IFC hardware buffer
562 * read function for 8-bit buswidth
563 */
564static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
565{
566 struct nand_chip *chip = mtd->priv;
567 struct fsl_ifc_mtd *priv = chip->priv;
568
569 /*
570 * If there are still bytes in the IFC buffer, then use the
571 * next byte.
572 */
573 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes)
574 return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]);
575
576 dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
577 return ERR_BYTE;
578}
579
580/*
581 * Read two bytes from the IFC hardware buffer
582 * read function for 16-bit buswith
583 */
584static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
585{
586 struct nand_chip *chip = mtd->priv;
587 struct fsl_ifc_mtd *priv = chip->priv;
588 uint16_t data;
589
590 /*
591 * If there are still bytes in the IFC buffer, then use the
592 * next byte.
593 */
594 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
595 data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl->
596 addr[ifc_nand_ctrl->index]);
597 ifc_nand_ctrl->index += 2;
598 return (uint8_t) data;
599 }
600
601 dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
602 return ERR_BYTE;
603}
604
605/*
606 * Read from the IFC Controller Data Buffer
607 */
608static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
609{
610 struct nand_chip *chip = mtd->priv;
611 struct fsl_ifc_mtd *priv = chip->priv;
612 int avail;
613
614 if (len < 0) {
615 dev_err(priv->dev, "%s: len %d bytes", __func__, len);
616 return;
617 }
618
619 avail = min((unsigned int)len,
620 ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
621 memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail);
622 ifc_nand_ctrl->index += avail;
623
624 if (len > avail)
625 dev_err(priv->dev,
626 "%s: beyond end of buffer (%d requested, %d available)\n",
627 __func__, len, avail);
628}
629
/*
 * This function is called after Program and Erase Operations to
 * check for success or failure.
 *
 * Issues a READ_STATUS sequence through the IFC and returns the NAND
 * flash status register contents with the write-protect bit forced on.
 */
static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct fsl_ifc_mtd *priv = chip->priv;
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
	u32 nand_fsr;

	/* Use READ_STATUS command, but wait for the device to be ready */
	out_be32(&ifc->ifc_nand.nand_fir0,
		 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
		 (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
	out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
		 IFC_NAND_FCR0_CMD0_SHIFT);
	/* only one status byte is transferred */
	out_be32(&ifc->ifc_nand.nand_fbcr, 1);
	set_addr(mtd, 0, 0, 0);
	ifc_nand_ctrl->read_bytes = 1;

	fsl_ifc_run_command(mtd);

	nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);

	/*
	 * The chip always seems to report that it is
	 * write-protected, even when it is not.
	 */
	return nand_fsr | NAND_STATUS_WP;
}
661
/*
 * ecc.read_page callback: copy an already-transferred page (and
 * optionally its OOB) out of the controller buffer and report ECC
 * results gathered by the preceding READ0 command.
 */
static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct fsl_ifc_mtd *priv = chip->priv;
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;

	fsl_ifc_read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* nand_stat is filled in during command completion (not visible here) */
	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
		dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");

	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
		mtd->ecc_stats.failed++;

	/* max_bitflips presumably accumulated during the ECC read elsewhere */
	return nctrl->max_bitflips;
}
681
/* ECC will be calculated automatically, and errors will be detected in
 * waitfunc.
 *
 * Stages the main data, then the OOB, into the controller buffer; the
 * actual program happens when cmdfunc handles NAND_CMD_PAGEPROG.
 */
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			       const uint8_t *buf, int oob_required)
{
	fsl_ifc_write_buf(mtd, buf, mtd->writesize);
	fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
693
/*
 * Dump the geometry discovered by nand_scan_ident() for debugging.
 * Purely informational; always returns 0.
 */
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct fsl_ifc_mtd *priv = chip->priv;

	dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
		chip->numchips);
	dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
		chip->chipsize);
	dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
		chip->pagemask);
	dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
		chip->chip_delay);
	dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
		chip->badblockpos);
	dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
		chip->chip_shift);
	dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
		chip->page_shift);
	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
		chip->phys_erase_shift);
	dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
		chip->ecclayout);
	dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
		chip->ecc.mode);
	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
		chip->ecc.steps);
	dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
		chip->ecc.bytes);
	dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
		chip->ecc.total);
	dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
		chip->ecc.layout);
	dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
	dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
	dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
		mtd->erasesize);
	dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
		mtd->writesize);
	dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
		mtd->oobsize);

	return 0;
}
738
/*
 * Initialise the controller SRAM by issuing one dummy READID with the
 * chip select temporarily reprogrammed for an 8K page / 1K spare
 * geometry; the original CSOR/CSOR_ext values are restored afterwards.
 * Needed only on IFC v1.1.0 (see caller).
 */
static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
	struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
	uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
	uint32_t cs = priv->bank;

	/* Save CSOR and CSOR_ext */
	csor = in_be32(&ifc->csor_cs[cs].csor);
	csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);

	/* change PageSize 8K and SpareSize 1K */
	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
	out_be32(&ifc->csor_cs[cs].csor, csor_8k);
	out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);

	/* READID */
	out_be32(&ifc->ifc_nand.nand_fir0,
		 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
		 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
		 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
	out_be32(&ifc->ifc_nand.nand_fcr0,
		 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
	out_be32(&ifc->ifc_nand.row3, 0x0);

	out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);

	/* Program ROW0/COL0 */
	out_be32(&ifc->ifc_nand.row0, 0x0);
	out_be32(&ifc->ifc_nand.col0, 0x0);

	/* set the chip select for NAND Transaction */
	out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);

	/* start read seq */
	out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);

	/* wait for command complete flag or timeout */
	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
			   IFC_TIMEOUT_MSECS * HZ/1000);

	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");

	/* Restore CSOR and CSOR_ext */
	out_be32(&ifc->csor_cs[cs].csor, csor);
	out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
}
787
788static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
789{
790 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
791 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
792 struct nand_chip *chip = &priv->chip;
793 struct nand_ecclayout *layout;
794 u32 csor, ver;
795
796 /* Fill in fsl_ifc_mtd structure */
797 priv->mtd.priv = chip;
798 priv->mtd.owner = THIS_MODULE;
799
800 /* fill in nand_chip structure */
801 /* set up function call table */
802 if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
803 chip->read_byte = fsl_ifc_read_byte16;
804 else
805 chip->read_byte = fsl_ifc_read_byte;
806
807 chip->write_buf = fsl_ifc_write_buf;
808 chip->read_buf = fsl_ifc_read_buf;
809 chip->select_chip = fsl_ifc_select_chip;
810 chip->cmdfunc = fsl_ifc_cmdfunc;
811 chip->waitfunc = fsl_ifc_wait;
812
813 chip->bbt_td = &bbt_main_descr;
814 chip->bbt_md = &bbt_mirror_descr;
815
816 out_be32(&ifc->ifc_nand.ncfgr, 0x0);
817
818 /* set up nand options */
819 chip->bbt_options = NAND_BBT_USE_FLASH;
820
821
822 if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
823 chip->read_byte = fsl_ifc_read_byte16;
824 chip->options |= NAND_BUSWIDTH_16;
825 } else {
826 chip->read_byte = fsl_ifc_read_byte;
827 }
828
829 chip->controller = &ifc_nand_ctrl->controller;
830 chip->priv = priv;
831
832 chip->ecc.read_page = fsl_ifc_read_page;
833 chip->ecc.write_page = fsl_ifc_write_page;
834
835 csor = in_be32(&ifc->csor_cs[priv->bank].csor);
836
837 /* Hardware generates ECC per 512 Bytes */
838 chip->ecc.size = 512;
839 chip->ecc.bytes = 8;
840 chip->ecc.strength = 4;
841
842 switch (csor & CSOR_NAND_PGS_MASK) {
843 case CSOR_NAND_PGS_512:
844 if (chip->options & NAND_BUSWIDTH_16) {
845 layout = &oob_512_16bit_ecc4;
846 } else {
847 layout = &oob_512_8bit_ecc4;
848
849 /* Avoid conflict with bad block marker */
850 bbt_main_descr.offs = 0;
851 bbt_mirror_descr.offs = 0;
852 }
853
854 priv->bufnum_mask = 15;
855 break;
856
857 case CSOR_NAND_PGS_2K:
858 layout = &oob_2048_ecc4;
859 priv->bufnum_mask = 3;
860 break;
861
862 case CSOR_NAND_PGS_4K:
863 if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
864 CSOR_NAND_ECC_MODE_4) {
865 layout = &oob_4096_ecc4;
866 } else {
867 layout = &oob_4096_ecc8;
868 chip->ecc.bytes = 16;
869 }
870
871 priv->bufnum_mask = 1;
872 break;
873
874 default:
875 dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
876 return -ENODEV;
877 }
878
879 /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
880 if (csor & CSOR_NAND_ECC_DEC_EN) {
881 chip->ecc.mode = NAND_ECC_HW;
882 chip->ecc.layout = layout;
883 } else {
884 chip->ecc.mode = NAND_ECC_SOFT;
885 }
886
887 ver = in_be32(&ifc->ifc_rev);
888 if (ver == FSL_IFC_V1_1_0)
889 fsl_ifc_sram_init(priv);
890
891 return 0;
892}
893
894static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
895{
896 nand_release(&priv->mtd);
897
898 kfree(priv->mtd.name);
899
900 if (priv->vbase)
901 iounmap(priv->vbase);
902
903 ifc_nand_ctrl->chips[priv->bank] = NULL;
904 dev_set_drvdata(priv->dev, NULL);
905 kfree(priv);
906
907 return 0;
908}
909
910static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
911 phys_addr_t addr)
912{
913 u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
914
915 if (!(cspr & CSPR_V))
916 return 0;
917 if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
918 return 0;
919
920 return (cspr & CSPR_BA) == convert_ifc_address(addr);
921}
922
923static DEFINE_MUTEX(fsl_ifc_nand_mutex);
924
/*
 * Probe one IFC NAND bank: locate the matching chip select, set up the
 * (lazily-created, shared) controller state, map the chip window,
 * enable NAND machine events/interrupts, run the NAND scan and finally
 * register the MTD with partition parsing.
 */
static int fsl_ifc_nand_probe(struct platform_device *dev)
{
	struct fsl_ifc_regs __iomem *ifc;
	struct fsl_ifc_mtd *priv;
	struct resource res;
	static const char *part_probe_types[]
		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
	int ret;
	int bank;
	struct device_node *node = dev->dev.of_node;
	struct mtd_part_parser_data ppdata;

	ppdata.of_node = dev->dev.of_node;
	/* the shared IFC controller driver must have probed first */
	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
		return -ENODEV;
	ifc = fsl_ifc_ctrl_dev->regs;

	/* get, allocate and map the memory resource */
	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
		return ret;
	}

	/* find which chip select it is connected to */
	for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
		if (match_bank(ifc, bank, res.start))
			break;
	}

	if (bank >= FSL_IFC_BANK_COUNT) {
		dev_err(&dev->dev, "%s: address did not match any chip selects\n",
			__func__);
		return -ENODEV;
	}

	/* devm-managed: freed automatically when the device detaches */
	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mutex_lock(&fsl_ifc_nand_mutex);
	if (!fsl_ifc_ctrl_dev->nand) {
		/*
		 * First bank: create the controller state shared by all
		 * banks.  NOTE(review): remove() decrements
		 * ifc_nand_ctrl->counter, but no increment is visible in
		 * this probe path — verify the refcounting.
		 */
		ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
		if (!ifc_nand_ctrl) {
			dev_err(&dev->dev, "failed to allocate memory\n");
			mutex_unlock(&fsl_ifc_nand_mutex);
			return -ENOMEM;
		}

		ifc_nand_ctrl->read_bytes = 0;
		ifc_nand_ctrl->index = 0;
		ifc_nand_ctrl->addr = NULL;
		fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;

		spin_lock_init(&ifc_nand_ctrl->controller.lock);
		init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
	} else {
		ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
	}
	mutex_unlock(&fsl_ifc_nand_mutex);

	ifc_nand_ctrl->chips[bank] = priv;
	priv->bank = bank;
	priv->ctrl = fsl_ifc_ctrl_dev;
	priv->dev = &dev->dev;

	priv->vbase = ioremap(res.start, resource_size(&res));
	if (!priv->vbase) {
		dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(priv->dev, priv);

	/* enable operation-complete, timeout and write-protect events */
	out_be32(&ifc->ifc_nand.nand_evter_en,
		 IFC_NAND_EVTER_EN_OPC_EN |
		 IFC_NAND_EVTER_EN_FTOER_EN |
		 IFC_NAND_EVTER_EN_WPER_EN);

	/* enable NAND Machine Interrupts */
	out_be32(&ifc->ifc_nand.nand_evter_intr_en,
		 IFC_NAND_EVTER_INTR_OPCIR_EN |
		 IFC_NAND_EVTER_INTR_FTOERIR_EN |
		 IFC_NAND_EVTER_INTR_WPERIR_EN);

	/* freed by fsl_ifc_chip_remove() */
	priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
	if (!priv->mtd.name) {
		ret = -ENOMEM;
		goto err;
	}

	ret = fsl_ifc_chip_init(priv);
	if (ret)
		goto err;

	ret = nand_scan_ident(&priv->mtd, 1, NULL);
	if (ret)
		goto err;

	ret = fsl_ifc_chip_init_tail(&priv->mtd);
	if (ret)
		goto err;

	ret = nand_scan_tail(&priv->mtd);
	if (ret)
		goto err;

	/* First look for RedBoot table or partitions on the command
	 * line, these take precedence over device tree information */
	mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
				  NULL, 0);

	dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
		 (unsigned long long)res.start, priv->bank);
	return 0;

err:
	fsl_ifc_chip_remove(priv);
	return ret;
}
1046
/*
 * Unbind one bank and drop a reference on the shared controller state,
 * freeing it when the last bank goes away.
 */
static int fsl_ifc_nand_remove(struct platform_device *dev)
{
	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);

	fsl_ifc_chip_remove(priv);

	mutex_lock(&fsl_ifc_nand_mutex);
	/*
	 * NOTE(review): counter is decremented here, but no matching
	 * increment is visible in this file's probe path — verify the
	 * refcounting before relying on the kfree below firing.
	 */
	ifc_nand_ctrl->counter--;
	if (!ifc_nand_ctrl->counter) {
		fsl_ifc_ctrl_dev->nand = NULL;
		kfree(ifc_nand_ctrl);
	}
	mutex_unlock(&fsl_ifc_nand_mutex);

	return 0;
}
1063
/* Device-tree match table: bind to "fsl,ifc-nand" nodes. */
static const struct of_device_id fsl_ifc_nand_match[] = {
	{
		.compatible = "fsl,ifc-nand",
	},
	{}
};
1070
/* Platform driver glue for the IFC NAND controller banks. */
static struct platform_driver fsl_ifc_nand_driver = {
	.driver = {
		.name	= "fsl,ifc-nand",
		.owner = THIS_MODULE,
		.of_match_table = fsl_ifc_nand_match,
	},
	.probe       = fsl_ifc_nand_probe,
	.remove      = fsl_ifc_nand_remove,
};
1080
1081static int __init fsl_ifc_nand_init(void)
1082{
1083 int ret;
1084
1085 ret = platform_driver_register(&fsl_ifc_nand_driver);
1086 if (ret)
1087 printk(KERN_ERR "fsl-ifc: Failed to register platform"
1088 "driver\n");
1089
1090 return ret;
1091}
1092
/* Module exit: unregister the platform driver. */
static void __exit fsl_ifc_nand_exit(void)
{
	platform_driver_unregister(&fsl_ifc_nand_driver);
}
1097
1098module_init(fsl_ifc_nand_init);
1099module_exit(fsl_ifc_nand_exit);
1100
1101MODULE_LICENSE("GPL");
1102MODULE_AUTHOR("Freescale");
1103MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 04e07252d74..23752fd5bc5 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -152,13 +152,13 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
152 fun_wait_rnb(fun); 152 fun_wait_rnb(fun);
153} 153}
154 154
155static int fun_chip_init(struct fsl_upm_nand *fun, 155static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
156 const struct device_node *upm_np, 156 const struct device_node *upm_np,
157 const struct resource *io_res) 157 const struct resource *io_res)
158{ 158{
159 int ret; 159 int ret;
160 struct device_node *flash_np; 160 struct device_node *flash_np;
161 struct mtd_part_parser_data ppdata; 161 static const char *part_types[] = { "cmdlinepart", NULL, };
162 162
163 fun->chip.IO_ADDR_R = fun->io_base; 163 fun->chip.IO_ADDR_R = fun->io_base;
164 fun->chip.IO_ADDR_W = fun->io_base; 164 fun->chip.IO_ADDR_W = fun->io_base;
@@ -192,16 +192,22 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
192 if (ret) 192 if (ret)
193 goto err; 193 goto err;
194 194
195 ppdata.of_node = flash_np; 195 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
196 ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0); 196
197#ifdef CONFIG_MTD_OF_PARTS
198 if (ret == 0) {
199 ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
200 if (ret < 0)
201 goto err;
202 }
203#endif
204 ret = mtd_device_register(&fun->mtd, fun->parts, ret);
197err: 205err:
198 of_node_put(flash_np); 206 of_node_put(flash_np);
199 if (ret)
200 kfree(fun->mtd.name);
201 return ret; 207 return ret;
202} 208}
203 209
204static int fun_probe(struct platform_device *ofdev) 210static int __devinit fun_probe(struct platform_device *ofdev)
205{ 211{
206 struct fsl_upm_nand *fun; 212 struct fsl_upm_nand *fun;
207 struct resource io_res; 213 struct resource io_res;
@@ -318,7 +324,7 @@ err1:
318 return ret; 324 return ret;
319} 325}
320 326
321static int fun_remove(struct platform_device *ofdev) 327static int __devexit fun_remove(struct platform_device *ofdev)
322{ 328{
323 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); 329 struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
324 int i; 330 int i;
@@ -350,10 +356,20 @@ static struct platform_driver of_fun_driver = {
350 .of_match_table = of_fun_match, 356 .of_match_table = of_fun_match,
351 }, 357 },
352 .probe = fun_probe, 358 .probe = fun_probe,
353 .remove = fun_remove, 359 .remove = __devexit_p(fun_remove),
354}; 360};
355 361
356module_platform_driver(of_fun_driver); 362static int __init fun_module_init(void)
363{
364 return platform_driver_register(&of_fun_driver);
365}
366module_init(fun_module_init);
367
368static void __exit fun_module_exit(void)
369{
370 platform_driver_unregister(&of_fun_driver);
371}
372module_exit(fun_module_exit);
357 373
358MODULE_LICENSE("GPL"); 374MODULE_LICENSE("GPL");
359MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>"); 375MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 67e62d3d495..e9b275ac381 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,10 +17,6 @@
17 */ 17 */
18 18
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/dmaengine.h>
22#include <linux/dma-direction.h>
23#include <linux/dma-mapping.h>
24#include <linux/err.h> 20#include <linux/err.h>
25#include <linux/init.h> 21#include <linux/init.h>
26#include <linux/module.h> 22#include <linux/module.h>
@@ -31,7 +27,6 @@
31#include <linux/mtd/nand.h> 27#include <linux/mtd/nand.h>
32#include <linux/mtd/nand_ecc.h> 28#include <linux/mtd/nand_ecc.h>
33#include <linux/platform_device.h> 29#include <linux/platform_device.h>
34#include <linux/of.h>
35#include <linux/mtd/partitions.h> 30#include <linux/mtd/partitions.h>
36#include <linux/io.h> 31#include <linux/io.h>
37#include <linux/slab.h> 32#include <linux/slab.h>
@@ -39,7 +34,7 @@
39#include <linux/amba/bus.h> 34#include <linux/amba/bus.h>
40#include <mtd/mtd-abi.h> 35#include <mtd/mtd-abi.h>
41 36
42static struct nand_ecclayout fsmc_ecc1_128_layout = { 37static struct nand_ecclayout fsmc_ecc1_layout = {
43 .eccbytes = 24, 38 .eccbytes = 24,
44 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, 39 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
45 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, 40 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
@@ -55,127 +50,7 @@ static struct nand_ecclayout fsmc_ecc1_128_layout = {
55 } 50 }
56}; 51};
57 52
58static struct nand_ecclayout fsmc_ecc1_64_layout = { 53static struct nand_ecclayout fsmc_ecc4_lp_layout = {
59 .eccbytes = 12,
60 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
61 .oobfree = {
62 {.offset = 8, .length = 8},
63 {.offset = 24, .length = 8},
64 {.offset = 40, .length = 8},
65 {.offset = 56, .length = 8},
66 }
67};
68
69static struct nand_ecclayout fsmc_ecc1_16_layout = {
70 .eccbytes = 3,
71 .eccpos = {2, 3, 4},
72 .oobfree = {
73 {.offset = 8, .length = 8},
74 }
75};
76
77/*
78 * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
79 * of OB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
80 * bytes are free for use.
81 */
82static struct nand_ecclayout fsmc_ecc4_256_layout = {
83 .eccbytes = 208,
84 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
85 9, 10, 11, 12, 13, 14,
86 18, 19, 20, 21, 22, 23, 24,
87 25, 26, 27, 28, 29, 30,
88 34, 35, 36, 37, 38, 39, 40,
89 41, 42, 43, 44, 45, 46,
90 50, 51, 52, 53, 54, 55, 56,
91 57, 58, 59, 60, 61, 62,
92 66, 67, 68, 69, 70, 71, 72,
93 73, 74, 75, 76, 77, 78,
94 82, 83, 84, 85, 86, 87, 88,
95 89, 90, 91, 92, 93, 94,
96 98, 99, 100, 101, 102, 103, 104,
97 105, 106, 107, 108, 109, 110,
98 114, 115, 116, 117, 118, 119, 120,
99 121, 122, 123, 124, 125, 126,
100 130, 131, 132, 133, 134, 135, 136,
101 137, 138, 139, 140, 141, 142,
102 146, 147, 148, 149, 150, 151, 152,
103 153, 154, 155, 156, 157, 158,
104 162, 163, 164, 165, 166, 167, 168,
105 169, 170, 171, 172, 173, 174,
106 178, 179, 180, 181, 182, 183, 184,
107 185, 186, 187, 188, 189, 190,
108 194, 195, 196, 197, 198, 199, 200,
109 201, 202, 203, 204, 205, 206,
110 210, 211, 212, 213, 214, 215, 216,
111 217, 218, 219, 220, 221, 222,
112 226, 227, 228, 229, 230, 231, 232,
113 233, 234, 235, 236, 237, 238,
114 242, 243, 244, 245, 246, 247, 248,
115 249, 250, 251, 252, 253, 254
116 },
117 .oobfree = {
118 {.offset = 15, .length = 3},
119 {.offset = 31, .length = 3},
120 {.offset = 47, .length = 3},
121 {.offset = 63, .length = 3},
122 {.offset = 79, .length = 3},
123 {.offset = 95, .length = 3},
124 {.offset = 111, .length = 3},
125 {.offset = 127, .length = 3},
126 {.offset = 143, .length = 3},
127 {.offset = 159, .length = 3},
128 {.offset = 175, .length = 3},
129 {.offset = 191, .length = 3},
130 {.offset = 207, .length = 3},
131 {.offset = 223, .length = 3},
132 {.offset = 239, .length = 3},
133 {.offset = 255, .length = 1}
134 }
135};
136
137/*
138 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
139 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
140 * bytes are free for use.
141 */
142static struct nand_ecclayout fsmc_ecc4_224_layout = {
143 .eccbytes = 104,
144 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
145 9, 10, 11, 12, 13, 14,
146 18, 19, 20, 21, 22, 23, 24,
147 25, 26, 27, 28, 29, 30,
148 34, 35, 36, 37, 38, 39, 40,
149 41, 42, 43, 44, 45, 46,
150 50, 51, 52, 53, 54, 55, 56,
151 57, 58, 59, 60, 61, 62,
152 66, 67, 68, 69, 70, 71, 72,
153 73, 74, 75, 76, 77, 78,
154 82, 83, 84, 85, 86, 87, 88,
155 89, 90, 91, 92, 93, 94,
156 98, 99, 100, 101, 102, 103, 104,
157 105, 106, 107, 108, 109, 110,
158 114, 115, 116, 117, 118, 119, 120,
159 121, 122, 123, 124, 125, 126
160 },
161 .oobfree = {
162 {.offset = 15, .length = 3},
163 {.offset = 31, .length = 3},
164 {.offset = 47, .length = 3},
165 {.offset = 63, .length = 3},
166 {.offset = 79, .length = 3},
167 {.offset = 95, .length = 3},
168 {.offset = 111, .length = 3},
169 {.offset = 127, .length = 97}
170 }
171};
172
173/*
174 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
175 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
176 * bytes are free for use.
177 */
178static struct nand_ecclayout fsmc_ecc4_128_layout = {
179 .eccbytes = 104, 54 .eccbytes = 104,
180 .eccpos = { 2, 3, 4, 5, 6, 7, 8, 55 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
181 9, 10, 11, 12, 13, 14, 56 9, 10, 11, 12, 13, 14,
@@ -207,45 +82,6 @@ static struct nand_ecclayout fsmc_ecc4_128_layout = {
207}; 82};
208 83
209/* 84/*
210 * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
211 * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
212 * bytes are free for use.
213 */
214static struct nand_ecclayout fsmc_ecc4_64_layout = {
215 .eccbytes = 52,
216 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
217 9, 10, 11, 12, 13, 14,
218 18, 19, 20, 21, 22, 23, 24,
219 25, 26, 27, 28, 29, 30,
220 34, 35, 36, 37, 38, 39, 40,
221 41, 42, 43, 44, 45, 46,
222 50, 51, 52, 53, 54, 55, 56,
223 57, 58, 59, 60, 61, 62,
224 },
225 .oobfree = {
226 {.offset = 15, .length = 3},
227 {.offset = 31, .length = 3},
228 {.offset = 47, .length = 3},
229 {.offset = 63, .length = 1},
230 }
231};
232
233/*
234 * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
235 * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One
236 * byte is free for use.
237 */
238static struct nand_ecclayout fsmc_ecc4_16_layout = {
239 .eccbytes = 13,
240 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
241 9, 10, 11, 12, 13, 14
242 },
243 .oobfree = {
244 {.offset = 15, .length = 1},
245 }
246};
247
248/*
249 * ECC placement definitions in oobfree type format. 85 * ECC placement definitions in oobfree type format.
250 * There are 13 bytes of ecc for every 512 byte block and it has to be read 86 * There are 13 bytes of ecc for every 512 byte block and it has to be read
251 * consecutively and immediately after the 512 byte data block for hardware to 87 * consecutively and immediately after the 512 byte data block for hardware to
@@ -267,6 +103,16 @@ static struct fsmc_eccplace fsmc_ecc4_lp_place = {
267 } 103 }
268}; 104};
269 105
106static struct nand_ecclayout fsmc_ecc4_sp_layout = {
107 .eccbytes = 13,
108 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
109 9, 10, 11, 12, 13, 14
110 },
111 .oobfree = {
112 {.offset = 15, .length = 1},
113 }
114};
115
270static struct fsmc_eccplace fsmc_ecc4_sp_place = { 116static struct fsmc_eccplace fsmc_ecc4_sp_place = {
271 .eccplace = { 117 .eccplace = {
272 {.offset = 0, .length = 4}, 118 {.offset = 0, .length = 4},
@@ -274,6 +120,67 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
274 } 120 }
275}; 121};
276 122
123/*
124 * Default partition tables to be used if the partition information not
125 * provided through platform data.
126 *
127 * Default partition layout for small page(= 512 bytes) devices
128 * Size for "Root file system" is updated in driver based on actual device size
129 */
130static struct mtd_partition partition_info_16KB_blk[] = {
131 {
132 .name = "X-loader",
133 .offset = 0,
134 .size = 4*0x4000,
135 },
136 {
137 .name = "U-Boot",
138 .offset = 0x10000,
139 .size = 20*0x4000,
140 },
141 {
142 .name = "Kernel",
143 .offset = 0x60000,
144 .size = 256*0x4000,
145 },
146 {
147 .name = "Root File System",
148 .offset = 0x460000,
149 .size = 0,
150 },
151};
152
153/*
154 * Default partition layout for large page(> 512 bytes) devices
155 * Size for "Root file system" is updated in driver based on actual device size
156 */
157static struct mtd_partition partition_info_128KB_blk[] = {
158 {
159 .name = "X-loader",
160 .offset = 0,
161 .size = 4*0x20000,
162 },
163 {
164 .name = "U-Boot",
165 .offset = 0x80000,
166 .size = 12*0x20000,
167 },
168 {
169 .name = "Kernel",
170 .offset = 0x200000,
171 .size = 48*0x20000,
172 },
173 {
174 .name = "Root File System",
175 .offset = 0x800000,
176 .size = 0,
177 },
178};
179
180#ifdef CONFIG_MTD_CMDLINE_PARTS
181const char *part_probes[] = { "cmdlinepart", NULL };
182#endif
183
277/** 184/**
278 * struct fsmc_nand_data - structure for FSMC NAND device state 185 * struct fsmc_nand_data - structure for FSMC NAND device state
279 * 186 *
@@ -287,11 +194,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
287 * @bank: Bank number for probed device. 194 * @bank: Bank number for probed device.
288 * @clk: Clock structure for FSMC. 195 * @clk: Clock structure for FSMC.
289 * 196 *
290 * @read_dma_chan: DMA channel for read access
291 * @write_dma_chan: DMA channel for write access to NAND
292 * @dma_access_complete: Completion structure
293 *
294 * @data_pa: NAND Physical port for Data.
295 * @data_va: NAND port for Data. 197 * @data_va: NAND port for Data.
296 * @cmd_va: NAND port for Command. 198 * @cmd_va: NAND port for Command.
297 * @addr_va: NAND port for Address. 199 * @addr_va: NAND port for Address.
@@ -306,18 +208,13 @@ struct fsmc_nand_data {
306 208
307 struct fsmc_eccplace *ecc_place; 209 struct fsmc_eccplace *ecc_place;
308 unsigned int bank; 210 unsigned int bank;
309 struct device *dev;
310 enum access_mode mode;
311 struct clk *clk; 211 struct clk *clk;
312 212
313 /* DMA related objects */ 213 struct resource *resregs;
314 struct dma_chan *read_dma_chan; 214 struct resource *rescmd;
315 struct dma_chan *write_dma_chan; 215 struct resource *resaddr;
316 struct completion dma_access_complete; 216 struct resource *resdata;
317
318 struct fsmc_nand_timings *dev_timings;
319 217
320 dma_addr_t data_pa;
321 void __iomem *data_va; 218 void __iomem *data_va;
322 void __iomem *cmd_va; 219 void __iomem *cmd_va;
323 void __iomem *addr_va; 220 void __iomem *addr_va;
@@ -361,35 +258,34 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
361 struct nand_chip *this = mtd->priv; 258 struct nand_chip *this = mtd->priv;
362 struct fsmc_nand_data *host = container_of(mtd, 259 struct fsmc_nand_data *host = container_of(mtd,
363 struct fsmc_nand_data, mtd); 260 struct fsmc_nand_data, mtd);
364 void __iomem *regs = host->regs_va; 261 struct fsmc_regs *regs = host->regs_va;
365 unsigned int bank = host->bank; 262 unsigned int bank = host->bank;
366 263
367 if (ctrl & NAND_CTRL_CHANGE) { 264 if (ctrl & NAND_CTRL_CHANGE) {
368 u32 pc;
369
370 if (ctrl & NAND_CLE) { 265 if (ctrl & NAND_CLE) {
371 this->IO_ADDR_R = host->cmd_va; 266 this->IO_ADDR_R = (void __iomem *)host->cmd_va;
372 this->IO_ADDR_W = host->cmd_va; 267 this->IO_ADDR_W = (void __iomem *)host->cmd_va;
373 } else if (ctrl & NAND_ALE) { 268 } else if (ctrl & NAND_ALE) {
374 this->IO_ADDR_R = host->addr_va; 269 this->IO_ADDR_R = (void __iomem *)host->addr_va;
375 this->IO_ADDR_W = host->addr_va; 270 this->IO_ADDR_W = (void __iomem *)host->addr_va;
376 } else { 271 } else {
377 this->IO_ADDR_R = host->data_va; 272 this->IO_ADDR_R = (void __iomem *)host->data_va;
378 this->IO_ADDR_W = host->data_va; 273 this->IO_ADDR_W = (void __iomem *)host->data_va;
379 } 274 }
380 275
381 pc = readl(FSMC_NAND_REG(regs, bank, PC)); 276 if (ctrl & NAND_NCE) {
382 if (ctrl & NAND_NCE) 277 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE,
383 pc |= FSMC_ENABLE; 278 &regs->bank_regs[bank].pc);
384 else 279 } else {
385 pc &= ~FSMC_ENABLE; 280 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE,
386 writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC)); 281 &regs->bank_regs[bank].pc);
282 }
387 } 283 }
388 284
389 mb(); 285 mb();
390 286
391 if (cmd != NAND_CMD_NONE) 287 if (cmd != NAND_CMD_NONE)
392 writeb_relaxed(cmd, this->IO_ADDR_W); 288 writeb(cmd, this->IO_ADDR_W);
393} 289}
394 290
395/* 291/*
@@ -398,46 +294,22 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
398 * This routine initializes timing parameters related to NAND memory access in 294 * This routine initializes timing parameters related to NAND memory access in
399 * FSMC registers 295 * FSMC registers
400 */ 296 */
401static void fsmc_nand_setup(void __iomem *regs, uint32_t bank, 297static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank,
402 uint32_t busw, struct fsmc_nand_timings *timings) 298 uint32_t busw)
403{ 299{
404 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; 300 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
405 uint32_t tclr, tar, thiz, thold, twait, tset;
406 struct fsmc_nand_timings *tims;
407 struct fsmc_nand_timings default_timings = {
408 .tclr = FSMC_TCLR_1,
409 .tar = FSMC_TAR_1,
410 .thiz = FSMC_THIZ_1,
411 .thold = FSMC_THOLD_4,
412 .twait = FSMC_TWAIT_6,
413 .tset = FSMC_TSET_0,
414 };
415
416 if (timings)
417 tims = timings;
418 else
419 tims = &default_timings;
420
421 tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
422 tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
423 thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
424 thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
425 twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
426 tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
427 301
428 if (busw) 302 if (busw)
429 writel_relaxed(value | FSMC_DEVWID_16, 303 writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc);
430 FSMC_NAND_REG(regs, bank, PC));
431 else 304 else
432 writel_relaxed(value | FSMC_DEVWID_8, 305 writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc);
433 FSMC_NAND_REG(regs, bank, PC)); 306
434 307 writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1,
435 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar, 308 &regs->bank_regs[bank].pc);
436 FSMC_NAND_REG(regs, bank, PC)); 309 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
437 writel_relaxed(thiz | thold | twait | tset, 310 &regs->bank_regs[bank].comm);
438 FSMC_NAND_REG(regs, bank, COMM)); 311 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
439 writel_relaxed(thiz | thold | twait | tset, 312 &regs->bank_regs[bank].attrib);
440 FSMC_NAND_REG(regs, bank, ATTRIB));
441} 313}
442 314
443/* 315/*
@@ -447,15 +319,15 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
447{ 319{
448 struct fsmc_nand_data *host = container_of(mtd, 320 struct fsmc_nand_data *host = container_of(mtd,
449 struct fsmc_nand_data, mtd); 321 struct fsmc_nand_data, mtd);
450 void __iomem *regs = host->regs_va; 322 struct fsmc_regs *regs = host->regs_va;
451 uint32_t bank = host->bank; 323 uint32_t bank = host->bank;
452 324
453 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256, 325 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256,
454 FSMC_NAND_REG(regs, bank, PC)); 326 &regs->bank_regs[bank].pc);
455 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN, 327 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN,
456 FSMC_NAND_REG(regs, bank, PC)); 328 &regs->bank_regs[bank].pc);
457 writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN, 329 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN,
458 FSMC_NAND_REG(regs, bank, PC)); 330 &regs->bank_regs[bank].pc);
459} 331}
460 332
461/* 333/*
@@ -468,42 +340,37 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
468{ 340{
469 struct fsmc_nand_data *host = container_of(mtd, 341 struct fsmc_nand_data *host = container_of(mtd,
470 struct fsmc_nand_data, mtd); 342 struct fsmc_nand_data, mtd);
471 void __iomem *regs = host->regs_va; 343 struct fsmc_regs *regs = host->regs_va;
472 uint32_t bank = host->bank; 344 uint32_t bank = host->bank;
473 uint32_t ecc_tmp; 345 uint32_t ecc_tmp;
474 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; 346 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
475 347
476 do { 348 do {
477 if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY) 349 if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY)
478 break; 350 break;
479 else 351 else
480 cond_resched(); 352 cond_resched();
481 } while (!time_after_eq(jiffies, deadline)); 353 } while (!time_after_eq(jiffies, deadline));
482 354
483 if (time_after_eq(jiffies, deadline)) { 355 ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
484 dev_err(host->dev, "calculate ecc timed out\n");
485 return -ETIMEDOUT;
486 }
487
488 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
489 ecc[0] = (uint8_t) (ecc_tmp >> 0); 356 ecc[0] = (uint8_t) (ecc_tmp >> 0);
490 ecc[1] = (uint8_t) (ecc_tmp >> 8); 357 ecc[1] = (uint8_t) (ecc_tmp >> 8);
491 ecc[2] = (uint8_t) (ecc_tmp >> 16); 358 ecc[2] = (uint8_t) (ecc_tmp >> 16);
492 ecc[3] = (uint8_t) (ecc_tmp >> 24); 359 ecc[3] = (uint8_t) (ecc_tmp >> 24);
493 360
494 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2)); 361 ecc_tmp = readl(&regs->bank_regs[bank].ecc2);
495 ecc[4] = (uint8_t) (ecc_tmp >> 0); 362 ecc[4] = (uint8_t) (ecc_tmp >> 0);
496 ecc[5] = (uint8_t) (ecc_tmp >> 8); 363 ecc[5] = (uint8_t) (ecc_tmp >> 8);
497 ecc[6] = (uint8_t) (ecc_tmp >> 16); 364 ecc[6] = (uint8_t) (ecc_tmp >> 16);
498 ecc[7] = (uint8_t) (ecc_tmp >> 24); 365 ecc[7] = (uint8_t) (ecc_tmp >> 24);
499 366
500 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3)); 367 ecc_tmp = readl(&regs->bank_regs[bank].ecc3);
501 ecc[8] = (uint8_t) (ecc_tmp >> 0); 368 ecc[8] = (uint8_t) (ecc_tmp >> 0);
502 ecc[9] = (uint8_t) (ecc_tmp >> 8); 369 ecc[9] = (uint8_t) (ecc_tmp >> 8);
503 ecc[10] = (uint8_t) (ecc_tmp >> 16); 370 ecc[10] = (uint8_t) (ecc_tmp >> 16);
504 ecc[11] = (uint8_t) (ecc_tmp >> 24); 371 ecc[11] = (uint8_t) (ecc_tmp >> 24);
505 372
506 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS)); 373 ecc_tmp = readl(&regs->bank_regs[bank].sts);
507 ecc[12] = (uint8_t) (ecc_tmp >> 16); 374 ecc[12] = (uint8_t) (ecc_tmp >> 16);
508 375
509 return 0; 376 return 0;
@@ -519,11 +386,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
519{ 386{
520 struct fsmc_nand_data *host = container_of(mtd, 387 struct fsmc_nand_data *host = container_of(mtd,
521 struct fsmc_nand_data, mtd); 388 struct fsmc_nand_data, mtd);
522 void __iomem *regs = host->regs_va; 389 struct fsmc_regs *regs = host->regs_va;
523 uint32_t bank = host->bank; 390 uint32_t bank = host->bank;
524 uint32_t ecc_tmp; 391 uint32_t ecc_tmp;
525 392
526 ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); 393 ecc_tmp = readl(&regs->bank_regs[bank].ecc1);
527 ecc[0] = (uint8_t) (ecc_tmp >> 0); 394 ecc[0] = (uint8_t) (ecc_tmp >> 0);
528 ecc[1] = (uint8_t) (ecc_tmp >> 8); 395 ecc[1] = (uint8_t) (ecc_tmp >> 8);
529 ecc[2] = (uint8_t) (ecc_tmp >> 16); 396 ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -531,172 +398,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
531 return 0; 398 return 0;
532} 399}
533 400
534/* Count the number of 0's in buff upto a max of max_bits */
535static int count_written_bits(uint8_t *buff, int size, int max_bits)
536{
537 int k, written_bits = 0;
538
539 for (k = 0; k < size; k++) {
540 written_bits += hweight8(~buff[k]);
541 if (written_bits > max_bits)
542 break;
543 }
544
545 return written_bits;
546}
547
548static void dma_complete(void *param)
549{
550 struct fsmc_nand_data *host = param;
551
552 complete(&host->dma_access_complete);
553}
554
555static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
556 enum dma_data_direction direction)
557{
558 struct dma_chan *chan;
559 struct dma_device *dma_dev;
560 struct dma_async_tx_descriptor *tx;
561 dma_addr_t dma_dst, dma_src, dma_addr;
562 dma_cookie_t cookie;
563 unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
564 int ret;
565
566 if (direction == DMA_TO_DEVICE)
567 chan = host->write_dma_chan;
568 else if (direction == DMA_FROM_DEVICE)
569 chan = host->read_dma_chan;
570 else
571 return -EINVAL;
572
573 dma_dev = chan->device;
574 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
575
576 if (direction == DMA_TO_DEVICE) {
577 dma_src = dma_addr;
578 dma_dst = host->data_pa;
579 flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
580 } else {
581 dma_src = host->data_pa;
582 dma_dst = dma_addr;
583 flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
584 }
585
586 tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
587 len, flags);
588
589 if (!tx) {
590 dev_err(host->dev, "device_prep_dma_memcpy error\n");
591 dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
592 return -EIO;
593 }
594
595 tx->callback = dma_complete;
596 tx->callback_param = host;
597 cookie = tx->tx_submit(tx);
598
599 ret = dma_submit_error(cookie);
600 if (ret) {
601 dev_err(host->dev, "dma_submit_error %d\n", cookie);
602 return ret;
603 }
604
605 dma_async_issue_pending(chan);
606
607 ret =
608 wait_for_completion_timeout(&host->dma_access_complete,
609 msecs_to_jiffies(3000));
610 if (ret <= 0) {
611 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
612 dev_err(host->dev, "wait_for_completion_timeout\n");
613 return ret ? ret : -ETIMEDOUT;
614 }
615
616 return 0;
617}
618
619/*
620 * fsmc_write_buf - write buffer to chip
621 * @mtd: MTD device structure
622 * @buf: data buffer
623 * @len: number of bytes to write
624 */
625static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
626{
627 int i;
628 struct nand_chip *chip = mtd->priv;
629
630 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
631 IS_ALIGNED(len, sizeof(uint32_t))) {
632 uint32_t *p = (uint32_t *)buf;
633 len = len >> 2;
634 for (i = 0; i < len; i++)
635 writel_relaxed(p[i], chip->IO_ADDR_W);
636 } else {
637 for (i = 0; i < len; i++)
638 writeb_relaxed(buf[i], chip->IO_ADDR_W);
639 }
640}
641
642/*
643 * fsmc_read_buf - read chip data into buffer
644 * @mtd: MTD device structure
645 * @buf: buffer to store date
646 * @len: number of bytes to read
647 */
648static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
649{
650 int i;
651 struct nand_chip *chip = mtd->priv;
652
653 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
654 IS_ALIGNED(len, sizeof(uint32_t))) {
655 uint32_t *p = (uint32_t *)buf;
656 len = len >> 2;
657 for (i = 0; i < len; i++)
658 p[i] = readl_relaxed(chip->IO_ADDR_R);
659 } else {
660 for (i = 0; i < len; i++)
661 buf[i] = readb_relaxed(chip->IO_ADDR_R);
662 }
663}
664
665/*
666 * fsmc_read_buf_dma - read chip data into buffer
667 * @mtd: MTD device structure
668 * @buf: buffer to store date
669 * @len: number of bytes to read
670 */
671static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
672{
673 struct fsmc_nand_data *host;
674
675 host = container_of(mtd, struct fsmc_nand_data, mtd);
676 dma_xfer(host, buf, len, DMA_FROM_DEVICE);
677}
678
679/*
680 * fsmc_write_buf_dma - write buffer to chip
681 * @mtd: MTD device structure
682 * @buf: data buffer
683 * @len: number of bytes to write
684 */
685static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
686 int len)
687{
688 struct fsmc_nand_data *host;
689
690 host = container_of(mtd, struct fsmc_nand_data, mtd);
691 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
692}
693
694/* 401/*
695 * fsmc_read_page_hwecc 402 * fsmc_read_page_hwecc
696 * @mtd: mtd info structure 403 * @mtd: mtd info structure
697 * @chip: nand chip info structure 404 * @chip: nand chip info structure
698 * @buf: buffer to store read data 405 * @buf: buffer to store read data
699 * @oob_required: caller expects OOB data read to chip->oob_poi
700 * @page: page number to read 406 * @page: page number to read
701 * 407 *
702 * This routine is needed for fsmc version 8 as reading from NAND chip has to be 408 * This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -706,7 +412,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
706 * max of 8 bits) 412 * max of 8 bits)
707 */ 413 */
708static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 414static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
709 uint8_t *buf, int oob_required, int page) 415 uint8_t *buf, int page)
710{ 416{
711 struct fsmc_nand_data *host = container_of(mtd, 417 struct fsmc_nand_data *host = container_of(mtd,
712 struct fsmc_nand_data, mtd); 418 struct fsmc_nand_data, mtd);
@@ -725,9 +431,9 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
725 */ 431 */
726 uint16_t ecc_oob[7]; 432 uint16_t ecc_oob[7];
727 uint8_t *oob = (uint8_t *)&ecc_oob[0]; 433 uint8_t *oob = (uint8_t *)&ecc_oob[0];
728 unsigned int max_bitflips = 0;
729 434
730 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { 435 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
436
731 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); 437 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
732 chip->ecc.hwctl(mtd, NAND_ECC_READ); 438 chip->ecc.hwctl(mtd, NAND_ECC_READ);
733 chip->read_buf(mtd, p, eccsize); 439 chip->read_buf(mtd, p, eccsize);
@@ -738,35 +444,31 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
738 group++; 444 group++;
739 445
740 /* 446 /*
741 * length is intentionally kept a higher multiple of 2 447 * length is intentionally kept a higher multiple of 2
742 * to read at least 13 bytes even in case of 16 bit NAND 448 * to read at least 13 bytes even in case of 16 bit NAND
743 * devices 449 * devices
744 */ 450 */
745 if (chip->options & NAND_BUSWIDTH_16) 451 len = roundup(len, 2);
746 len = roundup(len, 2);
747
748 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); 452 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
749 chip->read_buf(mtd, oob + j, len); 453 chip->read_buf(mtd, oob + j, len);
750 j += len; 454 j += len;
751 } 455 }
752 456
753 memcpy(&ecc_code[i], oob, chip->ecc.bytes); 457 memcpy(&ecc_code[i], oob, 13);
754 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 458 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
755 459
756 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 460 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
757 if (stat < 0) { 461 if (stat < 0)
758 mtd->ecc_stats.failed++; 462 mtd->ecc_stats.failed++;
759 } else { 463 else
760 mtd->ecc_stats.corrected += stat; 464 mtd->ecc_stats.corrected += stat;
761 max_bitflips = max_t(unsigned int, max_bitflips, stat);
762 }
763 } 465 }
764 466
765 return max_bitflips; 467 return 0;
766} 468}
767 469
768/* 470/*
769 * fsmc_bch8_correct_data 471 * fsmc_correct_data
770 * @mtd: mtd info structure 472 * @mtd: mtd info structure
771 * @dat: buffer of read data 473 * @dat: buffer of read data
772 * @read_ecc: ecc read from device spare area 474 * @read_ecc: ecc read from device spare area
@@ -775,51 +477,19 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
775 * calc_ecc is a 104 bit information containing maximum of 8 error 477 * calc_ecc is a 104 bit information containing maximum of 8 error
776 * offset informations of 13 bits each in 512 bytes of read data. 478 * offset informations of 13 bits each in 512 bytes of read data.
777 */ 479 */
778static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat, 480static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
779 uint8_t *read_ecc, uint8_t *calc_ecc) 481 uint8_t *read_ecc, uint8_t *calc_ecc)
780{ 482{
781 struct fsmc_nand_data *host = container_of(mtd, 483 struct fsmc_nand_data *host = container_of(mtd,
782 struct fsmc_nand_data, mtd); 484 struct fsmc_nand_data, mtd);
783 struct nand_chip *chip = mtd->priv; 485 struct fsmc_regs *regs = host->regs_va;
784 void __iomem *regs = host->regs_va;
785 unsigned int bank = host->bank; 486 unsigned int bank = host->bank;
786 uint32_t err_idx[8]; 487 uint16_t err_idx[8];
488 uint64_t ecc_data[2];
787 uint32_t num_err, i; 489 uint32_t num_err, i;
788 uint32_t ecc1, ecc2, ecc3, ecc4;
789
790 num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
791
792 /* no bit flipping */
793 if (likely(num_err == 0))
794 return 0;
795 490
796 /* too many errors */ 491 /* The calculated ecc is actually the correction index in data */
797 if (unlikely(num_err > 8)) { 492 memcpy(ecc_data, calc_ecc, 13);
798 /*
799 * This is a temporary erase check. A newly erased page read
800 * would result in an ecc error because the oob data is also
801 * erased to FF and the calculated ecc for an FF data is not
802 * FF..FF.
803 * This is a workaround to skip performing correction in case
804 * data is FF..FF
805 *
806 * Logic:
807 * For every page, each bit written as 0 is counted until these
808 * number of bits are greater than 8 (the maximum correction
809 * capability of FSMC for each 512 + 13 bytes)
810 */
811
812 int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
813 int bits_data = count_written_bits(dat, chip->ecc.size, 8);
814
815 if ((bits_ecc + bits_data) <= 8) {
816 if (bits_data)
817 memset(dat, 0xff, chip->ecc.size);
818 return bits_data;
819 }
820
821 return -EBADMSG;
822 }
823 493
824 /* 494 /*
825 * ------------------- calc_ecc[] bit wise -----------|--13 bits--| 495 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
@@ -830,26 +500,27 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
830 * uint64_t array and error offset indexes are populated in err_idx 500 * uint64_t array and error offset indexes are populated in err_idx
831 * array 501 * array
832 */ 502 */
833 ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); 503 for (i = 0; i < 8; i++) {
834 ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2)); 504 if (i == 4) {
835 ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3)); 505 err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0];
836 ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS)); 506 ecc_data[1] >>= 1;
837 507 continue;
838 err_idx[0] = (ecc1 >> 0) & 0x1FFF; 508 }
839 err_idx[1] = (ecc1 >> 13) & 0x1FFF; 509 err_idx[i] = (ecc_data[i/4] & 0x1FFF);
840 err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F); 510 ecc_data[i/4] >>= 13;
841 err_idx[3] = (ecc2 >> 7) & 0x1FFF; 511 }
842 err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF); 512
843 err_idx[5] = (ecc3 >> 1) & 0x1FFF; 513 num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF;
844 err_idx[6] = (ecc3 >> 14) & 0x1FFF; 514
845 err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F); 515 if (num_err == 0xF)
516 return -EBADMSG;
846 517
847 i = 0; 518 i = 0;
848 while (num_err--) { 519 while (num_err--) {
849 change_bit(0, (unsigned long *)&err_idx[i]); 520 change_bit(0, (unsigned long *)&err_idx[i]);
850 change_bit(1, (unsigned long *)&err_idx[i]); 521 change_bit(1, (unsigned long *)&err_idx[i]);
851 522
852 if (err_idx[i] < chip->ecc.size * 8) { 523 if (err_idx[i] <= 512 * 8) {
853 change_bit(err_idx[i], (unsigned long *)dat); 524 change_bit(err_idx[i], (unsigned long *)dat);
854 i++; 525 i++;
855 } 526 }
@@ -857,42 +528,6 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
857 return i; 528 return i;
858} 529}
859 530
860static bool filter(struct dma_chan *chan, void *slave)
861{
862 chan->private = slave;
863 return true;
864}
865
866#ifdef CONFIG_OF
867static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
868 struct device_node *np)
869{
870 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
871 u32 val;
872
873 /* Set default NAND width to 8 bits */
874 pdata->width = 8;
875 if (!of_property_read_u32(np, "bank-width", &val)) {
876 if (val == 2) {
877 pdata->width = 16;
878 } else if (val != 1) {
879 dev_err(&pdev->dev, "invalid bank-width %u\n", val);
880 return -EINVAL;
881 }
882 }
883 if (of_get_property(np, "nand-skip-bbtscan", NULL))
884 pdata->options = NAND_SKIP_BBTSCAN;
885
886 return 0;
887}
888#else
889static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
890 struct device_node *np)
891{
892 return -ENOSYS;
893}
894#endif
895
896/* 531/*
897 * fsmc_nand_probe - Probe function 532 * fsmc_nand_probe - Probe function
898 * @pdev: platform device structure 533 * @pdev: platform device structure
@@ -900,89 +535,102 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
900static int __init fsmc_nand_probe(struct platform_device *pdev) 535static int __init fsmc_nand_probe(struct platform_device *pdev)
901{ 536{
902 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 537 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
903 struct device_node __maybe_unused *np = pdev->dev.of_node;
904 struct mtd_part_parser_data ppdata = {};
905 struct fsmc_nand_data *host; 538 struct fsmc_nand_data *host;
906 struct mtd_info *mtd; 539 struct mtd_info *mtd;
907 struct nand_chip *nand; 540 struct nand_chip *nand;
541 struct fsmc_regs *regs;
908 struct resource *res; 542 struct resource *res;
909 dma_cap_mask_t mask;
910 int ret = 0; 543 int ret = 0;
911 u32 pid; 544 u32 pid;
912 int i; 545 int i;
913 546
914 if (np) {
915 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
916 pdev->dev.platform_data = pdata;
917 ret = fsmc_nand_probe_config_dt(pdev, np);
918 if (ret) {
919 dev_err(&pdev->dev, "no platform data\n");
920 return -ENODEV;
921 }
922 }
923
924 if (!pdata) { 547 if (!pdata) {
925 dev_err(&pdev->dev, "platform data is NULL\n"); 548 dev_err(&pdev->dev, "platform data is NULL\n");
926 return -EINVAL; 549 return -EINVAL;
927 } 550 }
928 551
929 /* Allocate memory for the device structure (and zero it) */ 552 /* Allocate memory for the device structure (and zero it) */
930 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); 553 host = kzalloc(sizeof(*host), GFP_KERNEL);
931 if (!host) { 554 if (!host) {
932 dev_err(&pdev->dev, "failed to allocate device structure\n"); 555 dev_err(&pdev->dev, "failed to allocate device structure\n");
933 return -ENOMEM; 556 return -ENOMEM;
934 } 557 }
935 558
936 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 559 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
937 if (!res) 560 if (!res) {
938 return -EINVAL; 561 ret = -EIO;
562 goto err_probe1;
563 }
939 564
940 host->data_va = devm_request_and_ioremap(&pdev->dev, res); 565 host->resdata = request_mem_region(res->start, resource_size(res),
566 pdev->name);
567 if (!host->resdata) {
568 ret = -EIO;
569 goto err_probe1;
570 }
571
572 host->data_va = ioremap(res->start, resource_size(res));
941 if (!host->data_va) { 573 if (!host->data_va) {
942 dev_err(&pdev->dev, "data ioremap failed\n"); 574 ret = -EIO;
943 return -ENOMEM; 575 goto err_probe1;
944 } 576 }
945 host->data_pa = (dma_addr_t)res->start;
946 577
947 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); 578 host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE,
948 if (!res) 579 resource_size(res), pdev->name);
949 return -EINVAL; 580 if (!host->resaddr) {
581 ret = -EIO;
582 goto err_probe1;
583 }
950 584
951 host->addr_va = devm_request_and_ioremap(&pdev->dev, res); 585 host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res));
952 if (!host->addr_va) { 586 if (!host->addr_va) {
953 dev_err(&pdev->dev, "ale ioremap failed\n"); 587 ret = -EIO;
954 return -ENOMEM; 588 goto err_probe1;
955 } 589 }
956 590
957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 591 host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE,
958 if (!res) 592 resource_size(res), pdev->name);
959 return -EINVAL; 593 if (!host->rescmd) {
594 ret = -EIO;
595 goto err_probe1;
596 }
960 597
961 host->cmd_va = devm_request_and_ioremap(&pdev->dev, res); 598 host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res));
962 if (!host->cmd_va) { 599 if (!host->cmd_va) {
963 dev_err(&pdev->dev, "ale ioremap failed\n"); 600 ret = -EIO;
964 return -ENOMEM; 601 goto err_probe1;
965 } 602 }
966 603
967 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs"); 604 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
968 if (!res) 605 if (!res) {
969 return -EINVAL; 606 ret = -EIO;
607 goto err_probe1;
608 }
609
610 host->resregs = request_mem_region(res->start, resource_size(res),
611 pdev->name);
612 if (!host->resregs) {
613 ret = -EIO;
614 goto err_probe1;
615 }
970 616
971 host->regs_va = devm_request_and_ioremap(&pdev->dev, res); 617 host->regs_va = ioremap(res->start, resource_size(res));
972 if (!host->regs_va) { 618 if (!host->regs_va) {
973 dev_err(&pdev->dev, "regs ioremap failed\n"); 619 ret = -EIO;
974 return -ENOMEM; 620 goto err_probe1;
975 } 621 }
976 622
977 host->clk = clk_get(&pdev->dev, NULL); 623 host->clk = clk_get(&pdev->dev, NULL);
978 if (IS_ERR(host->clk)) { 624 if (IS_ERR(host->clk)) {
979 dev_err(&pdev->dev, "failed to fetch block clock\n"); 625 dev_err(&pdev->dev, "failed to fetch block clock\n");
980 return PTR_ERR(host->clk); 626 ret = PTR_ERR(host->clk);
627 host->clk = NULL;
628 goto err_probe1;
981 } 629 }
982 630
983 ret = clk_prepare_enable(host->clk); 631 ret = clk_enable(host->clk);
984 if (ret) 632 if (ret)
985 goto err_clk_prepare_enable; 633 goto err_probe1;
986 634
987 /* 635 /*
988 * This device ID is actually a common AMBA ID as used on the 636 * This device ID is actually a common AMBA ID as used on the
@@ -998,14 +646,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
998 646
999 host->bank = pdata->bank; 647 host->bank = pdata->bank;
1000 host->select_chip = pdata->select_bank; 648 host->select_chip = pdata->select_bank;
1001 host->partitions = pdata->partitions; 649 regs = host->regs_va;
1002 host->nr_partitions = pdata->nr_partitions;
1003 host->dev = &pdev->dev;
1004 host->dev_timings = pdata->nand_timings;
1005 host->mode = pdata->mode;
1006
1007 if (host->mode == USE_DMA_ACCESS)
1008 init_completion(&host->dma_access_complete);
1009 650
1010 /* Link all private pointers */ 651 /* Link all private pointers */
1011 mtd = &host->mtd; 652 mtd = &host->mtd;
@@ -1024,53 +665,21 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
1024 nand->ecc.size = 512; 665 nand->ecc.size = 512;
1025 nand->options = pdata->options; 666 nand->options = pdata->options;
1026 nand->select_chip = fsmc_select_chip; 667 nand->select_chip = fsmc_select_chip;
1027 nand->badblockbits = 7;
1028 668
1029 if (pdata->width == FSMC_NAND_BW16) 669 if (pdata->width == FSMC_NAND_BW16)
1030 nand->options |= NAND_BUSWIDTH_16; 670 nand->options |= NAND_BUSWIDTH_16;
1031 671
1032 switch (host->mode) { 672 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
1033 case USE_DMA_ACCESS:
1034 dma_cap_zero(mask);
1035 dma_cap_set(DMA_MEMCPY, mask);
1036 host->read_dma_chan = dma_request_channel(mask, filter,
1037 pdata->read_dma_priv);
1038 if (!host->read_dma_chan) {
1039 dev_err(&pdev->dev, "Unable to get read dma channel\n");
1040 goto err_req_read_chnl;
1041 }
1042 host->write_dma_chan = dma_request_channel(mask, filter,
1043 pdata->write_dma_priv);
1044 if (!host->write_dma_chan) {
1045 dev_err(&pdev->dev, "Unable to get write dma channel\n");
1046 goto err_req_write_chnl;
1047 }
1048 nand->read_buf = fsmc_read_buf_dma;
1049 nand->write_buf = fsmc_write_buf_dma;
1050 break;
1051
1052 default:
1053 case USE_WORD_ACCESS:
1054 nand->read_buf = fsmc_read_buf;
1055 nand->write_buf = fsmc_write_buf;
1056 break;
1057 }
1058
1059 fsmc_nand_setup(host->regs_va, host->bank,
1060 nand->options & NAND_BUSWIDTH_16,
1061 host->dev_timings);
1062 673
1063 if (AMBA_REV_BITS(host->pid) >= 8) { 674 if (AMBA_REV_BITS(host->pid) >= 8) {
1064 nand->ecc.read_page = fsmc_read_page_hwecc; 675 nand->ecc.read_page = fsmc_read_page_hwecc;
1065 nand->ecc.calculate = fsmc_read_hwecc_ecc4; 676 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
1066 nand->ecc.correct = fsmc_bch8_correct_data; 677 nand->ecc.correct = fsmc_correct_data;
1067 nand->ecc.bytes = 13; 678 nand->ecc.bytes = 13;
1068 nand->ecc.strength = 8;
1069 } else { 679 } else {
1070 nand->ecc.calculate = fsmc_read_hwecc_ecc1; 680 nand->ecc.calculate = fsmc_read_hwecc_ecc1;
1071 nand->ecc.correct = nand_correct_data; 681 nand->ecc.correct = nand_correct_data;
1072 nand->ecc.bytes = 3; 682 nand->ecc.bytes = 3;
1073 nand->ecc.strength = 1;
1074 } 683 }
1075 684
1076 /* 685 /*
@@ -1079,52 +688,19 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
1079 if (nand_scan_ident(&host->mtd, 1, NULL)) { 688 if (nand_scan_ident(&host->mtd, 1, NULL)) {
1080 ret = -ENXIO; 689 ret = -ENXIO;
1081 dev_err(&pdev->dev, "No NAND Device found!\n"); 690 dev_err(&pdev->dev, "No NAND Device found!\n");
1082 goto err_scan_ident; 691 goto err_probe;
1083 } 692 }
1084 693
1085 if (AMBA_REV_BITS(host->pid) >= 8) { 694 if (AMBA_REV_BITS(host->pid) >= 8) {
1086 switch (host->mtd.oobsize) { 695 if (host->mtd.writesize == 512) {
1087 case 16: 696 nand->ecc.layout = &fsmc_ecc4_sp_layout;
1088 nand->ecc.layout = &fsmc_ecc4_16_layout;
1089 host->ecc_place = &fsmc_ecc4_sp_place; 697 host->ecc_place = &fsmc_ecc4_sp_place;
1090 break; 698 } else {
1091 case 64: 699 nand->ecc.layout = &fsmc_ecc4_lp_layout;
1092 nand->ecc.layout = &fsmc_ecc4_64_layout;
1093 host->ecc_place = &fsmc_ecc4_lp_place;
1094 break;
1095 case 128:
1096 nand->ecc.layout = &fsmc_ecc4_128_layout;
1097 host->ecc_place = &fsmc_ecc4_lp_place;
1098 break;
1099 case 224:
1100 nand->ecc.layout = &fsmc_ecc4_224_layout;
1101 host->ecc_place = &fsmc_ecc4_lp_place;
1102 break;
1103 case 256:
1104 nand->ecc.layout = &fsmc_ecc4_256_layout;
1105 host->ecc_place = &fsmc_ecc4_lp_place; 700 host->ecc_place = &fsmc_ecc4_lp_place;
1106 break;
1107 default:
1108 printk(KERN_WARNING "No oob scheme defined for "
1109 "oobsize %d\n", mtd->oobsize);
1110 BUG();
1111 } 701 }
1112 } else { 702 } else {
1113 switch (host->mtd.oobsize) { 703 nand->ecc.layout = &fsmc_ecc1_layout;
1114 case 16:
1115 nand->ecc.layout = &fsmc_ecc1_16_layout;
1116 break;
1117 case 64:
1118 nand->ecc.layout = &fsmc_ecc1_64_layout;
1119 break;
1120 case 128:
1121 nand->ecc.layout = &fsmc_ecc1_128_layout;
1122 break;
1123 default:
1124 printk(KERN_WARNING "No oob scheme defined for "
1125 "oobsize %d\n", mtd->oobsize);
1126 BUG();
1127 }
1128 } 704 }
1129 705
1130 /* Second stage of scan to fill MTD data-structures */ 706 /* Second stage of scan to fill MTD data-structures */
@@ -1140,13 +716,65 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
1140 * platform data, 716 * platform data,
1141 * default partition information present in driver. 717 * default partition information present in driver.
1142 */ 718 */
719#ifdef CONFIG_MTD_CMDLINE_PARTS
1143 /* 720 /*
1144 * Check for partition info passed 721 * Check if partition info passed via command line
1145 */ 722 */
1146 host->mtd.name = "nand"; 723 host->mtd.name = "nand";
1147 ppdata.of_node = np; 724 host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
1148 ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata, 725 &host->partitions, 0);
1149 host->partitions, host->nr_partitions); 726 if (host->nr_partitions <= 0) {
727#endif
728 /*
729 * Check if partition info passed via command line
730 */
731 if (pdata->partitions) {
732 host->partitions = pdata->partitions;
733 host->nr_partitions = pdata->nr_partitions;
734 } else {
735 struct mtd_partition *partition;
736 int i;
737
738 /* Select the default partitions info */
739 switch (host->mtd.size) {
740 case 0x01000000:
741 case 0x02000000:
742 case 0x04000000:
743 host->partitions = partition_info_16KB_blk;
744 host->nr_partitions =
745 sizeof(partition_info_16KB_blk) /
746 sizeof(struct mtd_partition);
747 break;
748 case 0x08000000:
749 case 0x10000000:
750 case 0x20000000:
751 case 0x40000000:
752 host->partitions = partition_info_128KB_blk;
753 host->nr_partitions =
754 sizeof(partition_info_128KB_blk) /
755 sizeof(struct mtd_partition);
756 break;
757 default:
758 ret = -ENXIO;
759 pr_err("Unsupported NAND size\n");
760 goto err_probe;
761 }
762
763 partition = host->partitions;
764 for (i = 0; i < host->nr_partitions; i++, partition++) {
765 if (partition->size == 0) {
766 partition->size = host->mtd.size -
767 partition->offset;
768 break;
769 }
770 }
771 }
772#ifdef CONFIG_MTD_CMDLINE_PARTS
773 }
774#endif
775
776 ret = mtd_device_register(&host->mtd, host->partitions,
777 host->nr_partitions);
1150 if (ret) 778 if (ret)
1151 goto err_probe; 779 goto err_probe;
1152 780
@@ -1155,16 +783,32 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
1155 return 0; 783 return 0;
1156 784
1157err_probe: 785err_probe:
1158err_scan_ident: 786 clk_disable(host->clk);
1159 if (host->mode == USE_DMA_ACCESS) 787err_probe1:
1160 dma_release_channel(host->write_dma_chan); 788 if (host->clk)
1161err_req_write_chnl: 789 clk_put(host->clk);
1162 if (host->mode == USE_DMA_ACCESS) 790 if (host->regs_va)
1163 dma_release_channel(host->read_dma_chan); 791 iounmap(host->regs_va);
1164err_req_read_chnl: 792 if (host->resregs)
1165 clk_disable_unprepare(host->clk); 793 release_mem_region(host->resregs->start,
1166err_clk_prepare_enable: 794 resource_size(host->resregs));
1167 clk_put(host->clk); 795 if (host->cmd_va)
796 iounmap(host->cmd_va);
797 if (host->rescmd)
798 release_mem_region(host->rescmd->start,
799 resource_size(host->rescmd));
800 if (host->addr_va)
801 iounmap(host->addr_va);
802 if (host->resaddr)
803 release_mem_region(host->resaddr->start,
804 resource_size(host->resaddr));
805 if (host->data_va)
806 iounmap(host->data_va);
807 if (host->resdata)
808 release_mem_region(host->resdata->start,
809 resource_size(host->resdata));
810
811 kfree(host);
1168 return ret; 812 return ret;
1169} 813}
1170 814
@@ -1178,16 +822,25 @@ static int fsmc_nand_remove(struct platform_device *pdev)
1178 platform_set_drvdata(pdev, NULL); 822 platform_set_drvdata(pdev, NULL);
1179 823
1180 if (host) { 824 if (host) {
1181 nand_release(&host->mtd); 825 mtd_device_unregister(&host->mtd);
1182 826 clk_disable(host->clk);
1183 if (host->mode == USE_DMA_ACCESS) {
1184 dma_release_channel(host->write_dma_chan);
1185 dma_release_channel(host->read_dma_chan);
1186 }
1187 clk_disable_unprepare(host->clk);
1188 clk_put(host->clk); 827 clk_put(host->clk);
1189 }
1190 828
829 iounmap(host->regs_va);
830 release_mem_region(host->resregs->start,
831 resource_size(host->resregs));
832 iounmap(host->cmd_va);
833 release_mem_region(host->rescmd->start,
834 resource_size(host->rescmd));
835 iounmap(host->addr_va);
836 release_mem_region(host->resaddr->start,
837 resource_size(host->resaddr));
838 iounmap(host->data_va);
839 release_mem_region(host->resdata->start,
840 resource_size(host->resdata));
841
842 kfree(host);
843 }
1191 return 0; 844 return 0;
1192} 845}
1193 846
@@ -1196,31 +849,22 @@ static int fsmc_nand_suspend(struct device *dev)
1196{ 849{
1197 struct fsmc_nand_data *host = dev_get_drvdata(dev); 850 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1198 if (host) 851 if (host)
1199 clk_disable_unprepare(host->clk); 852 clk_disable(host->clk);
1200 return 0; 853 return 0;
1201} 854}
1202 855
1203static int fsmc_nand_resume(struct device *dev) 856static int fsmc_nand_resume(struct device *dev)
1204{ 857{
1205 struct fsmc_nand_data *host = dev_get_drvdata(dev); 858 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1206 if (host) { 859 if (host)
1207 clk_prepare_enable(host->clk); 860 clk_enable(host->clk);
1208 fsmc_nand_setup(host->regs_va, host->bank,
1209 host->nand.options & NAND_BUSWIDTH_16,
1210 host->dev_timings);
1211 }
1212 return 0; 861 return 0;
1213} 862}
1214 863
1215static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume); 864static const struct dev_pm_ops fsmc_nand_pm_ops = {
1216#endif 865 .suspend = fsmc_nand_suspend,
1217 866 .resume = fsmc_nand_resume,
1218#ifdef CONFIG_OF
1219static const struct of_device_id fsmc_nand_id_table[] = {
1220 { .compatible = "st,spear600-fsmc-nand" },
1221 {}
1222}; 867};
1223MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
1224#endif 868#endif
1225 869
1226static struct platform_driver fsmc_nand_driver = { 870static struct platform_driver fsmc_nand_driver = {
@@ -1228,7 +872,6 @@ static struct platform_driver fsmc_nand_driver = {
1228 .driver = { 872 .driver = {
1229 .owner = THIS_MODULE, 873 .owner = THIS_MODULE,
1230 .name = "fsmc-nand", 874 .name = "fsmc-nand",
1231 .of_match_table = of_match_ptr(fsmc_nand_id_table),
1232#ifdef CONFIG_PM 875#ifdef CONFIG_PM
1233 .pm = &fsmc_nand_pm_ops, 876 .pm = &fsmc_nand_pm_ops,
1234#endif 877#endif
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index e789e3f5171..2c2060b2800 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -27,9 +27,6 @@
27#include <linux/mtd/nand.h> 27#include <linux/mtd/nand.h>
28#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
29#include <linux/mtd/nand-gpio.h> 29#include <linux/mtd/nand-gpio.h>
30#include <linux/of.h>
31#include <linux/of_address.h>
32#include <linux/of_gpio.h>
33 30
34struct gpiomtd { 31struct gpiomtd {
35 void __iomem *io_sync; 32 void __iomem *io_sync;
@@ -90,14 +87,31 @@ static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
90{ 87{
91 struct nand_chip *this = mtd->priv; 88 struct nand_chip *this = mtd->priv;
92 89
93 iowrite8_rep(this->IO_ADDR_W, buf, len); 90 writesb(this->IO_ADDR_W, buf, len);
94} 91}
95 92
96static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len) 93static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
97{ 94{
98 struct nand_chip *this = mtd->priv; 95 struct nand_chip *this = mtd->priv;
99 96
100 ioread8_rep(this->IO_ADDR_R, buf, len); 97 readsb(this->IO_ADDR_R, buf, len);
98}
99
100static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
101{
102 struct nand_chip *this = mtd->priv;
103 unsigned char read, *p = (unsigned char *) buf;
104 int i, err = 0;
105
106 for (i = 0; i < len; i++) {
107 read = readb(this->IO_ADDR_R);
108 if (read != p[i]) {
109 pr_debug("%s: err at %d (read %04x vs %04x)\n",
110 __func__, i, read, p[i]);
111 err = -EFAULT;
112 }
113 }
114 return err;
101} 115}
102 116
103static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf, 117static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
@@ -106,7 +120,7 @@ static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
106 struct nand_chip *this = mtd->priv; 120 struct nand_chip *this = mtd->priv;
107 121
108 if (IS_ALIGNED((unsigned long)buf, 2)) { 122 if (IS_ALIGNED((unsigned long)buf, 2)) {
109 iowrite16_rep(this->IO_ADDR_W, buf, len>>1); 123 writesw(this->IO_ADDR_W, buf, len>>1);
110 } else { 124 } else {
111 int i; 125 int i;
112 unsigned short *ptr = (unsigned short *)buf; 126 unsigned short *ptr = (unsigned short *)buf;
@@ -121,7 +135,7 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
121 struct nand_chip *this = mtd->priv; 135 struct nand_chip *this = mtd->priv;
122 136
123 if (IS_ALIGNED((unsigned long)buf, 2)) { 137 if (IS_ALIGNED((unsigned long)buf, 2)) {
124 ioread16_rep(this->IO_ADDR_R, buf, len>>1); 138 readsw(this->IO_ADDR_R, buf, len>>1);
125 } else { 139 } else {
126 int i; 140 int i;
127 unsigned short *ptr = (unsigned short *)buf; 141 unsigned short *ptr = (unsigned short *)buf;
@@ -131,114 +145,40 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
131 } 145 }
132} 146}
133 147
134static int gpio_nand_devready(struct mtd_info *mtd) 148static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
135{ 149 int len)
136 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
137
138 if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
139 return gpio_get_value(gpiomtd->plat.gpio_rdy);
140
141 return 1;
142}
143
144#ifdef CONFIG_OF
145static const struct of_device_id gpio_nand_id_table[] = {
146 { .compatible = "gpio-control-nand" },
147 {}
148};
149MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
150
151static int gpio_nand_get_config_of(const struct device *dev,
152 struct gpio_nand_platdata *plat)
153{ 150{
154 u32 val; 151 struct nand_chip *this = mtd->priv;
155 152 unsigned short read, *p = (unsigned short *) buf;
156 if (!of_property_read_u32(dev->of_node, "bank-width", &val)) { 153 int i, err = 0;
157 if (val == 2) { 154 len >>= 1;
158 plat->options |= NAND_BUSWIDTH_16; 155
159 } else if (val != 1) { 156 for (i = 0; i < len; i++) {
160 dev_err(dev, "invalid bank-width %u\n", val); 157 read = readw(this->IO_ADDR_R);
161 return -EINVAL; 158 if (read != p[i]) {
159 pr_debug("%s: err at %d (read %04x vs %04x)\n",
160 __func__, i, read, p[i]);
161 err = -EFAULT;
162 } 162 }
163 } 163 }
164 164 return err;
165 plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
166 plat->gpio_nce = of_get_gpio(dev->of_node, 1);
167 plat->gpio_ale = of_get_gpio(dev->of_node, 2);
168 plat->gpio_cle = of_get_gpio(dev->of_node, 3);
169 plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
170
171 if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
172 plat->chip_delay = val;
173
174 return 0;
175} 165}
176 166
177static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
178{
179 struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
180 u64 addr;
181
182 if (!r || of_property_read_u64(pdev->dev.of_node,
183 "gpio-control-nand,io-sync-reg", &addr))
184 return NULL;
185
186 r->start = addr;
187 r->end = r->start + 0x3;
188 r->flags = IORESOURCE_MEM;
189
190 return r;
191}
192#else /* CONFIG_OF */
193#define gpio_nand_id_table NULL
194static inline int gpio_nand_get_config_of(const struct device *dev,
195 struct gpio_nand_platdata *plat)
196{
197 return -ENOSYS;
198}
199 167
200static inline struct resource * 168static int gpio_nand_devready(struct mtd_info *mtd)
201gpio_nand_get_io_sync_of(struct platform_device *pdev)
202{
203 return NULL;
204}
205#endif /* CONFIG_OF */
206
207static inline int gpio_nand_get_config(const struct device *dev,
208 struct gpio_nand_platdata *plat)
209{
210 int ret = gpio_nand_get_config_of(dev, plat);
211
212 if (!ret)
213 return ret;
214
215 if (dev->platform_data) {
216 memcpy(plat, dev->platform_data, sizeof(*plat));
217 return 0;
218 }
219
220 return -EINVAL;
221}
222
223static inline struct resource *
224gpio_nand_get_io_sync(struct platform_device *pdev)
225{ 169{
226 struct resource *r = gpio_nand_get_io_sync_of(pdev); 170 struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
227 171 return gpio_get_value(gpiomtd->plat.gpio_rdy);
228 if (r)
229 return r;
230
231 return platform_get_resource(pdev, IORESOURCE_MEM, 1);
232} 172}
233 173
234static int gpio_nand_remove(struct platform_device *dev) 174static int __devexit gpio_nand_remove(struct platform_device *dev)
235{ 175{
236 struct gpiomtd *gpiomtd = platform_get_drvdata(dev); 176 struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
237 struct resource *res; 177 struct resource *res;
238 178
239 nand_release(&gpiomtd->mtd_info); 179 nand_release(&gpiomtd->mtd_info);
240 180
241 res = gpio_nand_get_io_sync(dev); 181 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
242 iounmap(gpiomtd->io_sync); 182 iounmap(gpiomtd->io_sync);
243 if (res) 183 if (res)
244 release_mem_region(res->start, resource_size(res)); 184 release_mem_region(res->start, resource_size(res));
@@ -256,8 +196,7 @@ static int gpio_nand_remove(struct platform_device *dev)
256 gpio_free(gpiomtd->plat.gpio_nce); 196 gpio_free(gpiomtd->plat.gpio_nce);
257 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 197 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
258 gpio_free(gpiomtd->plat.gpio_nwp); 198 gpio_free(gpiomtd->plat.gpio_nwp);
259 if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) 199 gpio_free(gpiomtd->plat.gpio_rdy);
260 gpio_free(gpiomtd->plat.gpio_rdy);
261 200
262 kfree(gpiomtd); 201 kfree(gpiomtd);
263 202
@@ -282,15 +221,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
282 return ptr; 221 return ptr;
283} 222}
284 223
285static int gpio_nand_probe(struct platform_device *dev) 224static int __devinit gpio_nand_probe(struct platform_device *dev)
286{ 225{
287 struct gpiomtd *gpiomtd; 226 struct gpiomtd *gpiomtd;
288 struct nand_chip *this; 227 struct nand_chip *this;
289 struct resource *res0, *res1; 228 struct resource *res0, *res1;
290 struct mtd_part_parser_data ppdata = {}; 229 int ret;
291 int ret = 0;
292 230
293 if (!dev->dev.of_node && !dev->dev.platform_data) 231 if (!dev->dev.platform_data)
294 return -EINVAL; 232 return -EINVAL;
295 233
296 res0 = platform_get_resource(dev, IORESOURCE_MEM, 0); 234 res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -310,7 +248,7 @@ static int gpio_nand_probe(struct platform_device *dev)
310 goto err_map; 248 goto err_map;
311 } 249 }
312 250
313 res1 = gpio_nand_get_io_sync(dev); 251 res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
314 if (res1) { 252 if (res1) {
315 gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret); 253 gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
316 if (!gpiomtd->io_sync) { 254 if (!gpiomtd->io_sync) {
@@ -319,9 +257,7 @@ static int gpio_nand_probe(struct platform_device *dev)
319 } 257 }
320 } 258 }
321 259
322 ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat); 260 memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
323 if (ret)
324 goto err_nce;
325 261
326 ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE"); 262 ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
327 if (ret) 263 if (ret)
@@ -341,12 +277,10 @@ static int gpio_nand_probe(struct platform_device *dev)
341 if (ret) 277 if (ret)
342 goto err_cle; 278 goto err_cle;
343 gpio_direction_output(gpiomtd->plat.gpio_cle, 0); 279 gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
344 if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) { 280 ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
345 ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY"); 281 if (ret)
346 if (ret) 282 goto err_rdy;
347 goto err_rdy; 283 gpio_direction_input(gpiomtd->plat.gpio_rdy);
348 gpio_direction_input(gpiomtd->plat.gpio_rdy);
349 }
350 284
351 285
352 this->IO_ADDR_W = this->IO_ADDR_R; 286 this->IO_ADDR_W = this->IO_ADDR_R;
@@ -361,9 +295,11 @@ static int gpio_nand_probe(struct platform_device *dev)
361 if (this->options & NAND_BUSWIDTH_16) { 295 if (this->options & NAND_BUSWIDTH_16) {
362 this->read_buf = gpio_nand_readbuf16; 296 this->read_buf = gpio_nand_readbuf16;
363 this->write_buf = gpio_nand_writebuf16; 297 this->write_buf = gpio_nand_writebuf16;
298 this->verify_buf = gpio_nand_verifybuf16;
364 } else { 299 } else {
365 this->read_buf = gpio_nand_readbuf; 300 this->read_buf = gpio_nand_readbuf;
366 this->write_buf = gpio_nand_writebuf; 301 this->write_buf = gpio_nand_writebuf;
302 this->verify_buf = gpio_nand_verifybuf;
367 } 303 }
368 304
369 /* set the mtd private data for the nand driver */ 305 /* set the mtd private data for the nand driver */
@@ -380,12 +316,8 @@ static int gpio_nand_probe(struct platform_device *dev)
380 gpiomtd->plat.adjust_parts(&gpiomtd->plat, 316 gpiomtd->plat.adjust_parts(&gpiomtd->plat,
381 gpiomtd->mtd_info.size); 317 gpiomtd->mtd_info.size);
382 318
383 ppdata.of_node = dev->dev.of_node; 319 mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
384 ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata, 320 gpiomtd->plat.num_parts);
385 gpiomtd->plat.parts,
386 gpiomtd->plat.num_parts);
387 if (ret)
388 goto err_wp;
389 platform_set_drvdata(dev, gpiomtd); 321 platform_set_drvdata(dev, gpiomtd);
390 322
391 return 0; 323 return 0;
@@ -393,8 +325,7 @@ static int gpio_nand_probe(struct platform_device *dev)
393err_wp: 325err_wp:
394 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 326 if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
395 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); 327 gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
396 if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) 328 gpio_free(gpiomtd->plat.gpio_rdy);
397 gpio_free(gpiomtd->plat.gpio_rdy);
398err_rdy: 329err_rdy:
399 gpio_free(gpiomtd->plat.gpio_cle); 330 gpio_free(gpiomtd->plat.gpio_cle);
400err_cle: 331err_cle:
@@ -421,11 +352,23 @@ static struct platform_driver gpio_nand_driver = {
421 .remove = gpio_nand_remove, 352 .remove = gpio_nand_remove,
422 .driver = { 353 .driver = {
423 .name = "gpio-nand", 354 .name = "gpio-nand",
424 .of_match_table = gpio_nand_id_table,
425 }, 355 },
426}; 356};
427 357
428module_platform_driver(gpio_nand_driver); 358static int __init gpio_nand_init(void)
359{
360 printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
361
362 return platform_driver_register(&gpio_nand_driver);
363}
364
365static void __exit gpio_nand_exit(void)
366{
367 platform_driver_unregister(&gpio_nand_driver);
368}
369
370module_init(gpio_nand_init);
371module_exit(gpio_nand_exit);
429 372
430MODULE_LICENSE("GPL"); 373MODULE_LICENSE("GPL");
431MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 374MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
deleted file mode 100644
index 3a462487c35..00000000000
--- a/drivers/mtd/nand/gpmi-nand/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
2gpmi_nand-objs += gpmi-nand.o
3gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
deleted file mode 100644
index a0924515c39..00000000000
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#ifndef __GPMI_NAND_BCH_REGS_H
22#define __GPMI_NAND_BCH_REGS_H
23
24#define HW_BCH_CTRL 0x00000000
25#define HW_BCH_CTRL_SET 0x00000004
26#define HW_BCH_CTRL_CLR 0x00000008
27#define HW_BCH_CTRL_TOG 0x0000000c
28
29#define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8)
30#define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0)
31
32#define HW_BCH_STATUS0 0x00000010
33#define HW_BCH_MODE 0x00000020
34#define HW_BCH_ENCODEPTR 0x00000030
35#define HW_BCH_DATAPTR 0x00000040
36#define HW_BCH_METAPTR 0x00000050
37#define HW_BCH_LAYOUTSELECT 0x00000070
38
39#define HW_BCH_FLASH0LAYOUT0 0x00000080
40
41#define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24
42#define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
43#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \
44 (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
45
46#define BP_BCH_FLASH0LAYOUT0_META_SIZE 16
47#define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
48#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \
49 (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
50 & BM_BCH_FLASH0LAYOUT0_META_SIZE)
51
52#define BP_BCH_FLASH0LAYOUT0_ECC0 12
53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
54#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
55#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
56#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
57 (GPMI_IS_MX6Q(x) \
58 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
59 & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
60 : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
61 & BM_BCH_FLASH0LAYOUT0_ECC0) \
62 )
63
64#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
65#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
66 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
67#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
68 (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
69#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
70 (GPMI_IS_MX6Q(x) \
71 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
72 : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
73 )
74
75#define HW_BCH_FLASH0LAYOUT1 0x00000090
76
77#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16
78#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \
79 (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
80#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \
81 (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
82 & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
83
84#define BP_BCH_FLASH0LAYOUT1_ECCN 12
85#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
86#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
87#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
88#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
89 (GPMI_IS_MX6Q(x) \
90 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
91 & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
92 : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
93 & BM_BCH_FLASH0LAYOUT1_ECCN) \
94 )
95
96#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
97#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
98 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
99#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
100 (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
101#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
102 (GPMI_IS_MX6Q(x) \
103 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
104 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
105 )
106#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
deleted file mode 100644
index d84699c7968..00000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ /dev/null
@@ -1,1337 +0,0 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#include <linux/delay.h>
22#include <linux/clk.h>
23
24#include "gpmi-nand.h"
25#include "gpmi-regs.h"
26#include "bch-regs.h"
27
/*
 * Default NFC timing limits.  The maximum data-setup and sample-delay
 * values are derived from the widths of the corresponding register
 * fields (mask >> position yields the all-ones field value); the DLL
 * limits are fixed hardware characteristics in nanoseconds.
 */
static struct timing_threshod timing_default_threshold = {
	.max_data_setup_cycles       = (BM_GPMI_TIMING0_DATA_SETUP >>
						BP_GPMI_TIMING0_DATA_SETUP),
	.internal_data_setup_in_ns   = 0,
	.max_sample_delay_factor     = (BM_GPMI_CTRL1_RDN_DELAY >>
						BP_GPMI_CTRL1_RDN_DELAY),
	.max_dll_clock_period_in_ns  = 32,
	.max_dll_delay_in_ns         = 16,
};

/*
 * Offsets of the hardware "set" and "clear" shadow registers relative to
 * a register's base address (write a mask there to set/clear those bits).
 */
#define MXS_SET_ADDR		0x4
#define MXS_CLR_ADDR		0x8
40/*
41 * Clear the bit and poll it cleared. This is usually called with
42 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
43 * (bit 30).
44 */
45static int clear_poll_bit(void __iomem *addr, u32 mask)
46{
47 int timeout = 0x400;
48
49 /* clear the bit */
50 writel(mask, addr + MXS_CLR_ADDR);
51
52 /*
53 * SFTRST needs 3 GPMI clocks to settle, the reference manual
54 * recommends to wait 1us.
55 */
56 udelay(1);
57
58 /* poll the bit becoming clear */
59 while ((readl(addr) & mask) && --timeout)
60 /* nothing */;
61
62 return !timeout;
63}
64
/* Soft-reset (bit 31) and clock-gate (bit 30) bits of a block's CTRL register. */
#define MODULE_CLKGATE		(1 << 30)
#define MODULE_SFTRST		(1 << 31)
/*
 * The current mxs_reset_block() will do two things:
 * [1] enable the module.
 * [2] reset the module.
 *
 * In most of the cases, it's ok.
 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 * If you try to soft reset the BCH block, it becomes unusable until
 * the next hard reset. This case occurs in the NAND boot mode. When the board
 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
 * So If the driver tries to reset the BCH again, the BCH will not work anymore.
 * You will see a DMA timeout in this case. The bug has been fixed
 * in the following chips, such as MX28.
 *
 * To avoid this bug, just add a new parameter `just_enable` for
 * the mxs_reset_block(), and rewrite it here.
 *
 * Returns 0 on success, -ETIMEDOUT if any poll step times out.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE so the block is clocked */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set (hardware gates the clock on reset) */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST to bring the block back out of reset */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE to re-enable the clock */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
125
126static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
127{
128 struct clk *clk;
129 int ret;
130 int i;
131
132 for (i = 0; i < GPMI_CLK_MAX; i++) {
133 clk = this->resources.clock[i];
134 if (!clk)
135 break;
136
137 if (v) {
138 ret = clk_prepare_enable(clk);
139 if (ret)
140 goto err_clk;
141 } else {
142 clk_disable_unprepare(clk);
143 }
144 }
145 return 0;
146
147err_clk:
148 for (; i > 0; i--)
149 clk_disable_unprepare(this->resources.clock[i - 1]);
150 return ret;
151}
152
153#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
154#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
155
156int gpmi_init(struct gpmi_nand_data *this)
157{
158 struct resources *r = &this->resources;
159 int ret;
160
161 ret = gpmi_enable_clk(this);
162 if (ret)
163 goto err_out;
164 ret = gpmi_reset_block(r->gpmi_regs, false);
165 if (ret)
166 goto err_out;
167
168 /*
169 * Reset BCH here, too. We got failures otherwise :(
170 * See later BCH reset for explanation of MX23 handling
171 */
172 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
173 if (ret)
174 goto err_out;
175
176
177 /* Choose NAND mode. */
178 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
179
180 /* Set the IRQ polarity. */
181 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
182 r->gpmi_regs + HW_GPMI_CTRL1_SET);
183
184 /* Disable Write-Protection. */
185 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
186
187 /* Select BCH ECC. */
188 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
189
190 gpmi_disable_clk(this);
191 return 0;
192err_out:
193 return ret;
194}
195
196/* This function is very useful. It is called only when the bug occur. */
197void gpmi_dump_info(struct gpmi_nand_data *this)
198{
199 struct resources *r = &this->resources;
200 struct bch_geometry *geo = &this->bch_geometry;
201 u32 reg;
202 int i;
203
204 pr_err("Show GPMI registers :\n");
205 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
206 reg = readl(r->gpmi_regs + i * 0x10);
207 pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
208 }
209
210 /* start to print out the BCH info */
211 pr_err("BCH Geometry :\n");
212 pr_err("GF length : %u\n", geo->gf_len);
213 pr_err("ECC Strength : %u\n", geo->ecc_strength);
214 pr_err("Page Size in Bytes : %u\n", geo->page_size);
215 pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
216 pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
217 pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
218 pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
219 pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
220 pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
221 pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
222 pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
223}
224
225/* Configures the geometry for BCH. */
226int bch_set_geometry(struct gpmi_nand_data *this)
227{
228 struct resources *r = &this->resources;
229 struct bch_geometry *bch_geo = &this->bch_geometry;
230 unsigned int block_count;
231 unsigned int block_size;
232 unsigned int metadata_size;
233 unsigned int ecc_strength;
234 unsigned int page_size;
235 int ret;
236
237 if (common_nfc_set_geometry(this))
238 return !0;
239
240 block_count = bch_geo->ecc_chunk_count - 1;
241 block_size = bch_geo->ecc_chunk_size;
242 metadata_size = bch_geo->metadata_size;
243 ecc_strength = bch_geo->ecc_strength >> 1;
244 page_size = bch_geo->page_size;
245
246 ret = gpmi_enable_clk(this);
247 if (ret)
248 goto err_out;
249
250 /*
251 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
252 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
253 * On the other hand, the MX28 needs the reset, because one case has been
254 * seen where the BCH produced ECC errors constantly after 10000
255 * consecutive reboots. The latter case has not been seen on the MX23 yet,
256 * still we don't know if it could happen there as well.
257 */
258 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
259 if (ret)
260 goto err_out;
261
262 /* Configure layout 0. */
263 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
264 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
265 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
266 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
267 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
268
269 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
270 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
271 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
272 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
273
274 /* Set *all* chip selects to use layout 0. */
275 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
276
277 /* Enable interrupts. */
278 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
279 r->bch_regs + HW_BCH_CTRL_SET);
280
281 gpmi_disable_clk(this);
282 return 0;
283err_out:
284 return ret;
285}
286
287/* Converts time in nanoseconds to cycles. */
/*
 * Convert a duration in nanoseconds to a count of clock cycles,
 * rounding up, and enforce a minimum cycle count.
 */
static unsigned int ns_to_cycles(unsigned int time,
			unsigned int period, unsigned int min)
{
	unsigned int cycles = (time + period - 1) / period;

	return (cycles > min) ? cycles : min;
}
296
/* Default propagation-delay bounds, in ns, used as starting estimates. */
#define DEF_MIN_PROP_DELAY	5
#define DEF_MAX_PROP_DELAY	9
/*
 * gpmi_nfc_compute_hardware_timing - Apply timing to current hardware
 * conditions.
 *
 * @this: per-device private data
 * @hw:   output; filled with register-ready cycle counts and the sample
 *        delay factor
 *
 * Quantizes the target NAND timing to the current GPMI clock and picks a
 * read-data sample delay.  Two algorithms are used: a simple one when
 * only basic timing is known, and an "eye"-based one when tREA/tRLOH/
 * tRHOH are available.  Always returns 0.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
					struct gpmi_nfc_hardware_timing *hw)
{
	struct timing_threshod *nfc = &timing_default_threshold;
	struct resources *r = &this->resources;
	struct nand_chip *nand = &this->nand;
	struct nand_timing target = this->timing;
	bool improved_timing_is_available;
	unsigned long clock_frequency_in_hz;
	unsigned int clock_period_in_ns;
	bool dll_use_half_periods;
	unsigned int dll_delay_shift;
	unsigned int max_sample_delay_in_ns;
	unsigned int address_setup_in_cycles;
	unsigned int data_setup_in_ns;
	unsigned int data_setup_in_cycles;
	unsigned int data_hold_in_cycles;
	int ideal_sample_delay_in_ns;
	unsigned int sample_delay_factor;
	int tEYE;
	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;

	/*
	 * If there are multiple chips, we need to relax the timings to allow
	 * for signal distortion due to higher capacitance.
	 */
	if (nand->numchips > 2) {
		target.data_setup_in_ns    += 10;
		target.data_hold_in_ns     += 10;
		target.address_setup_in_ns += 10;
	} else if (nand->numchips > 1) {
		target.data_setup_in_ns    += 5;
		target.data_hold_in_ns     += 5;
		target.address_setup_in_ns += 5;
	}

	/*
	 * Check if improved timing information is available (negative values
	 * mean "unknown").
	 */
	improved_timing_is_available =
		(target.tREA_in_ns  >= 0) &&
		(target.tRLOH_in_ns >= 0) &&
		(target.tRHOH_in_ns >= 0) ;

	/* Inspect the clock. */
	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
	clock_period_in_ns    = NSEC_PER_SEC / clock_frequency_in_hz;

	/*
	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
	 * Here, we quantize the setup and hold timing parameters to the
	 * next-highest clock period to make sure we apply at least the
	 * specified times.
	 *
	 * For data setup and data hold, the hardware interprets a value of zero
	 * as the largest possible delay. This is not what's intended by a zero
	 * in the input parameter, so we impose a minimum of one cycle.
	 */
	data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
							clock_period_in_ns, 1);
	data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
							clock_period_in_ns, 1);
	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
							clock_period_in_ns, 0);

	/*
	 * The clock's period affects the sample delay in a number of ways:
	 *
	 * (1) The NFC HAL tells us the maximum clock period the sample delay
	 *     DLL can tolerate. If the clock period is greater than half that
	 *     maximum, we must configure the DLL to be driven by half periods.
	 *
	 * (2) We need to convert from an ideal sample delay, in ns, to a
	 *     "sample delay factor," which the NFC uses. This factor depends on
	 *     whether we're driving the DLL with full or half periods.
	 *     Paraphrasing the reference manual:
	 *
	 *         AD = SDF x 0.125 x RP
	 *
	 * where:
	 *
	 *     AD   is the applied delay, in ns.
	 *     SDF  is the sample delay factor, which is dimensionless.
	 *     RP   is the reference period, in ns, which is a full clock period
	 *          if the DLL is being driven by full periods, or half that if
	 *          the DLL is being driven by half periods.
	 *
	 * Let's re-arrange this in a way that's more useful to us:
	 *
	 *                        8
	 *         SDF  =  AD x ----
	 *                       RP
	 *
	 * The reference period is either the clock period or half that, so this
	 * is:
	 *
	 *                        8       AD x DDF
	 *         SDF  =  AD x ----- = --------
	 *                      f x P        P
	 *
	 * where:
	 *
	 *     f  is 1 or 1/2, depending on how we're driving the DLL.
	 *     P  is the clock period.
	 *     DDF is the DLL Delay Factor, a dimensionless value that
	 *         incorporates all the constants in the conversion.
	 *
	 * DDF will be either 8 or 16, both of which are powers of two. We can
	 * reduce the cost of this conversion by using bit shifts instead of
	 * multiplication or division. Thus:
	 *
	 *                 AD << DDS
	 *         SDF = ---------
	 *                     P
	 *
	 *     or
	 *
	 *         AD = (SDF >> DDS) x P
	 *
	 * where:
	 *
	 *     DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
	 */
	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
		dll_use_half_periods = true;
		dll_delay_shift      = 3 + 1;
	} else {
		dll_use_half_periods = false;
		dll_delay_shift      = 3;
	}

	/*
	 * Compute the maximum sample delay the NFC allows, under current
	 * conditions. If the clock is running too slowly, no sample delay is
	 * possible.
	 */
	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
		max_sample_delay_in_ns = 0;
	else {
		/*
		 * Compute the delay implied by the largest sample delay factor
		 * the NFC allows.
		 */
		max_sample_delay_in_ns =
			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
								dll_delay_shift;

		/*
		 * Check if the implied sample delay larger than the NFC
		 * actually allows.
		 */
		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
	}

	/*
	 * Check if improved timing information is available. If not, we have to
	 * use a less-sophisticated algorithm.
	 */
	if (!improved_timing_is_available) {
		/*
		 * Fold the read setup time required by the NFC into the ideal
		 * sample delay.
		 */
		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
						nfc->internal_data_setup_in_ns;

		/*
		 * The ideal sample delay may be greater than the maximum
		 * allowed by the NFC. If so, we can trade off sample delay time
		 * for more data setup time.
		 *
		 * In each iteration of the following loop, we add a cycle to
		 * the data setup time and subtract a corresponding amount from
		 * the sample delay until we've satisified the constraints or
		 * can't do any better.
		 */
		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

			data_setup_in_cycles++;
			ideal_sample_delay_in_ns -= clock_period_in_ns;

			if (ideal_sample_delay_in_ns < 0)
				ideal_sample_delay_in_ns = 0;

		}

		/*
		 * Compute the sample delay factor that corresponds most closely
		 * to the ideal sample delay. If the result is too large for the
		 * NFC, use the maximum value.
		 *
		 * Notice that we use the ns_to_cycles function to compute the
		 * sample delay factor. We do this because the form of the
		 * computation is the same as that for calculating cycles.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;

		/* Skip to the part where we return our results. */
		goto return_results;
	}

	/*
	 * If control arrives here, we have more detailed timing information,
	 * so we can use a better algorithm.
	 */

	/*
	 * Fold the read setup time required by the NFC into the maximum
	 * propagation delay.
	 */
	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

	/*
	 * Earlier, we computed the number of clock cycles required to satisfy
	 * the data setup time. Now, we need to know the actual nanoseconds.
	 */
	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

	/*
	 * Compute tEYE, the width of the data eye when reading from the NAND
	 * Flash. The eye width is fundamentally determined by the data setup
	 * time, perturbed by propagation delays and some characteristics of the
	 * NAND Flash device.
	 *
	 * start of the eye = max_prop_delay + tREA
	 * end of the eye   = min_prop_delay + tRHOH + data_setup
	 */
	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
							(int)data_setup_in_ns;

	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

	/*
	 * The eye must be open. If it's not, we can try to open it by
	 * increasing its main forcer, the data setup time.
	 *
	 * In each iteration of the following loop, we increase the data setup
	 * time by a single clock cycle. We do this until either the eye is
	 * open or we run into NFC limits.
	 */
	while ((tEYE <= 0) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;
	}

	/*
	 * When control arrives here, the eye is open. The ideal time to sample
	 * the data is in the center of the eye:
	 *
	 *     end of the eye + start of the eye
	 *     ---------------------------------  -  data_setup
	 *                    2
	 *
	 * After some algebra, this simplifies to the code immediately below.
	 */
	ideal_sample_delay_in_ns =
		((int)max_prop_delay_in_ns +
			(int)target.tREA_in_ns +
				(int)min_prop_delay_in_ns +
					(int)target.tRHOH_in_ns -
						(int)data_setup_in_ns) >> 1;

	/*
	 * The following figure illustrates some aspects of a NAND Flash read:
	 *
	 *
	 *           __                   _____________________________________
	 * RDN         \_________________/
	 *
	 *                                   <---- tEYE ----->
	 *                                  /-----------------\
	 * Read Data ----------------------------<                 >---------
	 *                                  \-----------------/
	 *             ^                 ^                 ^              ^
	 *             |                 |                 |              |
	 *             |<--Data Setup -->|<--Delay Time -->|              |
	 *             |                 |                 |              |
	 *             |                 |                                |
	 *             |                 |<--   Quantized Delay Time   -->|
	 *             |                 |                                |
	 *
	 *
	 * We have some issues we must now address:
	 *
	 * (1) The *ideal* sample delay time must not be negative. If it is, we
	 *     jam it to zero.
	 *
	 * (2) The *ideal* sample delay time must not be greater than that
	 *     allowed by the NFC. If it is, we can increase the data setup
	 *     time, which will reduce the delay between the end of the data
	 *     setup and the center of the eye. It will also make the eye
	 *     larger, which might help with the next issue...
	 *
	 * (3) The *quantized* sample delay time must not fall either before the
	 *     eye opens or after it closes (the latter is the problem
	 *     illustrated in the above figure).
	 */

	/* Jam a negative ideal sample delay to zero. */
	if (ideal_sample_delay_in_ns < 0)
		ideal_sample_delay_in_ns = 0;

	/*
	 * Extend the data setup as needed to reduce the ideal sample delay
	 * below the maximum permitted by the NFC.
	 */
	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;
	}

	/*
	 * Compute the sample delay factor that corresponds to the ideal sample
	 * delay. If the result is too large, then use the maximum allowed
	 * value.
	 *
	 * Notice that we use the ns_to_cycles function to compute the sample
	 * delay factor. We do this because the form of the computation is the
	 * same as that for calculating cycles.
	 */
	sample_delay_factor =
		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

	if (sample_delay_factor > nfc->max_sample_delay_factor)
		sample_delay_factor = nfc->max_sample_delay_factor;

	/*
	 * These macros conveniently encapsulate a computation we'll use to
	 * continuously evaluate whether or not the data sample delay is inside
	 * the eye.
	 */
	#define IDEAL_DELAY  ((int) ideal_sample_delay_in_ns)

	#define QUANTIZED_DELAY  \
		((int) ((sample_delay_factor * clock_period_in_ns) >> \
							dll_delay_shift))

	#define DELAY_ERROR  (abs(QUANTIZED_DELAY - IDEAL_DELAY))

	#define SAMPLE_IS_NOT_WITHIN_THE_EYE  (DELAY_ERROR > (tEYE >> 1))

	/*
	 * While the quantized sample time falls outside the eye, reduce the
	 * sample delay or extend the data setup to move the sampling point back
	 * toward the eye. Do not allow the number of data setup cycles to
	 * exceed the maximum allowed by the NFC.
	 */
	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/*
		 * If control arrives here, the quantized sample delay falls
		 * outside the eye. Check if it's before the eye opens, or after
		 * the eye closes.
		 */
		if (QUANTIZED_DELAY > IDEAL_DELAY) {
			/*
			 * If control arrives here, the quantized sample delay
			 * falls after the eye closes. Decrease the quantized
			 * delay time and then go back to re-evaluate.
			 */
			if (sample_delay_factor != 0)
				sample_delay_factor--;
			continue;
		}

		/*
		 * If control arrives here, the quantized sample delay falls
		 * before the eye opens. Shift the sample point by increasing
		 * data setup time. This will also make the eye larger.
		 */

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* ...and one less period for the delay time. */
		ideal_sample_delay_in_ns -= clock_period_in_ns;

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;

		/*
		 * We have a new ideal sample delay, so re-compute the quantized
		 * delay.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;
	}

	/* Control arrives here when we're ready to return our results. */
return_results:
	hw->data_setup_in_cycles    = data_setup_in_cycles;
	hw->data_hold_in_cycles     = data_hold_in_cycles;
	hw->address_setup_in_cycles = address_setup_in_cycles;
	hw->use_half_periods        = dll_use_half_periods;
	hw->sample_delay_factor     = sample_delay_factor;
	hw->device_busy_timeout     = GPMI_DEFAULT_BUSY_TIMEOUT;
	hw->wrn_dly_sel             = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;

	/* Return success. */
	return 0;
}
747
748/*
749 * <1> Firstly, we should know what's the GPMI-clock means.
750 * The GPMI-clock is the internal clock in the gpmi nand controller.
751 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
752 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
753 *
754 * <2> Secondly, we should know what's the frequency on the nand chip pins.
755 * The frequency on the nand chip pins is derived from the GPMI-clock.
756 * We can get it from the following equation:
757 *
758 * F = G / (DS + DH)
759 *
760 * F : the frequency on the nand chip pins.
761 * G : the GPMI clock, such as 100MHz.
762 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
763 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
764 *
765 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
766 * the nand EDO(extended Data Out) timing could be applied.
767 * The GPMI implements a feedback read strobe to sample the read data.
768 * The feedback read strobe can be delayed to support the nand EDO timing
769 * where the read strobe may deasserts before the read data is valid, and
770 * read data is valid for some time after read strobe.
771 *
772 * The following figure illustrates some aspects of a NAND Flash read:
773 *
774 * |<---tREA---->|
775 * | |
776 * | | |
777 * |<--tRP-->| |
778 * | | |
779 * __ ___|__________________________________
780 * RDN \________/ |
781 * |
782 * /---------\
783 * Read Data --------------< >---------
784 * \---------/
785 * | |
786 * |<-D->|
787 * FeedbackRDN ________ ____________
788 * \___________/
789 *
790 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
791 *
792 *
793 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
794 *
795 * 4.1) From the aspect of the nand chip pins:
796 * Delay = (tREA + C - tRP) {1}
797 *
798 * tREA : the maximum read access time. From the ONFI nand standards,
799 * we know that tREA is 16ns in mode 5, tREA is 20ns is mode 4.
800 * Please check it in : www.onfi.org
801 * C : a constant for adjust the delay. default is 4.
802 * tRP : the read pulse width.
803 * Specified by the HW_GPMI_TIMING0:DATA_SETUP:
804 * tRP = (GPMI-clock-period) * DATA_SETUP
805 *
806 * 4.2) From the aspect of the GPMI nand controller:
807 * Delay = RDN_DELAY * 0.125 * RP {2}
808 *
809 * RP : the DLL reference period.
810 * if (GPMI-clock-period > DLL_THRETHOLD)
811 * RP = GPMI-clock-period / 2;
812 * else
813 * RP = GPMI-clock-period;
814 *
815 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
816 * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD
817 * is 16ns, but in mx6q, we use 12ns.
818 *
819 * 4.3) since {1} equals {2}, we get:
820 *
821 * (tREA + 4 - tRP) * 8
822 * RDN_DELAY = --------------------- {3}
823 * RP
824 *
825 * 4.4) We only support the fastest asynchronous mode of ONFI nand.
826 * For some ONFI nand, the mode 4 is the fastest mode;
827 * while for some ONFI nand, the mode 5 is the fastest mode.
828 * So we only support the mode 4 and mode 5. It is no need to
829 * support other modes.
830 */
831static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
832 struct gpmi_nfc_hardware_timing *hw)
833{
834 struct resources *r = &this->resources;
835 unsigned long rate = clk_get_rate(r->clock[0]);
836 int mode = this->timing_mode;
837 int dll_threshold = 16; /* in ns */
838 unsigned long delay;
839 unsigned long clk_period;
840 int t_rea;
841 int c = 4;
842 int t_rp;
843 int rp;
844
845 /*
846 * [1] for GPMI_HW_GPMI_TIMING0:
847 * The async mode requires 40MHz for mode 4, 50MHz for mode 5.
848 * The GPMI can support 100MHz at most. So if we want to
849 * get the 40MHz or 50MHz, we have to set DS=1, DH=1.
850 * Set the ADDRESS_SETUP to 0 in mode 4.
851 */
852 hw->data_setup_in_cycles = 1;
853 hw->data_hold_in_cycles = 1;
854 hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
855
856 /* [2] for GPMI_HW_GPMI_TIMING1 */
857 hw->device_busy_timeout = 0x9000;
858
859 /* [3] for GPMI_HW_GPMI_CTRL1 */
860 hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
861
862 if (GPMI_IS_MX6Q(this))
863 dll_threshold = 12;
864
865 /*
866 * Enlarge 10 times for the numerator and denominator in {3}.
867 * This make us to get more accurate result.
868 */
869 clk_period = NSEC_PER_SEC / (rate / 10);
870 dll_threshold *= 10;
871 t_rea = ((mode == 5) ? 16 : 20) * 10;
872 c *= 10;
873
874 t_rp = clk_period * 1; /* DATA_SETUP is 1 */
875
876 if (clk_period > dll_threshold) {
877 hw->use_half_periods = 1;
878 rp = clk_period / 2;
879 } else {
880 hw->use_half_periods = 0;
881 rp = clk_period;
882 }
883
884 /*
885 * Multiply the numerator with 10, we could do a round off:
886 * 7.8 round up to 8; 7.4 round down to 7.
887 */
888 delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
889 delay = (delay + 5) / 10;
890
891 hw->sample_delay_factor = delay;
892}
893
894static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
895{
896 struct resources *r = &this->resources;
897 struct nand_chip *nand = &this->nand;
898 struct mtd_info *mtd = &this->mtd;
899 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
900 unsigned long rate;
901 int ret;
902
903 nand->select_chip(mtd, 0);
904
905 /* [1] send SET FEATURE commond to NAND */
906 feature[0] = mode;
907 ret = nand->onfi_set_features(mtd, nand,
908 ONFI_FEATURE_ADDR_TIMING_MODE, feature);
909 if (ret)
910 goto err_out;
911
912 /* [2] send GET FEATURE command to double-check the timing mode */
913 memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
914 ret = nand->onfi_get_features(mtd, nand,
915 ONFI_FEATURE_ADDR_TIMING_MODE, feature);
916 if (ret || feature[0] != mode)
917 goto err_out;
918
919 nand->select_chip(mtd, -1);
920
921 /* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
922 rate = (mode == 5) ? 100000000 : 80000000;
923 clk_set_rate(r->clock[0], rate);
924
925 /* Let the gpmi_begin() re-compute the timing again. */
926 this->flags &= ~GPMI_TIMING_INIT_OK;
927
928 this->flags |= GPMI_ASYNC_EDO_ENABLED;
929 this->timing_mode = mode;
930 dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
931 return 0;
932
933err_out:
934 nand->select_chip(mtd, -1);
935 dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
936 return -EINVAL;
937}
938
939int gpmi_extra_init(struct gpmi_nand_data *this)
940{
941 struct nand_chip *chip = &this->nand;
942
943 /* Enable the asynchronous EDO feature. */
944 if (GPMI_IS_MX6Q(this) && chip->onfi_version) {
945 int mode = onfi_get_async_timing_mode(chip);
946
947 /* We only support the timing mode 4 and mode 5. */
948 if (mode & ONFI_TIMING_MODE_5)
949 mode = 5;
950 else if (mode & ONFI_TIMING_MODE_4)
951 mode = 4;
952 else
953 return 0;
954
955 return enable_edo_mode(this, mode);
956 }
957 return 0;
958}
959
960/* Begin the I/O */
/*
 * Begin the I/O: enable the clocks and, on first use, program the
 * timing registers.  The clocks stay enabled across the early returns
 * on purpose — gpmi_end() balances this with gpmi_disable_clk().
 */
void gpmi_begin(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int   clock_period_in_ns;
	uint32_t       reg;
	unsigned int   dll_wait_time_in_us;
	struct gpmi_nfc_hardware_timing  hw;
	int ret;

	/* Enable the clock. */
	ret = gpmi_enable_clk(this);
	if (ret) {
		pr_err("We failed in enable the clk\n");
		goto err_out;
	}

	/* Only initialize the timing once */
	if (this->flags & GPMI_TIMING_INIT_OK)
		return;
	this->flags |= GPMI_TIMING_INIT_OK;

	/* Pick the EDO computation when EDO mode has been negotiated. */
	if (this->flags & GPMI_ASYNC_EDO_ENABLED)
		gpmi_compute_edo_timing(this, &hw);
	else
		gpmi_nfc_compute_hardware_timing(this, &hw);

	/* [1] Set HW_GPMI_TIMING0 */
	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)       ;

	writel(reg, gpmi_regs + HW_GPMI_TIMING0);

	/* [2] Set HW_GPMI_TIMING1 */
	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
		gpmi_regs + HW_GPMI_TIMING1);

	/* [3] The following code is to set the HW_GPMI_CTRL1. */

	/* Set the WRN_DLY_SEL */
	writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
					gpmi_regs + HW_GPMI_CTRL1_SET);

	/* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Clear out the DLL control fields. */
	reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
	writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* If no sample delay is called for, return immediately. */
	if (!hw.sample_delay_factor)
		return;

	/* Set RDN_DELAY or HALF_PERIOD. */
	reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
		| BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);

	writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* At last, we enable the DLL. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
	 * we can use the GPMI. Calculate the amount of time we need to wait,
	 * in microseconds.
	 */
	clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

	if (!dll_wait_time_in_us)
		dll_wait_time_in_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_in_us);

err_out:
	return;
}
1043
/* End the I/O: release the clock taken by gpmi_begin(). */
void gpmi_end(struct gpmi_nand_data *this)
{
	gpmi_disable_clk(this);
}
1048
/* Clears a BCH interrupt (acknowledges the COMPLETE_IRQ source). */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
1055
1056/* Returns the Ready/Busy status of the given chip. */
1057int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
1058{
1059 struct resources *r = &this->resources;
1060 uint32_t mask = 0;
1061 uint32_t reg = 0;
1062
1063 if (GPMI_IS_MX23(this)) {
1064 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
1065 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
1066 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
1067 /* MX28 shares the same R/B register as MX6Q. */
1068 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
1069 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
1070 } else
1071 pr_err("unknow arch.\n");
1072 return reg & mask;
1073}
1074
/*
 * Record the type of the DMA operation about to start.  The previous type
 * is kept in last_dma_type so timeout reports can name the last operation.
 */
static inline void set_dma_type(struct gpmi_nand_data *this,
					enum dma_ops_type type)
{
	this->last_dma_type = this->dma_type;
	this->dma_type = type;
}
1081
/*
 * Send the command/address bytes queued in this->cmd_buffer to the chip.
 *
 * Two chained descriptors are prepared:
 *  [1] a PIO stage: the pio[] words are handed to the DMA driver via a
 *      DMA_TRANS_NONE descriptor (the scatterlist cast is an mxs-dma
 *      convention — the words program GPMI registers, they are not a
 *      memory transfer), and
 *  [2] a memory-to-device stage streaming the command bytes.
 *
 * Returns 0 on success, -1 when a descriptor could not be prepared.
 */
int gpmi_send_command(struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	int chip = this->current_chip;
	u32 pio[3];

	/* [1] send out the PIO words */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
	pio[1] = pio[2] = 0;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
	sgl = &this->cmd_sgl;

	/* The mapping is undone in dma_irq_callback() (DMA_FOR_COMMAND). */
	sg_init_one(sgl, this->cmd_buffer, this->command_length);
	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel,
				sgl, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] submit the DMA */
	set_dma_type(this, DMA_FOR_COMMAND);
	return start_dma_without_bch_irq(this, desc);
}
1125
/*
 * Write this->upper_len raw data bytes (no ECC) to the chip.
 *
 * Stage [1] programs the GPMI via PIO words (see gpmi_send_command() for
 * the DMA_TRANS_NONE convention); stage [2] streams the data buffer set
 * up by prepare_data_dma().  Returns 0 on success, -1 on prep failure.
 */
int gpmi_send_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	uint32_t command_mode;
	uint32_t address;
	u32 pio[2];

	/* [1] PIO */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] send DMA request */
	prepare_data_dma(this, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
					1, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}
	/* [3] submit the DMA */
	set_dma_type(this, DMA_FOR_WRITE_DATA);
	return start_dma_without_bch_irq(this, desc);
}
1166
/*
 * Read this->upper_len raw data bytes (no ECC) from the chip.
 *
 * Stage [1] programs the GPMI for a READ via PIO words; stage [2] streams
 * the data into the buffer mapped by prepare_data_dma().  When a bounce
 * buffer was used, dma_irq_callback() copies it back to the caller's
 * buffer.  Returns 0 on success, -1 on prep failure.
 */
int gpmi_read_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[2];

	/* [1] : send PIO */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] : send DMA request */
	prepare_data_dma(this, DMA_FROM_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
					1, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] : submit the DMA */
	set_dma_type(this, DMA_FOR_READ_DATA);
	return start_dma_without_bch_irq(this, desc);
}
1204
/*
 * Write one ECC-protected page.
 *
 * @payload:   DMA address of the page data buffer.
 * @auxiliary: DMA address of the auxiliary (metadata) buffer.
 *
 * A single PIO-only descriptor arms the GPMI and the BCH encoder
 * (pio[2..5]: ECC control, transfer size, payload and auxiliary DMA
 * addresses); completion is signalled by the BCH interrupt, hence
 * start_dma_with_bch_irq().  Returns 0 on success, -1/-ETIMEDOUT on error.
 */
int gpmi_send_page(struct gpmi_nand_data *this,
			dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* A DMA descriptor that does an ECC page read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;

	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_CTRL_ACK);
	if (!desc) {
		/* NOTE(review): log says "step 2" but this is the only step. */
		pr_err("step 2 error\n");
		return -1;
	}
	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1250
/*
 * Read one ECC-protected page.
 *
 * @payload:   DMA address of the page data buffer.
 * @auxiliary: DMA address of the auxiliary (metadata + ECC status) buffer.
 *
 * Chains three descriptors: [1] wait for the chip's Ready signal,
 * [2] read the page through the BCH decoder, [3] shut the BCH block off
 * again.  Note that stages [1] and [3] deliberately submit only the
 * first 2 and 3 words of pio[], respectively.  Completion is the BCH
 * interrupt.  Returns 0 on success, -1/-ETIMEDOUT on error.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
				dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2,
				DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] Disable the BCH block */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 3,
				DMA_TRANS_NONE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 3 error\n");
		return -1;
	}

	/* [4] submit the DMA */
	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
deleted file mode 100644
index e9b1c47e3cf..00000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ /dev/null
@@ -1,1701 +0,0 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/clk.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/mtd/partitions.h>
29#include <linux/pinctrl/consumer.h>
30#include <linux/of.h>
31#include <linux/of_device.h>
32#include <linux/of_mtd.h>
33#include "gpmi-nand.h"
34
/* Resource names for the GPMI NAND driver (must match the platform/DT data). */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "gpmi-dma"

/*
 * Our own bad-block-table descriptor: a single 0xff byte at offset 0 of
 * the OOB marks a good block.
 */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};
49
/*
 * We will use all the (page + OOB).  The all-zero layout tells MTD that
 * no OOB bytes are free for clients — the controller consumes the whole
 * spare area.
 */
static struct nand_ecclayout gpmi_hw_ecclayout = {
	.eccbytes = 0,
	.eccpos = { 0, },
	.oobfree = { {.offset = 0, .length = 0} }
};
56
/*
 * BCH interrupt handler: acknowledge the IRQ and wake the thread waiting
 * in start_dma_with_bch_irq().
 */
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}
65
66/*
67 * Calculate the ECC strength by hand:
68 * E : The ECC strength.
69 * G : the length of Galois Field.
70 * N : The chunk count of per page.
71 * O : the oobsize of the NAND chip.
72 * M : the metasize of per page.
73 *
74 * The formula is :
75 * E * G * N
76 * ------------ <= (O - M)
77 * 8
78 *
79 * So, we get E by:
80 * (O - M) * 8
81 * E <= -------------
82 * G * N
83 */
84static inline int get_ecc_strength(struct gpmi_nand_data *this)
85{
86 struct bch_geometry *geo = &this->bch_geometry;
87 struct mtd_info *mtd = &this->mtd;
88 int ecc_strength;
89
90 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
91 / (geo->gf_len * geo->ecc_chunk_count);
92
93 /* We need the minor even number. */
94 return round_down(ecc_strength, 2);
95}
96
/*
 * Compute the BCH geometry for the current chip: ECC chunk size/count,
 * ECC strength, payload/auxiliary buffer layout and — when block-mark
 * swapping is enabled — the position of the bad-block mark within the
 * ECC-based view of the page.  Returns 0 on success, -EINVAL when the
 * derived ECC strength is zero.
 */
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = &this->mtd;
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. There is no oobsize greater then 512. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize)
		geo->ecc_chunk_size *= 2; /* keep C >= O */

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!geo->ecc_strength) {
		pr_err("wrong ECC strength.\n");
		return -EINVAL;
	}

	/* One page transfer covers the data area plus the whole OOB. */
	geo->page_size = mtd->writesize + mtd->oobsize;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	/* Without block-mark swapping there is nothing more to compute. */
	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * NAND chip with 2K page shows below:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M)
	 *                          8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 * : C >= O         (C is the ecc chunk size)
	 *
	 * It's easy to deduce to the following result:
	 *
	 *         E * G       (O - M)      C - M         C - M
	 *      ----------- <= ------- <=  --------  <  ---------
	 *           8            N           N          (N - 1)
	 *
	 *  So, we get:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M) < C
	 *                          8
	 *
	 *  The above inequality means the position of block mark
	 *  within the ECC-based view of the page is still in the data chunk,
	 *  and it's NOT in the ECC bits of the chunk.
	 *
	 *  Use the following to compute the bit position of the
	 *  physical block mark within the ECC-based view of the page:
	 *          (page_size - D) * 8
	 *
	 *  --Huang Shijie
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
	return 0;
}
201
202struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
203{
204 int chipnr = this->current_chip;
205
206 return this->dma_chans[chipnr];
207}
208
/*
 * Can we use the upper's buffer directly for DMA?
 *
 * Try to map this->upper_buf for DMA; if that fails, fall back to the
 * driver's bounce buffer (copying the data in first for writes) and
 * record the choice in this->direct_dma_map_ok so the completion callback
 * knows whether to copy read data back.
 * Assumes this->upper_len <= PAGE_SIZE for the fallback — TODO confirm.
 */
void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
{
	struct scatterlist *sgl = &this->data_sgl;
	int ret;

	this->direct_dma_map_ok = true;

	/* first try to map the upper buffer directly */
	sg_init_one(sgl, this->upper_buf, this->upper_len);
	ret = dma_map_sg(this->dev, sgl, 1, dr);
	if (ret == 0) {
		/* We have to use our own DMA buffer. */
		sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);

		if (dr == DMA_TO_DEVICE)
			memcpy(this->data_buffer_dma, this->upper_buf,
				this->upper_len);

		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			pr_err("DMA mapping failed.\n");

		this->direct_dma_map_ok = false;
	}
}
235
236/* This will be called after the DMA operation is finished. */
237static void dma_irq_callback(void *param)
238{
239 struct gpmi_nand_data *this = param;
240 struct completion *dma_c = &this->dma_done;
241
242 complete(dma_c);
243
244 switch (this->dma_type) {
245 case DMA_FOR_COMMAND:
246 dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
247 break;
248
249 case DMA_FOR_READ_DATA:
250 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
251 if (this->direct_dma_map_ok == false)
252 memcpy(this->upper_buf, this->data_buffer_dma,
253 this->upper_len);
254 break;
255
256 case DMA_FOR_WRITE_DATA:
257 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
258 break;
259
260 case DMA_FOR_READ_ECC_PAGE:
261 case DMA_FOR_WRITE_ECC_PAGE:
262 /* We have to wait the BCH interrupt to finish. */
263 break;
264
265 default:
266 pr_err("in wrong DMA operation.\n");
267 }
268}
269
/*
 * Submit @desc and wait (up to one second) for the DMA interrupt.
 * Returns 0 on success, -ETIMEDOUT when no DMA interrupt arrived.
 */
int start_dma_without_bch_irq(struct gpmi_nand_data *this,
				struct dma_async_tx_descriptor *desc)
{
	struct completion *dma_c = &this->dma_done;
	int err;

	init_completion(dma_c);

	/* dma_irq_callback() completes dma_c when the transfer finishes. */
	desc->callback = dma_irq_callback;
	desc->callback_param = this;
	dmaengine_submit(desc);
	dma_async_issue_pending(get_dma_chan(this));

	/* Wait for the interrupt from the DMA block. */
	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
	if (!err) {
		pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}
292
293/*
294 * This function is used in BCH reading or BCH writing pages.
295 * It will wait for the BCH interrupt as long as ONE second.
296 * Actually, we must wait for two interrupts :
297 * [1] firstly the DMA interrupt and
298 * [2] secondly the BCH interrupt.
299 */
300int start_dma_with_bch_irq(struct gpmi_nand_data *this,
301 struct dma_async_tx_descriptor *desc)
302{
303 struct completion *bch_c = &this->bch_done;
304 int err;
305
306 /* Prepare to receive an interrupt from the BCH block. */
307 init_completion(bch_c);
308
309 /* start the DMA */
310 start_dma_without_bch_irq(this, desc);
311
312 /* Wait for the interrupt from the BCH block. */
313 err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
314 if (!err) {
315 pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type);
316 gpmi_dump_info(this);
317 return -ETIMEDOUT;
318 }
319 return 0;
320}
321
322static int acquire_register_block(struct gpmi_nand_data *this,
323 const char *res_name)
324{
325 struct platform_device *pdev = this->pdev;
326 struct resources *res = &this->resources;
327 struct resource *r;
328 void __iomem *p;
329
330 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
331 if (!r) {
332 pr_err("Can't get resource for %s\n", res_name);
333 return -ENXIO;
334 }
335
336 p = ioremap(r->start, resource_size(r));
337 if (!p) {
338 pr_err("Can't remap %s\n", res_name);
339 return -ENOMEM;
340 }
341
342 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
343 res->gpmi_regs = p;
344 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
345 res->bch_regs = p;
346 else
347 pr_err("unknown resource name : %s\n", res_name);
348
349 return 0;
350}
351
352static void release_register_block(struct gpmi_nand_data *this)
353{
354 struct resources *res = &this->resources;
355 if (res->gpmi_regs)
356 iounmap(res->gpmi_regs);
357 if (res->bch_regs)
358 iounmap(res->bch_regs);
359 res->gpmi_regs = NULL;
360 res->bch_regs = NULL;
361}
362
/*
 * Request the BCH interrupt and attach @irq_h to it.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): only r->start is requested here, but the whole
 * [r->start, r->end] range is recorded and later freed by
 * release_bch_irq() — verify the resource always describes one IRQ.
 */
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		pr_err("Can't get resource for %s\n", res_name);
		return -ENXIO;
	}

	err = request_irq(r->start, irq_h, 0, res_name, this);
	if (err) {
		pr_err("Can't own %s\n", res_name);
		return err;
	}

	res->bch_low_interrupt = r->start;
	res->bch_high_interrupt = r->end;
	return 0;
}
387
388static void release_bch_irq(struct gpmi_nand_data *this)
389{
390 struct resources *res = &this->resources;
391 int i = res->bch_low_interrupt;
392
393 for (; i <= res->bch_high_interrupt; i++)
394 free_irq(i, this);
395}
396
/*
 * dma_request_channel() filter: accept only the APBH channel whose id
 * matches the one stashed in this->private by acquire_dma_channels().
 */
static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
{
	struct gpmi_nand_data *this = param;
	/* The channel id travels through the opaque private pointer. */
	int dma_channel = (int)this->private;

	if (!mxs_dma_is_apbh(chan))
		return false;
	/*
	 * only catch the GPMI dma channels :
	 *	for mx23 :	MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
	 *		(These four channels share the same IRQ!)
	 *
	 *	for mx28 :	MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
	 *		(These eight channels share the same IRQ!)
	 */
	if (dma_channel == chan->chan_id) {
		chan->private = &this->dma_data;
		return true;
	}
	return false;
}
418
419static void release_dma_channels(struct gpmi_nand_data *this)
420{
421 unsigned int i;
422 for (i = 0; i < DMA_CHANS; i++)
423 if (this->dma_chans[i]) {
424 dma_release_channel(this->dma_chans[i]);
425 this->dma_chans[i] = NULL;
426 }
427}
428
/*
 * Read the GPMI DMA channel number from the device tree, record the DMA
 * IRQ, and request the matching channel via gpmi_dma_filter().
 * Returns 0 on success or -EINVAL on any failure.
 *
 * NOTE(review): the of_property_read_u32() error code in 'ret' is
 * discarded; every failure path returns -EINVAL.
 */
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct resource *r_dma;
	struct device_node *dn;
	u32 dma_channel;
	int ret;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;

	/* dma channel, we only use the first one. */
	dn = pdev->dev.of_node;
	ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
	if (ret) {
		pr_err("unable to get DMA channel from dt.\n");
		goto acquire_err;
	}
	/* Stash the channel id for gpmi_dma_filter() to compare against. */
	this->private = (void *)dma_channel;

	/* gpmi dma interrupt */
	r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
					GPMI_NAND_DMA_INTERRUPT_RES_NAME);
	if (!r_dma) {
		pr_err("Can't get resource for DMA\n");
		goto acquire_err;
	}
	this->dma_data.chan_irq = r_dma->start;

	/* request dma channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
	if (!dma_chan) {
		pr_err("Failed to request DMA channel.\n");
		goto acquire_err;
	}

	this->dma_chans[0] = dma_chan;
	return 0;

acquire_err:
	release_dma_channels(this);
	return -EINVAL;
}
474
475static void gpmi_put_clks(struct gpmi_nand_data *this)
476{
477 struct resources *r = &this->resources;
478 struct clk *clk;
479 int i;
480
481 for (i = 0; i < GPMI_CLK_MAX; i++) {
482 clk = r->clock[i];
483 if (clk) {
484 clk_put(clk);
485 r->clock[i] = NULL;
486 }
487 }
488}
489
/*
 * Extra clocks needed on MX6Q, stored into r->clock[1..] by
 * gpmi_get_clks().  Unused slots of the GPMI_CLK_MAX-sized array stay
 * NULL, which terminates the lookup loop.
 */
static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
	"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
493
494static int gpmi_get_clks(struct gpmi_nand_data *this)
495{
496 struct resources *r = &this->resources;
497 char **extra_clks = NULL;
498 struct clk *clk;
499 int i;
500
501 /* The main clock is stored in the first. */
502 r->clock[0] = clk_get(this->dev, "gpmi_io");
503 if (IS_ERR(r->clock[0]))
504 goto err_clock;
505
506 /* Get extra clocks */
507 if (GPMI_IS_MX6Q(this))
508 extra_clks = extra_clks_for_mx6q;
509 if (!extra_clks)
510 return 0;
511
512 for (i = 1; i < GPMI_CLK_MAX; i++) {
513 if (extra_clks[i - 1] == NULL)
514 break;
515
516 clk = clk_get(this->dev, extra_clks[i - 1]);
517 if (IS_ERR(clk))
518 goto err_clock;
519
520 r->clock[i] = clk;
521 }
522
523 if (GPMI_IS_MX6Q(this))
524 /*
525 * Set the default value for the gpmi clock in mx6q:
526 *
527 * If you want to use the ONFI nand which is in the
528 * Synchronous Mode, you should change the clock as you need.
529 */
530 clk_set_rate(r->clock[0], 22000000);
531
532 return 0;
533
534err_clock:
535 dev_dbg(this->dev, "failed in finding the clocks.\n");
536 gpmi_put_clks(this);
537 return -ENOMEM;
538}
539
/*
 * Acquire everything the driver needs: both register blocks, the BCH
 * IRQ, the DMA channel, the default pinctrl state and the clocks.
 * On failure, the goto ladder releases whatever was already acquired,
 * in reverse order.  Returns 0 on success or a negative errno.
 */
static int acquire_resources(struct gpmi_nand_data *this)
{
	struct pinctrl *pinctrl;
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_dma_channels;

	pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev);
	if (IS_ERR(pinctrl)) {
		ret = PTR_ERR(pinctrl);
		goto exit_pin;
	}

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
exit_pin:
	release_dma_channels(this);
exit_dma_channels:
	release_bch_irq(this);
exit_regs:
	release_register_block(this);
	return ret;
}
581
/* Release everything acquired by acquire_resources(). */
static void release_resources(struct gpmi_nand_data *this)
{
	gpmi_put_clks(this);
	release_register_block(this);
	release_bch_irq(this);
	release_dma_channels(this);
}
589
590static int init_hardware(struct gpmi_nand_data *this)
591{
592 int ret;
593
594 /*
595 * This structure contains the "safe" GPMI timing that should succeed
596 * with any NAND Flash device
597 * (although, with less-than-optimal performance).
598 */
599 struct nand_timing safe_timing = {
600 .data_setup_in_ns = 80,
601 .data_hold_in_ns = 60,
602 .address_setup_in_ns = 25,
603 .gpmi_sample_delay_in_ns = 6,
604 .tREA_in_ns = -1,
605 .tRLOH_in_ns = -1,
606 .tRHOH_in_ns = -1,
607 };
608
609 /* Initialize the hardwares. */
610 ret = gpmi_init(this);
611 if (ret)
612 return ret;
613
614 this->timing = safe_timing;
615 return 0;
616}
617
/*
 * Choose the buffer a page read will DMA into: @destination directly
 * when it can be DMA-mapped, otherwise the alternate buffer
 * (@alt_virt/@alt_phys).  The choice is recorded in
 * this->direct_dma_map_ok and reported via @use_virt/@use_phys.
 * Returns 0, or -ENOMEM when the direct map failed and the alternate
 * buffer is too small.
 */
static int read_page_prepare(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(destination)) {
		dma_addr_t dest_phys;

		dest_phys = dma_map_single(dev, destination,
						length, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dest_phys)) {
			if (alt_size < length) {
				pr_err("%s, Alternate buffer is too small\n",
					__func__);
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = destination;
		*use_phys = dest_phys;
		this->direct_dma_map_ok = true;
		return 0;
	}

	/* Fall back to the pre-mapped alternate buffer. */
map_failed:
	*use_virt = alt_virt;
	*use_phys = alt_phys;
	this->direct_dma_map_ok = false;
	return 0;
}
650
/*
 * Undo read_page_prepare()'s mapping: only a direct mapping of the
 * caller's buffer needs to be unmapped here.
 */
static inline void read_page_end(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void *used_virt, dma_addr_t used_phys)
{
	if (this->direct_dma_map_ok)
		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
}
659
/*
 * Final step of a page read: when the alternate buffer was used, copy
 * the data into the caller's @destination.
 */
static inline void read_page_swap_end(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void *used_virt, dma_addr_t used_phys)
{
	if (!this->direct_dma_map_ok)
		memcpy(destination, alt_virt, length);
}
668
/*
 * Choose the buffer a page write will DMA from: @source directly when it
 * can be DMA-mapped, otherwise the alternate buffer (the data is copied
 * into it first).  The choice is reported via @use_virt/@use_phys.
 * Returns 0, or -ENOMEM when the direct map failed and the alternate
 * buffer is too small.
 */
static int send_page_prepare(struct gpmi_nand_data *this,
			const void *source, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			const void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(source)) {
		dma_addr_t source_phys;

		source_phys = dma_map_single(dev, (void *)source, length,
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, source_phys)) {
			if (alt_size < length) {
				pr_err("%s, Alternate buffer is too small\n",
					__func__);
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = source;
		*use_phys = source_phys;
		return 0;
	}
map_failed:
	/*
	 * Copy the content of the source buffer into the alternate
	 * buffer and set up the return values accordingly.
	 */
	memcpy(alt_virt, source, length);

	*use_virt = alt_virt;
	*use_phys = alt_phys;
	return 0;
}
704
705static void send_page_end(struct gpmi_nand_data *this,
706 const void *source, unsigned length,
707 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
708 const void *used_virt, dma_addr_t used_phys)
709{
710 struct device *dev = this->dev;
711 if (used_virt == source)
712 dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
713}
714
/* Free all DMA buffers allocated by gpmi_alloc_dma_buffer(). */
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;

	/*
	 * The virt_addr_valid() guard presumably distinguishes a real
	 * coherent allocation from a never-initialized pointer — TODO
	 * confirm why it is needed in addition to the NULL check.
	 */
	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
		dma_free_coherent(dev, this->page_buffer_size,
					this->page_buffer_virt,
					this->page_buffer_phys);
	kfree(this->cmd_buffer);
	kfree(this->data_buffer_dma);

	this->cmd_buffer	= NULL;
	this->data_buffer_dma	= NULL;
	this->page_buffer_virt	= NULL;
	this->page_buffer_size	=  0;
}
731
/*
 * Allocate the DMA buffers: a command buffer, a bounce data buffer
 * (both GFP_DMA so they are DMA-addressable) and the coherent page
 * buffer holding payload + auxiliary areas.  Returns 0 or -ENOMEM;
 * partial allocations are cleaned up via gpmi_free_dma_buffer().
 */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;

	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (this->cmd_buffer == NULL)
		goto error_alloc;

	/* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
	this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	/*
	 * [3] Allocate the page buffer.
	 *
	 * Both the payload buffer and the auxiliary buffer must appear on
	 * 32-bit boundaries. We presume the size of the payload buffer is a
	 * power of two and is much larger than four, which guarantees the
	 * auxiliary buffer will appear on a 32-bit boundary.
	 */
	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
					&this->page_buffer_phys, GFP_DMA);
	if (!this->page_buffer_virt)
		goto error_alloc;


	/* Slice up the page buffer. */
	this->payload_virt = this->page_buffer_virt;
	this->payload_phys = this->page_buffer_phys;
	this->auxiliary_virt = this->payload_virt + geo->payload_size;
	this->auxiliary_phys = this->payload_phys + geo->payload_size;
	return 0;

error_alloc:
	gpmi_free_dma_buffer(this);
	pr_err("Error allocating DMA buffers!\n");
	return -ENOMEM;
}
775
776static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
777{
778 struct nand_chip *chip = mtd->priv;
779 struct gpmi_nand_data *this = chip->priv;
780 int ret;
781
782 /*
783 * Every operation begins with a command byte and a series of zero or
784 * more address bytes. These are distinguished by either the Address
785 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
786 * asserted. When MTD is ready to execute the command, it will deassert
787 * both latch enables.
788 *
789 * Rather than run a separate DMA operation for every single byte, we
790 * queue them up and run a single DMA operation for the entire series
791 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
792 */
793 if ((ctrl & (NAND_ALE | NAND_CLE))) {
794 if (data != NAND_CMD_NONE)
795 this->cmd_buffer[this->command_length++] = data;
796 return;
797 }
798
799 if (!this->command_length)
800 return;
801
802 ret = gpmi_send_command(this);
803 if (ret)
804 pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
805
806 this->command_length = 0;
807}
808
809static int gpmi_dev_ready(struct mtd_info *mtd)
810{
811 struct nand_chip *chip = mtd->priv;
812 struct gpmi_nand_data *this = chip->priv;
813
814 return gpmi_is_ready(this, this->current_chip);
815}
816
817static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
818{
819 struct nand_chip *chip = mtd->priv;
820 struct gpmi_nand_data *this = chip->priv;
821
822 if ((this->current_chip < 0) && (chipnr >= 0))
823 gpmi_begin(this);
824 else if ((this->current_chip >= 0) && (chipnr < 0))
825 gpmi_end(this);
826
827 this->current_chip = chipnr;
828}
829
830static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
831{
832 struct nand_chip *chip = mtd->priv;
833 struct gpmi_nand_data *this = chip->priv;
834
835 pr_debug("len is %d\n", len);
836 this->upper_buf = buf;
837 this->upper_len = len;
838
839 gpmi_read_data(this);
840}
841
842static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
843{
844 struct nand_chip *chip = mtd->priv;
845 struct gpmi_nand_data *this = chip->priv;
846
847 pr_debug("len is %d\n", len);
848 this->upper_buf = (uint8_t *)buf;
849 this->upper_len = len;
850
851 gpmi_send_data(this);
852}
853
854static uint8_t gpmi_read_byte(struct mtd_info *mtd)
855{
856 struct nand_chip *chip = mtd->priv;
857 struct gpmi_nand_data *this = chip->priv;
858 uint8_t *buf = this->data_buffer_dma;
859
860 gpmi_read_buf(mtd, buf, 1);
861 return buf[0];
862}
863
864/*
865 * Handles block mark swapping.
866 * It can be called in swapping the block mark, or swapping it back,
867 * because the the operations are the same.
868 */
869static void block_mark_swapping(struct gpmi_nand_data *this,
870 void *payload, void *auxiliary)
871{
872 struct bch_geometry *nfc_geo = &this->bch_geometry;
873 unsigned char *p;
874 unsigned char *a;
875 unsigned int bit;
876 unsigned char mask;
877 unsigned char from_data;
878 unsigned char from_oob;
879
880 if (!this->swap_block_mark)
881 return;
882
883 /*
884 * If control arrives here, we're swapping. Make some convenience
885 * variables.
886 */
887 bit = nfc_geo->block_mark_bit_offset;
888 p = payload + nfc_geo->block_mark_byte_offset;
889 a = auxiliary;
890
891 /*
892 * Get the byte from the data area that overlays the block mark. Since
893 * the ECC engine applies its own view to the bits in the page, the
894 * physical block mark won't (in general) appear on a byte boundary in
895 * the data.
896 */
897 from_data = (p[0] >> bit) | (p[1] << (8 - bit));
898
899 /* Get the byte from the OOB. */
900 from_oob = a[0];
901
902 /* Swap them. */
903 a[0] = from_data;
904
905 mask = (0x1 << bit) - 1;
906 p[0] = (p[0] & mask) | (from_oob << bit);
907
908 mask = ~0 << bit;
909 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
910}
911
/*
 * ECC-based page read.
 *
 * Reads the page through the BCH engine into DMA buffers (using the caller's
 * buffer directly when it is DMA-able, otherwise the driver's bounce buffer),
 * undoes block mark swapping, folds the per-chunk ECC status bytes into
 * mtd->ecc_stats, and, when @oob_required, synthesizes the OOB for the
 * caller.  Returns 0 on success or a negative error from the DMA/BCH layer.
 */
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	void *payload_virt;
	dma_addr_t payload_phys;
	void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	unsigned int i;
	unsigned char *status;
	unsigned int failed;
	unsigned int corrected;
	int ret;

	pr_debug("page number is : %d\n", page);
	/* Map the caller's buffer for DMA, or fall back to the bounce buffer. */
	ret = read_page_prepare(this, buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
	if (ret) {
		pr_err("Inadequate DMA buffer\n");
		ret = -ENOMEM;
		return ret;
	}
	auxiliary_virt = this->auxiliary_virt;
	auxiliary_phys = this->auxiliary_phys;

	/* go! */
	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
	/* Always undo the mapping set up above, even when the read failed. */
	read_page_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
	if (ret) {
		pr_err("Error in ECC-based read: %d\n", ret);
		goto exit_nfc;
	}

	/* handle the block mark swapping */
	block_mark_swapping(this, payload_virt, auxiliary_virt);

	/* Loop over status bytes, accumulating ECC status. */
	failed = 0;
	corrected = 0;
	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;

	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			failed++;
			continue;
		}
		/* Any other status value is the count of corrected bitflips. */
		corrected += *status;
	}

	/*
	 * Propagate ECC status to the owning MTD only when failed or
	 * corrected times nearly reaches our ECC correction threshold.
	 */
	if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
		mtd->ecc_stats.failed += failed;
		mtd->ecc_stats.corrected += corrected;
	}

	if (oob_required) {
		/*
		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
		 * for details about our policy for delivering the OOB.
		 *
		 * We fill the caller's buffer with set bits, and then copy the
		 * block mark to the caller's buffer. Note that, if block mark
		 * swapping was necessary, it has already been done, so we can
		 * rely on the first byte of the auxiliary buffer to contain
		 * the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
	}

	/* Copy out of the bounce buffer (if one was used) and clean up. */
	read_page_swap_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
exit_nfc:
	return ret;
}
1001
/*
 * ECC-based page write.
 *
 * When block mark swapping is enabled, the caller's buffers must not be
 * modified, so both the payload and the OOB are copied into the driver's own
 * DMA buffers and swapped there.  Otherwise the caller's buffers are mapped
 * (or bounced) via send_page_prepare() and unmapped afterwards.
 *
 * Always returns 0; DMA-preparation and program errors are only logged.
 */
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf, int oob_required)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	const void *payload_virt;
	dma_addr_t payload_phys;
	const void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	int ret;

	pr_debug("ecc write page.\n");
	if (this->swap_block_mark) {
		/*
		 * If control arrives here, we're doing block mark swapping.
		 * Since we can't modify the caller's buffers, we must copy them
		 * into our own.
		 */
		memcpy(this->payload_virt, buf, mtd->writesize);
		payload_virt = this->payload_virt;
		payload_phys = this->payload_phys;

		memcpy(this->auxiliary_virt, chip->oob_poi,
				nfc_geo->auxiliary_size);
		auxiliary_virt = this->auxiliary_virt;
		auxiliary_phys = this->auxiliary_phys;

		/* Handle block mark swapping. */
		block_mark_swapping(this,
				(void *) payload_virt, (void *) auxiliary_virt);
	} else {
		/*
		 * If control arrives here, we're not doing block mark swapping,
		 * so we can try to use the caller's buffers directly.
		 */
		ret = send_page_prepare(this,
				buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
		if (ret) {
			pr_err("Inadequate payload DMA buffer\n");
			/* NOTE(review): the error is swallowed here (0 is returned). */
			return 0;
		}

		ret = send_page_prepare(this,
				chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				&auxiliary_virt, &auxiliary_phys);
		if (ret) {
			pr_err("Inadequate auxiliary DMA buffer\n");
			/* Jumps below to unmap the payload prepared above. */
			goto exit_auxiliary;
		}
	}

	/* Ask the NFC. */
	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
	if (ret)
		pr_err("Error in ECC-based write: %d\n", ret);

	if (!this->swap_block_mark) {
		/* Unmap in reverse order of the prepares above. */
		send_page_end(this, chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				auxiliary_virt, auxiliary_phys);
exit_auxiliary:
		send_page_end(this, buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				payload_virt, payload_phys);
	}

	return 0;
}
1077
1078/*
1079 * There are several places in this driver where we have to handle the OOB and
1080 * block marks. This is the function where things are the most complicated, so
1081 * this is where we try to explain it all. All the other places refer back to
1082 * here.
1083 *
1084 * These are the rules, in order of decreasing importance:
1085 *
1086 * 1) Nothing the caller does can be allowed to imperil the block mark.
1087 *
1088 * 2) In read operations, the first byte of the OOB we return must reflect the
1089 * true state of the block mark, no matter where that block mark appears in
1090 * the physical page.
1091 *
1092 * 3) ECC-based read operations return an OOB full of set bits (since we never
1093 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1094 * return).
1095 *
1096 * 4) "Raw" read operations return a direct view of the physical bytes in the
1097 * page, using the conventional definition of which bytes are data and which
1098 * are OOB. This gives the caller a way to see the actual, physical bytes
1099 * in the page, without the distortions applied by our ECC engine.
1100 *
1101 *
1102 * What we do for this specific read operation depends on two questions:
1103 *
1104 * 1) Are we doing a "raw" read, or an ECC-based read?
1105 *
1106 * 2) Are we using block mark swapping or transcription?
1107 *
1108 * There are four cases, illustrated by the following Karnaugh map:
1109 *
1110 * | Raw | ECC-based |
1111 * -------------+-------------------------+-------------------------+
1112 * | Read the conventional | |
1113 * | OOB at the end of the | |
1114 * Swapping | page and return it. It | |
1115 * | contains exactly what | |
1116 * | we want. | Read the block mark and |
1117 * -------------+-------------------------+ return it in a buffer |
1118 * | Read the conventional | full of set bits. |
1119 * | OOB at the end of the | |
1120 * | page and also the block | |
1121 * Transcribing | mark in the metadata. | |
1122 * | Copy the block mark | |
1123 * | into the first byte of | |
1124 * | the OOB. | |
1125 * -------------+-------------------------+-------------------------+
1126 *
1127 * Note that we break rule #4 in the Transcribing/Raw case because we're not
1128 * giving an accurate view of the actual, physical bytes in the page (we're
1129 * overwriting the block mark). That's OK because it's more important to follow
1130 * rule #2.
1131 *
1132 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1133 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1134 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1135 * ECC-based or raw view of the page is implicit in which function it calls
1136 * (there is a similar pair of ECC-based/raw functions for writing).
1137 *
1138 * FIXME: The following paragraph is incorrect, now that there exist
1139 * ecc.read_oob_raw and ecc.write_oob_raw functions.
1140 *
1141 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1143 * caller wants an ECC-based or raw view of the page is not propagated down to
1144 * this driver.
1145 */
/*
 * Read the OOB for one page.
 *
 * Per the policy described in the comment block above: fill the caller's
 * buffer with set bits, read the conventional OOB from the end of the page,
 * and — when transcription (not swapping) is in use — read the block mark
 * explicitly into the first byte.  Always returns 0.
 */
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct gpmi_nand_data *this = chip->priv;

	pr_debug("page number is %d\n", page);
	/* clear the OOB buffer */
	memset(chip->oob_poi, ~0, mtd->oobsize);

	/* Read out the conventional OOB. */
	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/*
	 * Now, we want to make sure the block mark is correct. In the
	 * Swapping/Raw case, we already have it. Otherwise, we need to
	 * explicitly read it.
	 */
	if (!this->swap_block_mark) {
		/* Read the block mark into the first byte of the OOB buffer. */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
		chip->oob_poi[0] = chip->read_byte(mtd);
	}

	return 0;
}
1172
/*
 * Reject all writes to the OOB area.
 *
 * The BCH engine consumes the whole (page + OOB) region, so an independent
 * OOB write would corrupt the ECC.  The gpmi_hw_ecclayout already keeps
 * JFFS2 away from the OOB, but ioctls such as MEMWRITEOOB (which use
 * MTD_OPS_PLACE_OOB) bypass the layout, so this hook must refuse them too.
 */
static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	return -EPERM;
}
1185
/*
 * Mark the block containing @ofs bad.
 *
 * Updates the in-memory BBT entry for the block, then either refreshes the
 * flash-based BBT (NAND_BBT_USE_FLASH) or programs a zero block-mark byte
 * directly into the page; the mark's column depends on whether block mark
 * swapping is in effect.  Returns 0 on success or -EIO on program failure.
 */
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int block, ret = 0;
	uint8_t *block_mark;
	int column, page, status, chipnr;

	/* Get block number */
	block = (int)(ofs >> chip->bbt_erase_shift);
	/* The in-memory BBT stores 2 bits per block; set the "bad" bit. */
	if (chip->bbt)
		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);

	/* Do we have a flash based bad block table ? */
	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		ret = nand_update_bbt(mtd, ofs);
	else {
		chipnr = (int)(ofs >> chip->chip_shift);
		chip->select_chip(mtd, chipnr);

		/* With swapping, the mark lives at the start of the OOB. */
		column = this->swap_block_mark ? mtd->writesize : 0;

		/* Write the block mark. */
		block_mark = this->data_buffer_dma;
		block_mark[0] = 0; /* bad block marker */

		/* Shift to get page */
		page = (int)(ofs >> chip->page_shift);

		chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
		chip->write_buf(mtd, block_mark, 1);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			ret = -EIO;

		chip->select_chip(mtd, -1);
	}
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
1230
1231static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1232{
1233 struct boot_rom_geometry *geometry = &this->rom_geometry;
1234
1235 /*
1236 * Set the boot block stride size.
1237 *
1238 * In principle, we should be reading this from the OTP bits, since
1239 * that's where the ROM is going to get it. In fact, we don't have any
1240 * way to read the OTP bits, so we go with the default and hope for the
1241 * best.
1242 */
1243 geometry->stride_size_in_pages = 64;
1244
1245 /*
1246 * Set the search area stride exponent.
1247 *
1248 * In principle, we should be reading this from the OTP bits, since
1249 * that's where the ROM is going to get it. In fact, we don't have any
1250 * way to read the OTP bits, so we go with the default and hope for the
1251 * best.
1252 */
1253 geometry->search_area_stride_exponent = 2;
1254 return 0;
1255}
1256
/* The 4-byte marker that identifies a transcription stamp. */
static const char *fingerprint = "STMP";

/*
 * Scan the first search area for an NCB fingerprint.
 *
 * Returns true when a fingerprint is found (the block marks have already
 * been transcribed), false otherwise.  Temporarily selects chip 0 and
 * restores the previous selection before returning.
 */
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int found_an_ncb_fingerprint = false;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
		chip->read_buf(mtd, buffer, strlen(fingerprint));

		/* Look for the fingerprint. */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	/* Restore the chip selection the caller had in effect. */
	chip->select_chip(mtd, saved_chip_number);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
1311
/*
 * Writes a transcription stamp.
 *
 * Erases the first search area and writes the NCB fingerprint ("STMP") into
 * the first page of every stride, so later boots can detect that the block
 * marks have already been transcribed.  Temporarily selects chip 0.
 * Always returns 0; erase/program failures are only logged.
 */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	uint8_t *buffer = chip->buffers->databuf;
	int saved_chip_number;
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round the page count up to whole erase blocks. */
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);

	/* Select chip 0. */
	saved_chip_number = this->current_chip;
	chip->select_chip(mtd, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Compute the page address. */
		page = block * block_size_in_pages;

		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);

		/* Wait for the erase to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memset(chip->oob_poi, ~0, mtd->oobsize);
	/* The fingerprint lives at byte offset 12 of the page. */
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
		chip->ecc.write_page_raw(mtd, chip, buffer, 0);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		/* Wait for the write to finish. */
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	/* Deselect chip 0. */
	chip->select_chip(mtd, saved_chip_number);
	return 0;
}
1393
/*
 * Transcribe the factory bad block marks on an i.MX23.
 *
 * The MX23 cannot use block mark swapping, so the conventional marks must be
 * "transcribed" to where this driver expects them.  If the transcription
 * stamp is already present nothing is done; otherwise every block's
 * conventional mark is read and, when bad, the block is re-marked through
 * chip->block_markbad(), after which the stamp is written.  Always returns 0.
 */
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = &this->mtd;
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = chip->chipsize >> chip->phys_erase_shift;

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block << chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		chip->select_chip(mtd, chipnr);
		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		block_mark = chip->read_byte(mtd);
		chip->select_chip(mtd, -1);

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->block_markbad(mtd, byte);
			if (ret)
				dev_err(dev, "Failed to mark block bad with "
					"ret %d\n", ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
1462
/*
 * ROM arch-specific initialization that must run before the BBT scan.
 *
 * Every chip gets the default boot geometry; only the i.MX23 needs extra
 * work (bad block mark transcription).
 */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	return GPMI_IS_MX23(this) ? mx23_boot_init(this) : 0;
}
1472
/*
 * Recompute the BCH/NFC geometry and size the DMA buffers to match.
 * Returns 0 on success or a negative error from the BCH/allocation layer.
 */
static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
	int error;

	/* Drop the temporary DMA buffers that were used for reading the ID. */
	gpmi_free_dma_buffer(this);

	/* Compute the NFC geometry consumed by the BCH engine. */
	error = bch_set_geometry(this);
	if (error) {
		pr_err("Error setting BCH geometry : %d\n", error);
		return error;
	}

	/* Re-allocate the DMA buffers for the real page and OOB sizes. */
	return gpmi_alloc_dma_buffer(this);
}
1490
1491static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
1492{
1493 int ret;
1494
1495 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
1496 if (GPMI_IS_MX23(this))
1497 this->swap_block_mark = false;
1498 else
1499 this->swap_block_mark = true;
1500
1501 /* Set up the medium geometry */
1502 ret = gpmi_set_geometry(this);
1503 if (ret)
1504 return ret;
1505
1506 /* Adjust the ECC strength according to the chip. */
1507 this->nand.ecc.strength = this->bch_geometry.ecc_strength;
1508 this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
1509 this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
1510
1511 /* NAND boot init, depends on the gpmi_set_geometry(). */
1512 return nand_boot_init(this);
1513}
1514
1515static int gpmi_scan_bbt(struct mtd_info *mtd)
1516{
1517 struct nand_chip *chip = mtd->priv;
1518 struct gpmi_nand_data *this = chip->priv;
1519 int ret;
1520
1521 /* Prepare for the BBT scan. */
1522 ret = gpmi_pre_bbt_scan(this);
1523 if (ret)
1524 return ret;
1525
1526 /*
1527 * Can we enable the extra features? such as EDO or Sync mode.
1528 *
1529 * We do not check the return value now. That's means if we fail in
1530 * enable the extra features, we still can run in the normal way.
1531 */
1532 gpmi_extra_init(this);
1533
1534 /* use the default BBT implementation */
1535 return nand_default_bbt(mtd);
1536}
1537
/* Tear down the MTD/NAND registration and release the DMA buffers. */
static void gpmi_nfc_exit(struct gpmi_nand_data *this)
{
	nand_release(&this->mtd);
	gpmi_free_dma_buffer(this);
}
1543
/*
 * Initialize the MTD and nand_chip structures, scan the chip, and register
 * the MTD device (parsing partitions from the device tree).
 *
 * Returns 0 on success; on failure everything is unwound via gpmi_nfc_exit().
 */
static int gpmi_nfc_init(struct gpmi_nand_data *this)
{
	struct mtd_info *mtd = &this->mtd;
	struct nand_chip *chip = &this->nand;
	struct mtd_part_parser_data ppdata = {};
	int ret;

	/* init current chip */
	this->current_chip = -1;

	/* init the MTD data structures */
	mtd->priv = chip;
	mtd->name = "gpmi-nand";
	mtd->owner = THIS_MODULE;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	chip->priv = this;
	chip->select_chip = gpmi_select_chip;
	chip->cmd_ctrl = gpmi_cmd_ctrl;
	chip->dev_ready = gpmi_dev_ready;
	chip->read_byte = gpmi_read_byte;
	chip->read_buf = gpmi_read_buf;
	chip->write_buf = gpmi_write_buf;
	chip->ecc.read_page = gpmi_ecc_read_page;
	chip->ecc.write_page = gpmi_ecc_write_page;
	chip->ecc.read_oob = gpmi_ecc_read_oob;
	chip->ecc.write_oob = gpmi_ecc_write_oob;
	chip->scan_bbt = gpmi_scan_bbt;
	chip->badblock_pattern = &gpmi_bbt_descr;
	chip->block_markbad = gpmi_block_markbad;
	chip->options |= NAND_NO_SUBPAGE_WRITE;
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 1;
	chip->ecc.strength = 8;
	chip->ecc.layout = &gpmi_hw_ecclayout;
	/* Honor an on-flash BBT request from the device tree. */
	if (of_get_nand_on_flash_bbt(this->dev->of_node))
		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;

	/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		goto err_out;

	ret = nand_scan(mtd, 1);
	if (ret) {
		pr_err("Chip scan failed\n");
		goto err_out;
	}

	ppdata.of_node = this->pdev->dev.of_node;
	ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
	if (ret)
		goto err_out;
	return 0;

err_out:
	gpmi_nfc_exit(this);
	return ret;
}
1605
/* Platform device IDs; driver_data selects the per-SoC behavior. */
static const struct platform_device_id gpmi_ids[] = {
	{ .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
	{ .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
	{ .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
	{},
};
1612
/* Device tree match table; .data points at the matching gpmi_ids entry. */
static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = (void *)&gpmi_ids[IS_MX23]
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = (void *)&gpmi_ids[IS_MX28]
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = (void *)&gpmi_ids[IS_MX6Q]
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
1626
1627static int gpmi_nand_probe(struct platform_device *pdev)
1628{
1629 struct gpmi_nand_data *this;
1630 const struct of_device_id *of_id;
1631 int ret;
1632
1633 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
1634 if (of_id) {
1635 pdev->id_entry = of_id->data;
1636 } else {
1637 pr_err("Failed to find the right device id.\n");
1638 return -ENOMEM;
1639 }
1640
1641 this = kzalloc(sizeof(*this), GFP_KERNEL);
1642 if (!this) {
1643 pr_err("Failed to allocate per-device memory\n");
1644 return -ENOMEM;
1645 }
1646
1647 platform_set_drvdata(pdev, this);
1648 this->pdev = pdev;
1649 this->dev = &pdev->dev;
1650
1651 ret = acquire_resources(this);
1652 if (ret)
1653 goto exit_acquire_resources;
1654
1655 ret = init_hardware(this);
1656 if (ret)
1657 goto exit_nfc_init;
1658
1659 ret = gpmi_nfc_init(this);
1660 if (ret)
1661 goto exit_nfc_init;
1662
1663 dev_info(this->dev, "driver registered.\n");
1664
1665 return 0;
1666
1667exit_nfc_init:
1668 release_resources(this);
1669exit_acquire_resources:
1670 platform_set_drvdata(pdev, NULL);
1671 kfree(this);
1672 dev_err(this->dev, "driver registration failed: %d\n", ret);
1673
1674 return ret;
1675}
1676
1677static int gpmi_nand_remove(struct platform_device *pdev)
1678{
1679 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
1680
1681 gpmi_nfc_exit(this);
1682 release_resources(this);
1683 platform_set_drvdata(pdev, NULL);
1684 kfree(this);
1685 return 0;
1686}
1687
/* Platform driver glue: probe/remove plus the OF and platform ID tables. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.of_match_table = gpmi_nand_id_table,
	},
	.probe = gpmi_nand_probe,
	.remove = gpmi_nand_remove,
	.id_table = gpmi_ids,
};
module_platform_driver(gpmi_nand_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
deleted file mode 100644
index 3d93a5e3909..00000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ /dev/null
@@ -1,294 +0,0 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
18#define __DRIVERS_MTD_NAND_GPMI_NAND_H
19
20#include <linux/mtd/nand.h>
21#include <linux/platform_device.h>
22#include <linux/dma-mapping.h>
23#include <linux/fsl/mxs-dma.h>
24
25#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
26struct resources {
27 void __iomem *gpmi_regs;
28 void __iomem *bch_regs;
29 unsigned int bch_low_interrupt;
30 unsigned int bch_high_interrupt;
31 unsigned int dma_low_channel;
32 unsigned int dma_high_channel;
33 struct clk *clock[GPMI_CLK_MAX];
34};
35
36/**
37 * struct bch_geometry - BCH geometry description.
38 * @gf_len: The length of Galois Field. (e.g., 13 or 14)
39 * @ecc_strength: A number that describes the strength of the ECC
40 * algorithm.
41 * @page_size: The size, in bytes, of a physical page, including
42 * both data and OOB.
43 * @metadata_size: The size, in bytes, of the metadata.
44 * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note
45 * the first chunk in the page includes both data and
46 * metadata, so it's a bit larger than this value.
47 * @ecc_chunk_count: The number of ECC chunks in the page,
48 * @payload_size: The size, in bytes, of the payload buffer.
49 * @auxiliary_size: The size, in bytes, of the auxiliary buffer.
50 * @auxiliary_status_offset: The offset into the auxiliary buffer at which
51 * the ECC status appears.
52 * @block_mark_byte_offset: The byte offset in the ECC-based page view at
53 * which the underlying physical block mark appears.
54 * @block_mark_bit_offset: The bit offset into the ECC-based page view at
55 * which the underlying physical block mark appears.
56 */
57struct bch_geometry {
58 unsigned int gf_len;
59 unsigned int ecc_strength;
60 unsigned int page_size;
61 unsigned int metadata_size;
62 unsigned int ecc_chunk_size;
63 unsigned int ecc_chunk_count;
64 unsigned int payload_size;
65 unsigned int auxiliary_size;
66 unsigned int auxiliary_status_offset;
67 unsigned int block_mark_byte_offset;
68 unsigned int block_mark_bit_offset;
69};
70
71/**
72 * struct boot_rom_geometry - Boot ROM geometry description.
73 * @stride_size_in_pages: The size of a boot block stride, in pages.
74 * @search_area_stride_exponent: The logarithm to base 2 of the size of a
75 * search area in boot block strides.
76 */
77struct boot_rom_geometry {
78 unsigned int stride_size_in_pages;
79 unsigned int search_area_stride_exponent;
80};
81
82/* DMA operations types */
83enum dma_ops_type {
84 DMA_FOR_COMMAND = 1,
85 DMA_FOR_READ_DATA,
86 DMA_FOR_WRITE_DATA,
87 DMA_FOR_READ_ECC_PAGE,
88 DMA_FOR_WRITE_ECC_PAGE
89};
90
91/**
92 * struct nand_timing - Fundamental timing attributes for NAND.
93 * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the
94 * maximum of tDS and tWP. A negative value
95 * indicates this characteristic isn't known.
96 * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the
97 * maximum of tDH, tWH and tREH. A negative value
98 * indicates this characteristic isn't known.
99 * @address_setup_in_ns: The address setup time, in nanoseconds. Usually
100 * the maximum of tCLS, tCS and tALS. A negative
101 * value indicates this characteristic isn't known.
102 * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value
103 * indicates this characteristic isn't known.
104 * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A
105 * negative value indicates this characteristic isn't
106 * known.
107 * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A
108 * negative value indicates this characteristic isn't
109 * known.
110 * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A
111 * negative value indicates this characteristic isn't
112 * known.
113 */
114struct nand_timing {
115 int8_t data_setup_in_ns;
116 int8_t data_hold_in_ns;
117 int8_t address_setup_in_ns;
118 int8_t gpmi_sample_delay_in_ns;
119 int8_t tREA_in_ns;
120 int8_t tRLOH_in_ns;
121 int8_t tRHOH_in_ns;
122};
123
124struct gpmi_nand_data {
125 /* flags */
126#define GPMI_ASYNC_EDO_ENABLED (1 << 0)
127#define GPMI_TIMING_INIT_OK (1 << 1)
128 int flags;
129
130 /* System Interface */
131 struct device *dev;
132 struct platform_device *pdev;
133
134 /* Resources */
135 struct resources resources;
136
137 /* Flash Hardware */
138 struct nand_timing timing;
139 int timing_mode;
140
141 /* BCH */
142 struct bch_geometry bch_geometry;
143 struct completion bch_done;
144
145 /* NAND Boot issue */
146 bool swap_block_mark;
147 struct boot_rom_geometry rom_geometry;
148
149 /* MTD / NAND */
150 struct nand_chip nand;
151 struct mtd_info mtd;
152
153 /* General-use Variables */
154 int current_chip;
155 unsigned int command_length;
156
157 /* passed from upper layer */
158 uint8_t *upper_buf;
159 int upper_len;
160
161 /* for DMA operations */
162 bool direct_dma_map_ok;
163
164 struct scatterlist cmd_sgl;
165 char *cmd_buffer;
166
167 struct scatterlist data_sgl;
168 char *data_buffer_dma;
169
170 void *page_buffer_virt;
171 dma_addr_t page_buffer_phys;
172 unsigned int page_buffer_size;
173
174 void *payload_virt;
175 dma_addr_t payload_phys;
176
177 void *auxiliary_virt;
178 dma_addr_t auxiliary_phys;
179
180 /* DMA channels */
181#define DMA_CHANS 8
182 struct dma_chan *dma_chans[DMA_CHANS];
183 struct mxs_dma_data dma_data;
184 enum dma_ops_type last_dma_type;
185 enum dma_ops_type dma_type;
186 struct completion dma_done;
187
188 /* private */
189 void *private;
190};
191
192/**
193 * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
194 * @data_setup_in_cycles: The data setup time, in cycles.
195 * @data_hold_in_cycles: The data hold time, in cycles.
196 * @address_setup_in_cycles: The address setup time, in cycles.
197 * @device_busy_timeout: The timeout waiting for NAND Ready/Busy,
198 * this value is the number of cycles multiplied
199 * by 4096.
200 * @use_half_periods: Indicates the clock is running slowly, so the
201 * NFC DLL should use half-periods.
202 * @sample_delay_factor: The sample delay factor.
203 * @wrn_dly_sel: The delay on the GPMI write strobe.
204 */
205struct gpmi_nfc_hardware_timing {
206 /* for HW_GPMI_TIMING0 */
207 uint8_t data_setup_in_cycles;
208 uint8_t data_hold_in_cycles;
209 uint8_t address_setup_in_cycles;
210
211 /* for HW_GPMI_TIMING1 */
212 uint16_t device_busy_timeout;
213#define GPMI_DEFAULT_BUSY_TIMEOUT 0x500 /* default busy timeout value.*/
214
215 /* for HW_GPMI_CTRL1 */
216 bool use_half_periods;
217 uint8_t sample_delay_factor;
218 uint8_t wrn_dly_sel;
219};
220
221/**
222 * struct timing_threshod - Timing threshold
223 * @max_data_setup_cycles: The maximum number of data setup cycles that
224 * can be expressed in the hardware.
225 * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires
226 * for data read internal setup. In the Reference
227 * Manual, see the chapter "High-Speed NAND
228 * Timing" for more details.
229 * @max_sample_delay_factor: The maximum sample delay factor that can be
230 * expressed in the hardware.
231 * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the
232 * sample delay DLL hardware can possibly work
233 * with (the DLL is unusable with longer periods).
234 * If the full-cycle period is greater than HALF
235 * this value, the DLL must be configured to use
236 * half-periods.
237 * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the
238 * DLL can implement.
239 * @clock_frequency_in_hz: The clock frequency, in Hz, during the current
240 * I/O transaction. If no I/O transaction is in
241 * progress, this is the clock frequency during
242 * the most recent I/O transaction.
243 */
244struct timing_threshod {
245 const unsigned int max_chip_count;
246 const unsigned int max_data_setup_cycles;
247 const unsigned int internal_data_setup_in_ns;
248 const unsigned int max_sample_delay_factor;
249 const unsigned int max_dll_clock_period_in_ns;
250 const unsigned int max_dll_delay_in_ns;
251 unsigned long clock_frequency_in_hz;
252
253};
254
255/* Common Services */
256extern int common_nfc_set_geometry(struct gpmi_nand_data *);
257extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
258extern void prepare_data_dma(struct gpmi_nand_data *,
259 enum dma_data_direction dr);
260extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
261 struct dma_async_tx_descriptor *);
262extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
263 struct dma_async_tx_descriptor *);
264
265/* GPMI-NAND helper function library */
266extern int gpmi_init(struct gpmi_nand_data *);
267extern int gpmi_extra_init(struct gpmi_nand_data *);
268extern void gpmi_clear_bch(struct gpmi_nand_data *);
269extern void gpmi_dump_info(struct gpmi_nand_data *);
270extern int bch_set_geometry(struct gpmi_nand_data *);
271extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
272extern int gpmi_send_command(struct gpmi_nand_data *);
273extern void gpmi_begin(struct gpmi_nand_data *);
274extern void gpmi_end(struct gpmi_nand_data *);
275extern int gpmi_read_data(struct gpmi_nand_data *);
276extern int gpmi_send_data(struct gpmi_nand_data *);
277extern int gpmi_send_page(struct gpmi_nand_data *,
278 dma_addr_t payload, dma_addr_t auxiliary);
279extern int gpmi_read_page(struct gpmi_nand_data *,
280 dma_addr_t payload, dma_addr_t auxiliary);
281
282/* BCH : Status Block Completion Codes */
283#define STATUS_GOOD 0x00
284#define STATUS_ERASED 0xff
285#define STATUS_UNCORRECTABLE 0xfe
286
287/* Use the platform_id to distinguish different Archs. */
288#define IS_MX23 0x0
289#define IS_MX28 0x1
290#define IS_MX6Q 0x2
291#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
292#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
293#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
294#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
deleted file mode 100644
index 53397cc290f..00000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#ifndef __GPMI_NAND_GPMI_REGS_H
22#define __GPMI_NAND_GPMI_REGS_H
23
24#define HW_GPMI_CTRL0 0x00000000
25#define HW_GPMI_CTRL0_SET 0x00000004
26#define HW_GPMI_CTRL0_CLR 0x00000008
27#define HW_GPMI_CTRL0_TOG 0x0000000c
28
29#define BP_GPMI_CTRL0_COMMAND_MODE 24
30#define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE)
31#define BF_GPMI_CTRL0_COMMAND_MODE(v) \
32 (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
33#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0
34#define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1
35#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2
36#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3
37
38#define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23)
39#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0
40#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1
41
42/*
43 * Difference in LOCK_CS between imx23 and imx28 :
44 * This bit may impact the _POWER_ consumption. So some chips
45 * do not set it.
46 */
47#define MX23_BP_GPMI_CTRL0_LOCK_CS 22
48#define MX28_BP_GPMI_CTRL0_LOCK_CS 27
49#define LOCK_CS_ENABLE 0x1
50#define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0
51
52/* Difference in CS between imx23 and imx28 */
53#define BP_GPMI_CTRL0_CS 20
54#define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS)
55#define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS)
56#define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \
57 (GPMI_IS_MX23((x)) \
58 ? MX23_BM_GPMI_CTRL0_CS \
59 : MX28_BM_GPMI_CTRL0_CS))
60
61#define BP_GPMI_CTRL0_ADDRESS 17
62#define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS)
63#define BF_GPMI_CTRL0_ADDRESS(v) \
64 (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
65#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0
66#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1
67#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2
68
69#define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16)
70#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0
71#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1
72
73#define BP_GPMI_CTRL0_XFER_COUNT 0
74#define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT)
75#define BF_GPMI_CTRL0_XFER_COUNT(v) \
76 (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
77
78#define HW_GPMI_COMPARE 0x00000010
79
80#define HW_GPMI_ECCCTRL 0x00000020
81#define HW_GPMI_ECCCTRL_SET 0x00000024
82#define HW_GPMI_ECCCTRL_CLR 0x00000028
83#define HW_GPMI_ECCCTRL_TOG 0x0000002c
84
85#define BP_GPMI_ECCCTRL_ECC_CMD 13
86#define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD)
87#define BF_GPMI_ECCCTRL_ECC_CMD(v) \
88 (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
89#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0
90#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1
91
92#define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12)
93#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1
94#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0
95
96#define BP_GPMI_ECCCTRL_BUFFER_MASK 0
97#define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
98#define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \
99 (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
100#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100
101#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF
102
103#define HW_GPMI_ECCCOUNT 0x00000030
104#define HW_GPMI_PAYLOAD 0x00000040
105#define HW_GPMI_AUXILIARY 0x00000050
106#define HW_GPMI_CTRL1 0x00000060
107#define HW_GPMI_CTRL1_SET 0x00000064
108#define HW_GPMI_CTRL1_CLR 0x00000068
109#define HW_GPMI_CTRL1_TOG 0x0000006c
110
111#define BP_GPMI_CTRL1_WRN_DLY_SEL 22
112#define BM_GPMI_CTRL1_WRN_DLY_SEL (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
113#define BF_GPMI_CTRL1_WRN_DLY_SEL(v) \
114 (((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
115#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS 0x0
116#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS 0x1
117#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2
118#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3
119
120#define BM_GPMI_CTRL1_BCH_MODE (1 << 18)
121
122#define BP_GPMI_CTRL1_DLL_ENABLE 17
123#define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE)
124
125#define BP_GPMI_CTRL1_HALF_PERIOD 16
126#define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD)
127
128#define BP_GPMI_CTRL1_RDN_DELAY 12
129#define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY)
130#define BF_GPMI_CTRL1_RDN_DELAY(v) \
131 (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
132
133#define BM_GPMI_CTRL1_DEV_RESET (1 << 3)
134#define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0
135#define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1
136
137#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2)
138#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0
139#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1
140
141#define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1)
142#define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0
143#define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1
144
145#define BM_GPMI_CTRL1_GPMI_MODE (1 << 0)
146
147#define HW_GPMI_TIMING0 0x00000070
148
149#define BP_GPMI_TIMING0_ADDRESS_SETUP 16
150#define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
151#define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \
152 (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
153
154#define BP_GPMI_TIMING0_DATA_HOLD 8
155#define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD)
156#define BF_GPMI_TIMING0_DATA_HOLD(v) \
157 (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
158
159#define BP_GPMI_TIMING0_DATA_SETUP 0
160#define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP)
161#define BF_GPMI_TIMING0_DATA_SETUP(v) \
162 (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
163
164#define HW_GPMI_TIMING1 0x00000080
165#define BP_GPMI_TIMING1_BUSY_TIMEOUT 16
166#define BM_GPMI_TIMING1_BUSY_TIMEOUT (0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
167#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v) \
168 (((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
169
170#define HW_GPMI_TIMING2 0x00000090
171#define HW_GPMI_DATA 0x000000a0
172
173/* MX28 uses this to detect READY. */
174#define HW_GPMI_STAT 0x000000b0
175#define MX28_BP_GPMI_STAT_READY_BUSY 24
176#define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY)
177#define MX28_BF_GPMI_STAT_READY_BUSY(v) \
178 (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
179
180/* MX23 uses this to detect READY. */
181#define HW_GPMI_DEBUG 0x000000c0
182#define MX23_BP_GPMI_DEBUG_READY0 28
183#define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0)
184#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 50166e93ba9..02a03e67109 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -24,7 +24,7 @@
24#include <linux/mtd/nand.h> 24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <mach/hardware.h> 27#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
28#include <asm/sizes.h> 28#include <asm/sizes.h>
29#include <mach/h1900-gpio.h> 29#include <mach/h1900-gpio.h>
30#include <mach/ipaq.h> 30#include <mach/ipaq.h>
@@ -81,6 +81,9 @@ static int h1910_device_ready(struct mtd_info *mtd)
81static int __init h1910_init(void) 81static int __init h1910_init(void)
82{ 82{
83 struct nand_chip *this; 83 struct nand_chip *this;
84 const char *part_type = 0;
85 int mtd_parts_nb = 0;
86 struct mtd_partition *mtd_parts = 0;
84 void __iomem *nandaddr; 87 void __iomem *nandaddr;
85 88
86 if (!machine_is_h1900()) 89 if (!machine_is_h1900())
@@ -124,6 +127,7 @@ static int __init h1910_init(void)
124 /* 15 us command delay time */ 127 /* 15 us command delay time */
125 this->chip_delay = 50; 128 this->chip_delay = 50;
126 this->ecc.mode = NAND_ECC_SOFT; 129 this->ecc.mode = NAND_ECC_SOFT;
130 this->options = NAND_NO_AUTOINCR;
127 131
128 /* Scan to find existence of the device */ 132 /* Scan to find existence of the device */
129 if (nand_scan(h1910_nand_mtd, 1)) { 133 if (nand_scan(h1910_nand_mtd, 1)) {
@@ -132,10 +136,22 @@ static int __init h1910_init(void)
132 iounmap((void *)nandaddr); 136 iounmap((void *)nandaddr);
133 return -ENXIO; 137 return -ENXIO;
134 } 138 }
139#ifdef CONFIG_MTD_CMDLINE_PARTS
140 mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
141 if (mtd_parts_nb > 0)
142 part_type = "command line";
143 else
144 mtd_parts_nb = 0;
145#endif
146 if (mtd_parts_nb == 0) {
147 mtd_parts = partition_info;
148 mtd_parts_nb = NUM_PARTITIONS;
149 part_type = "static";
150 }
135 151
136 /* Register the partitions */ 152 /* Register the partitions */
137 mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info, 153 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
138 NUM_PARTITIONS); 154 mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
139 155
140 /* Return happy */ 156 /* Return happy */
141 return 0; 157 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index b76460eeaf2..6e813daed06 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -52,10 +52,9 @@
52 52
53#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1) 53#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
54#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1) 54#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
55#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa
56 55
57#define JZ_NAND_MEM_CMD_OFFSET 0x08000
58#define JZ_NAND_MEM_ADDR_OFFSET 0x10000 56#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
57#define JZ_NAND_MEM_CMD_OFFSET 0x08000
59 58
60struct jz_nand { 59struct jz_nand {
61 struct mtd_info mtd; 60 struct mtd_info mtd;
@@ -63,11 +62,8 @@ struct jz_nand {
63 void __iomem *base; 62 void __iomem *base;
64 struct resource *mem; 63 struct resource *mem;
65 64
66 unsigned char banks[JZ_NAND_NUM_BANKS]; 65 void __iomem *bank_base;
67 void __iomem *bank_base[JZ_NAND_NUM_BANKS]; 66 struct resource *bank_mem;
68 struct resource *bank_mem[JZ_NAND_NUM_BANKS];
69
70 int selected_bank;
71 67
72 struct jz_nand_platform_data *pdata; 68 struct jz_nand_platform_data *pdata;
73 bool is_reading; 69 bool is_reading;
@@ -78,50 +74,26 @@ static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
78 return container_of(mtd, struct jz_nand, mtd); 74 return container_of(mtd, struct jz_nand, mtd);
79} 75}
80 76
81static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
82{
83 struct jz_nand *nand = mtd_to_jz_nand(mtd);
84 struct nand_chip *chip = mtd->priv;
85 uint32_t ctrl;
86 int banknr;
87
88 ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
89 ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK;
90
91 if (chipnr == -1) {
92 banknr = -1;
93 } else {
94 banknr = nand->banks[chipnr] - 1;
95 chip->IO_ADDR_R = nand->bank_base[banknr];
96 chip->IO_ADDR_W = nand->bank_base[banknr];
97 }
98 writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
99
100 nand->selected_bank = banknr;
101}
102
103static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) 77static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
104{ 78{
105 struct jz_nand *nand = mtd_to_jz_nand(mtd); 79 struct jz_nand *nand = mtd_to_jz_nand(mtd);
106 struct nand_chip *chip = mtd->priv; 80 struct nand_chip *chip = mtd->priv;
107 uint32_t reg; 81 uint32_t reg;
108 void __iomem *bank_base = nand->bank_base[nand->selected_bank];
109
110 BUG_ON(nand->selected_bank < 0);
111 82
112 if (ctrl & NAND_CTRL_CHANGE) { 83 if (ctrl & NAND_CTRL_CHANGE) {
113 BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE)); 84 BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
114 if (ctrl & NAND_ALE) 85 if (ctrl & NAND_ALE)
115 bank_base += JZ_NAND_MEM_ADDR_OFFSET; 86 chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET;
116 else if (ctrl & NAND_CLE) 87 else if (ctrl & NAND_CLE)
117 bank_base += JZ_NAND_MEM_CMD_OFFSET; 88 chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET;
118 chip->IO_ADDR_W = bank_base; 89 else
90 chip->IO_ADDR_W = nand->bank_base;
119 91
120 reg = readl(nand->base + JZ_REG_NAND_CTRL); 92 reg = readl(nand->base + JZ_REG_NAND_CTRL);
121 if (ctrl & NAND_NCE) 93 if (ctrl & NAND_NCE)
122 reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank); 94 reg |= JZ_NAND_CTRL_ASSERT_CHIP(0);
123 else 95 else
124 reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank); 96 reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0);
125 writel(reg, nand->base + JZ_REG_NAND_CTRL); 97 writel(reg, nand->base + JZ_REG_NAND_CTRL);
126 } 98 }
127 if (dat != NAND_CMD_NONE) 99 if (dat != NAND_CMD_NONE)
@@ -279,8 +251,12 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
279 return 0; 251 return 0;
280} 252}
281 253
254#ifdef CONFIG_MTD_CMDLINE_PARTS
255static const char *part_probes[] = {"cmdline", NULL};
256#endif
257
282static int jz_nand_ioremap_resource(struct platform_device *pdev, 258static int jz_nand_ioremap_resource(struct platform_device *pdev,
283 const char *name, struct resource **res, void *__iomem *base) 259 const char *name, struct resource **res, void __iomem **base)
284{ 260{
285 int ret; 261 int ret;
286 262
@@ -316,104 +292,15 @@ err:
316 return ret; 292 return ret;
317} 293}
318 294
319static inline void jz_nand_iounmap_resource(struct resource *res, 295static int __devinit jz_nand_probe(struct platform_device *pdev)
320 void __iomem *base)
321{
322 iounmap(base);
323 release_mem_region(res->start, resource_size(res));
324}
325
326static int jz_nand_detect_bank(struct platform_device *pdev,
327 struct jz_nand *nand, unsigned char bank,
328 size_t chipnr, uint8_t *nand_maf_id,
329 uint8_t *nand_dev_id)
330{
331 int ret;
332 int gpio;
333 char gpio_name[9];
334 char res_name[6];
335 uint32_t ctrl;
336 struct mtd_info *mtd = &nand->mtd;
337 struct nand_chip *chip = &nand->chip;
338
339 /* Request GPIO port. */
340 gpio = JZ_GPIO_MEM_CS0 + bank - 1;
341 sprintf(gpio_name, "NAND CS%d", bank);
342 ret = gpio_request(gpio, gpio_name);
343 if (ret) {
344 dev_warn(&pdev->dev,
345 "Failed to request %s gpio %d: %d\n",
346 gpio_name, gpio, ret);
347 goto notfound_gpio;
348 }
349
350 /* Request I/O resource. */
351 sprintf(res_name, "bank%d", bank);
352 ret = jz_nand_ioremap_resource(pdev, res_name,
353 &nand->bank_mem[bank - 1],
354 &nand->bank_base[bank - 1]);
355 if (ret)
356 goto notfound_resource;
357
358 /* Enable chip in bank. */
359 jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
360 ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
361 ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
362 writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
363
364 if (chipnr == 0) {
365 /* Detect first chip. */
366 ret = nand_scan_ident(mtd, 1, NULL);
367 if (ret)
368 goto notfound_id;
369
370 /* Retrieve the IDs from the first chip. */
371 chip->select_chip(mtd, 0);
372 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
373 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
374 *nand_maf_id = chip->read_byte(mtd);
375 *nand_dev_id = chip->read_byte(mtd);
376 } else {
377 /* Detect additional chip. */
378 chip->select_chip(mtd, chipnr);
379 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
380 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
381 if (*nand_maf_id != chip->read_byte(mtd)
382 || *nand_dev_id != chip->read_byte(mtd)) {
383 ret = -ENODEV;
384 goto notfound_id;
385 }
386
387 /* Update size of the MTD. */
388 chip->numchips++;
389 mtd->size += chip->chipsize;
390 }
391
392 dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
393 return 0;
394
395notfound_id:
396 dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
397 ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
398 writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
399 jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
400 jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
401 nand->bank_base[bank - 1]);
402notfound_resource:
403 gpio_free(gpio);
404notfound_gpio:
405 return ret;
406}
407
408static int jz_nand_probe(struct platform_device *pdev)
409{ 296{
410 int ret; 297 int ret;
411 struct jz_nand *nand; 298 struct jz_nand *nand;
412 struct nand_chip *chip; 299 struct nand_chip *chip;
413 struct mtd_info *mtd; 300 struct mtd_info *mtd;
414 struct jz_nand_platform_data *pdata = pdev->dev.platform_data; 301 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
415 size_t chipnr, bank_idx; 302 struct mtd_partition *partition_info;
416 uint8_t nand_maf_id = 0, nand_dev_id = 0; 303 int num_partitions = 0;
417 304
418 nand = kzalloc(sizeof(*nand), GFP_KERNEL); 305 nand = kzalloc(sizeof(*nand), GFP_KERNEL);
419 if (!nand) { 306 if (!nand) {
@@ -424,6 +311,10 @@ static int jz_nand_probe(struct platform_device *pdev)
424 ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base); 311 ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
425 if (ret) 312 if (ret)
426 goto err_free; 313 goto err_free;
314 ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem,
315 &nand->bank_base);
316 if (ret)
317 goto err_iounmap_mmio;
427 318
428 if (pdata && gpio_is_valid(pdata->busy_gpio)) { 319 if (pdata && gpio_is_valid(pdata->busy_gpio)) {
429 ret = gpio_request(pdata->busy_gpio, "NAND busy pin"); 320 ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
@@ -431,7 +322,7 @@ static int jz_nand_probe(struct platform_device *pdev)
431 dev_err(&pdev->dev, 322 dev_err(&pdev->dev,
432 "Failed to request busy gpio %d: %d\n", 323 "Failed to request busy gpio %d: %d\n",
433 pdata->busy_gpio, ret); 324 pdata->busy_gpio, ret);
434 goto err_iounmap_mmio; 325 goto err_iounmap_mem;
435 } 326 }
436 } 327 }
437 328
@@ -447,58 +338,28 @@ static int jz_nand_probe(struct platform_device *pdev)
447 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; 338 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
448 chip->ecc.size = 512; 339 chip->ecc.size = 512;
449 chip->ecc.bytes = 9; 340 chip->ecc.bytes = 9;
450 chip->ecc.strength = 4;
451 341
452 if (pdata) 342 if (pdata)
453 chip->ecc.layout = pdata->ecc_layout; 343 chip->ecc.layout = pdata->ecc_layout;
454 344
455 chip->chip_delay = 50; 345 chip->chip_delay = 50;
456 chip->cmd_ctrl = jz_nand_cmd_ctrl; 346 chip->cmd_ctrl = jz_nand_cmd_ctrl;
457 chip->select_chip = jz_nand_select_chip;
458 347
459 if (pdata && gpio_is_valid(pdata->busy_gpio)) 348 if (pdata && gpio_is_valid(pdata->busy_gpio))
460 chip->dev_ready = jz_nand_dev_ready; 349 chip->dev_ready = jz_nand_dev_ready;
461 350
351 chip->IO_ADDR_R = nand->bank_base;
352 chip->IO_ADDR_W = nand->bank_base;
353
462 nand->pdata = pdata; 354 nand->pdata = pdata;
463 platform_set_drvdata(pdev, nand); 355 platform_set_drvdata(pdev, nand);
464 356
465 /* We are going to autodetect NAND chips in the banks specified in the 357 writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL);
466 * platform data. Although nand_scan_ident() can detect multiple chips, 358
467 * it requires those chips to be numbered consecuitively, which is not 359 ret = nand_scan_ident(mtd, 1, NULL);
468 * always the case for external memory banks. And a fixed chip-to-bank 360 if (ret) {
469 * mapping is not practical either, since for example Dingoo units 361 dev_err(&pdev->dev, "Failed to scan nand\n");
470 * produced at different times have NAND chips in different banks. 362 goto err_gpio_free;
471 */
472 chipnr = 0;
473 for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) {
474 unsigned char bank;
475
476 /* If there is no platform data, look for NAND in bank 1,
477 * which is the most likely bank since it is the only one
478 * that can be booted from.
479 */
480 bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1;
481 if (bank == 0)
482 break;
483 if (bank > JZ_NAND_NUM_BANKS) {
484 dev_warn(&pdev->dev,
485 "Skipping non-existing bank: %d\n", bank);
486 continue;
487 }
488 /* The detection routine will directly or indirectly call
489 * jz_nand_select_chip(), so nand->banks has to contain the
490 * bank we're checking.
491 */
492 nand->banks[chipnr] = bank;
493 if (jz_nand_detect_bank(pdev, nand, bank, chipnr,
494 &nand_maf_id, &nand_dev_id) == 0)
495 chipnr++;
496 else
497 nand->banks[chipnr] = 0;
498 }
499 if (chipnr == 0) {
500 dev_err(&pdev->dev, "No NAND chips found\n");
501 goto err_gpio_busy;
502 } 363 }
503 364
504 if (pdata && pdata->ident_callback) { 365 if (pdata && pdata->ident_callback) {
@@ -508,13 +369,19 @@ static int jz_nand_probe(struct platform_device *pdev)
508 369
509 ret = nand_scan_tail(mtd); 370 ret = nand_scan_tail(mtd);
510 if (ret) { 371 if (ret) {
511 dev_err(&pdev->dev, "Failed to scan NAND\n"); 372 dev_err(&pdev->dev, "Failed to scan nand\n");
512 goto err_unclaim_banks; 373 goto err_gpio_free;
513 } 374 }
514 375
515 ret = mtd_device_parse_register(mtd, NULL, NULL, 376#ifdef CONFIG_MTD_CMDLINE_PARTS
516 pdata ? pdata->partitions : NULL, 377 num_partitions = parse_mtd_partitions(mtd, part_probes,
517 pdata ? pdata->num_partitions : 0); 378 &partition_info, 0);
379#endif
380 if (num_partitions <= 0 && pdata) {
381 num_partitions = pdata->num_partitions;
382 partition_info = pdata->partitions;
383 }
384 ret = mtd_device_register(mtd, partition_info, num_partitions);
518 385
519 if (ret) { 386 if (ret) {
520 dev_err(&pdev->dev, "Failed to add mtd device\n"); 387 dev_err(&pdev->dev, "Failed to add mtd device\n");
@@ -526,49 +393,32 @@ static int jz_nand_probe(struct platform_device *pdev)
526 return 0; 393 return 0;
527 394
528err_nand_release: 395err_nand_release:
529 nand_release(mtd); 396 nand_release(&nand->mtd);
530err_unclaim_banks: 397err_gpio_free:
531 while (chipnr--) {
532 unsigned char bank = nand->banks[chipnr];
533 gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
534 jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
535 nand->bank_base[bank - 1]);
536 }
537 writel(0, nand->base + JZ_REG_NAND_CTRL);
538err_gpio_busy:
539 if (pdata && gpio_is_valid(pdata->busy_gpio))
540 gpio_free(pdata->busy_gpio);
541 platform_set_drvdata(pdev, NULL); 398 platform_set_drvdata(pdev, NULL);
399 gpio_free(pdata->busy_gpio);
400err_iounmap_mem:
401 iounmap(nand->bank_base);
542err_iounmap_mmio: 402err_iounmap_mmio:
543 jz_nand_iounmap_resource(nand->mem, nand->base); 403 iounmap(nand->base);
544err_free: 404err_free:
545 kfree(nand); 405 kfree(nand);
546 return ret; 406 return ret;
547} 407}
548 408
549static int jz_nand_remove(struct platform_device *pdev) 409static int __devexit jz_nand_remove(struct platform_device *pdev)
550{ 410{
551 struct jz_nand *nand = platform_get_drvdata(pdev); 411 struct jz_nand *nand = platform_get_drvdata(pdev);
552 struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
553 size_t i;
554 412
555 nand_release(&nand->mtd); 413 nand_release(&nand->mtd);
556 414
557 /* Deassert and disable all chips */ 415 /* Deassert and disable all chips */
558 writel(0, nand->base + JZ_REG_NAND_CTRL); 416 writel(0, nand->base + JZ_REG_NAND_CTRL);
559 417
560 for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) { 418 iounmap(nand->bank_base);
561 unsigned char bank = nand->banks[i]; 419 release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem));
562 if (bank != 0) { 420 iounmap(nand->base);
563 jz_nand_iounmap_resource(nand->bank_mem[bank - 1], 421 release_mem_region(nand->mem->start, resource_size(nand->mem));
564 nand->bank_base[bank - 1]);
565 gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
566 }
567 }
568 if (pdata && gpio_is_valid(pdata->busy_gpio))
569 gpio_free(pdata->busy_gpio);
570
571 jz_nand_iounmap_resource(nand->mem, nand->base);
572 422
573 platform_set_drvdata(pdev, NULL); 423 platform_set_drvdata(pdev, NULL);
574 kfree(nand); 424 kfree(nand);
@@ -578,14 +428,24 @@ static int jz_nand_remove(struct platform_device *pdev)
578 428
579static struct platform_driver jz_nand_driver = { 429static struct platform_driver jz_nand_driver = {
580 .probe = jz_nand_probe, 430 .probe = jz_nand_probe,
581 .remove = jz_nand_remove, 431 .remove = __devexit_p(jz_nand_remove),
582 .driver = { 432 .driver = {
583 .name = "jz4740-nand", 433 .name = "jz4740-nand",
584 .owner = THIS_MODULE, 434 .owner = THIS_MODULE,
585 }, 435 },
586}; 436};
587 437
588module_platform_driver(jz_nand_driver); 438static int __init jz_nand_init(void)
439{
440 return platform_driver_register(&jz_nand_driver);
441}
442module_init(jz_nand_init);
443
444static void __exit jz_nand_exit(void)
445{
446 platform_driver_unregister(&jz_nand_driver);
447}
448module_exit(jz_nand_exit);
589 449
590MODULE_LICENSE("GPL"); 450MODULE_LICENSE("GPL");
591MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 451MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
deleted file mode 100644
index f182befa736..00000000000
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ /dev/null
@@ -1,924 +0,0 @@
1/*
2 * Driver for NAND MLC Controller in LPC32xx
3 *
4 * Author: Roland Stigge <stigge@antcom.de>
5 *
6 * Copyright © 2011 WORK Microwave GmbH
7 * Copyright © 2011, 2012 Roland Stigge
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 *
20 * NAND Flash Controller Operation:
21 * - Read: Auto Decode
22 * - Write: Auto Encode
23 * - Tested Page Sizes: 2048, 4096
24 */
25
26#include <linux/slab.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/nand.h>
31#include <linux/mtd/partitions.h>
32#include <linux/clk.h>
33#include <linux/err.h>
34#include <linux/delay.h>
35#include <linux/completion.h>
36#include <linux/interrupt.h>
37#include <linux/of.h>
38#include <linux/of_mtd.h>
39#include <linux/of_gpio.h>
40#include <linux/mtd/lpc32xx_mlc.h>
41#include <linux/io.h>
42#include <linux/mm.h>
43#include <linux/dma-mapping.h>
44#include <linux/dmaengine.h>
45#include <linux/mtd/nand_ecc.h>
46
47#define DRV_NAME "lpc32xx_mlc"
48
49/**********************************************************************
50* MLC NAND controller register offsets
51**********************************************************************/
52
53#define MLC_BUFF(x) (x + 0x00000)
54#define MLC_DATA(x) (x + 0x08000)
55#define MLC_CMD(x) (x + 0x10000)
56#define MLC_ADDR(x) (x + 0x10004)
57#define MLC_ECC_ENC_REG(x) (x + 0x10008)
58#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
59#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
60#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
61#define MLC_RPR(x) (x + 0x10018)
62#define MLC_WPR(x) (x + 0x1001C)
63#define MLC_RUBP(x) (x + 0x10020)
64#define MLC_ROBP(x) (x + 0x10024)
65#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
66#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
67#define MLC_ICR(x) (x + 0x10030)
68#define MLC_TIME_REG(x) (x + 0x10034)
69#define MLC_IRQ_MR(x) (x + 0x10038)
70#define MLC_IRQ_SR(x) (x + 0x1003C)
71#define MLC_LOCK_PR(x) (x + 0x10044)
72#define MLC_ISR(x) (x + 0x10048)
73#define MLC_CEH(x) (x + 0x1004C)
74
75/**********************************************************************
76* MLC_CMD bit definitions
77**********************************************************************/
78#define MLCCMD_RESET 0xFF
79
80/**********************************************************************
81* MLC_ICR bit definitions
82**********************************************************************/
83#define MLCICR_WPROT (1 << 3)
84#define MLCICR_LARGEBLOCK (1 << 2)
85#define MLCICR_LONGADDR (1 << 1)
86#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
87
88/**********************************************************************
89* MLC_TIME_REG bit definitions
90**********************************************************************/
91#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
92#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
93#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
94#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
95#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
96#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
97#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
98
99/**********************************************************************
100* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
101**********************************************************************/
102#define MLCIRQ_NAND_READY (1 << 5)
103#define MLCIRQ_CONTROLLER_READY (1 << 4)
104#define MLCIRQ_DECODE_FAILURE (1 << 3)
105#define MLCIRQ_DECODE_ERROR (1 << 2)
106#define MLCIRQ_ECC_READY (1 << 1)
107#define MLCIRQ_WRPROT_FAULT (1 << 0)
108
109/**********************************************************************
110* MLC_LOCK_PR bit definitions
111**********************************************************************/
112#define MLCLOCKPR_MAGIC 0xA25E
113
114/**********************************************************************
115* MLC_ISR bit definitions
116**********************************************************************/
117#define MLCISR_DECODER_FAILURE (1 << 6)
118#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
119#define MLCISR_ERRORS_DETECTED (1 << 3)
120#define MLCISR_ECC_READY (1 << 2)
121#define MLCISR_CONTROLLER_READY (1 << 1)
122#define MLCISR_NAND_READY (1 << 0)
123
124/**********************************************************************
125* MLC_CEH bit definitions
126**********************************************************************/
127#define MLCCEH_NORMAL (1 << 0)
128
/*
 * Per-controller MLC NAND configuration, filled in from the device tree
 * by lpc32xx_parse_dt(). The seven timing values feed MLC_TIME_REG in
 * lpc32xx_nand_setup() and must all be non-zero.
 */
struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;	/* "nxp,tcea-delay" */
	uint32_t busy_delay;	/* "nxp,busy-delay" */
	uint32_t nand_ta;	/* "nxp,nand-ta" */
	uint32_t rd_high;	/* "nxp,rd-high" */
	uint32_t rd_low;	/* "nxp,rd-low" */
	uint32_t wr_high;	/* "nxp,wr-high" */
	uint32_t wr_low;	/* "nxp,wr-low" */
	int wp_gpio;		/* write-protect GPIO ("gpios" property, first entry) */
	struct mtd_partition *parts;	/* static partition table, if any */
	unsigned num_parts;
};
141
/*
 * OOB layout for the hardware auto-ECC: 10 ECC bytes per 512-byte
 * subpage occupy the upper 10 bytes of each 16-byte OOB chunk; the
 * first 6 bytes of each chunk remain free for user data.
 */
static struct nand_ecclayout lpc32xx_nand_oob = {
	.eccbytes = 40,
	.eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
		   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{ .offset = 0,
		  .length = 6, },
		{ .offset = 16,
		  .length = 6, },
		{ .offset = 32,
		  .length = 6, },
		{ .offset = 48,
		  .length = 6, },
	},
};
159
/*
 * Flash-based bad block table (and its mirror) at fixed absolute pages,
 * stored without OOB markers (NAND_BBT_NO_OOB) since OOB space is
 * managed by the auto-ECC layout above.
 */
static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
171
/* Driver state for one LPC32xx MLC NAND controller instance. */
struct lpc32xx_nand_host {
	struct nand_chip nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;
	struct clk *clk;
	struct mtd_info mtd;
	void __iomem *io_base;
	int irq;
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct completion comp_nand;		/* completed on MLCIRQ_NAND_READY */
	struct completion comp_controller;	/* completed on MLCIRQ_CONTROLLER_READY */
	uint32_t llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t *oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t io_base_phy;

	struct completion comp_dma;
	struct dma_chan *dma_chan;
	struct dma_slave_config dma_slave_config;
	struct scatterlist sgl;
	uint8_t *dma_buf;	/* bounce buffer for highmem page data */
	uint8_t *dummy_buf;	/* scratch page used by lpc32xx_read_oob() */
	int mlcsubpages; /* number of 512bytes-subpages */
};
202
203/*
204 * Activate/Deactivate DMA Operation:
205 *
206 * Using the PL080 DMA Controller for transferring the 512 byte subpages
207 * instead of doing readl() / writel() in a loop slows it down significantly.
208 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
209 *
210 * - readl() of 128 x 32 bits in a loop: ~20us
211 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
212 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
213 *
214 * This applies to the transfer itself. In the DMA case: only the
215 * wait_for_completion() (DMA setup _not_ included).
216 *
217 * Note that the 512 bytes subpage transfer is done directly from/to a
218 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
219 * 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
220 * controller transferring data between its internal buffer to/from the NAND
221 * chip.)
222 *
223 * Therefore, using the PL080 DMA is disabled by default, for now.
224 *
225 */
226static int use_dma;
227
/*
 * Reset and initialize the MLC controller: select large-block /
 * 5-byte-address mode, program bus timings from the MLC clock rate,
 * enable the two ready interrupts and normal controller-driven nCE.
 */
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;	/* fallback when the clock rate is unknown */

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
			MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
271
272/*
273 * Hardware specific access to control lines
274 */
275static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
276 unsigned int ctrl)
277{
278 struct nand_chip *nand_chip = mtd->priv;
279 struct lpc32xx_nand_host *host = nand_chip->priv;
280
281 if (cmd != NAND_CMD_NONE) {
282 if (ctrl & NAND_CLE)
283 writel(cmd, MLC_CMD(host->io_base));
284 else
285 writel(cmd, MLC_ADDR(host->io_base));
286 }
287}
288
289/*
290 * Read Device Ready (NAND device _and_ controller ready)
291 */
292static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
293{
294 struct nand_chip *nand_chip = mtd->priv;
295 struct lpc32xx_nand_host *host = nand_chip->priv;
296
297 if ((readb(MLC_ISR(host->io_base)) &
298 (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
299 (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
300 return 1;
301
302 return 0;
303}
304
/*
 * MLC interrupt handler. Reading MLC_IRQ_SR clears the pending flags;
 * wake the waiter matching each flag that was set.
 */
static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}
318
/*
 * Wait for the NAND device to become ready: fast path on the ISR bit,
 * otherwise sleep on the NAND-ready completion, then busy-wait out any
 * residual controller delay. Always returns NAND_STATUS_READY.
 */
static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = chip->priv;

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
337
/*
 * Wait for the MLC controller to become ready: fast path on the ISR
 * bit, otherwise sleep on the controller-ready completion and spin out
 * any residual delay. Always returns NAND_STATUS_READY.
 */
static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
				       struct nand_chip *chip)
{
	struct lpc32xx_nand_host *host = chip->priv;

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}
357
358static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
359{
360 lpc32xx_waitfunc_nand(mtd, chip);
361 lpc32xx_waitfunc_controller(mtd, chip);
362
363 return NAND_STATUS_READY;
364}
365
366/*
367 * Enable NAND write protect
368 */
369static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
370{
371 if (gpio_is_valid(host->ncfg->wp_gpio))
372 gpio_set_value(host->ncfg->wp_gpio, 0);
373}
374
375/*
376 * Disable NAND write protect
377 */
378static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
379{
380 if (gpio_is_valid(host->ncfg->wp_gpio))
381 gpio_set_value(host->ncfg->wp_gpio, 1);
382}
383
/* dmaengine completion callback: wake up the transfer's waiter. */
static void lpc32xx_dma_complete_func(void *completion)
{
	complete((struct completion *)completion);
}
388
389static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
390 enum dma_transfer_direction dir)
391{
392 struct nand_chip *chip = mtd->priv;
393 struct lpc32xx_nand_host *host = chip->priv;
394 struct dma_async_tx_descriptor *desc;
395 int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
396 int res;
397
398 sg_init_one(&host->sgl, mem, len);
399
400 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
401 DMA_BIDIRECTIONAL);
402 if (res != 1) {
403 dev_err(mtd->dev.parent, "Failed to map sg list\n");
404 return -ENXIO;
405 }
406 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
407 flags);
408 if (!desc) {
409 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
410 goto out1;
411 }
412
413 init_completion(&host->comp_dma);
414 desc->callback = lpc32xx_dma_complete_func;
415 desc->callback_param = &host->comp_dma;
416
417 dmaengine_submit(desc);
418 dma_async_issue_pending(host->dma_chan);
419
420 wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
421
422 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
423 DMA_BIDIRECTIONAL);
424 return 0;
425out1:
426 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
427 DMA_BIDIRECTIONAL);
428 return -ENXIO;
429}
430
/*
 * Read one page plus OOB with hardware (auto-decode) ECC. Data moves
 * subpage-wise: 512 data bytes + 16 OOB bytes per subpage, via DMA or
 * PIO readl() from the MLC buffer. ECC statistics are updated from the
 * decoder status in MLC_ISR. oob_required is ignored: the OOB area is
 * always transferred into chip->oob_poi.
 * Returns 0 on success or a negative DMA error.
 */
static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	struct lpc32xx_nand_host *host = chip->priv;
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

	/* Lowmem buffers can be DMA'd into directly; otherwise use the
	 * driver's bounce buffer and memcpy() to the caller at the end. */
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			/* Bits 4..5 appear to hold the corrected-symbol
			 * count minus one — confirm against the MLC manual. */
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			/* PIO path reads straight into the caller's buffer */
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* OOB bytes are always fetched via PIO */
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	/* DMA into the bounce buffer still needs a copy to the caller */
	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}
495
/*
 * Program one page's data and user OOB into the MLC controller buffer,
 * subpage by subpage, with hardware auto-encode ECC. Per 512-byte
 * subpage only 6 user OOB bytes are written; the controller generates
 * the remaining ECC bytes itself. Returns 0 or a negative DMA error.
 */
static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       const uint8_t *buf, int oob_required)
{
	struct lpc32xx_nand_host *host = chip->priv;
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

	/* Highmem data cannot be mapped for DMA: bounce via dma_buf */
	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		/* 6 user OOB bytes (4 + 2); the skip to +16 leaves room
		 * for the 10 hardware-generated ECC bytes per subpage */
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(mtd, chip);
	}
	return 0;
}
541
542static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
543 const uint8_t *buf, int oob_required, int page,
544 int cached, int raw)
545{
546 int res;
547
548 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
549 res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
550 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
551 lpc32xx_waitfunc(mtd, chip);
552
553 return res;
554}
555
556static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
557 int page)
558{
559 struct lpc32xx_nand_host *host = chip->priv;
560
561 /* Read whole page - necessary with MLC controller! */
562 lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
563
564 return 0;
565}
566
/*
 * Intentionally a no-op: a standalone OOB write would conflict with the
 * automatic LPC MLC ECC decoder.
 */
static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	return 0;
}
573
/*
 * ecc.hwctl hook. Nothing to do: the MLC hardware ECC engine is always
 * enabled.
 */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
}
579
/*
 * Request and configure a slave DMA channel for transfers between
 * memory and the MLC data buffer (MLC_BUFF). Requires the platform
 * data to supply a dma_filter callback.
 * Returns 0 on success or a negative error code.
 */
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = &host->mtd;
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	/* Both directions target the MLC buffer's physical address */
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}
623
624static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
625{
626 struct lpc32xx_nand_cfg_mlc *ncfg;
627 struct device_node *np = dev->of_node;
628
629 ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
630 if (!ncfg) {
631 dev_err(dev, "could not allocate memory for platform data\n");
632 return NULL;
633 }
634
635 of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
636 of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
637 of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
638 of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
639 of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
640 of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
641 of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
642
643 if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
644 !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
645 !ncfg->wr_low) {
646 dev_err(dev, "chip parameters not specified correctly\n");
647 return NULL;
648 }
649
650 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
651
652 return ncfg;
653}
654
/*
 * Probe for NAND controller
 *
 * Maps the MLC register block, parses the device tree configuration,
 * initializes the controller and the nand_chip callbacks, scans for the
 * chip, and registers the MTD device with its partitions. Error paths
 * unwind acquired resources in reverse order via the err_exit labels.
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;
	struct mtd_part_parser_data ppdata = {};

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate device structure.\n");
		return -ENOMEM;
	}

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (rc == NULL) {
		dev_err(&pdev->dev, "No memory resource found for device!\r\n");
		return -ENXIO;
	}

	host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
	if (host->io_base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EIO;
	}
	/* Physical base is needed later for the DMA slave addresses */
	host->io_base_phy = rc->start;

	mtd = &host->mtd;
	nand_chip = &host->nand_chip;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = pdev->dev.platform_data;

	nand_chip->priv = host;		/* link the private data structures */
	mtd->priv = nand_chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_enable(host->clk);

	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 25;	/* us */
	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
	nand_chip->ecc.read_page = lpc32xx_read_page;
	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	nand_chip->ecc.write_oob = lpc32xx_write_oob;
	nand_chip->ecc.read_oob = lpc32xx_read_oob;
	nand_chip->ecc.strength = 4;
	nand_chip->write_page = lpc32xx_write_page;
	nand_chip->waitfunc = lpc32xx_waitfunc;

	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	/* bitflip_threshold's default is defined as ecc_strength anyway.
	 * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
	 * being 0, it causes bad block table scanning errors in
	 * nand_scan_tail(), so preparing it here. */
	mtd->bitflip_threshold = nand_chip->ecc.strength;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto err_exit2;
		}
	}

	/*
	 * Scan to find existance of the device and
	 * Get the type of NAND device SMALL block or LARGE block
	 */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	/* mtd->writesize is only known after nand_scan_ident(), so the
	 * per-page bounce and scratch buffers are allocated here */
	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf) {
		dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf) {
		dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = mtd->writesize;
	nand_chip->ecc.layout = &lpc32xx_nand_oob;
	host->mlcsubpages = mtd->writesize / 512;

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "failed to get platform irq\n");
		res = -EINVAL;
		goto err_exit3;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto err_exit3;
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 * And scans for a bad block table if appropriate.
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit4;
	}

	mtd->name = DRV_NAME;

	ppdata.of_node = pdev->dev.of_node;
	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
					host->ncfg->num_parts);
	if (!res)
		return res;

	/* Registration failed: release the scanned chip, then unwind */
	nand_release(mtd);

err_exit4:
	free_irq(host->irq, host);
err_exit3:
	if (use_dma)
		dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);
err_exit1:
	lpc32xx_wp_enable(host);
	/* NOTE(review): gpio_free() runs even when wp_gpio was invalid and
	 * thus never requested — confirm this is tolerated. */
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
844
/*
 * Remove NAND device
 *
 * Tears down in reverse order of probe: unregister the MTD/NAND chip,
 * release IRQ and DMA channel, stop the clock, re-assert write protect
 * and free the WP gpio.
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	nand_release(mtd);
	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);

	/* Leave the chip write-protected while unbound */
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}
867
868#ifdef CONFIG_PM
869static int lpc32xx_nand_resume(struct platform_device *pdev)
870{
871 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
872
873 /* Re-enable NAND clock */
874 clk_enable(host->clk);
875
876 /* Fresh init of NAND controller */
877 lpc32xx_nand_setup(host);
878
879 /* Disable write protect */
880 lpc32xx_wp_disable(host);
881
882 return 0;
883}
884
885static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
886{
887 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
888
889 /* Enable write protect for safety */
890 lpc32xx_wp_enable(host);
891
892 /* Disable clock */
893 clk_disable(host->clk);
894 return 0;
895}
896
897#else
898#define lpc32xx_nand_resume NULL
899#define lpc32xx_nand_suspend NULL
900#endif
901
/* Device tree match table */
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe = lpc32xx_nand_probe,
	.remove = lpc32xx_nand_remove,
	.resume = lpc32xx_nand_resume,
	.suspend = lpc32xx_nand_suspend,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(lpc32xx_nand_match),
	},
};

/* Registers init/exit boilerplate for a simple platform driver */
module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
deleted file mode 100644
index 030b78c6289..00000000000
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ /dev/null
@@ -1,1039 +0,0 @@
1/*
2 * NXP LPC32XX NAND SLC driver
3 *
4 * Authors:
5 * Kevin Wells <kevin.wells@nxp.com>
6 * Roland Stigge <stigge@antcom.de>
7 *
8 * Copyright © 2011 NXP Semiconductors
9 * Copyright © 2012 Roland Stigge
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22#include <linux/slab.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h>
28#include <linux/clk.h>
29#include <linux/err.h>
30#include <linux/delay.h>
31#include <linux/io.h>
32#include <linux/mm.h>
33#include <linux/dma-mapping.h>
34#include <linux/dmaengine.h>
35#include <linux/mtd/nand_ecc.h>
36#include <linux/gpio.h>
37#include <linux/of.h>
38#include <linux/of_mtd.h>
39#include <linux/of_gpio.h>
40#include <linux/mtd/lpc32xx_slc.h>
41
42#define LPC32XX_MODNAME "lpc32xx-nand"
43
44/**********************************************************************
45* SLC NAND controller register offsets
46**********************************************************************/
47
48#define SLC_DATA(x) (x + 0x000)
49#define SLC_ADDR(x) (x + 0x004)
50#define SLC_CMD(x) (x + 0x008)
51#define SLC_STOP(x) (x + 0x00C)
52#define SLC_CTRL(x) (x + 0x010)
53#define SLC_CFG(x) (x + 0x014)
54#define SLC_STAT(x) (x + 0x018)
55#define SLC_INT_STAT(x) (x + 0x01C)
56#define SLC_IEN(x) (x + 0x020)
57#define SLC_ISR(x) (x + 0x024)
58#define SLC_ICR(x) (x + 0x028)
59#define SLC_TAC(x) (x + 0x02C)
60#define SLC_TC(x) (x + 0x030)
61#define SLC_ECC(x) (x + 0x034)
62#define SLC_DMA_DATA(x) (x + 0x038)
63
64/**********************************************************************
65* slc_ctrl register definitions
66**********************************************************************/
67#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
68#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
69#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
70
71/**********************************************************************
72* slc_cfg register definitions
73**********************************************************************/
74#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
75#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
76#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
77#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
78#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
79#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
80
81/**********************************************************************
82* slc_stat register definitions
83**********************************************************************/
84#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
85#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
86#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
87
88/**********************************************************************
89* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
90**********************************************************************/
91#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
92#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
93
94/**********************************************************************
95* slc_tac register definitions
96**********************************************************************/
97/* Clock setting for RDY write sample wait time in 2*n clocks */
98#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
99/* Write pulse width in clock cycles, 1 to 16 clocks */
100#define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
101/* Write hold time of control and data signals, 1 to 16 clocks */
102#define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
103/* Write setup time of control and data signals, 1 to 16 clocks */
104#define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
105/* Clock setting for RDY read sample wait time in 2*n clocks */
106#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
107/* Read pulse width in clock cycles, 1 to 16 clocks */
108#define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
109/* Read hold time of control and data signals, 1 to 16 clocks */
110#define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
111/* Read setup time of control and data signals, 1 to 16 clocks */
112#define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
113
114/**********************************************************************
115* slc_ecc register definitions
116**********************************************************************/
/* ECC line parity fetch macro */
118#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
119#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
120
121/*
122 * DMA requires storage space for the DMA local buffer and the hardware ECC
123 * storage area. The DMA local buffer is only used if DMA mapping fails
124 * during runtime.
125 */
126#define LPC32XX_DMA_DATA_SIZE 4096
127#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
128
129/* Number of bytes used for ECC stored in NAND per 256 bytes */
130#define LPC32XX_SLC_DEV_ECC_BYTES 3
131
132/*
133 * If the NAND base clock frequency can't be fetched, this frequency will be
134 * used instead as the base. This rate is used to setup the timing registers
135 * used for NAND accesses.
136 */
137#define LPC32XX_DEF_BUS_RATE 133250000
138
139/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
140#define LPC32XX_DMA_TIMEOUT 100
141
142/*
143 * NAND ECC Layout for small page NAND devices
144 * Note: For large and huge page devices, the default layouts are used
145 */
static struct nand_ecclayout lpc32xx_nand_oob_16 = {
	.eccbytes = 6,
	.eccpos = {10, 11, 12, 13, 14, 15},	/* ECC in last 6 OOB bytes */
	.oobfree = {
		{ .offset = 0, .length = 4 },	/* bytes 0-3 free */
		{ .offset = 6, .length = 4 },	/* bytes 6-9 free; 4-5 not listed */
	},
};
154
/* Marker patterns identifying the main and mirror on-flash BBTs */
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };

/*
 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
 * Note: Large page devices use the default layout
 */
static struct nand_bbt_descr bbt_smallpage_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	0,		/* marker at OOB offset 0 */
	.len = 4,		/* 4-byte marker ("Bbt0") */
	.veroffs = 6,		/* version byte at OOB offset 6 */
	.maxblocks = 4,		/* scan up to 4 blocks for the table */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	0,		/* same layout as the main descriptor ... */
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern	/* ... but mirror marker ("1tbB") */
};
181
182/*
183 * NAND platform configuration structure
184 */
/*
 * NAND timing/board configuration, populated from the device tree by
 * lpc32xx_parse_dt(). The *dr_clks fields feed SLCTAC_WDR/SLCTAC_RDR
 * directly; the other timing fields divide the SLC base clock rate in
 * lpc32xx_nand_setup().
 */
struct lpc32xx_nand_cfg_slc {
	uint32_t wdr_clks;	/* RDY write sample wait, in 2*n clocks */
	uint32_t wwidth;	/* write pulse width (clkrate divisor) */
	uint32_t whold;		/* write hold time (clkrate divisor) */
	uint32_t wsetup;	/* write setup time (clkrate divisor) */
	uint32_t rdr_clks;	/* RDY read sample wait, in 2*n clocks */
	uint32_t rwidth;	/* read pulse width (clkrate divisor) */
	uint32_t rhold;		/* read hold time (clkrate divisor) */
	uint32_t rsetup;	/* read setup time (clkrate divisor) */
	bool use_bbt;		/* use on-flash bad block tables */
	int wp_gpio;		/* write-protect GPIO; may be invalid */
	struct mtd_partition *parts;	/* static partitions, if any */
	unsigned num_parts;		/* number of entries in parts */
};
199
/* Per-controller driver state, allocated in lpc32xx_nand_probe() */
struct lpc32xx_nand_host {
	struct nand_chip nand_chip;
	struct lpc32xx_slc_platform_data *pdata;	/* provides dma_filter */
	struct clk *clk;		/* SLC block clock */
	struct mtd_info mtd;
	void __iomem *io_base;		/* mapped controller registers */
	struct lpc32xx_nand_cfg_slc *ncfg;	/* parsed DT configuration */

	struct completion comp;		/* signalled on DMA completion */
	struct dma_chan *dma_chan;
	uint32_t dma_buf_len;		/* size of the data_buf allocation */
	struct dma_slave_config dma_slave_config;
	struct scatterlist sgl;

	/*
	 * DMA and CPU addresses of ECC work area and data buffer
	 */
	uint32_t *ecc_buf;	/* ECC words; points into data_buf tail */
	uint8_t *data_buf;	/* bounce buffer for non-DMA-able callers */
	dma_addr_t io_base_dma;	/* physical register base for DMA targets */
};
221
/*
 * Reset the SLC controller and program its configuration, interrupt and
 * timing registers from the parsed device-tree values.
 */
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset SLC controller */
	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
	udelay(1000);	/* settle time after reset */

	/* Basic setup: default config, interrupts disabled and cleared */
	writel(0, SLC_CFG(host->io_base));
	writel(0, SLC_IEN(host->io_base));
	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
		SLC_ICR(host->io_base));

	/* Get base clock for SLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = LPC32XX_DEF_BUS_RATE;	/* fallback if rate unknown */

	/* Compute clock setup values: wdr/rdr clocks are programmed as-is,
	 * the other timings are derived by dividing the base clock rate */
	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
		SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
		SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
		SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
		SLCTAC_RDR(host->ncfg->rdr_clks) |
		SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
		SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
		SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
	writel(tmp, SLC_TAC(host->io_base));
}
252
253/*
254 * Hardware specific access to control lines
255 */
256static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
257 unsigned int ctrl)
258{
259 uint32_t tmp;
260 struct nand_chip *chip = mtd->priv;
261 struct lpc32xx_nand_host *host = chip->priv;
262
263 /* Does CE state need to be changed? */
264 tmp = readl(SLC_CFG(host->io_base));
265 if (ctrl & NAND_NCE)
266 tmp |= SLCCFG_CE_LOW;
267 else
268 tmp &= ~SLCCFG_CE_LOW;
269 writel(tmp, SLC_CFG(host->io_base));
270
271 if (cmd != NAND_CMD_NONE) {
272 if (ctrl & NAND_CLE)
273 writel(cmd, SLC_CMD(host->io_base));
274 else
275 writel(cmd, SLC_ADDR(host->io_base));
276 }
277}
278
279/*
280 * Read the Device Ready pin
281 */
282static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
283{
284 struct nand_chip *chip = mtd->priv;
285 struct lpc32xx_nand_host *host = chip->priv;
286 int rdy = 0;
287
288 if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
289 rdy = 1;
290
291 return rdy;
292}
293
/*
 * Enable NAND write protect by driving the WP GPIO low.
 * No-op when no valid WP GPIO was configured.
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}
302
/*
 * Disable NAND write protect by driving the WP GPIO high.
 * No-op when no valid WP GPIO was configured.
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}
311
/*
 * Prepares SLC for transfers with H/W ECC enabled.
 * Intentionally empty: ECC is enabled per-transfer in lpc32xx_xfer();
 * @mode is unused.
 */
static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
{
	/* Hardware ECC is enabled automatically in hardware as needed */
}
319
/*
 * Calculates the ECC for the data. Always returns 0 and leaves @code
 * untouched: the controller produces the ECC during syndrome transfers.
 */
static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
				      const unsigned char *buf,
				      unsigned char *code)
{
	/*
	 * ECC is calculated automatically in hardware during syndrome read
	 * and write operations, so it doesn't need to be calculated here.
	 */
	return 0;
}
333
334/*
335 * Read a single byte from NAND device
336 */
337static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
338{
339 struct nand_chip *chip = mtd->priv;
340 struct lpc32xx_nand_host *host = chip->priv;
341
342 return (uint8_t)readl(SLC_DATA(host->io_base));
343}
344
345/*
346 * Simple device read without ECC
347 */
348static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
349{
350 struct nand_chip *chip = mtd->priv;
351 struct lpc32xx_nand_host *host = chip->priv;
352
353 /* Direct device read with no ECC */
354 while (len-- > 0)
355 *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
356}
357
358/*
359 * Simple device write without ECC
360 */
361static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
362{
363 struct nand_chip *chip = mtd->priv;
364 struct lpc32xx_nand_host *host = chip->priv;
365
366 /* Direct device write with no ECC */
367 while (len-- > 0)
368 writel((uint32_t)*buf++, SLC_DATA(host->io_base));
369}
370
/*
 * Read the OOB data from the device without ECC using FIFO method.
 * Fills chip->oob_poi; always returns 0.
 */
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
					  struct nand_chip *chip, int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
382
/*
 * Write the OOB data to the device without ECC using FIFO method.
 * Programs chip->oob_poi at the spare-area column of @page.
 * Returns 0 on success or -EIO if the device reports a program failure.
 */
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
					   struct nand_chip *chip, int page)
{
	int status;

	/* Seek to the spare area (column mtd->writesize) and send the data */
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Send command to program the OOB data */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
401
/*
 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC.
 * Each 32-bit ECC word from the controller becomes three bytes in @spare:
 * the word is shifted left by two, inverted, masked to 24 bits and stored
 * most-significant byte first.
 */
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
	int chunk;

	for (chunk = 0; chunk < count; chunk++) {
		uint32_t ce = ~(ecc[chunk] << 2) & 0xFFFFFF;
		uint8_t *out = &spare[chunk * 3];

		out[0] = (uint8_t)(ce >> 16);
		out[1] = (uint8_t)((ce >> 8) & 0xFF);
		out[2] = (uint8_t)(ce & 0xFF);
	}
}
419
/* DMA completion callback: wakes the waiter in lpc32xx_xmit_dma() */
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
424
425static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
426 void *mem, int len, enum dma_transfer_direction dir)
427{
428 struct nand_chip *chip = mtd->priv;
429 struct lpc32xx_nand_host *host = chip->priv;
430 struct dma_async_tx_descriptor *desc;
431 int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
432 int res;
433
434 host->dma_slave_config.direction = dir;
435 host->dma_slave_config.src_addr = dma;
436 host->dma_slave_config.dst_addr = dma;
437 host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
438 host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
439 host->dma_slave_config.src_maxburst = 4;
440 host->dma_slave_config.dst_maxburst = 4;
441 /* DMA controller does flow control: */
442 host->dma_slave_config.device_fc = false;
443 if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
444 dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
445 return -ENXIO;
446 }
447
448 sg_init_one(&host->sgl, mem, len);
449
450 res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
451 DMA_BIDIRECTIONAL);
452 if (res != 1) {
453 dev_err(mtd->dev.parent, "Failed to map sg list\n");
454 return -ENXIO;
455 }
456 desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
457 flags);
458 if (!desc) {
459 dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
460 goto out1;
461 }
462
463 init_completion(&host->comp);
464 desc->callback = lpc32xx_dma_complete_func;
465 desc->callback_param = &host->comp;
466
467 dmaengine_submit(desc);
468 dma_async_issue_pending(host->dma_chan);
469
470 wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
471
472 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
473 DMA_BIDIRECTIONAL);
474
475 return 0;
476out1:
477 dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
478 DMA_BIDIRECTIONAL);
479 return -ENXIO;
480}
481
/*
 * DMA read/write transfers with ECC support
 *
 * Moves one page's data area between @buf and the SLC FIFO via DMA while
 * the controller computes hardware ECC. One ECC word per step is stored
 * in host->ecc_buf (the last word is read back by the CPU from SLC_ECC).
 * @read selects the direction; @eccsubpages is currently unused.
 * Returns 0, -EIO on FIFO trouble, or a lpc32xx_xmit_dma() error code.
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;

	/* Use the caller's buffer directly when it is DMA-able (lowmem),
	 * otherwise bounce through host->data_buf */
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	/* Program transfer direction, enable ECC and DMA ECC readout */
	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
		       SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	/* One data DMA plus one ECC-word DMA per ECC step */
	for (i = 0; i < chip->ecc.steps; i++) {
		/* Data */
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC; the final step's ECC word is read by
		 * the CPU below instead of by DMA */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. After porting to using the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to be always true, according to tests. Keeping the check for
	 * safety reasons for now.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10);
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	/* Flush DMA */
	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	/* Copy bounced read data back to the caller's buffer */
	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
595
/*
 * Read the data and OOB data from the device, use ECC correction with the
 * data, disable ECC for the OOB data.
 *
 * The page data is read with hardware ECC via lpc32xx_xfer(); the stored
 * ECC is then taken from the OOB and each step is corrected with
 * chip->ecc.correct(), updating mtd->ecc_stats. Returns the transfer
 * status from lpc32xx_xfer().
 */
static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
					   struct nand_chip *chip, uint8_t *buf,
					   int oob_required, int page)
{
	struct lpc32xx_nand_host *host = chip->priv;
	int stat, i, status;
	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];

	/* Issue read command */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* Read data and oob, calculate ECC */
	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);

	/* Get OOB data */
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Convert to stored ECC format */
	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);

	/* Pointer to ECC data retrieved from NAND spare area */
	oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];

	/* Correct each ECC step against the stored ECC bytes */
	for (i = 0; i < chip->ecc.steps; i++) {
		stat = chip->ecc.correct(mtd, buf, oobecc,
					 &tmpecc[i * chip->ecc.bytes]);
		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;

		buf += chip->ecc.size;
		oobecc += chip->ecc.bytes;
	}

	return status;
}
637
638/*
639 * Read the data and OOB data from the device, no ECC correction with the
640 * data or OOB data
641 */
642static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
643 struct nand_chip *chip,
644 uint8_t *buf, int oob_required,
645 int page)
646{
647 /* Issue read command */
648 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
649
650 /* Raw reads can just use the FIFO interface */
651 chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
652 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
653
654 return 0;
655}
656
/*
 * Write the data and OOB data to the device, use ECC with the data,
 * disable ECC for the OOB data.
 *
 * Sends the page data with hardware ECC via lpc32xx_xfer(), converts the
 * computed ECC into the on-flash format at the layout's ECC offsets, then
 * writes the whole OOB. @oob_required is unused. Returns 0 or the
 * lpc32xx_xfer() error.
 */
static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    const uint8_t *buf, int oob_required)
{
	struct lpc32xx_nand_host *host = chip->priv;
	uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
	int error;

	/* Write data, calculate ECC on outbound data */
	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
	if (error)
		return error;

	/*
	 * The calculated ECC needs some manual work done to it before
	 * committing it to NAND. Process the calculated ECC and place
	 * the resultant values directly into the OOB buffer. */
	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);

	/* Write ECC data to device */
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
684
685/*
686 * Write the data and OOB data to the device, no ECC correction with the
687 * data or OOB data
688 */
689static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
690 struct nand_chip *chip,
691 const uint8_t *buf,
692 int oob_required)
693{
694 /* Raw writes can just use the FIFO interface */
695 chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
696 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
697 return 0;
698}
699
/*
 * Acquire the slave DMA channel using the platform-data filter function.
 * Returns 0 on success, -ENOENT when no platform data / filter is
 * available, or -EBUSY when no channel could be obtained.
 */
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = &host->mtd;
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	return 0;
}
721
722static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
723{
724 struct lpc32xx_nand_cfg_slc *ncfg;
725 struct device_node *np = dev->of_node;
726
727 ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
728 if (!ncfg) {
729 dev_err(dev, "could not allocate memory for NAND config\n");
730 return NULL;
731 }
732
733 of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
734 of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
735 of_property_read_u32(np, "nxp,whold", &ncfg->whold);
736 of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
737 of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
738 of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
739 of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
740 of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
741
742 if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
743 !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
744 !ncfg->rhold || !ncfg->rsetup) {
745 dev_err(dev, "chip parameters not specified correctly\n");
746 return NULL;
747 }
748
749 ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
750 ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
751
752 return ncfg;
753}
754
/*
 * Probe for NAND controller.
 *
 * Maps the register block, parses the device-tree config, claims the
 * write-protect GPIO, sets up clock, DMA, NAND callbacks and BBT
 * handling, scans for a chip and registers the MTD device.
 * On any failure all previously acquired resources are released in
 * reverse order via the err_exit labels.
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	struct mtd_part_parser_data ppdata = {};
	int res;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (rc == NULL) {
		dev_err(&pdev->dev, "No memory resource found for device\n");
		return -EBUSY;
	}

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate device structure\n");
		return -ENOMEM;
	}
	/* Physical base is kept for programming DMA transfers to the FIFO */
	host->io_base_dma = rc->start;

	host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
	if (host->io_base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	/* Configuration comes from the device tree only */
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	/* Propagate GPIO probe deferral before claiming anything */
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = pdev->dev.platform_data;

	/* Wire up the mtd <-> nand_chip <-> host private pointers */
	mtd = &host->mtd;
	chip = &host->nand_chip;
	chip->priv = host;
	mtd->priv = chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock failure\n");
		res = -ENOENT;
		goto err_exit1;
	}
	clk_enable(host->clk);

	/* Set NAND IO addresses and command/ready functions */
	chip->IO_ADDR_R = SLC_DATA(host->io_base);
	chip->IO_ADDR_W = SLC_DATA(host->io_base);
	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->dev_ready = lpc32xx_nand_device_ready;
	chip->chip_delay = 20;		/* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->read_byte = lpc32xx_nand_read_byte;
	chip->read_buf = lpc32xx_nand_read_buf;
	chip->write_buf = lpc32xx_nand_write_buf;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct = nand_correct_data;
	chip->ecc.strength = 1;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

	/* bitflip_threshold's default is defined as ecc_strength anyway.
	 * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
	 * being 0, it causes bad block table scanning errors in
	 * nand_scan_tail(), so preparing it here already. */
	mtd->bitflip_threshold = chip->ecc.strength;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
				      GFP_KERNEL);
	if (host->data_buf == NULL) {
		dev_err(&pdev->dev, "Error allocating memory\n");
		res = -ENOMEM;
		goto err_exit2;
	}

	res = lpc32xx_nand_dma_setup(host);
	if (res) {
		res = -EIO;
		goto err_exit2;
	}

	/* Find NAND device */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_exit3;
	}

	/* OOB and ECC CPU and DMA work areas (tail of data_buf) */
	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		chip->ecc.layout = &lpc32xx_nand_oob_16;

	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = chip->ecc.postpad = 0;

	/* Avoid extra scan if using BBT, setup BBT support */
	if (host->ncfg->use_bbt) {
		chip->options |= NAND_SKIP_BBTSCAN;
		chip->bbt_options |= NAND_BBT_USE_FLASH;

		/*
		 * Use a custom BBT marker setup for small page FLASH that
		 * won't interfere with the ECC layout. Large and huge page
		 * FLASH use the standard layout.
		 */
		if (mtd->writesize <= 512) {
			chip->bbt_td = &bbt_smallpage_main_descr;
			chip->bbt_md = &bbt_smallpage_mirror_descr;
		}
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit3;
	}

	/* Standard layout in FLASH for bad block tables */
	if (host->ncfg->use_bbt) {
		if (nand_default_bbt(mtd) < 0)
			dev_err(&pdev->dev,
				"Error initializing default bad block tables\n");
	}

	mtd->name = "nxp_lpc3220_slc";
	ppdata.of_node = pdev->dev.of_node;
	res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
					host->ncfg->num_parts);
	if (!res)
		return res;

	/* Registration failed: unwind everything including the NAND scan */
	nand_release(mtd);

err_exit3:
	dma_release_channel(host->dma_chan);
err_exit2:
	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);
err_exit1:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}
948
949/*
950 * Remove NAND device.
951 */
952static int lpc32xx_nand_remove(struct platform_device *pdev)
953{
954 uint32_t tmp;
955 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
956 struct mtd_info *mtd = &host->mtd;
957
958 nand_release(mtd);
959 dma_release_channel(host->dma_chan);
960
961 /* Force CE high */
962 tmp = readl(SLC_CTRL(host->io_base));
963 tmp &= ~SLCCFG_CE_LOW;
964 writel(tmp, SLC_CTRL(host->io_base));
965
966 clk_disable(host->clk);
967 clk_put(host->clk);
968 platform_set_drvdata(pdev, NULL);
969 lpc32xx_wp_enable(host);
970 gpio_free(host->ncfg->wp_gpio);
971
972 return 0;
973}
974
975#ifdef CONFIG_PM
/*
 * PM resume: re-enable the controller clock, reprogram the SLC registers
 * via lpc32xx_nand_setup(), and deassert write protect. Always returns 0.
 */
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable NAND clock */
	clk_enable(host->clk);

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}
991
992static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
993{
994 uint32_t tmp;
995 struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
996
997 /* Force CE high */
998 tmp = readl(SLC_CTRL(host->io_base));
999 tmp &= ~SLCCFG_CE_LOW;
1000 writel(tmp, SLC_CTRL(host->io_base));
1001
1002 /* Enable write protect for safety */
1003 lpc32xx_wp_enable(host);
1004
1005 /* Disable clock */
1006 clk_disable(host->clk);
1007
1008 return 0;
1009}
1010
1011#else
1012#define lpc32xx_nand_resume NULL
1013#define lpc32xx_nand_suspend NULL
1014#endif
1015
/* Device tree match table: SLC NAND controller on LPC32xx SoCs */
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-slc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
1021
/* Platform driver glue; uses legacy suspend/resume callbacks (see above) */
static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= LPC32XX_MODNAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(lpc32xx_nand_match),
	},
};
1033
1034module_platform_driver(lpc32xx_nand_driver);
1035
1036MODULE_LICENSE("GPL");
1037MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
1038MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
1039MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 3c9cdcbc4cb..eb1fbac63eb 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,6 +131,8 @@ struct mpc5121_nfc_prv {
131 131
132static void mpc5121_nfc_done(struct mtd_info *mtd); 132static void mpc5121_nfc_done(struct mtd_info *mtd);
133 133
134static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
135
134/* Read NFC register */ 136/* Read NFC register */
135static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 137static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
136{ 138{
@@ -506,6 +508,27 @@ static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
506 mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); 508 mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
507} 509}
508 510
511/* Compare buffer with NAND flash */
512static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
513 const u_char *buf, int len)
514{
515 u_char tmp[256];
516 uint bsize;
517
518 while (len) {
519 bsize = min(len, 256);
520 mpc5121_nfc_read_buf(mtd, tmp, bsize);
521
522 if (memcmp(buf, tmp, bsize))
523 return 1;
524
525 buf += bsize;
526 len -= bsize;
527 }
528
529 return 0;
530}
531
509/* Read byte from NFC buffers */ 532/* Read byte from NFC buffers */
510static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) 533static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
511{ 534{
@@ -626,20 +649,20 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
626 iounmap(prv->csreg); 649 iounmap(prv->csreg);
627} 650}
628 651
629static int mpc5121_nfc_probe(struct platform_device *op) 652static int __devinit mpc5121_nfc_probe(struct platform_device *op)
630{ 653{
631 struct device_node *rootnode, *dn = op->dev.of_node; 654 struct device_node *rootnode, *dn = op->dev.of_node;
632 struct device *dev = &op->dev; 655 struct device *dev = &op->dev;
633 struct mpc5121_nfc_prv *prv; 656 struct mpc5121_nfc_prv *prv;
634 struct resource res; 657 struct resource res;
635 struct mtd_info *mtd; 658 struct mtd_info *mtd;
659 struct mtd_partition *parts;
636 struct nand_chip *chip; 660 struct nand_chip *chip;
637 unsigned long regs_paddr, regs_size; 661 unsigned long regs_paddr, regs_size;
638 const __be32 *chips_no; 662 const __be32 *chips_no;
639 int resettime = 0; 663 int resettime = 0;
640 int retval = 0; 664 int retval = 0;
641 int rev, len; 665 int rev, len;
642 struct mtd_part_parser_data ppdata;
643 666
644 /* 667 /*
645 * Check SoC revision. This driver supports only NFC 668 * Check SoC revision. This driver supports only NFC
@@ -704,15 +727,15 @@ static int mpc5121_nfc_probe(struct platform_device *op)
704 } 727 }
705 728
706 mtd->name = "MPC5121 NAND"; 729 mtd->name = "MPC5121 NAND";
707 ppdata.of_node = dn;
708 chip->dev_ready = mpc5121_nfc_dev_ready; 730 chip->dev_ready = mpc5121_nfc_dev_ready;
709 chip->cmdfunc = mpc5121_nfc_command; 731 chip->cmdfunc = mpc5121_nfc_command;
710 chip->read_byte = mpc5121_nfc_read_byte; 732 chip->read_byte = mpc5121_nfc_read_byte;
711 chip->read_word = mpc5121_nfc_read_word; 733 chip->read_word = mpc5121_nfc_read_word;
712 chip->read_buf = mpc5121_nfc_read_buf; 734 chip->read_buf = mpc5121_nfc_read_buf;
713 chip->write_buf = mpc5121_nfc_write_buf; 735 chip->write_buf = mpc5121_nfc_write_buf;
736 chip->verify_buf = mpc5121_nfc_verify_buf;
714 chip->select_chip = mpc5121_nfc_select_chip; 737 chip->select_chip = mpc5121_nfc_select_chip;
715 chip->bbt_options = NAND_BBT_USE_FLASH; 738 chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
716 chip->ecc.mode = NAND_ECC_SOFT; 739 chip->ecc.mode = NAND_ECC_SOFT;
717 740
718 /* Support external chip-select logic on ADS5121 board */ 741 /* Support external chip-select logic on ADS5121 board */
@@ -814,7 +837,19 @@ static int mpc5121_nfc_probe(struct platform_device *op)
814 dev_set_drvdata(dev, mtd); 837 dev_set_drvdata(dev, mtd);
815 838
816 /* Register device in MTD */ 839 /* Register device in MTD */
817 retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); 840 retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
841#ifdef CONFIG_MTD_OF_PARTS
842 if (retval == 0)
843 retval = of_mtd_parse_partitions(dev, dn, &parts);
844#endif
845 if (retval < 0) {
846 dev_err(dev, "Error parsing MTD partitions!\n");
847 devm_free_irq(dev, prv->irq, mtd);
848 retval = -EINVAL;
849 goto error;
850 }
851
852 retval = mtd_device_register(mtd, parts, retval);
818 if (retval) { 853 if (retval) {
819 dev_err(dev, "Error adding MTD device!\n"); 854 dev_err(dev, "Error adding MTD device!\n");
820 devm_free_irq(dev, prv->irq, mtd); 855 devm_free_irq(dev, prv->irq, mtd);
@@ -827,7 +862,7 @@ error:
827 return retval; 862 return retval;
828} 863}
829 864
830static int mpc5121_nfc_remove(struct platform_device *op) 865static int __devexit mpc5121_nfc_remove(struct platform_device *op)
831{ 866{
832 struct device *dev = &op->dev; 867 struct device *dev = &op->dev;
833 struct mtd_info *mtd = dev_get_drvdata(dev); 868 struct mtd_info *mtd = dev_get_drvdata(dev);
@@ -841,14 +876,14 @@ static int mpc5121_nfc_remove(struct platform_device *op)
841 return 0; 876 return 0;
842} 877}
843 878
844static struct of_device_id mpc5121_nfc_match[] = { 879static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
845 { .compatible = "fsl,mpc5121-nfc", }, 880 { .compatible = "fsl,mpc5121-nfc", },
846 {}, 881 {},
847}; 882};
848 883
849static struct platform_driver mpc5121_nfc_driver = { 884static struct platform_driver mpc5121_nfc_driver = {
850 .probe = mpc5121_nfc_probe, 885 .probe = mpc5121_nfc_probe,
851 .remove = mpc5121_nfc_remove, 886 .remove = __devexit_p(mpc5121_nfc_remove),
852 .driver = { 887 .driver = {
853 .name = DRV_NAME, 888 .name = DRV_NAME,
854 .owner = THIS_MODULE, 889 .owner = THIS_MODULE,
@@ -856,7 +891,19 @@ static struct platform_driver mpc5121_nfc_driver = {
856 }, 891 },
857}; 892};
858 893
859module_platform_driver(mpc5121_nfc_driver); 894static int __init mpc5121_nfc_init(void)
895{
896 return platform_driver_register(&mpc5121_nfc_driver);
897}
898
899module_init(mpc5121_nfc_init);
900
901static void __exit mpc5121_nfc_cleanup(void)
902{
903 platform_driver_unregister(&mpc5121_nfc_driver);
904}
905
906module_exit(mpc5121_nfc_cleanup);
860 907
861MODULE_AUTHOR("Freescale Semiconductor, Inc."); 908MODULE_AUTHOR("Freescale Semiconductor, Inc.");
862MODULE_DESCRIPTION("MPC5121 NAND MTD driver"); 909MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 45204e41a02..90df34c4d26 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,14 +32,18 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/completion.h> 34#include <linux/completion.h>
35#include <linux/of_device.h>
36#include <linux/of_mtd.h>
37 35
38#include <asm/mach/flash.h> 36#include <asm/mach/flash.h>
39#include <linux/platform_data/mtd-mxc_nand.h> 37#include <mach/mxc_nand.h>
38#include <mach/hardware.h>
40 39
41#define DRIVER_NAME "mxc_nand" 40#define DRIVER_NAME "mxc_nand"
42 41
42#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
43#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
44#define nfc_is_v3_2() cpu_is_mx51()
45#define nfc_is_v3() nfc_is_v3_2()
46
43/* Addresses for NFC registers */ 47/* Addresses for NFC registers */
44#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00) 48#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
45#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04) 49#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
@@ -116,7 +120,7 @@
116#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4) 120#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
117#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5) 121#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
118#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6) 122#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
119#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift) 123#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
120#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12) 124#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
121#define NFC_V3_CONFIG2_INT_MSK (1 << 15) 125#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
122#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24) 126#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
@@ -136,48 +140,14 @@
136 140
137#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34) 141#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
138 142
139struct mxc_nand_host;
140
141struct mxc_nand_devtype_data {
142 void (*preset)(struct mtd_info *);
143 void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
144 void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
145 void (*send_page)(struct mtd_info *, unsigned int);
146 void (*send_read_id)(struct mxc_nand_host *);
147 uint16_t (*get_dev_status)(struct mxc_nand_host *);
148 int (*check_int)(struct mxc_nand_host *);
149 void (*irq_control)(struct mxc_nand_host *, int);
150 u32 (*get_ecc_status)(struct mxc_nand_host *);
151 struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
152 void (*select_chip)(struct mtd_info *mtd, int chip);
153 int (*correct_data)(struct mtd_info *mtd, u_char *dat,
154 u_char *read_ecc, u_char *calc_ecc);
155
156 /*
157 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
158 * (CONFIG1:INT_MSK is set). To handle this the driver uses
159 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
160 */
161 int irqpending_quirk;
162 int needs_ip;
163
164 size_t regs_offset;
165 size_t spare0_offset;
166 size_t axi_offset;
167
168 int spare_len;
169 int eccbytes;
170 int eccsize;
171 int ppb_shift;
172};
173
174struct mxc_nand_host { 143struct mxc_nand_host {
175 struct mtd_info mtd; 144 struct mtd_info mtd;
176 struct nand_chip nand; 145 struct nand_chip nand;
146 struct mtd_partition *parts;
177 struct device *dev; 147 struct device *dev;
178 148
179 void __iomem *spare0; 149 void *spare0;
180 void __iomem *main_area0; 150 void *main_area0;
181 151
182 void __iomem *base; 152 void __iomem *base;
183 void __iomem *regs; 153 void __iomem *regs;
@@ -194,9 +164,16 @@ struct mxc_nand_host {
194 164
195 uint8_t *data_buf; 165 uint8_t *data_buf;
196 unsigned int buf_start; 166 unsigned int buf_start;
197 167 int spare_len;
198 const struct mxc_nand_devtype_data *devtype_data; 168
199 struct mxc_nand_platform_data pdata; 169 void (*preset)(struct mtd_info *);
170 void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
171 void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
172 void (*send_page)(struct mtd_info *, unsigned int);
173 void (*send_read_id)(struct mxc_nand_host *);
174 uint16_t (*get_dev_status)(struct mxc_nand_host *);
175 int (*check_int)(struct mxc_nand_host *);
176 void (*irq_control)(struct mxc_nand_host *, int);
200}; 177};
201 178
202/* OOB placement block for use with hardware ecc generation */ 179/* OOB placement block for use with hardware ecc generation */
@@ -266,27 +243,20 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
266 } 243 }
267}; 244};
268 245
269static const char const *part_probes[] = { 246static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
270 "cmdlinepart", "RedBoot", "ofpart", NULL };
271 247
272static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size) 248static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
273{ 249{
274 int i; 250 struct mxc_nand_host *host = dev_id;
275 u32 *t = trg;
276 const __iomem u32 *s = src;
277 251
278 for (i = 0; i < (size >> 2); i++) 252 if (!host->check_int(host))
279 *t++ = __raw_readl(s++); 253 return IRQ_NONE;
280}
281 254
282static void memcpy32_toio(void __iomem *trg, const void *src, int size) 255 host->irq_control(host, 0);
283{ 256
284 int i; 257 complete(&host->op_completion);
285 u32 __iomem *t = trg;
286 const u32 *s = src;
287 258
288 for (i = 0; i < (size >> 2); i++) 259 return IRQ_HANDLED;
289 __raw_writel(*s++, t++);
290} 260}
291 261
292static int check_int_v3(struct mxc_nand_host *host) 262static int check_int_v3(struct mxc_nand_host *host)
@@ -311,12 +281,26 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
311 if (!(tmp & NFC_V1_V2_CONFIG2_INT)) 281 if (!(tmp & NFC_V1_V2_CONFIG2_INT))
312 return 0; 282 return 0;
313 283
314 if (!host->devtype_data->irqpending_quirk) 284 if (!cpu_is_mx21())
315 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); 285 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
316 286
317 return 1; 287 return 1;
318} 288}
319 289
290/*
291 * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
292 * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
293 * driver can enable/disable the irq line rather than simply masking the
294 * interrupts.
295 */
296static void irq_control_mx21(struct mxc_nand_host *host, int activate)
297{
298 if (activate)
299 enable_irq(host->irq);
300 else
301 disable_irq_nosync(host->irq);
302}
303
320static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) 304static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
321{ 305{
322 uint16_t tmp; 306 uint16_t tmp;
@@ -345,47 +329,6 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
345 writel(tmp, NFC_V3_CONFIG2); 329 writel(tmp, NFC_V3_CONFIG2);
346} 330}
347 331
348static void irq_control(struct mxc_nand_host *host, int activate)
349{
350 if (host->devtype_data->irqpending_quirk) {
351 if (activate)
352 enable_irq(host->irq);
353 else
354 disable_irq_nosync(host->irq);
355 } else {
356 host->devtype_data->irq_control(host, activate);
357 }
358}
359
360static u32 get_ecc_status_v1(struct mxc_nand_host *host)
361{
362 return readw(NFC_V1_V2_ECC_STATUS_RESULT);
363}
364
365static u32 get_ecc_status_v2(struct mxc_nand_host *host)
366{
367 return readl(NFC_V1_V2_ECC_STATUS_RESULT);
368}
369
370static u32 get_ecc_status_v3(struct mxc_nand_host *host)
371{
372 return readl(NFC_V3_ECC_STATUS_RESULT);
373}
374
375static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
376{
377 struct mxc_nand_host *host = dev_id;
378
379 if (!host->devtype_data->check_int(host))
380 return IRQ_NONE;
381
382 irq_control(host, 0);
383
384 complete(&host->op_completion);
385
386 return IRQ_HANDLED;
387}
388
389/* This function polls the NANDFC to wait for the basic operation to 332/* This function polls the NANDFC to wait for the basic operation to
390 * complete by checking the INT bit of config2 register. 333 * complete by checking the INT bit of config2 register.
391 */ 334 */
@@ -394,20 +337,21 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
394 int max_retries = 8000; 337 int max_retries = 8000;
395 338
396 if (useirq) { 339 if (useirq) {
397 if (!host->devtype_data->check_int(host)) { 340 if (!host->check_int(host)) {
398 INIT_COMPLETION(host->op_completion); 341 INIT_COMPLETION(host->op_completion);
399 irq_control(host, 1); 342 host->irq_control(host, 1);
400 wait_for_completion(&host->op_completion); 343 wait_for_completion(&host->op_completion);
401 } 344 }
402 } else { 345 } else {
403 while (max_retries-- > 0) { 346 while (max_retries-- > 0) {
404 if (host->devtype_data->check_int(host)) 347 if (host->check_int(host))
405 break; 348 break;
406 349
407 udelay(1); 350 udelay(1);
408 } 351 }
409 if (max_retries < 0) 352 if (max_retries < 0)
410 pr_debug("%s: INT not set\n", __func__); 353 DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
354 __func__);
411 } 355 }
412} 356}
413 357
@@ -427,12 +371,12 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
427 * waits for completion. */ 371 * waits for completion. */
428static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) 372static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
429{ 373{
430 pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); 374 DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
431 375
432 writew(cmd, NFC_V1_V2_FLASH_CMD); 376 writew(cmd, NFC_V1_V2_FLASH_CMD);
433 writew(NFC_CMD, NFC_V1_V2_CONFIG2); 377 writew(NFC_CMD, NFC_V1_V2_CONFIG2);
434 378
435 if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) { 379 if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
436 int max_retries = 100; 380 int max_retries = 100;
437 /* Reset completion is indicated by NFC_CONFIG2 */ 381 /* Reset completion is indicated by NFC_CONFIG2 */
438 /* being set to 0 */ 382 /* being set to 0 */
@@ -443,7 +387,8 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
443 udelay(1); 387 udelay(1);
444 } 388 }
445 if (max_retries < 0) 389 if (max_retries < 0)
446 pr_debug("%s: RESET failed\n", __func__); 390 DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
391 __func__);
447 } else { 392 } else {
448 /* Wait for operation to complete */ 393 /* Wait for operation to complete */
449 wait_op_done(host, useirq); 394 wait_op_done(host, useirq);
@@ -466,7 +411,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
466 * a NAND command. */ 411 * a NAND command. */
467static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) 412static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
468{ 413{
469 pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); 414 DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
470 415
471 writew(addr, NFC_V1_V2_FLASH_ADDR); 416 writew(addr, NFC_V1_V2_FLASH_ADDR);
472 writew(NFC_ADDR, NFC_V1_V2_CONFIG2); 417 writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -491,27 +436,13 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
491 wait_op_done(host, false); 436 wait_op_done(host, false);
492} 437}
493 438
494static void send_page_v2(struct mtd_info *mtd, unsigned int ops) 439static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
495{
496 struct nand_chip *nand_chip = mtd->priv;
497 struct mxc_nand_host *host = nand_chip->priv;
498
499 /* NANDFC buffer 0 is used for page read/write */
500 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
501
502 writew(ops, NFC_V1_V2_CONFIG2);
503
504 /* Wait for operation to complete */
505 wait_op_done(host, true);
506}
507
508static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
509{ 440{
510 struct nand_chip *nand_chip = mtd->priv; 441 struct nand_chip *nand_chip = mtd->priv;
511 struct mxc_nand_host *host = nand_chip->priv; 442 struct mxc_nand_host *host = nand_chip->priv;
512 int bufs, i; 443 int bufs, i;
513 444
514 if (mtd->writesize > 512) 445 if (nfc_is_v1() && mtd->writesize > 512)
515 bufs = 4; 446 bufs = 4;
516 else 447 else
517 bufs = 1; 448 bufs = 1;
@@ -535,7 +466,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
535 466
536 wait_op_done(host, true); 467 wait_op_done(host, true);
537 468
538 memcpy32_fromio(host->data_buf, host->main_area0, 16); 469 memcpy(host->data_buf, host->main_area0, 16);
539} 470}
540 471
541/* Request the NANDFC to perform a read of the NAND device ID. */ 472/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -551,7 +482,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
551 /* Wait for operation to complete */ 482 /* Wait for operation to complete */
552 wait_op_done(host, true); 483 wait_op_done(host, true);
553 484
554 memcpy32_fromio(host->data_buf, host->main_area0, 16); 485 memcpy(host->data_buf, host->main_area0, 16);
555 486
556 if (this->options & NAND_BUSWIDTH_16) { 487 if (this->options & NAND_BUSWIDTH_16) {
557 /* compress the ID info */ 488 /* compress the ID info */
@@ -627,10 +558,11 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
627 * additional correction. 2-Bit errors cannot be corrected by 558 * additional correction. 2-Bit errors cannot be corrected by
628 * HW ECC, so we need to return failure 559 * HW ECC, so we need to return failure
629 */ 560 */
630 uint16_t ecc_status = get_ecc_status_v1(host); 561 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
631 562
632 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 563 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
633 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 564 DEBUG(MTD_DEBUG_LEVEL0,
565 "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
634 return -1; 566 return -1;
635 } 567 }
636 568
@@ -652,7 +584,10 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
652 584
653 no_subpages = mtd->writesize >> 9; 585 no_subpages = mtd->writesize >> 9;
654 586
655 ecc_stat = host->devtype_data->get_ecc_status(host); 587 if (nfc_is_v21())
588 ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
589 else
590 ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
656 591
657 do { 592 do {
658 err = ecc_stat & ecc_bit_mask; 593 err = ecc_stat & ecc_bit_mask;
@@ -685,7 +620,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
685 620
686 /* Check for status request */ 621 /* Check for status request */
687 if (host->status_request) 622 if (host->status_request)
688 return host->devtype_data->get_dev_status(host) & 0xFF; 623 return host->get_dev_status(host) & 0xFF;
689 624
690 ret = *(uint8_t *)(host->data_buf + host->buf_start); 625 ret = *(uint8_t *)(host->data_buf + host->buf_start);
691 host->buf_start++; 626 host->buf_start++;
@@ -741,30 +676,17 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
741 host->buf_start += n; 676 host->buf_start += n;
742} 677}
743 678
744/* This function is used by upper layer for select and 679/* Used by the upper layer to verify the data in NAND Flash
745 * deselect of the NAND chip */ 680 * with the data in the buf. */
746static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip) 681static int mxc_nand_verify_buf(struct mtd_info *mtd,
682 const u_char *buf, int len)
747{ 683{
748 struct nand_chip *nand_chip = mtd->priv; 684 return -EFAULT;
749 struct mxc_nand_host *host = nand_chip->priv;
750
751 if (chip == -1) {
752 /* Disable the NFC clock */
753 if (host->clk_act) {
754 clk_disable_unprepare(host->clk);
755 host->clk_act = 0;
756 }
757 return;
758 }
759
760 if (!host->clk_act) {
761 /* Enable the NFC clock */
762 clk_prepare_enable(host->clk);
763 host->clk_act = 1;
764 }
765} 685}
766 686
767static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip) 687/* This function is used by upper layer for select and
688 * deselect of the NAND chip */
689static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
768{ 690{
769 struct nand_chip *nand_chip = mtd->priv; 691 struct nand_chip *nand_chip = mtd->priv;
770 struct mxc_nand_host *host = nand_chip->priv; 692 struct mxc_nand_host *host = nand_chip->priv;
@@ -772,7 +694,7 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
772 if (chip == -1) { 694 if (chip == -1) {
773 /* Disable the NFC clock */ 695 /* Disable the NFC clock */
774 if (host->clk_act) { 696 if (host->clk_act) {
775 clk_disable_unprepare(host->clk); 697 clk_disable(host->clk);
776 host->clk_act = 0; 698 host->clk_act = 0;
777 } 699 }
778 return; 700 return;
@@ -780,12 +702,14 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
780 702
781 if (!host->clk_act) { 703 if (!host->clk_act) {
782 /* Enable the NFC clock */ 704 /* Enable the NFC clock */
783 clk_prepare_enable(host->clk); 705 clk_enable(host->clk);
784 host->clk_act = 1; 706 host->clk_act = 1;
785 } 707 }
786 708
787 host->active_cs = chip; 709 if (nfc_is_v21()) {
788 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); 710 host->active_cs = chip;
711 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
712 }
789} 713}
790 714
791/* 715/*
@@ -798,23 +722,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
798 u16 i, j; 722 u16 i, j;
799 u16 n = mtd->writesize >> 9; 723 u16 n = mtd->writesize >> 9;
800 u8 *d = host->data_buf + mtd->writesize; 724 u8 *d = host->data_buf + mtd->writesize;
801 u8 __iomem *s = host->spare0; 725 u8 *s = host->spare0;
802 u16 t = host->devtype_data->spare_len; 726 u16 t = host->spare_len;
803 727
804 j = (mtd->oobsize / n >> 1) << 1; 728 j = (mtd->oobsize / n >> 1) << 1;
805 729
806 if (bfrom) { 730 if (bfrom) {
807 for (i = 0; i < n - 1; i++) 731 for (i = 0; i < n - 1; i++)
808 memcpy32_fromio(d + i * j, s + i * t, j); 732 memcpy(d + i * j, s + i * t, j);
809 733
810 /* the last section */ 734 /* the last section */
811 memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j); 735 memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
812 } else { 736 } else {
813 for (i = 0; i < n - 1; i++) 737 for (i = 0; i < n - 1; i++)
814 memcpy32_toio(&s[i * t], &d[i * j], j); 738 memcpy(&s[i * t], &d[i * j], j);
815 739
816 /* the last section */ 740 /* the last section */
817 memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j); 741 memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
818 } 742 }
819} 743}
820 744
@@ -831,44 +755,34 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
831 * perform a read/write buf operation, the saved column 755 * perform a read/write buf operation, the saved column
832 * address is used to index into the full page. 756 * address is used to index into the full page.
833 */ 757 */
834 host->devtype_data->send_addr(host, 0, page_addr == -1); 758 host->send_addr(host, 0, page_addr == -1);
835 if (mtd->writesize > 512) 759 if (mtd->writesize > 512)
836 /* another col addr cycle for 2k page */ 760 /* another col addr cycle for 2k page */
837 host->devtype_data->send_addr(host, 0, false); 761 host->send_addr(host, 0, false);
838 } 762 }
839 763
840 /* Write out page address, if necessary */ 764 /* Write out page address, if necessary */
841 if (page_addr != -1) { 765 if (page_addr != -1) {
842 /* paddr_0 - p_addr_7 */ 766 /* paddr_0 - p_addr_7 */
843 host->devtype_data->send_addr(host, (page_addr & 0xff), false); 767 host->send_addr(host, (page_addr & 0xff), false);
844 768
845 if (mtd->writesize > 512) { 769 if (mtd->writesize > 512) {
846 if (mtd->size >= 0x10000000) { 770 if (mtd->size >= 0x10000000) {
847 /* paddr_8 - paddr_15 */ 771 /* paddr_8 - paddr_15 */
848 host->devtype_data->send_addr(host, 772 host->send_addr(host, (page_addr >> 8) & 0xff, false);
849 (page_addr >> 8) & 0xff, 773 host->send_addr(host, (page_addr >> 16) & 0xff, true);
850 false);
851 host->devtype_data->send_addr(host,
852 (page_addr >> 16) & 0xff,
853 true);
854 } else 774 } else
855 /* paddr_8 - paddr_15 */ 775 /* paddr_8 - paddr_15 */
856 host->devtype_data->send_addr(host, 776 host->send_addr(host, (page_addr >> 8) & 0xff, true);
857 (page_addr >> 8) & 0xff, true);
858 } else { 777 } else {
859 /* One more address cycle for higher density devices */ 778 /* One more address cycle for higher density devices */
860 if (mtd->size >= 0x4000000) { 779 if (mtd->size >= 0x4000000) {
861 /* paddr_8 - paddr_15 */ 780 /* paddr_8 - paddr_15 */
862 host->devtype_data->send_addr(host, 781 host->send_addr(host, (page_addr >> 8) & 0xff, false);
863 (page_addr >> 8) & 0xff, 782 host->send_addr(host, (page_addr >> 16) & 0xff, true);
864 false);
865 host->devtype_data->send_addr(host,
866 (page_addr >> 16) & 0xff,
867 true);
868 } else 783 } else
869 /* paddr_8 - paddr_15 */ 784 /* paddr_8 - paddr_15 */
870 host->devtype_data->send_addr(host, 785 host->send_addr(host, (page_addr >> 8) & 0xff, true);
871 (page_addr >> 8) & 0xff, true);
872 } 786 }
873 } 787 }
874} 788}
@@ -890,7 +804,7 @@ static int get_eccsize(struct mtd_info *mtd)
890 return 8; 804 return 8;
891} 805}
892 806
893static void preset_v1(struct mtd_info *mtd) 807static void preset_v1_v2(struct mtd_info *mtd)
894{ 808{
895 struct nand_chip *nand_chip = mtd->priv; 809 struct nand_chip *nand_chip = mtd->priv;
896 struct mxc_nand_host *host = nand_chip->priv; 810 struct mxc_nand_host *host = nand_chip->priv;
@@ -899,40 +813,13 @@ static void preset_v1(struct mtd_info *mtd)
899 if (nand_chip->ecc.mode == NAND_ECC_HW) 813 if (nand_chip->ecc.mode == NAND_ECC_HW)
900 config1 |= NFC_V1_V2_CONFIG1_ECC_EN; 814 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
901 815
902 if (!host->devtype_data->irqpending_quirk) 816 if (nfc_is_v21())
903 config1 |= NFC_V1_V2_CONFIG1_INT_MSK; 817 config1 |= NFC_V2_CONFIG1_FP_INT;
904
905 host->eccsize = 1;
906
907 writew(config1, NFC_V1_V2_CONFIG1);
908 /* preset operation */
909
910 /* Unlock the internal RAM Buffer */
911 writew(0x2, NFC_V1_V2_CONFIG);
912 818
913 /* Blocks to be unlocked */ 819 if (!cpu_is_mx21())
914 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
915 writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
916
917 /* Unlock Block Command for given address range */
918 writew(0x4, NFC_V1_V2_WRPROT);
919}
920
921static void preset_v2(struct mtd_info *mtd)
922{
923 struct nand_chip *nand_chip = mtd->priv;
924 struct mxc_nand_host *host = nand_chip->priv;
925 uint16_t config1 = 0;
926
927 if (nand_chip->ecc.mode == NAND_ECC_HW)
928 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
929
930 config1 |= NFC_V2_CONFIG1_FP_INT;
931
932 if (!host->devtype_data->irqpending_quirk)
933 config1 |= NFC_V1_V2_CONFIG1_INT_MSK; 820 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
934 821
935 if (mtd->writesize) { 822 if (nfc_is_v21() && mtd->writesize) {
936 uint16_t pages_per_block = mtd->erasesize / mtd->writesize; 823 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
937 824
938 host->eccsize = get_eccsize(mtd); 825 host->eccsize = get_eccsize(mtd);
@@ -951,14 +838,20 @@ static void preset_v2(struct mtd_info *mtd)
951 writew(0x2, NFC_V1_V2_CONFIG); 838 writew(0x2, NFC_V1_V2_CONFIG);
952 839
953 /* Blocks to be unlocked */ 840 /* Blocks to be unlocked */
954 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0); 841 if (nfc_is_v21()) {
955 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1); 842 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
956 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2); 843 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
957 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3); 844 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
958 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0); 845 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
959 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1); 846 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
960 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2); 847 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
961 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); 848 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
849 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
850 } else if (nfc_is_v1()) {
851 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
852 writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
853 } else
854 BUG();
962 855
963 /* Unlock Block Command for given address range */ 856 /* Unlock Block Command for given address range */
964 writew(0x4, NFC_V1_V2_WRPROT); 857 writew(0x4, NFC_V1_V2_WRPROT);
@@ -1009,9 +902,7 @@ static void preset_v3(struct mtd_info *mtd)
1009 } 902 }
1010 903
1011 if (mtd->writesize) { 904 if (mtd->writesize) {
1012 config2 |= NFC_V3_CONFIG2_PPB( 905 config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
1013 ffs(mtd->erasesize / mtd->writesize) - 6,
1014 host->devtype_data->ppb_shift);
1015 host->eccsize = get_eccsize(mtd); 906 host->eccsize = get_eccsize(mtd);
1016 if (host->eccsize == 8) 907 if (host->eccsize == 8)
1017 config2 |= NFC_V3_CONFIG2_ECC_MODE_8; 908 config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
@@ -1041,7 +932,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
1041 struct nand_chip *nand_chip = mtd->priv; 932 struct nand_chip *nand_chip = mtd->priv;
1042 struct mxc_nand_host *host = nand_chip->priv; 933 struct mxc_nand_host *host = nand_chip->priv;
1043 934
1044 pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 935 DEBUG(MTD_DEBUG_LEVEL3,
936 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
1045 command, column, page_addr); 937 command, column, page_addr);
1046 938
1047 /* Reset command state information */ 939 /* Reset command state information */
@@ -1050,15 +942,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
1050 /* Command pre-processing step */ 942 /* Command pre-processing step */
1051 switch (command) { 943 switch (command) {
1052 case NAND_CMD_RESET: 944 case NAND_CMD_RESET:
1053 host->devtype_data->preset(mtd); 945 host->preset(mtd);
1054 host->devtype_data->send_cmd(host, command, false); 946 host->send_cmd(host, command, false);
1055 break; 947 break;
1056 948
1057 case NAND_CMD_STATUS: 949 case NAND_CMD_STATUS:
1058 host->buf_start = 0; 950 host->buf_start = 0;
1059 host->status_request = true; 951 host->status_request = true;
1060 952
1061 host->devtype_data->send_cmd(host, command, true); 953 host->send_cmd(host, command, true);
1062 mxc_do_addr_cycle(mtd, column, page_addr); 954 mxc_do_addr_cycle(mtd, column, page_addr);
1063 break; 955 break;
1064 956
@@ -1071,17 +963,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
1071 963
1072 command = NAND_CMD_READ0; /* only READ0 is valid */ 964 command = NAND_CMD_READ0; /* only READ0 is valid */
1073 965
1074 host->devtype_data->send_cmd(host, command, false); 966 host->send_cmd(host, command, false);
1075 mxc_do_addr_cycle(mtd, column, page_addr); 967 mxc_do_addr_cycle(mtd, column, page_addr);
1076 968
1077 if (mtd->writesize > 512) 969 if (mtd->writesize > 512)
1078 host->devtype_data->send_cmd(host, 970 host->send_cmd(host, NAND_CMD_READSTART, true);
1079 NAND_CMD_READSTART, true);
1080 971
1081 host->devtype_data->send_page(mtd, NFC_OUTPUT); 972 host->send_page(mtd, NFC_OUTPUT);
1082 973
1083 memcpy32_fromio(host->data_buf, host->main_area0, 974 memcpy(host->data_buf, host->main_area0, mtd->writesize);
1084 mtd->writesize);
1085 copy_spare(mtd, true); 975 copy_spare(mtd, true);
1086 break; 976 break;
1087 977
@@ -1092,28 +982,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
1092 982
1093 host->buf_start = column; 983 host->buf_start = column;
1094 984
1095 host->devtype_data->send_cmd(host, command, false); 985 host->send_cmd(host, command, false);
1096 mxc_do_addr_cycle(mtd, column, page_addr); 986 mxc_do_addr_cycle(mtd, column, page_addr);
1097 break; 987 break;
1098 988
1099 case NAND_CMD_PAGEPROG: 989 case NAND_CMD_PAGEPROG:
1100 memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize); 990 memcpy(host->main_area0, host->data_buf, mtd->writesize);
1101 copy_spare(mtd, false); 991 copy_spare(mtd, false);
1102 host->devtype_data->send_page(mtd, NFC_INPUT); 992 host->send_page(mtd, NFC_INPUT);
1103 host->devtype_data->send_cmd(host, command, true); 993 host->send_cmd(host, command, true);
1104 mxc_do_addr_cycle(mtd, column, page_addr); 994 mxc_do_addr_cycle(mtd, column, page_addr);
1105 break; 995 break;
1106 996
1107 case NAND_CMD_READID: 997 case NAND_CMD_READID:
1108 host->devtype_data->send_cmd(host, command, true); 998 host->send_cmd(host, command, true);
1109 mxc_do_addr_cycle(mtd, column, page_addr); 999 mxc_do_addr_cycle(mtd, column, page_addr);
1110 host->devtype_data->send_read_id(host); 1000 host->send_read_id(host);
1111 host->buf_start = column; 1001 host->buf_start = column;
1112 break; 1002 break;
1113 1003
1114 case NAND_CMD_ERASE1: 1004 case NAND_CMD_ERASE1:
1115 case NAND_CMD_ERASE2: 1005 case NAND_CMD_ERASE2:
1116 host->devtype_data->send_cmd(host, command, false); 1006 host->send_cmd(host, command, false);
1117 mxc_do_addr_cycle(mtd, column, page_addr); 1007 mxc_do_addr_cycle(mtd, column, page_addr);
1118 1008
1119 break; 1009 break;
@@ -1147,249 +1037,19 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1147 .pattern = mirror_pattern, 1037 .pattern = mirror_pattern,
1148}; 1038};
1149 1039
1150/* v1 + irqpending_quirk: i.MX21 */ 1040static int __init mxcnd_probe(struct platform_device *pdev)
1151static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
1152 .preset = preset_v1,
1153 .send_cmd = send_cmd_v1_v2,
1154 .send_addr = send_addr_v1_v2,
1155 .send_page = send_page_v1,
1156 .send_read_id = send_read_id_v1_v2,
1157 .get_dev_status = get_dev_status_v1_v2,
1158 .check_int = check_int_v1_v2,
1159 .irq_control = irq_control_v1_v2,
1160 .get_ecc_status = get_ecc_status_v1,
1161 .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
1162 .ecclayout_2k = &nandv1_hw_eccoob_largepage,
1163 .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
1164 .select_chip = mxc_nand_select_chip_v1_v3,
1165 .correct_data = mxc_nand_correct_data_v1,
1166 .irqpending_quirk = 1,
1167 .needs_ip = 0,
1168 .regs_offset = 0xe00,
1169 .spare0_offset = 0x800,
1170 .spare_len = 16,
1171 .eccbytes = 3,
1172 .eccsize = 1,
1173};
1174
1175/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
1176static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
1177 .preset = preset_v1,
1178 .send_cmd = send_cmd_v1_v2,
1179 .send_addr = send_addr_v1_v2,
1180 .send_page = send_page_v1,
1181 .send_read_id = send_read_id_v1_v2,
1182 .get_dev_status = get_dev_status_v1_v2,
1183 .check_int = check_int_v1_v2,
1184 .irq_control = irq_control_v1_v2,
1185 .get_ecc_status = get_ecc_status_v1,
1186 .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
1187 .ecclayout_2k = &nandv1_hw_eccoob_largepage,
1188 .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
1189 .select_chip = mxc_nand_select_chip_v1_v3,
1190 .correct_data = mxc_nand_correct_data_v1,
1191 .irqpending_quirk = 0,
1192 .needs_ip = 0,
1193 .regs_offset = 0xe00,
1194 .spare0_offset = 0x800,
1195 .axi_offset = 0,
1196 .spare_len = 16,
1197 .eccbytes = 3,
1198 .eccsize = 1,
1199};
1200
1201/* v21: i.MX25, i.MX35 */
1202static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
1203 .preset = preset_v2,
1204 .send_cmd = send_cmd_v1_v2,
1205 .send_addr = send_addr_v1_v2,
1206 .send_page = send_page_v2,
1207 .send_read_id = send_read_id_v1_v2,
1208 .get_dev_status = get_dev_status_v1_v2,
1209 .check_int = check_int_v1_v2,
1210 .irq_control = irq_control_v1_v2,
1211 .get_ecc_status = get_ecc_status_v2,
1212 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1213 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1214 .ecclayout_4k = &nandv2_hw_eccoob_4k,
1215 .select_chip = mxc_nand_select_chip_v2,
1216 .correct_data = mxc_nand_correct_data_v2_v3,
1217 .irqpending_quirk = 0,
1218 .needs_ip = 0,
1219 .regs_offset = 0x1e00,
1220 .spare0_offset = 0x1000,
1221 .axi_offset = 0,
1222 .spare_len = 64,
1223 .eccbytes = 9,
1224 .eccsize = 0,
1225};
1226
1227/* v3.2a: i.MX51 */
1228static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
1229 .preset = preset_v3,
1230 .send_cmd = send_cmd_v3,
1231 .send_addr = send_addr_v3,
1232 .send_page = send_page_v3,
1233 .send_read_id = send_read_id_v3,
1234 .get_dev_status = get_dev_status_v3,
1235 .check_int = check_int_v3,
1236 .irq_control = irq_control_v3,
1237 .get_ecc_status = get_ecc_status_v3,
1238 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1239 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1240 .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
1241 .select_chip = mxc_nand_select_chip_v1_v3,
1242 .correct_data = mxc_nand_correct_data_v2_v3,
1243 .irqpending_quirk = 0,
1244 .needs_ip = 1,
1245 .regs_offset = 0,
1246 .spare0_offset = 0x1000,
1247 .axi_offset = 0x1e00,
1248 .spare_len = 64,
1249 .eccbytes = 0,
1250 .eccsize = 0,
1251 .ppb_shift = 7,
1252};
1253
1254/* v3.2b: i.MX53 */
1255static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
1256 .preset = preset_v3,
1257 .send_cmd = send_cmd_v3,
1258 .send_addr = send_addr_v3,
1259 .send_page = send_page_v3,
1260 .send_read_id = send_read_id_v3,
1261 .get_dev_status = get_dev_status_v3,
1262 .check_int = check_int_v3,
1263 .irq_control = irq_control_v3,
1264 .get_ecc_status = get_ecc_status_v3,
1265 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1266 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1267 .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
1268 .select_chip = mxc_nand_select_chip_v1_v3,
1269 .correct_data = mxc_nand_correct_data_v2_v3,
1270 .irqpending_quirk = 0,
1271 .needs_ip = 1,
1272 .regs_offset = 0,
1273 .spare0_offset = 0x1000,
1274 .axi_offset = 0x1e00,
1275 .spare_len = 64,
1276 .eccbytes = 0,
1277 .eccsize = 0,
1278 .ppb_shift = 8,
1279};
1280
1281static inline int is_imx21_nfc(struct mxc_nand_host *host)
1282{
1283 return host->devtype_data == &imx21_nand_devtype_data;
1284}
1285
1286static inline int is_imx27_nfc(struct mxc_nand_host *host)
1287{
1288 return host->devtype_data == &imx27_nand_devtype_data;
1289}
1290
1291static inline int is_imx25_nfc(struct mxc_nand_host *host)
1292{
1293 return host->devtype_data == &imx25_nand_devtype_data;
1294}
1295
1296static inline int is_imx51_nfc(struct mxc_nand_host *host)
1297{
1298 return host->devtype_data == &imx51_nand_devtype_data;
1299}
1300
1301static inline int is_imx53_nfc(struct mxc_nand_host *host)
1302{
1303 return host->devtype_data == &imx53_nand_devtype_data;
1304}
1305
1306static struct platform_device_id mxcnd_devtype[] = {
1307 {
1308 .name = "imx21-nand",
1309 .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
1310 }, {
1311 .name = "imx27-nand",
1312 .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
1313 }, {
1314 .name = "imx25-nand",
1315 .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
1316 }, {
1317 .name = "imx51-nand",
1318 .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
1319 }, {
1320 .name = "imx53-nand",
1321 .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
1322 }, {
1323 /* sentinel */
1324 }
1325};
1326MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
1327
1328#ifdef CONFIG_OF_MTD
1329static const struct of_device_id mxcnd_dt_ids[] = {
1330 {
1331 .compatible = "fsl,imx21-nand",
1332 .data = &imx21_nand_devtype_data,
1333 }, {
1334 .compatible = "fsl,imx27-nand",
1335 .data = &imx27_nand_devtype_data,
1336 }, {
1337 .compatible = "fsl,imx25-nand",
1338 .data = &imx25_nand_devtype_data,
1339 }, {
1340 .compatible = "fsl,imx51-nand",
1341 .data = &imx51_nand_devtype_data,
1342 }, {
1343 .compatible = "fsl,imx53-nand",
1344 .data = &imx53_nand_devtype_data,
1345 },
1346 { /* sentinel */ }
1347};
1348
1349static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1350{
1351 struct device_node *np = host->dev->of_node;
1352 struct mxc_nand_platform_data *pdata = &host->pdata;
1353 const struct of_device_id *of_id =
1354 of_match_device(mxcnd_dt_ids, host->dev);
1355 int buswidth;
1356
1357 if (!np)
1358 return 1;
1359
1360 if (of_get_nand_ecc_mode(np) >= 0)
1361 pdata->hw_ecc = 1;
1362
1363 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1364
1365 buswidth = of_get_nand_bus_width(np);
1366 if (buswidth < 0)
1367 return buswidth;
1368
1369 pdata->width = buswidth / 8;
1370
1371 host->devtype_data = of_id->data;
1372
1373 return 0;
1374}
1375#else
1376static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1377{
1378 return 1;
1379}
1380#endif
1381
1382static int mxcnd_probe(struct platform_device *pdev)
1383{ 1041{
1384 struct nand_chip *this; 1042 struct nand_chip *this;
1385 struct mtd_info *mtd; 1043 struct mtd_info *mtd;
1044 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1386 struct mxc_nand_host *host; 1045 struct mxc_nand_host *host;
1387 struct resource *res; 1046 struct resource *res;
1388 int err = 0; 1047 int err = 0, __maybe_unused nr_parts = 0;
1048 struct nand_ecclayout *oob_smallpage, *oob_largepage;
1389 1049
1390 /* Allocate memory for MTD device structure and private data */ 1050 /* Allocate memory for MTD device structure and private data */
1391 host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) + 1051 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
1392 NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL); 1052 NAND_MAX_OOBSIZE, GFP_KERNEL);
1393 if (!host) 1053 if (!host)
1394 return -ENOMEM; 1054 return -ENOMEM;
1395 1055
@@ -1410,82 +1070,116 @@ static int mxcnd_probe(struct platform_device *pdev)
1410 this->priv = host; 1070 this->priv = host;
1411 this->dev_ready = mxc_nand_dev_ready; 1071 this->dev_ready = mxc_nand_dev_ready;
1412 this->cmdfunc = mxc_nand_command; 1072 this->cmdfunc = mxc_nand_command;
1073 this->select_chip = mxc_nand_select_chip;
1413 this->read_byte = mxc_nand_read_byte; 1074 this->read_byte = mxc_nand_read_byte;
1414 this->read_word = mxc_nand_read_word; 1075 this->read_word = mxc_nand_read_word;
1415 this->write_buf = mxc_nand_write_buf; 1076 this->write_buf = mxc_nand_write_buf;
1416 this->read_buf = mxc_nand_read_buf; 1077 this->read_buf = mxc_nand_read_buf;
1078 this->verify_buf = mxc_nand_verify_buf;
1417 1079
1418 host->clk = devm_clk_get(&pdev->dev, NULL); 1080 host->clk = clk_get(&pdev->dev, "nfc");
1419 if (IS_ERR(host->clk)) 1081 if (IS_ERR(host->clk)) {
1420 return PTR_ERR(host->clk); 1082 err = PTR_ERR(host->clk);
1421 1083 goto eclk;
1422 err = mxcnd_probe_dt(host);
1423 if (err > 0) {
1424 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1425 if (pdata) {
1426 host->pdata = *pdata;
1427 host->devtype_data = (struct mxc_nand_devtype_data *)
1428 pdev->id_entry->driver_data;
1429 } else {
1430 err = -ENODEV;
1431 }
1432 } 1084 }
1433 if (err < 0)
1434 return err;
1435 1085
1436 if (host->devtype_data->needs_ip) { 1086 clk_enable(host->clk);
1437 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1087 host->clk_act = 1;
1438 if (!res)
1439 return -ENODEV;
1440 host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
1441 if (!host->regs_ip)
1442 return -ENOMEM;
1443 1088
1444 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1089 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1445 } else { 1090 if (!res) {
1446 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1091 err = -ENODEV;
1092 goto eres;
1447 } 1093 }
1448 1094
1449 if (!res) 1095 host->base = ioremap(res->start, resource_size(res));
1450 return -ENODEV; 1096 if (!host->base) {
1451 1097 err = -ENOMEM;
1452 host->base = devm_request_and_ioremap(&pdev->dev, res); 1098 goto eres;
1453 if (!host->base) 1099 }
1454 return -ENOMEM;
1455 1100
1456 host->main_area0 = host->base; 1101 host->main_area0 = host->base;
1457 1102
1458 if (host->devtype_data->regs_offset) 1103 if (nfc_is_v1() || nfc_is_v21()) {
1459 host->regs = host->base + host->devtype_data->regs_offset; 1104 host->preset = preset_v1_v2;
1460 host->spare0 = host->base + host->devtype_data->spare0_offset; 1105 host->send_cmd = send_cmd_v1_v2;
1461 if (host->devtype_data->axi_offset) 1106 host->send_addr = send_addr_v1_v2;
1462 host->regs_axi = host->base + host->devtype_data->axi_offset; 1107 host->send_page = send_page_v1_v2;
1108 host->send_read_id = send_read_id_v1_v2;
1109 host->get_dev_status = get_dev_status_v1_v2;
1110 host->check_int = check_int_v1_v2;
1111 if (cpu_is_mx21())
1112 host->irq_control = irq_control_mx21;
1113 else
1114 host->irq_control = irq_control_v1_v2;
1115 }
1463 1116
1464 this->ecc.bytes = host->devtype_data->eccbytes; 1117 if (nfc_is_v21()) {
1465 host->eccsize = host->devtype_data->eccsize; 1118 host->regs = host->base + 0x1e00;
1119 host->spare0 = host->base + 0x1000;
1120 host->spare_len = 64;
1121 oob_smallpage = &nandv2_hw_eccoob_smallpage;
1122 oob_largepage = &nandv2_hw_eccoob_largepage;
1123 this->ecc.bytes = 9;
1124 } else if (nfc_is_v1()) {
1125 host->regs = host->base + 0xe00;
1126 host->spare0 = host->base + 0x800;
1127 host->spare_len = 16;
1128 oob_smallpage = &nandv1_hw_eccoob_smallpage;
1129 oob_largepage = &nandv1_hw_eccoob_largepage;
1130 this->ecc.bytes = 3;
1131 host->eccsize = 1;
1132 } else if (nfc_is_v3_2()) {
1133 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1134 if (!res) {
1135 err = -ENODEV;
1136 goto eirq;
1137 }
1138 host->regs_ip = ioremap(res->start, resource_size(res));
1139 if (!host->regs_ip) {
1140 err = -ENOMEM;
1141 goto eirq;
1142 }
1143 host->regs_axi = host->base + 0x1e00;
1144 host->spare0 = host->base + 0x1000;
1145 host->spare_len = 64;
1146 host->preset = preset_v3;
1147 host->send_cmd = send_cmd_v3;
1148 host->send_addr = send_addr_v3;
1149 host->send_page = send_page_v3;
1150 host->send_read_id = send_read_id_v3;
1151 host->check_int = check_int_v3;
1152 host->get_dev_status = get_dev_status_v3;
1153 host->irq_control = irq_control_v3;
1154 oob_smallpage = &nandv2_hw_eccoob_smallpage;
1155 oob_largepage = &nandv2_hw_eccoob_largepage;
1156 } else
1157 BUG();
1466 1158
1467 this->select_chip = host->devtype_data->select_chip;
1468 this->ecc.size = 512; 1159 this->ecc.size = 512;
1469 this->ecc.layout = host->devtype_data->ecclayout_512; 1160 this->ecc.layout = oob_smallpage;
1470 1161
1471 if (host->pdata.hw_ecc) { 1162 if (pdata->hw_ecc) {
1472 this->ecc.calculate = mxc_nand_calculate_ecc; 1163 this->ecc.calculate = mxc_nand_calculate_ecc;
1473 this->ecc.hwctl = mxc_nand_enable_hwecc; 1164 this->ecc.hwctl = mxc_nand_enable_hwecc;
1474 this->ecc.correct = host->devtype_data->correct_data; 1165 if (nfc_is_v1())
1166 this->ecc.correct = mxc_nand_correct_data_v1;
1167 else
1168 this->ecc.correct = mxc_nand_correct_data_v2_v3;
1475 this->ecc.mode = NAND_ECC_HW; 1169 this->ecc.mode = NAND_ECC_HW;
1476 } else { 1170 } else {
1477 this->ecc.mode = NAND_ECC_SOFT; 1171 this->ecc.mode = NAND_ECC_SOFT;
1478 } 1172 }
1479 1173
1480 /* NAND bus width determines access functions used by upper layer */ 1174 /* NAND bus width determines access funtions used by upper layer */
1481 if (host->pdata.width == 2) 1175 if (pdata->width == 2)
1482 this->options |= NAND_BUSWIDTH_16; 1176 this->options |= NAND_BUSWIDTH_16;
1483 1177
1484 if (host->pdata.flash_bbt) { 1178 if (pdata->flash_bbt) {
1485 this->bbt_td = &bbt_main_descr; 1179 this->bbt_td = &bbt_main_descr;
1486 this->bbt_md = &bbt_mirror_descr; 1180 this->bbt_md = &bbt_mirror_descr;
1487 /* update flash based bbt */ 1181 /* update flash based bbt */
1488 this->bbt_options |= NAND_BBT_USE_FLASH; 1182 this->options |= NAND_USE_FLASH_BBT;
1489 } 1183 }
1490 1184
1491 init_completion(&host->op_completion); 1185 init_completion(&host->op_completion);
@@ -1493,50 +1187,42 @@ static int mxcnd_probe(struct platform_device *pdev)
1493 host->irq = platform_get_irq(pdev, 0); 1187 host->irq = platform_get_irq(pdev, 0);
1494 1188
1495 /* 1189 /*
1496 * Use host->devtype_data->irq_control() here instead of irq_control() 1190 * mask the interrupt. For i.MX21 explicitely call
1497 * because we must not disable_irq_nosync without having requested the 1191 * irq_control_v1_v2 to use the mask bit. We can't call
1498 * irq. 1192 * disable_irq_nosync() for an interrupt we do not own yet.
1499 */ 1193 */
1500 host->devtype_data->irq_control(host, 0); 1194 if (cpu_is_mx21())
1195 irq_control_v1_v2(host, 0);
1196 else
1197 host->irq_control(host, 0);
1501 1198
1502 err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq, 1199 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
1503 IRQF_DISABLED, DRIVER_NAME, host);
1504 if (err) 1200 if (err)
1505 return err; 1201 goto eirq;
1506 1202
1507 clk_prepare_enable(host->clk); 1203 host->irq_control(host, 0);
1508 host->clk_act = 1;
1509 1204
1510 /* 1205 /*
1511 * Now that we "own" the interrupt make sure the interrupt mask bit is 1206 * Now that the interrupt is disabled make sure the interrupt
1512 * cleared on i.MX21. Otherwise we can't read the interrupt status bit 1207 * mask bit is cleared on i.MX21. Otherwise we can't read
1513 * on this machine. 1208 * the interrupt status bit on this machine.
1514 */ 1209 */
1515 if (host->devtype_data->irqpending_quirk) { 1210 if (cpu_is_mx21())
1516 disable_irq_nosync(host->irq); 1211 irq_control_v1_v2(host, 1);
1517 host->devtype_data->irq_control(host, 1);
1518 }
1519 1212
1520 /* first scan to find the device and get the page size */ 1213 /* first scan to find the device and get the page size */
1521 if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) { 1214 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
1522 err = -ENXIO; 1215 err = -ENXIO;
1523 goto escan; 1216 goto escan;
1524 } 1217 }
1525 1218
1526 /* Call preset again, with correct writesize this time */ 1219 /* Call preset again, with correct writesize this time */
1527 host->devtype_data->preset(mtd); 1220 host->preset(mtd);
1528 1221
1529 if (mtd->writesize == 2048) 1222 if (mtd->writesize == 2048)
1530 this->ecc.layout = host->devtype_data->ecclayout_2k; 1223 this->ecc.layout = oob_largepage;
1531 else if (mtd->writesize == 4096) 1224 if (nfc_is_v21() && mtd->writesize == 4096)
1532 this->ecc.layout = host->devtype_data->ecclayout_4k; 1225 this->ecc.layout = &nandv2_hw_eccoob_4k;
1533
1534 if (this->ecc.mode == NAND_ECC_HW) {
1535 if (is_imx21_nfc(host) || is_imx27_nfc(host))
1536 this->ecc.strength = 1;
1537 else
1538 this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
1539 }
1540 1226
1541 /* second phase scan */ 1227 /* second phase scan */
1542 if (nand_scan_tail(mtd)) { 1228 if (nand_scan_tail(mtd)) {
@@ -1545,31 +1231,49 @@ static int mxcnd_probe(struct platform_device *pdev)
1545 } 1231 }
1546 1232
1547 /* Register the partitions */ 1233 /* Register the partitions */
1548 mtd_device_parse_register(mtd, part_probes, 1234 nr_parts =
1549 &(struct mtd_part_parser_data){ 1235 parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
1550 .of_node = pdev->dev.of_node, 1236 if (nr_parts > 0)
1551 }, 1237 mtd_device_register(mtd, host->parts, nr_parts);
1552 host->pdata.parts, 1238 else if (pdata->parts)
1553 host->pdata.nr_parts); 1239 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1240 else {
1241 pr_info("Registering %s as whole device\n", mtd->name);
1242 mtd_device_register(mtd, NULL, 0);
1243 }
1554 1244
1555 platform_set_drvdata(pdev, host); 1245 platform_set_drvdata(pdev, host);
1556 1246
1557 return 0; 1247 return 0;
1558 1248
1559escan: 1249escan:
1560 if (host->clk_act) 1250 free_irq(host->irq, host);
1561 clk_disable_unprepare(host->clk); 1251eirq:
1252 if (host->regs_ip)
1253 iounmap(host->regs_ip);
1254 iounmap(host->base);
1255eres:
1256 clk_put(host->clk);
1257eclk:
1258 kfree(host);
1562 1259
1563 return err; 1260 return err;
1564} 1261}
1565 1262
1566static int mxcnd_remove(struct platform_device *pdev) 1263static int __devexit mxcnd_remove(struct platform_device *pdev)
1567{ 1264{
1568 struct mxc_nand_host *host = platform_get_drvdata(pdev); 1265 struct mxc_nand_host *host = platform_get_drvdata(pdev);
1569 1266
1267 clk_put(host->clk);
1268
1570 platform_set_drvdata(pdev, NULL); 1269 platform_set_drvdata(pdev, NULL);
1571 1270
1572 nand_release(&host->mtd); 1271 nand_release(&host->mtd);
1272 free_irq(host->irq, host);
1273 if (host->regs_ip)
1274 iounmap(host->regs_ip);
1275 iounmap(host->base);
1276 kfree(host);
1573 1277
1574 return 0; 1278 return 0;
1575} 1279}
@@ -1577,14 +1281,23 @@ static int mxcnd_remove(struct platform_device *pdev)
1577static struct platform_driver mxcnd_driver = { 1281static struct platform_driver mxcnd_driver = {
1578 .driver = { 1282 .driver = {
1579 .name = DRIVER_NAME, 1283 .name = DRIVER_NAME,
1580 .owner = THIS_MODULE,
1581 .of_match_table = of_match_ptr(mxcnd_dt_ids),
1582 }, 1284 },
1583 .id_table = mxcnd_devtype, 1285 .remove = __devexit_p(mxcnd_remove),
1584 .probe = mxcnd_probe,
1585 .remove = mxcnd_remove,
1586}; 1286};
1587module_platform_driver(mxcnd_driver); 1287
1288static int __init mxc_nd_init(void)
1289{
1290 return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
1291}
1292
1293static void __exit mxc_nd_cleanup(void)
1294{
1295 /* Unregister the device structure */
1296 platform_driver_unregister(&mxcnd_driver);
1297}
1298
1299module_init(mxc_nd_init);
1300module_exit(mxc_nd_cleanup);
1588 1301
1589MODULE_AUTHOR("Freescale Semiconductor, Inc."); 1302MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1590MODULE_DESCRIPTION("MXC NAND MTD driver"); 1303MODULE_DESCRIPTION("MXC NAND MTD driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8323ac991ad..15d71658b4f 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -21,7 +21,7 @@
21 * TODO: 21 * TODO:
22 * Enable cached programming for 2k page size chips 22 * Enable cached programming for 2k page size chips
23 * Check, if mtd->ecctype should be set to MTD_ECC_HW 23 * Check, if mtd->ecctype should be set to MTD_ECC_HW
24 * if we have HW ECC support. 24 * if we have HW ecc support.
25 * The AG-AND chips have nice features for speed improvement, 25 * The AG-AND chips have nice features for speed improvement,
26 * which are not supported yet. Read / program 4 pages in one go. 26 * which are not supported yet. Read / program 4 pages in one go.
27 * BBT table is not serialized, has to be fixed 27 * BBT table is not serialized, has to be fixed
@@ -93,7 +93,8 @@ static struct nand_ecclayout nand_oob_128 = {
93 .length = 78} } 93 .length = 78} }
94}; 94};
95 95
96static int nand_get_device(struct mtd_info *mtd, int new_state); 96static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
97 int new_state);
97 98
98static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 99static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
99 struct mtd_oob_ops *ops); 100 struct mtd_oob_ops *ops);
@@ -112,13 +113,21 @@ static int check_offs_len(struct mtd_info *mtd,
112 113
113 /* Start address must align on block boundary */ 114 /* Start address must align on block boundary */
114 if (ofs & ((1 << chip->phys_erase_shift) - 1)) { 115 if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
115 pr_debug("%s: unaligned address\n", __func__); 116 DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
116 ret = -EINVAL; 117 ret = -EINVAL;
117 } 118 }
118 119
119 /* Length must align on block boundary */ 120 /* Length must align on block boundary */
120 if (len & ((1 << chip->phys_erase_shift) - 1)) { 121 if (len & ((1 << chip->phys_erase_shift) - 1)) {
121 pr_debug("%s: length not block aligned\n", __func__); 122 DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
123 __func__);
124 ret = -EINVAL;
125 }
126
127 /* Do not allow past end of device */
128 if (ofs + len > mtd->size) {
129 DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
130 __func__);
122 ret = -EINVAL; 131 ret = -EINVAL;
123 } 132 }
124 133
@@ -127,14 +136,17 @@ static int check_offs_len(struct mtd_info *mtd,
127 136
128/** 137/**
129 * nand_release_device - [GENERIC] release chip 138 * nand_release_device - [GENERIC] release chip
130 * @mtd: MTD device structure 139 * @mtd: MTD device structure
131 * 140 *
132 * Release chip lock and wake up anyone waiting on the device. 141 * Deselect, release chip lock and wake up anyone waiting on the device
133 */ 142 */
134static void nand_release_device(struct mtd_info *mtd) 143static void nand_release_device(struct mtd_info *mtd)
135{ 144{
136 struct nand_chip *chip = mtd->priv; 145 struct nand_chip *chip = mtd->priv;
137 146
147 /* De-select the NAND device */
148 chip->select_chip(mtd, -1);
149
138 /* Release the controller and the chip */ 150 /* Release the controller and the chip */
139 spin_lock(&chip->controller->lock); 151 spin_lock(&chip->controller->lock);
140 chip->controller->active = NULL; 152 chip->controller->active = NULL;
@@ -145,9 +157,9 @@ static void nand_release_device(struct mtd_info *mtd)
145 157
146/** 158/**
147 * nand_read_byte - [DEFAULT] read one byte from the chip 159 * nand_read_byte - [DEFAULT] read one byte from the chip
148 * @mtd: MTD device structure 160 * @mtd: MTD device structure
149 * 161 *
150 * Default read function for 8bit buswidth 162 * Default read function for 8bit buswith
151 */ 163 */
152static uint8_t nand_read_byte(struct mtd_info *mtd) 164static uint8_t nand_read_byte(struct mtd_info *mtd)
153{ 165{
@@ -156,12 +168,11 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
156} 168}
157 169
158/** 170/**
159 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 171 * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
160 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip 172 * @mtd: MTD device structure
161 * @mtd: MTD device structure
162 *
163 * Default read function for 16bit buswidth with endianness conversion.
164 * 173 *
174 * Default read function for 16bit buswith with
175 * endianess conversion
165 */ 176 */
166static uint8_t nand_read_byte16(struct mtd_info *mtd) 177static uint8_t nand_read_byte16(struct mtd_info *mtd)
167{ 178{
@@ -171,9 +182,10 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd)
171 182
172/** 183/**
173 * nand_read_word - [DEFAULT] read one word from the chip 184 * nand_read_word - [DEFAULT] read one word from the chip
174 * @mtd: MTD device structure 185 * @mtd: MTD device structure
175 * 186 *
176 * Default read function for 16bit buswidth without endianness conversion. 187 * Default read function for 16bit buswith without
188 * endianess conversion
177 */ 189 */
178static u16 nand_read_word(struct mtd_info *mtd) 190static u16 nand_read_word(struct mtd_info *mtd)
179{ 191{
@@ -183,8 +195,8 @@ static u16 nand_read_word(struct mtd_info *mtd)
183 195
184/** 196/**
185 * nand_select_chip - [DEFAULT] control CE line 197 * nand_select_chip - [DEFAULT] control CE line
186 * @mtd: MTD device structure 198 * @mtd: MTD device structure
187 * @chipnr: chipnumber to select, -1 for deselect 199 * @chipnr: chipnumber to select, -1 for deselect
188 * 200 *
189 * Default select function for 1 chip devices. 201 * Default select function for 1 chip devices.
190 */ 202 */
@@ -206,11 +218,11 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
206 218
207/** 219/**
208 * nand_write_buf - [DEFAULT] write buffer to chip 220 * nand_write_buf - [DEFAULT] write buffer to chip
209 * @mtd: MTD device structure 221 * @mtd: MTD device structure
210 * @buf: data buffer 222 * @buf: data buffer
211 * @len: number of bytes to write 223 * @len: number of bytes to write
212 * 224 *
213 * Default write function for 8bit buswidth. 225 * Default write function for 8bit buswith
214 */ 226 */
215static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 227static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
216{ 228{
@@ -223,11 +235,11 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
223 235
224/** 236/**
225 * nand_read_buf - [DEFAULT] read chip data into buffer 237 * nand_read_buf - [DEFAULT] read chip data into buffer
226 * @mtd: MTD device structure 238 * @mtd: MTD device structure
227 * @buf: buffer to store date 239 * @buf: buffer to store date
228 * @len: number of bytes to read 240 * @len: number of bytes to read
229 * 241 *
230 * Default read function for 8bit buswidth. 242 * Default read function for 8bit buswith
231 */ 243 */
232static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 244static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
233{ 245{
@@ -239,12 +251,31 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
239} 251}
240 252
241/** 253/**
254 * nand_verify_buf - [DEFAULT] Verify chip data against buffer
255 * @mtd: MTD device structure
256 * @buf: buffer containing the data to compare
257 * @len: number of bytes to compare
258 *
259 * Default verify function for 8bit buswith
260 */
261static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
262{
263 int i;
264 struct nand_chip *chip = mtd->priv;
265
266 for (i = 0; i < len; i++)
267 if (buf[i] != readb(chip->IO_ADDR_R))
268 return -EFAULT;
269 return 0;
270}
271
272/**
242 * nand_write_buf16 - [DEFAULT] write buffer to chip 273 * nand_write_buf16 - [DEFAULT] write buffer to chip
243 * @mtd: MTD device structure 274 * @mtd: MTD device structure
244 * @buf: data buffer 275 * @buf: data buffer
245 * @len: number of bytes to write 276 * @len: number of bytes to write
246 * 277 *
247 * Default write function for 16bit buswidth. 278 * Default write function for 16bit buswith
248 */ 279 */
249static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) 280static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
250{ 281{
@@ -260,11 +291,11 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
260 291
261/** 292/**
262 * nand_read_buf16 - [DEFAULT] read chip data into buffer 293 * nand_read_buf16 - [DEFAULT] read chip data into buffer
263 * @mtd: MTD device structure 294 * @mtd: MTD device structure
264 * @buf: buffer to store date 295 * @buf: buffer to store date
265 * @len: number of bytes to read 296 * @len: number of bytes to read
266 * 297 *
267 * Default read function for 16bit buswidth. 298 * Default read function for 16bit buswith
268 */ 299 */
269static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 300static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
270{ 301{
@@ -278,20 +309,42 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
278} 309}
279 310
280/** 311/**
312 * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
313 * @mtd: MTD device structure
314 * @buf: buffer containing the data to compare
315 * @len: number of bytes to compare
316 *
317 * Default verify function for 16bit buswith
318 */
319static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
320{
321 int i;
322 struct nand_chip *chip = mtd->priv;
323 u16 *p = (u16 *) buf;
324 len >>= 1;
325
326 for (i = 0; i < len; i++)
327 if (p[i] != readw(chip->IO_ADDR_R))
328 return -EFAULT;
329
330 return 0;
331}
332
333/**
281 * nand_block_bad - [DEFAULT] Read bad block marker from the chip 334 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
282 * @mtd: MTD device structure 335 * @mtd: MTD device structure
283 * @ofs: offset from device start 336 * @ofs: offset from device start
284 * @getchip: 0, if the chip is already selected 337 * @getchip: 0, if the chip is already selected
285 * 338 *
286 * Check, if the block is bad. 339 * Check, if the block is bad.
287 */ 340 */
288static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) 341static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
289{ 342{
290 int page, chipnr, res = 0, i = 0; 343 int page, chipnr, res = 0;
291 struct nand_chip *chip = mtd->priv; 344 struct nand_chip *chip = mtd->priv;
292 u16 bad; 345 u16 bad;
293 346
294 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) 347 if (chip->options & NAND_BBT_SCANLASTPAGE)
295 ofs += mtd->erasesize - mtd->writesize; 348 ofs += mtd->erasesize - mtd->writesize;
296 349
297 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 350 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -299,123 +352,89 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
299 if (getchip) { 352 if (getchip) {
300 chipnr = (int)(ofs >> chip->chip_shift); 353 chipnr = (int)(ofs >> chip->chip_shift);
301 354
302 nand_get_device(mtd, FL_READING); 355 nand_get_device(chip, mtd, FL_READING);
303 356
304 /* Select the NAND device */ 357 /* Select the NAND device */
305 chip->select_chip(mtd, chipnr); 358 chip->select_chip(mtd, chipnr);
306 } 359 }
307 360
308 do { 361 if (chip->options & NAND_BUSWIDTH_16) {
309 if (chip->options & NAND_BUSWIDTH_16) { 362 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
310 chip->cmdfunc(mtd, NAND_CMD_READOOB, 363 page);
311 chip->badblockpos & 0xFE, page); 364 bad = cpu_to_le16(chip->read_word(mtd));
312 bad = cpu_to_le16(chip->read_word(mtd)); 365 if (chip->badblockpos & 0x1)
313 if (chip->badblockpos & 0x1) 366 bad >>= 8;
314 bad >>= 8;
315 else
316 bad &= 0xFF;
317 } else {
318 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
319 page);
320 bad = chip->read_byte(mtd);
321 }
322
323 if (likely(chip->badblockbits == 8))
324 res = bad != 0xFF;
325 else 367 else
326 res = hweight8(bad) < chip->badblockbits; 368 bad &= 0xFF;
327 ofs += mtd->writesize; 369 } else {
328 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 370 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
329 i++; 371 bad = chip->read_byte(mtd);
330 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); 372 }
331 373
332 if (getchip) { 374 if (likely(chip->badblockbits == 8))
333 chip->select_chip(mtd, -1); 375 res = bad != 0xFF;
376 else
377 res = hweight8(bad) < chip->badblockbits;
378
379 if (getchip)
334 nand_release_device(mtd); 380 nand_release_device(mtd);
335 }
336 381
337 return res; 382 return res;
338} 383}
339 384
340/** 385/**
341 * nand_default_block_markbad - [DEFAULT] mark a block bad 386 * nand_default_block_markbad - [DEFAULT] mark a block bad
342 * @mtd: MTD device structure 387 * @mtd: MTD device structure
343 * @ofs: offset from device start 388 * @ofs: offset from device start
344 * 389 *
345 * This is the default implementation, which can be overridden by a hardware 390 * This is the default implementation, which can be overridden by
346 * specific driver. We try operations in the following order, according to our 391 * a hardware specific driver.
347 * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
348 * (1) erase the affected block, to allow OOB marker to be written cleanly
349 * (2) update in-memory BBT
350 * (3) write bad block marker to OOB area of affected block
351 * (4) update flash-based BBT
352 * Note that we retain the first error encountered in (3) or (4), finish the
353 * procedures, and dump the error in the end.
354*/ 392*/
355static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 393static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
356{ 394{
357 struct nand_chip *chip = mtd->priv; 395 struct nand_chip *chip = mtd->priv;
358 uint8_t buf[2] = { 0, 0 }; 396 uint8_t buf[2] = { 0, 0 };
359 int block, res, ret = 0, i = 0; 397 int block, ret, i = 0;
360 int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
361 398
362 if (write_oob) { 399 if (chip->options & NAND_BBT_SCANLASTPAGE)
363 struct erase_info einfo; 400 ofs += mtd->erasesize - mtd->writesize;
364
365 /* Attempt erase before marking OOB */
366 memset(&einfo, 0, sizeof(einfo));
367 einfo.mtd = mtd;
368 einfo.addr = ofs;
369 einfo.len = 1 << chip->phys_erase_shift;
370 nand_erase_nand(mtd, &einfo, 0);
371 }
372 401
373 /* Get block number */ 402 /* Get block number */
374 block = (int)(ofs >> chip->bbt_erase_shift); 403 block = (int)(ofs >> chip->bbt_erase_shift);
375 /* Mark block bad in memory-based BBT */
376 if (chip->bbt) 404 if (chip->bbt)
377 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 405 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
378 406
379 /* Write bad block marker to OOB */ 407 /* Do we have a flash based bad block table ? */
380 if (write_oob) { 408 if (chip->options & NAND_USE_FLASH_BBT)
381 struct mtd_oob_ops ops; 409 ret = nand_update_bbt(mtd, ofs);
382 loff_t wr_ofs = ofs; 410 else {
383 411 nand_get_device(chip, mtd, FL_WRITING);
384 nand_get_device(mtd, FL_WRITING);
385
386 ops.datbuf = NULL;
387 ops.oobbuf = buf;
388 ops.ooboffs = chip->badblockpos;
389 if (chip->options & NAND_BUSWIDTH_16) {
390 ops.ooboffs &= ~0x01;
391 ops.len = ops.ooblen = 2;
392 } else {
393 ops.len = ops.ooblen = 1;
394 }
395 ops.mode = MTD_OPS_PLACE_OOB;
396 412
397 /* Write to first/last page(s) if necessary */ 413 /* Write to first two pages and to byte 1 and 6 if necessary.
398 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) 414 * If we write to more than one location, the first error
399 wr_ofs += mtd->erasesize - mtd->writesize; 415 * encountered quits the procedure. We write two bytes per
416 * location, so we dont have to mess with 16 bit access.
417 */
400 do { 418 do {
401 res = nand_do_write_oob(mtd, wr_ofs, &ops); 419 chip->ops.len = chip->ops.ooblen = 2;
402 if (!ret) 420 chip->ops.datbuf = NULL;
403 ret = res; 421 chip->ops.oobbuf = buf;
422 chip->ops.ooboffs = chip->badblockpos & ~0x01;
404 423
424 ret = nand_do_write_oob(mtd, ofs, &chip->ops);
425
426 if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
427 chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
428 & ~0x01;
429 ret = nand_do_write_oob(mtd, ofs, &chip->ops);
430 }
405 i++; 431 i++;
406 wr_ofs += mtd->writesize; 432 ofs += mtd->writesize;
407 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2); 433 } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) &&
434 i < 2);
408 435
409 nand_release_device(mtd); 436 nand_release_device(mtd);
410 } 437 }
411
412 /* Update flash-based bad block table */
413 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
414 res = nand_update_bbt(mtd, ofs);
415 if (!ret)
416 ret = res;
417 }
418
419 if (!ret) 438 if (!ret)
420 mtd->ecc_stats.badblocks++; 439 mtd->ecc_stats.badblocks++;
421 440
@@ -424,16 +443,16 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
424 443
425/** 444/**
426 * nand_check_wp - [GENERIC] check if the chip is write protected 445 * nand_check_wp - [GENERIC] check if the chip is write protected
427 * @mtd: MTD device structure 446 * @mtd: MTD device structure
447 * Check, if the device is write protected
428 * 448 *
429 * Check, if the device is write protected. The function expects, that the 449 * The function expects, that the device is already selected
430 * device is already selected.
431 */ 450 */
432static int nand_check_wp(struct mtd_info *mtd) 451static int nand_check_wp(struct mtd_info *mtd)
433{ 452{
434 struct nand_chip *chip = mtd->priv; 453 struct nand_chip *chip = mtd->priv;
435 454
436 /* Broken xD cards report WP despite being writable */ 455 /* broken xD cards report WP despite being writable */
437 if (chip->options & NAND_BROKEN_XD) 456 if (chip->options & NAND_BROKEN_XD)
438 return 0; 457 return 0;
439 458
@@ -444,10 +463,10 @@ static int nand_check_wp(struct mtd_info *mtd)
444 463
445/** 464/**
446 * nand_block_checkbad - [GENERIC] Check if a block is marked bad 465 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
447 * @mtd: MTD device structure 466 * @mtd: MTD device structure
448 * @ofs: offset from device start 467 * @ofs: offset from device start
449 * @getchip: 0, if the chip is already selected 468 * @getchip: 0, if the chip is already selected
450 * @allowbbt: 1, if its allowed to access the bbt area 469 * @allowbbt: 1, if its allowed to access the bbt area
451 * 470 *
452 * Check, if the block is bad. Either by reading the bad block table or 471 * Check, if the block is bad. Either by reading the bad block table or
453 * calling of the scan function. 472 * calling of the scan function.
@@ -466,8 +485,8 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
466 485
467/** 486/**
468 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. 487 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
469 * @mtd: MTD device structure 488 * @mtd: MTD device structure
470 * @timeo: Timeout 489 * @timeo: Timeout
471 * 490 *
472 * Helper function for nand_wait_ready used when needing to wait in interrupt 491 * Helper function for nand_wait_ready used when needing to wait in interrupt
473 * context. 492 * context.
@@ -486,18 +505,21 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
486 } 505 }
487} 506}
488 507
489/* Wait for the ready pin, after a command. The timeout is caught later. */ 508/*
509 * Wait for the ready pin, after a command
510 * The timeout is catched later.
511 */
490void nand_wait_ready(struct mtd_info *mtd) 512void nand_wait_ready(struct mtd_info *mtd)
491{ 513{
492 struct nand_chip *chip = mtd->priv; 514 struct nand_chip *chip = mtd->priv;
493 unsigned long timeo = jiffies + msecs_to_jiffies(20); 515 unsigned long timeo = jiffies + 2;
494 516
495 /* 400ms timeout */ 517 /* 400ms timeout */
496 if (in_interrupt() || oops_in_progress) 518 if (in_interrupt() || oops_in_progress)
497 return panic_nand_wait_ready(mtd, 400); 519 return panic_nand_wait_ready(mtd, 400);
498 520
499 led_trigger_event(nand_led_trigger, LED_FULL); 521 led_trigger_event(nand_led_trigger, LED_FULL);
500 /* Wait until command is processed or timeout occurs */ 522 /* wait until command is processed or timeout occures */
501 do { 523 do {
502 if (chip->dev_ready(mtd)) 524 if (chip->dev_ready(mtd))
503 break; 525 break;
@@ -509,13 +531,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
509 531
510/** 532/**
511 * nand_command - [DEFAULT] Send command to NAND device 533 * nand_command - [DEFAULT] Send command to NAND device
512 * @mtd: MTD device structure 534 * @mtd: MTD device structure
513 * @command: the command to be sent 535 * @command: the command to be sent
514 * @column: the column address for this command, -1 if none 536 * @column: the column address for this command, -1 if none
515 * @page_addr: the page address for this command, -1 if none 537 * @page_addr: the page address for this command, -1 if none
516 * 538 *
517 * Send command to NAND device. This function is used for small page devices 539 * Send command to NAND device. This function is used for small page
518 * (256/512 Bytes per page). 540 * devices (256/512 Bytes per page)
519 */ 541 */
520static void nand_command(struct mtd_info *mtd, unsigned int command, 542static void nand_command(struct mtd_info *mtd, unsigned int command,
521 int column, int page_addr) 543 int column, int page_addr)
@@ -523,7 +545,9 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
523 register struct nand_chip *chip = mtd->priv; 545 register struct nand_chip *chip = mtd->priv;
524 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; 546 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
525 547
526 /* Write out the command to the device */ 548 /*
549 * Write out the command to the device.
550 */
527 if (command == NAND_CMD_SEQIN) { 551 if (command == NAND_CMD_SEQIN) {
528 int readcmd; 552 int readcmd;
529 553
@@ -543,7 +567,9 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
543 } 567 }
544 chip->cmd_ctrl(mtd, command, ctrl); 568 chip->cmd_ctrl(mtd, command, ctrl);
545 569
546 /* Address cycle, when necessary */ 570 /*
571 * Address cycle, when necessary
572 */
547 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; 573 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
548 /* Serially input address */ 574 /* Serially input address */
549 if (column != -1) { 575 if (column != -1) {
@@ -564,8 +590,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
564 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 590 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
565 591
566 /* 592 /*
567 * Program and erase have their own busy handlers status and sequential 593 * program and erase have their own busy handlers
568 * in needs no delay 594 * status and sequential in needs no delay
569 */ 595 */
570 switch (command) { 596 switch (command) {
571 597
@@ -599,10 +625,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
599 return; 625 return;
600 } 626 }
601 } 627 }
602 /* 628 /* Apply this short delay always to ensure that we do wait tWB in
603 * Apply this short delay always to ensure that we do wait tWB in 629 * any case on any machine. */
604 * any case on any machine.
605 */
606 ndelay(100); 630 ndelay(100);
607 631
608 nand_wait_ready(mtd); 632 nand_wait_ready(mtd);
@@ -610,14 +634,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
610 634
611/** 635/**
612 * nand_command_lp - [DEFAULT] Send command to NAND large page device 636 * nand_command_lp - [DEFAULT] Send command to NAND large page device
613 * @mtd: MTD device structure 637 * @mtd: MTD device structure
614 * @command: the command to be sent 638 * @command: the command to be sent
615 * @column: the column address for this command, -1 if none 639 * @column: the column address for this command, -1 if none
616 * @page_addr: the page address for this command, -1 if none 640 * @page_addr: the page address for this command, -1 if none
617 * 641 *
618 * Send command to NAND device. This is the version for the new large page 642 * Send command to NAND device. This is the version for the new large page
619 * devices. We don't have the separate regions as we have in the small page 643 * devices We dont have the separate regions as we have in the small page
620 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 644 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
621 */ 645 */
622static void nand_command_lp(struct mtd_info *mtd, unsigned int command, 646static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
623 int column, int page_addr) 647 int column, int page_addr)
@@ -659,8 +683,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
659 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 683 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
660 684
661 /* 685 /*
662 * Program and erase have their own busy handlers status, sequential 686 * program and erase have their own busy handlers
663 * in, and deplete1 need no delay. 687 * status, sequential in, and deplete1 need no delay
664 */ 688 */
665 switch (command) { 689 switch (command) {
666 690
@@ -674,12 +698,14 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
674 case NAND_CMD_DEPLETE1: 698 case NAND_CMD_DEPLETE1:
675 return; 699 return;
676 700
701 /*
702 * read error status commands require only a short delay
703 */
677 case NAND_CMD_STATUS_ERROR: 704 case NAND_CMD_STATUS_ERROR:
678 case NAND_CMD_STATUS_ERROR0: 705 case NAND_CMD_STATUS_ERROR0:
679 case NAND_CMD_STATUS_ERROR1: 706 case NAND_CMD_STATUS_ERROR1:
680 case NAND_CMD_STATUS_ERROR2: 707 case NAND_CMD_STATUS_ERROR2:
681 case NAND_CMD_STATUS_ERROR3: 708 case NAND_CMD_STATUS_ERROR3:
682 /* Read error status commands require only a short delay */
683 udelay(chip->chip_delay); 709 udelay(chip->chip_delay);
684 return; 710 return;
685 711
@@ -713,7 +739,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
713 default: 739 default:
714 /* 740 /*
715 * If we don't have access to the busy pin, we apply the given 741 * If we don't have access to the busy pin, we apply the given
716 * command delay. 742 * command delay
717 */ 743 */
718 if (!chip->dev_ready) { 744 if (!chip->dev_ready) {
719 udelay(chip->chip_delay); 745 udelay(chip->chip_delay);
@@ -721,10 +747,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
721 } 747 }
722 } 748 }
723 749
724 /* 750 /* Apply this short delay always to ensure that we do wait tWB in
725 * Apply this short delay always to ensure that we do wait tWB in 751 * any case on any machine. */
726 * any case on any machine.
727 */
728 ndelay(100); 752 ndelay(100);
729 753
730 nand_wait_ready(mtd); 754 nand_wait_ready(mtd);
@@ -732,31 +756,31 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
732 756
733/** 757/**
734 * panic_nand_get_device - [GENERIC] Get chip for selected access 758 * panic_nand_get_device - [GENERIC] Get chip for selected access
735 * @chip: the nand chip descriptor 759 * @chip: the nand chip descriptor
736 * @mtd: MTD device structure 760 * @mtd: MTD device structure
737 * @new_state: the state which is requested 761 * @new_state: the state which is requested
738 * 762 *
739 * Used when in panic, no locks are taken. 763 * Used when in panic, no locks are taken.
740 */ 764 */
741static void panic_nand_get_device(struct nand_chip *chip, 765static void panic_nand_get_device(struct nand_chip *chip,
742 struct mtd_info *mtd, int new_state) 766 struct mtd_info *mtd, int new_state)
743{ 767{
744 /* Hardware controller shared among independent devices */ 768 /* Hardware controller shared among independend devices */
745 chip->controller->active = chip; 769 chip->controller->active = chip;
746 chip->state = new_state; 770 chip->state = new_state;
747} 771}
748 772
749/** 773/**
750 * nand_get_device - [GENERIC] Get chip for selected access 774 * nand_get_device - [GENERIC] Get chip for selected access
751 * @mtd: MTD device structure 775 * @chip: the nand chip descriptor
752 * @new_state: the state which is requested 776 * @mtd: MTD device structure
777 * @new_state: the state which is requested
753 * 778 *
754 * Get the device and lock it for exclusive access 779 * Get the device and lock it for exclusive access
755 */ 780 */
756static int 781static int
757nand_get_device(struct mtd_info *mtd, int new_state) 782nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
758{ 783{
759 struct nand_chip *chip = mtd->priv;
760 spinlock_t *lock = &chip->controller->lock; 784 spinlock_t *lock = &chip->controller->lock;
761 wait_queue_head_t *wq = &chip->controller->wq; 785 wait_queue_head_t *wq = &chip->controller->wq;
762 DECLARE_WAITQUEUE(wait, current); 786 DECLARE_WAITQUEUE(wait, current);
@@ -788,10 +812,10 @@ retry:
788} 812}
789 813
790/** 814/**
791 * panic_nand_wait - [GENERIC] wait until the command is done 815 * panic_nand_wait - [GENERIC] wait until the command is done
792 * @mtd: MTD device structure 816 * @mtd: MTD device structure
793 * @chip: NAND chip structure 817 * @chip: NAND chip structure
794 * @timeo: timeout 818 * @timeo: Timeout
795 * 819 *
796 * Wait for command done. This is a helper function for nand_wait used when 820 * Wait for command done. This is a helper function for nand_wait used when
797 * we are in interrupt context. May happen when in panic and trying to write 821 * we are in interrupt context. May happen when in panic and trying to write
@@ -814,13 +838,13 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
814} 838}
815 839
816/** 840/**
817 * nand_wait - [DEFAULT] wait until the command is done 841 * nand_wait - [DEFAULT] wait until the command is done
818 * @mtd: MTD device structure 842 * @mtd: MTD device structure
819 * @chip: NAND chip structure 843 * @chip: NAND chip structure
820 * 844 *
821 * Wait for command done. This applies to erase and program only. Erase can 845 * Wait for command done. This applies to erase and program only
822 * take up to 400ms and program up to 20ms according to general NAND and 846 * Erase can take up to 400ms and program up to 20ms according to
823 * SmartMedia specs. 847 * general NAND and SmartMedia specs
824 */ 848 */
825static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) 849static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
826{ 850{
@@ -835,10 +859,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
835 859
836 led_trigger_event(nand_led_trigger, LED_FULL); 860 led_trigger_event(nand_led_trigger, LED_FULL);
837 861
838 /* 862 /* Apply this short delay always to ensure that we do wait tWB in
839 * Apply this short delay always to ensure that we do wait tWB in any 863 * any case on any machine. */
840 * case on any machine.
841 */
842 ndelay(100); 864 ndelay(100);
843 865
844 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) 866 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
@@ -863,22 +885,21 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
863 led_trigger_event(nand_led_trigger, LED_OFF); 885 led_trigger_event(nand_led_trigger, LED_OFF);
864 886
865 status = (int)chip->read_byte(mtd); 887 status = (int)chip->read_byte(mtd);
866 /* This can happen if in case of timeout or buggy dev_ready */
867 WARN_ON(!(status & NAND_STATUS_READY));
868 return status; 888 return status;
869} 889}
870 890
871/** 891/**
872 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks 892 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
893 *
873 * @mtd: mtd info 894 * @mtd: mtd info
874 * @ofs: offset to start unlock from 895 * @ofs: offset to start unlock from
875 * @len: length to unlock 896 * @len: length to unlock
876 * @invert: when = 0, unlock the range of blocks within the lower and 897 * @invert: when = 0, unlock the range of blocks within the lower and
877 * upper boundary address 898 * upper boundary address
878 * when = 1, unlock the range of blocks outside the boundaries 899 * when = 1, unlock the range of blocks outside the boundaries
879 * of the lower and upper boundary address 900 * of the lower and upper boundary address
880 * 901 *
881 * Returs unlock status. 902 * return - unlock status
882 */ 903 */
883static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, 904static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
884 uint64_t len, int invert) 905 uint64_t len, int invert)
@@ -898,9 +919,10 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
898 919
899 /* Call wait ready function */ 920 /* Call wait ready function */
900 status = chip->waitfunc(mtd, chip); 921 status = chip->waitfunc(mtd, chip);
922 udelay(1000);
901 /* See if device thinks it succeeded */ 923 /* See if device thinks it succeeded */
902 if (status & NAND_STATUS_FAIL) { 924 if (status & 0x01) {
903 pr_debug("%s: error status = 0x%08x\n", 925 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
904 __func__, status); 926 __func__, status);
905 ret = -EIO; 927 ret = -EIO;
906 } 928 }
@@ -910,11 +932,12 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
910 932
911/** 933/**
912 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks 934 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
935 *
913 * @mtd: mtd info 936 * @mtd: mtd info
914 * @ofs: offset to start unlock from 937 * @ofs: offset to start unlock from
915 * @len: length to unlock 938 * @len: length to unlock
916 * 939 *
917 * Returns unlock status. 940 * return - unlock status
918 */ 941 */
919int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 942int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
920{ 943{
@@ -922,7 +945,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
922 int chipnr; 945 int chipnr;
923 struct nand_chip *chip = mtd->priv; 946 struct nand_chip *chip = mtd->priv;
924 947
925 pr_debug("%s: start = 0x%012llx, len = %llu\n", 948 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
926 __func__, (unsigned long long)ofs, len); 949 __func__, (unsigned long long)ofs, len);
927 950
928 if (check_offs_len(mtd, ofs, len)) 951 if (check_offs_len(mtd, ofs, len))
@@ -932,7 +955,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
932 if (ofs + len == mtd->size) 955 if (ofs + len == mtd->size)
933 len -= mtd->erasesize; 956 len -= mtd->erasesize;
934 957
935 nand_get_device(mtd, FL_UNLOCKING); 958 nand_get_device(chip, mtd, FL_UNLOCKING);
936 959
937 /* Shift to get chip number */ 960 /* Shift to get chip number */
938 chipnr = ofs >> chip->chip_shift; 961 chipnr = ofs >> chip->chip_shift;
@@ -941,7 +964,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
941 964
942 /* Check, if it is write protected */ 965 /* Check, if it is write protected */
943 if (nand_check_wp(mtd)) { 966 if (nand_check_wp(mtd)) {
944 pr_debug("%s: device is write protected!\n", 967 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
945 __func__); 968 __func__);
946 ret = -EIO; 969 ret = -EIO;
947 goto out; 970 goto out;
@@ -950,7 +973,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
950 ret = __nand_unlock(mtd, ofs, len, 0); 973 ret = __nand_unlock(mtd, ofs, len, 0);
951 974
952out: 975out:
953 chip->select_chip(mtd, -1);
954 nand_release_device(mtd); 976 nand_release_device(mtd);
955 977
956 return ret; 978 return ret;
@@ -959,16 +981,18 @@ EXPORT_SYMBOL(nand_unlock);
959 981
960/** 982/**
961 * nand_lock - [REPLACEABLE] locks all blocks present in the device 983 * nand_lock - [REPLACEABLE] locks all blocks present in the device
984 *
962 * @mtd: mtd info 985 * @mtd: mtd info
963 * @ofs: offset to start unlock from 986 * @ofs: offset to start unlock from
964 * @len: length to unlock 987 * @len: length to unlock
965 * 988 *
966 * This feature is not supported in many NAND parts. 'Micron' NAND parts do 989 * return - lock status
967 * have this feature, but it allows only to lock all blocks, not for specified 990 *
968 * range for block. Implementing 'lock' feature by making use of 'unlock', for 991 * This feature is not supported in many NAND parts. 'Micron' NAND parts
969 * now. 992 * do have this feature, but it allows only to lock all blocks, not for
993 * specified range for block.
970 * 994 *
971 * Returns lock status. 995 * Implementing 'lock' feature by making use of 'unlock', for now.
972 */ 996 */
973int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 997int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
974{ 998{
@@ -976,13 +1000,13 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
976 int chipnr, status, page; 1000 int chipnr, status, page;
977 struct nand_chip *chip = mtd->priv; 1001 struct nand_chip *chip = mtd->priv;
978 1002
979 pr_debug("%s: start = 0x%012llx, len = %llu\n", 1003 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
980 __func__, (unsigned long long)ofs, len); 1004 __func__, (unsigned long long)ofs, len);
981 1005
982 if (check_offs_len(mtd, ofs, len)) 1006 if (check_offs_len(mtd, ofs, len))
983 ret = -EINVAL; 1007 ret = -EINVAL;
984 1008
985 nand_get_device(mtd, FL_LOCKING); 1009 nand_get_device(chip, mtd, FL_LOCKING);
986 1010
987 /* Shift to get chip number */ 1011 /* Shift to get chip number */
988 chipnr = ofs >> chip->chip_shift; 1012 chipnr = ofs >> chip->chip_shift;
@@ -991,7 +1015,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
991 1015
992 /* Check, if it is write protected */ 1016 /* Check, if it is write protected */
993 if (nand_check_wp(mtd)) { 1017 if (nand_check_wp(mtd)) {
994 pr_debug("%s: device is write protected!\n", 1018 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
995 __func__); 1019 __func__);
996 status = MTD_ERASE_FAILED; 1020 status = MTD_ERASE_FAILED;
997 ret = -EIO; 1021 ret = -EIO;
@@ -1004,9 +1028,10 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1004 1028
1005 /* Call wait ready function */ 1029 /* Call wait ready function */
1006 status = chip->waitfunc(mtd, chip); 1030 status = chip->waitfunc(mtd, chip);
1031 udelay(1000);
1007 /* See if device thinks it succeeded */ 1032 /* See if device thinks it succeeded */
1008 if (status & NAND_STATUS_FAIL) { 1033 if (status & 0x01) {
1009 pr_debug("%s: error status = 0x%08x\n", 1034 DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
1010 __func__, status); 1035 __func__, status);
1011 ret = -EIO; 1036 ret = -EIO;
1012 goto out; 1037 goto out;
@@ -1015,7 +1040,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1015 ret = __nand_unlock(mtd, ofs, len, 0x1); 1040 ret = __nand_unlock(mtd, ofs, len, 0x1);
1016 1041
1017out: 1042out:
1018 chip->select_chip(mtd, -1);
1019 nand_release_device(mtd); 1043 nand_release_device(mtd);
1020 1044
1021 return ret; 1045 return ret;
@@ -1023,37 +1047,34 @@ out:
1023EXPORT_SYMBOL(nand_lock); 1047EXPORT_SYMBOL(nand_lock);
1024 1048
1025/** 1049/**
1026 * nand_read_page_raw - [INTERN] read raw page data without ecc 1050 * nand_read_page_raw - [Intern] read raw page data without ecc
1027 * @mtd: mtd info structure 1051 * @mtd: mtd info structure
1028 * @chip: nand chip info structure 1052 * @chip: nand chip info structure
1029 * @buf: buffer to store read data 1053 * @buf: buffer to store read data
1030 * @oob_required: caller requires OOB data read to chip->oob_poi 1054 * @page: page number to read
1031 * @page: page number to read
1032 * 1055 *
1033 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1056 * Not for syndrome calculating ecc controllers, which use a special oob layout
1034 */ 1057 */
1035static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1058static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1036 uint8_t *buf, int oob_required, int page) 1059 uint8_t *buf, int page)
1037{ 1060{
1038 chip->read_buf(mtd, buf, mtd->writesize); 1061 chip->read_buf(mtd, buf, mtd->writesize);
1039 if (oob_required) 1062 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1040 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1041 return 0; 1063 return 0;
1042} 1064}
1043 1065
1044/** 1066/**
1045 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc 1067 * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc
1046 * @mtd: mtd info structure 1068 * @mtd: mtd info structure
1047 * @chip: nand chip info structure 1069 * @chip: nand chip info structure
1048 * @buf: buffer to store read data 1070 * @buf: buffer to store read data
1049 * @oob_required: caller requires OOB data read to chip->oob_poi 1071 * @page: page number to read
1050 * @page: page number to read
1051 * 1072 *
1052 * We need a special oob layout and handling even when OOB isn't used. 1073 * We need a special oob layout and handling even when OOB isn't used.
1053 */ 1074 */
1054static int nand_read_page_raw_syndrome(struct mtd_info *mtd, 1075static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1055 struct nand_chip *chip, uint8_t *buf, 1076 struct nand_chip *chip,
1056 int oob_required, int page) 1077 uint8_t *buf, int page)
1057{ 1078{
1058 int eccsize = chip->ecc.size; 1079 int eccsize = chip->ecc.size;
1059 int eccbytes = chip->ecc.bytes; 1080 int eccbytes = chip->ecc.bytes;
@@ -1086,15 +1107,14 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1086} 1107}
1087 1108
1088/** 1109/**
1089 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function 1110 * nand_read_page_swecc - [REPLACABLE] software ecc based page read function
1090 * @mtd: mtd info structure 1111 * @mtd: mtd info structure
1091 * @chip: nand chip info structure 1112 * @chip: nand chip info structure
1092 * @buf: buffer to store read data 1113 * @buf: buffer to store read data
1093 * @oob_required: caller requires OOB data read to chip->oob_poi 1114 * @page: page number to read
1094 * @page: page number to read
1095 */ 1115 */
1096static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1116static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1097 uint8_t *buf, int oob_required, int page) 1117 uint8_t *buf, int page)
1098{ 1118{
1099 int i, eccsize = chip->ecc.size; 1119 int i, eccsize = chip->ecc.size;
1100 int eccbytes = chip->ecc.bytes; 1120 int eccbytes = chip->ecc.bytes;
@@ -1103,9 +1123,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1103 uint8_t *ecc_calc = chip->buffers->ecccalc; 1123 uint8_t *ecc_calc = chip->buffers->ecccalc;
1104 uint8_t *ecc_code = chip->buffers->ecccode; 1124 uint8_t *ecc_code = chip->buffers->ecccode;
1105 uint32_t *eccpos = chip->ecc.layout->eccpos; 1125 uint32_t *eccpos = chip->ecc.layout->eccpos;
1106 unsigned int max_bitflips = 0;
1107 1126
1108 chip->ecc.read_page_raw(mtd, chip, buf, 1, page); 1127 chip->ecc.read_page_raw(mtd, chip, buf, page);
1109 1128
1110 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1129 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1111 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1130 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1120,23 +1139,21 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1120 int stat; 1139 int stat;
1121 1140
1122 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 1141 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1123 if (stat < 0) { 1142 if (stat < 0)
1124 mtd->ecc_stats.failed++; 1143 mtd->ecc_stats.failed++;
1125 } else { 1144 else
1126 mtd->ecc_stats.corrected += stat; 1145 mtd->ecc_stats.corrected += stat;
1127 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1128 }
1129 } 1146 }
1130 return max_bitflips; 1147 return 0;
1131} 1148}
1132 1149
1133/** 1150/**
1134 * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function 1151 * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
1135 * @mtd: mtd info structure 1152 * @mtd: mtd info structure
1136 * @chip: nand chip info structure 1153 * @chip: nand chip info structure
1137 * @data_offs: offset of requested data within the page 1154 * @data_offs: offset of requested data within the page
1138 * @readlen: data length 1155 * @readlen: data length
1139 * @bufpoi: buffer to store read data 1156 * @bufpoi: buffer to store read data
1140 */ 1157 */
1141static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, 1158static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1142 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) 1159 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
@@ -1148,14 +1165,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1148 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 1165 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1149 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1166 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1150 int index = 0; 1167 int index = 0;
1151 unsigned int max_bitflips = 0;
1152 1168
1153 /* Column address within the page aligned to ECC size (256bytes) */ 1169 /* Column address wihin the page aligned to ECC size (256bytes). */
1154 start_step = data_offs / chip->ecc.size; 1170 start_step = data_offs / chip->ecc.size;
1155 end_step = (data_offs + readlen - 1) / chip->ecc.size; 1171 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1156 num_steps = end_step - start_step + 1; 1172 num_steps = end_step - start_step + 1;
1157 1173
1158 /* Data size aligned to ECC ecc.size */ 1174 /* Data size aligned to ECC ecc.size*/
1159 datafrag_len = num_steps * chip->ecc.size; 1175 datafrag_len = num_steps * chip->ecc.size;
1160 eccfrag_len = num_steps * chip->ecc.bytes; 1176 eccfrag_len = num_steps * chip->ecc.bytes;
1161 1177
@@ -1167,14 +1183,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1167 p = bufpoi + data_col_addr; 1183 p = bufpoi + data_col_addr;
1168 chip->read_buf(mtd, p, datafrag_len); 1184 chip->read_buf(mtd, p, datafrag_len);
1169 1185
1170 /* Calculate ECC */ 1186 /* Calculate ECC */
1171 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 1187 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1172 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); 1188 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1173 1189
1174 /* 1190 /* The performance is faster if to position offsets
1175 * The performance is faster if we position offsets according to 1191 according to ecc.pos. Let make sure here that
1176 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 1192 there are no gaps in ecc positions */
1177 */
1178 for (i = 0; i < eccfrag_len - 1; i++) { 1193 for (i = 0; i < eccfrag_len - 1; i++) {
1179 if (eccpos[i + start_step * chip->ecc.bytes] + 1 != 1194 if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
1180 eccpos[i + start_step * chip->ecc.bytes + 1]) { 1195 eccpos[i + start_step * chip->ecc.bytes + 1]) {
@@ -1186,10 +1201,8 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1186 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 1201 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1187 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1202 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1188 } else { 1203 } else {
1189 /* 1204 /* send the command to read the particular ecc bytes */
1190 * Send the command to read the particular ECC bytes take care 1205 /* take care about buswidth alignment in read_buf */
1191 * about buswidth alignment in read_buf.
1192 */
1193 index = start_step * chip->ecc.bytes; 1206 index = start_step * chip->ecc.bytes;
1194 1207
1195 aligned_pos = eccpos[index] & ~(busw - 1); 1208 aligned_pos = eccpos[index] & ~(busw - 1);
@@ -1213,28 +1226,25 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1213 1226
1214 stat = chip->ecc.correct(mtd, p, 1227 stat = chip->ecc.correct(mtd, p,
1215 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); 1228 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1216 if (stat < 0) { 1229 if (stat < 0)
1217 mtd->ecc_stats.failed++; 1230 mtd->ecc_stats.failed++;
1218 } else { 1231 else
1219 mtd->ecc_stats.corrected += stat; 1232 mtd->ecc_stats.corrected += stat;
1220 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1221 }
1222 } 1233 }
1223 return max_bitflips; 1234 return 0;
1224} 1235}
1225 1236
1226/** 1237/**
1227 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 1238 * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
1228 * @mtd: mtd info structure 1239 * @mtd: mtd info structure
1229 * @chip: nand chip info structure 1240 * @chip: nand chip info structure
1230 * @buf: buffer to store read data 1241 * @buf: buffer to store read data
1231 * @oob_required: caller requires OOB data read to chip->oob_poi 1242 * @page: page number to read
1232 * @page: page number to read
1233 * 1243 *
1234 * Not for syndrome calculating ECC controllers which need a special oob layout. 1244 * Not for syndrome calculating ecc controllers which need a special oob layout
1235 */ 1245 */
1236static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1246static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1237 uint8_t *buf, int oob_required, int page) 1247 uint8_t *buf, int page)
1238{ 1248{
1239 int i, eccsize = chip->ecc.size; 1249 int i, eccsize = chip->ecc.size;
1240 int eccbytes = chip->ecc.bytes; 1250 int eccbytes = chip->ecc.bytes;
@@ -1243,7 +1253,6 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1243 uint8_t *ecc_calc = chip->buffers->ecccalc; 1253 uint8_t *ecc_calc = chip->buffers->ecccalc;
1244 uint8_t *ecc_code = chip->buffers->ecccode; 1254 uint8_t *ecc_code = chip->buffers->ecccode;
1245 uint32_t *eccpos = chip->ecc.layout->eccpos; 1255 uint32_t *eccpos = chip->ecc.layout->eccpos;
1246 unsigned int max_bitflips = 0;
1247 1256
1248 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1257 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1249 chip->ecc.hwctl(mtd, NAND_ECC_READ); 1258 chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1262,32 +1271,30 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1262 int stat; 1271 int stat;
1263 1272
1264 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 1273 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1265 if (stat < 0) { 1274 if (stat < 0)
1266 mtd->ecc_stats.failed++; 1275 mtd->ecc_stats.failed++;
1267 } else { 1276 else
1268 mtd->ecc_stats.corrected += stat; 1277 mtd->ecc_stats.corrected += stat;
1269 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1270 }
1271 } 1278 }
1272 return max_bitflips; 1279 return 0;
1273} 1280}
1274 1281
1275/** 1282/**
1276 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first 1283 * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first
1277 * @mtd: mtd info structure 1284 * @mtd: mtd info structure
1278 * @chip: nand chip info structure 1285 * @chip: nand chip info structure
1279 * @buf: buffer to store read data 1286 * @buf: buffer to store read data
1280 * @oob_required: caller requires OOB data read to chip->oob_poi 1287 * @page: page number to read
1281 * @page: page number to read
1282 * 1288 *
1283 * Hardware ECC for large page chips, require OOB to be read first. For this 1289 * Hardware ECC for large page chips, require OOB to be read first.
1284 * ECC mode, the write_page method is re-used from ECC_HW. These methods 1290 * For this ECC mode, the write_page method is re-used from ECC_HW.
1285 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with 1291 * These methods read/write ECC from the OOB area, unlike the
1286 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from 1292 * ECC_HW_SYNDROME support with multiple ECC steps, follows the
1287 * the data area, by overwriting the NAND manufacturer bad block markings. 1293 * "infix ECC" scheme and reads/writes ECC from the data area, by
1294 * overwriting the NAND manufacturer bad block markings.
1288 */ 1295 */
1289static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1296static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1290 struct nand_chip *chip, uint8_t *buf, int oob_required, int page) 1297 struct nand_chip *chip, uint8_t *buf, int page)
1291{ 1298{
1292 int i, eccsize = chip->ecc.size; 1299 int i, eccsize = chip->ecc.size;
1293 int eccbytes = chip->ecc.bytes; 1300 int eccbytes = chip->ecc.bytes;
@@ -1296,7 +1303,6 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1296 uint8_t *ecc_code = chip->buffers->ecccode; 1303 uint8_t *ecc_code = chip->buffers->ecccode;
1297 uint32_t *eccpos = chip->ecc.layout->eccpos; 1304 uint32_t *eccpos = chip->ecc.layout->eccpos;
1298 uint8_t *ecc_calc = chip->buffers->ecccalc; 1305 uint8_t *ecc_calc = chip->buffers->ecccalc;
1299 unsigned int max_bitflips = 0;
1300 1306
1301 /* Read the OOB area first */ 1307 /* Read the OOB area first */
1302 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 1308 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1314,36 +1320,32 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1314 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1320 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1315 1321
1316 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); 1322 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1317 if (stat < 0) { 1323 if (stat < 0)
1318 mtd->ecc_stats.failed++; 1324 mtd->ecc_stats.failed++;
1319 } else { 1325 else
1320 mtd->ecc_stats.corrected += stat; 1326 mtd->ecc_stats.corrected += stat;
1321 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1322 }
1323 } 1327 }
1324 return max_bitflips; 1328 return 0;
1325} 1329}
1326 1330
1327/** 1331/**
1328 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read 1332 * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
1329 * @mtd: mtd info structure 1333 * @mtd: mtd info structure
1330 * @chip: nand chip info structure 1334 * @chip: nand chip info structure
1331 * @buf: buffer to store read data 1335 * @buf: buffer to store read data
1332 * @oob_required: caller requires OOB data read to chip->oob_poi 1336 * @page: page number to read
1333 * @page: page number to read
1334 * 1337 *
1335 * The hw generator calculates the error syndrome automatically. Therefore we 1338 * The hw generator calculates the error syndrome automatically. Therefor
1336 * need a special oob layout and handling. 1339 * we need a special oob layout and handling.
1337 */ 1340 */
1338static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1341static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1339 uint8_t *buf, int oob_required, int page) 1342 uint8_t *buf, int page)
1340{ 1343{
1341 int i, eccsize = chip->ecc.size; 1344 int i, eccsize = chip->ecc.size;
1342 int eccbytes = chip->ecc.bytes; 1345 int eccbytes = chip->ecc.bytes;
1343 int eccsteps = chip->ecc.steps; 1346 int eccsteps = chip->ecc.steps;
1344 uint8_t *p = buf; 1347 uint8_t *p = buf;
1345 uint8_t *oob = chip->oob_poi; 1348 uint8_t *oob = chip->oob_poi;
1346 unsigned int max_bitflips = 0;
1347 1349
1348 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1350 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1349 int stat; 1351 int stat;
@@ -1360,12 +1362,10 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1360 chip->read_buf(mtd, oob, eccbytes); 1362 chip->read_buf(mtd, oob, eccbytes);
1361 stat = chip->ecc.correct(mtd, p, oob, NULL); 1363 stat = chip->ecc.correct(mtd, p, oob, NULL);
1362 1364
1363 if (stat < 0) { 1365 if (stat < 0)
1364 mtd->ecc_stats.failed++; 1366 mtd->ecc_stats.failed++;
1365 } else { 1367 else
1366 mtd->ecc_stats.corrected += stat; 1368 mtd->ecc_stats.corrected += stat;
1367 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1368 }
1369 1369
1370 oob += eccbytes; 1370 oob += eccbytes;
1371 1371
@@ -1380,33 +1380,33 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1380 if (i) 1380 if (i)
1381 chip->read_buf(mtd, oob, i); 1381 chip->read_buf(mtd, oob, i);
1382 1382
1383 return max_bitflips; 1383 return 0;
1384} 1384}
1385 1385
1386/** 1386/**
1387 * nand_transfer_oob - [INTERN] Transfer oob to client buffer 1387 * nand_transfer_oob - [Internal] Transfer oob to client buffer
1388 * @chip: nand chip structure 1388 * @chip: nand chip structure
1389 * @oob: oob destination address 1389 * @oob: oob destination address
1390 * @ops: oob ops structure 1390 * @ops: oob ops structure
1391 * @len: size of oob to transfer 1391 * @len: size of oob to transfer
1392 */ 1392 */
1393static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 1393static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1394 struct mtd_oob_ops *ops, size_t len) 1394 struct mtd_oob_ops *ops, size_t len)
1395{ 1395{
1396 switch (ops->mode) { 1396 switch (ops->mode) {
1397 1397
1398 case MTD_OPS_PLACE_OOB: 1398 case MTD_OOB_PLACE:
1399 case MTD_OPS_RAW: 1399 case MTD_OOB_RAW:
1400 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 1400 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1401 return oob + len; 1401 return oob + len;
1402 1402
1403 case MTD_OPS_AUTO_OOB: { 1403 case MTD_OOB_AUTO: {
1404 struct nand_oobfree *free = chip->ecc.layout->oobfree; 1404 struct nand_oobfree *free = chip->ecc.layout->oobfree;
1405 uint32_t boffs = 0, roffs = ops->ooboffs; 1405 uint32_t boffs = 0, roffs = ops->ooboffs;
1406 size_t bytes = 0; 1406 size_t bytes = 0;
1407 1407
1408 for (; free->length && len; free++, len -= bytes) { 1408 for (; free->length && len; free++, len -= bytes) {
1409 /* Read request not from offset 0? */ 1409 /* Read request not from offset 0 ? */
1410 if (unlikely(roffs)) { 1410 if (unlikely(roffs)) {
1411 if (roffs >= free->length) { 1411 if (roffs >= free->length) {
1412 roffs -= free->length; 1412 roffs -= free->length;
@@ -1432,27 +1432,29 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1432} 1432}
1433 1433
1434/** 1434/**
1435 * nand_do_read_ops - [INTERN] Read data with ECC 1435 * nand_do_read_ops - [Internal] Read data with ECC
1436 * @mtd: MTD device structure 1436 *
1437 * @from: offset to read from 1437 * @mtd: MTD device structure
1438 * @ops: oob ops structure 1438 * @from: offset to read from
1439 * @ops: oob ops structure
1439 * 1440 *
1440 * Internal function. Called with chip held. 1441 * Internal function. Called with chip held.
1441 */ 1442 */
1442static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, 1443static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1443 struct mtd_oob_ops *ops) 1444 struct mtd_oob_ops *ops)
1444{ 1445{
1445 int chipnr, page, realpage, col, bytes, aligned, oob_required; 1446 int chipnr, page, realpage, col, bytes, aligned;
1446 struct nand_chip *chip = mtd->priv; 1447 struct nand_chip *chip = mtd->priv;
1447 struct mtd_ecc_stats stats; 1448 struct mtd_ecc_stats stats;
1449 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1450 int sndcmd = 1;
1448 int ret = 0; 1451 int ret = 0;
1449 uint32_t readlen = ops->len; 1452 uint32_t readlen = ops->len;
1450 uint32_t oobreadlen = ops->ooblen; 1453 uint32_t oobreadlen = ops->ooblen;
1451 uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ? 1454 uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
1452 mtd->oobavail : mtd->oobsize; 1455 mtd->oobavail : mtd->oobsize;
1453 1456
1454 uint8_t *bufpoi, *oob, *buf; 1457 uint8_t *bufpoi, *oob, *buf;
1455 unsigned int max_bitflips = 0;
1456 1458
1457 stats = mtd->ecc_stats; 1459 stats = mtd->ecc_stats;
1458 1460
@@ -1466,59 +1468,45 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1466 1468
1467 buf = ops->datbuf; 1469 buf = ops->datbuf;
1468 oob = ops->oobbuf; 1470 oob = ops->oobbuf;
1469 oob_required = oob ? 1 : 0;
1470 1471
1471 while (1) { 1472 while (1) {
1472 bytes = min(mtd->writesize - col, readlen); 1473 bytes = min(mtd->writesize - col, readlen);
1473 aligned = (bytes == mtd->writesize); 1474 aligned = (bytes == mtd->writesize);
1474 1475
1475 /* Is the current page in the buffer? */ 1476 /* Is the current page in the buffer ? */
1476 if (realpage != chip->pagebuf || oob) { 1477 if (realpage != chip->pagebuf || oob) {
1477 bufpoi = aligned ? buf : chip->buffers->databuf; 1478 bufpoi = aligned ? buf : chip->buffers->databuf;
1478 1479
1479 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); 1480 if (likely(sndcmd)) {
1481 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1482 sndcmd = 0;
1483 }
1480 1484
1481 /* 1485 /* Now read the page into the buffer */
1482 * Now read the page into the buffer. Absent an error, 1486 if (unlikely(ops->mode == MTD_OOB_RAW))
1483 * the read methods return max bitflips per ecc step. 1487 ret = chip->ecc.read_page_raw(mtd, chip,
1484 */ 1488 bufpoi, page);
1485 if (unlikely(ops->mode == MTD_OPS_RAW)) 1489 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
1486 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1487 oob_required,
1488 page);
1489 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1490 !oob)
1491 ret = chip->ecc.read_subpage(mtd, chip, 1490 ret = chip->ecc.read_subpage(mtd, chip,
1492 col, bytes, bufpoi); 1491 col, bytes, bufpoi);
1493 else 1492 else
1494 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1493 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1495 oob_required, page); 1494 page);
1496 if (ret < 0) { 1495 if (ret < 0)
1497 if (!aligned)
1498 /* Invalidate page cache */
1499 chip->pagebuf = -1;
1500 break; 1496 break;
1501 }
1502
1503 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1504 1497
1505 /* Transfer not aligned data */ 1498 /* Transfer not aligned data */
1506 if (!aligned) { 1499 if (!aligned) {
1507 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && 1500 if (!NAND_SUBPAGE_READ(chip) && !oob &&
1508 !(mtd->ecc_stats.failed - stats.failed) && 1501 !(mtd->ecc_stats.failed - stats.failed))
1509 (ops->mode != MTD_OPS_RAW)) {
1510 chip->pagebuf = realpage; 1502 chip->pagebuf = realpage;
1511 chip->pagebuf_bitflips = ret;
1512 } else {
1513 /* Invalidate page cache */
1514 chip->pagebuf = -1;
1515 }
1516 memcpy(buf, chip->buffers->databuf + col, bytes); 1503 memcpy(buf, chip->buffers->databuf + col, bytes);
1517 } 1504 }
1518 1505
1519 buf += bytes; 1506 buf += bytes;
1520 1507
1521 if (unlikely(oob)) { 1508 if (unlikely(oob)) {
1509
1522 int toread = min(oobreadlen, max_oobsize); 1510 int toread = min(oobreadlen, max_oobsize);
1523 1511
1524 if (toread) { 1512 if (toread) {
@@ -1527,11 +1515,23 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1527 oobreadlen -= toread; 1515 oobreadlen -= toread;
1528 } 1516 }
1529 } 1517 }
1518
1519 if (!(chip->options & NAND_NO_READRDY)) {
1520 /*
1521 * Apply delay or wait for ready/busy pin. Do
1522 * this before the AUTOINCR check, so no
1523 * problems arise if a chip which does auto
1524 * increment is marked as NOAUTOINCR by the
1525 * board driver.
1526 */
1527 if (!chip->dev_ready)
1528 udelay(chip->chip_delay);
1529 else
1530 nand_wait_ready(mtd);
1531 }
1530 } else { 1532 } else {
1531 memcpy(buf, chip->buffers->databuf + col, bytes); 1533 memcpy(buf, chip->buffers->databuf + col, bytes);
1532 buf += bytes; 1534 buf += bytes;
1533 max_bitflips = max_t(unsigned int, max_bitflips,
1534 chip->pagebuf_bitflips);
1535 } 1535 }
1536 1536
1537 readlen -= bytes; 1537 readlen -= bytes;
@@ -1539,7 +1539,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1539 if (!readlen) 1539 if (!readlen)
1540 break; 1540 break;
1541 1541
1542 /* For subsequent reads align to page boundary */ 1542 /* For subsequent reads align to page boundary. */
1543 col = 0; 1543 col = 0;
1544 /* Increment page address */ 1544 /* Increment page address */
1545 realpage++; 1545 realpage++;
@@ -1551,72 +1551,92 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1551 chip->select_chip(mtd, -1); 1551 chip->select_chip(mtd, -1);
1552 chip->select_chip(mtd, chipnr); 1552 chip->select_chip(mtd, chipnr);
1553 } 1553 }
1554
1555 /* Check, if the chip supports auto page increment
1556 * or if we have hit a block boundary.
1557 */
1558 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1559 sndcmd = 1;
1554 } 1560 }
1555 chip->select_chip(mtd, -1);
1556 1561
1557 ops->retlen = ops->len - (size_t) readlen; 1562 ops->retlen = ops->len - (size_t) readlen;
1558 if (oob) 1563 if (oob)
1559 ops->oobretlen = ops->ooblen - oobreadlen; 1564 ops->oobretlen = ops->ooblen - oobreadlen;
1560 1565
1561 if (ret < 0) 1566 if (ret)
1562 return ret; 1567 return ret;
1563 1568
1564 if (mtd->ecc_stats.failed - stats.failed) 1569 if (mtd->ecc_stats.failed - stats.failed)
1565 return -EBADMSG; 1570 return -EBADMSG;
1566 1571
1567 return max_bitflips; 1572 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1568} 1573}
1569 1574
1570/** 1575/**
1571 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc 1576 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
1572 * @mtd: MTD device structure 1577 * @mtd: MTD device structure
1573 * @from: offset to read from 1578 * @from: offset to read from
1574 * @len: number of bytes to read 1579 * @len: number of bytes to read
1575 * @retlen: pointer to variable to store the number of read bytes 1580 * @retlen: pointer to variable to store the number of read bytes
1576 * @buf: the databuffer to put data 1581 * @buf: the databuffer to put data
1577 * 1582 *
1578 * Get hold of the chip and call nand_do_read. 1583 * Get hold of the chip and call nand_do_read
1579 */ 1584 */
1580static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, 1585static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1581 size_t *retlen, uint8_t *buf) 1586 size_t *retlen, uint8_t *buf)
1582{ 1587{
1583 struct mtd_oob_ops ops; 1588 struct nand_chip *chip = mtd->priv;
1584 int ret; 1589 int ret;
1585 1590
1586 nand_get_device(mtd, FL_READING); 1591 /* Do not allow reads past end of device */
1587 ops.len = len; 1592 if ((from + len) > mtd->size)
1588 ops.datbuf = buf; 1593 return -EINVAL;
1589 ops.oobbuf = NULL; 1594 if (!len)
1590 ops.mode = MTD_OPS_PLACE_OOB; 1595 return 0;
1591 ret = nand_do_read_ops(mtd, from, &ops); 1596
1592 *retlen = ops.retlen; 1597 nand_get_device(chip, mtd, FL_READING);
1598
1599 chip->ops.len = len;
1600 chip->ops.datbuf = buf;
1601 chip->ops.oobbuf = NULL;
1602
1603 ret = nand_do_read_ops(mtd, from, &chip->ops);
1604
1605 *retlen = chip->ops.retlen;
1606
1593 nand_release_device(mtd); 1607 nand_release_device(mtd);
1608
1594 return ret; 1609 return ret;
1595} 1610}
1596 1611
1597/** 1612/**
1598 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function 1613 * nand_read_oob_std - [REPLACABLE] the most common OOB data read function
1599 * @mtd: mtd info structure 1614 * @mtd: mtd info structure
1600 * @chip: nand chip info structure 1615 * @chip: nand chip info structure
1601 * @page: page number to read 1616 * @page: page number to read
1617 * @sndcmd: flag whether to issue read command or not
1602 */ 1618 */
1603static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1619static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1604 int page) 1620 int page, int sndcmd)
1605{ 1621{
1606 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 1622 if (sndcmd) {
1623 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1624 sndcmd = 0;
1625 }
1607 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1626 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1608 return 0; 1627 return sndcmd;
1609} 1628}
1610 1629
1611/** 1630/**
1612 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC 1631 * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC
1613 * with syndromes 1632 * with syndromes
1614 * @mtd: mtd info structure 1633 * @mtd: mtd info structure
1615 * @chip: nand chip info structure 1634 * @chip: nand chip info structure
1616 * @page: page number to read 1635 * @page: page number to read
1636 * @sndcmd: flag whether to issue read command or not
1617 */ 1637 */
1618static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1638static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1619 int page) 1639 int page, int sndcmd)
1620{ 1640{
1621 uint8_t *buf = chip->oob_poi; 1641 uint8_t *buf = chip->oob_poi;
1622 int length = mtd->oobsize; 1642 int length = mtd->oobsize;
@@ -1643,14 +1663,14 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1643 if (length > 0) 1663 if (length > 0)
1644 chip->read_buf(mtd, bufpoi, length); 1664 chip->read_buf(mtd, bufpoi, length);
1645 1665
1646 return 0; 1666 return 1;
1647} 1667}
1648 1668
1649/** 1669/**
1650 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function 1670 * nand_write_oob_std - [REPLACABLE] the most common OOB data write function
1651 * @mtd: mtd info structure 1671 * @mtd: mtd info structure
1652 * @chip: nand chip info structure 1672 * @chip: nand chip info structure
1653 * @page: page number to write 1673 * @page: page number to write
1654 */ 1674 */
1655static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1675static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1656 int page) 1676 int page)
@@ -1670,11 +1690,11 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1670} 1690}
1671 1691
1672/** 1692/**
1673 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC 1693 * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC
1674 * with syndrome - only for large page flash 1694 * with syndrome - only for large page flash !
1675 * @mtd: mtd info structure 1695 * @mtd: mtd info structure
1676 * @chip: nand chip info structure 1696 * @chip: nand chip info structure
1677 * @page: page number to write 1697 * @page: page number to write
1678 */ 1698 */
1679static int nand_write_oob_syndrome(struct mtd_info *mtd, 1699static int nand_write_oob_syndrome(struct mtd_info *mtd,
1680 struct nand_chip *chip, int page) 1700 struct nand_chip *chip, int page)
@@ -1729,37 +1749,34 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
1729} 1749}
1730 1750
1731/** 1751/**
1732 * nand_do_read_oob - [INTERN] NAND read out-of-band 1752 * nand_do_read_oob - [Intern] NAND read out-of-band
1733 * @mtd: MTD device structure 1753 * @mtd: MTD device structure
1734 * @from: offset to read from 1754 * @from: offset to read from
1735 * @ops: oob operations description structure 1755 * @ops: oob operations description structure
1736 * 1756 *
1737 * NAND read out-of-band data from the spare area. 1757 * NAND read out-of-band data from the spare area
1738 */ 1758 */
1739static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 1759static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1740 struct mtd_oob_ops *ops) 1760 struct mtd_oob_ops *ops)
1741{ 1761{
1742 int page, realpage, chipnr; 1762 int page, realpage, chipnr, sndcmd = 1;
1743 struct nand_chip *chip = mtd->priv; 1763 struct nand_chip *chip = mtd->priv;
1744 struct mtd_ecc_stats stats; 1764 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1745 int readlen = ops->ooblen; 1765 int readlen = ops->ooblen;
1746 int len; 1766 int len;
1747 uint8_t *buf = ops->oobbuf; 1767 uint8_t *buf = ops->oobbuf;
1748 int ret = 0;
1749 1768
1750 pr_debug("%s: from = 0x%08Lx, len = %i\n", 1769 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
1751 __func__, (unsigned long long)from, readlen); 1770 __func__, (unsigned long long)from, readlen);
1752 1771
1753 stats = mtd->ecc_stats; 1772 if (ops->mode == MTD_OOB_AUTO)
1754
1755 if (ops->mode == MTD_OPS_AUTO_OOB)
1756 len = chip->ecc.layout->oobavail; 1773 len = chip->ecc.layout->oobavail;
1757 else 1774 else
1758 len = mtd->oobsize; 1775 len = mtd->oobsize;
1759 1776
1760 if (unlikely(ops->ooboffs >= len)) { 1777 if (unlikely(ops->ooboffs >= len)) {
1761 pr_debug("%s: attempt to start read outside oob\n", 1778 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
1762 __func__); 1779 "outside oob\n", __func__);
1763 return -EINVAL; 1780 return -EINVAL;
1764 } 1781 }
1765 1782
@@ -1767,8 +1784,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1767 if (unlikely(from >= mtd->size || 1784 if (unlikely(from >= mtd->size ||
1768 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - 1785 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
1769 (from >> chip->page_shift)) * len)) { 1786 (from >> chip->page_shift)) * len)) {
1770 pr_debug("%s: attempt to read beyond end of device\n", 1787 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
1771 __func__); 1788 "of device\n", __func__);
1772 return -EINVAL; 1789 return -EINVAL;
1773 } 1790 }
1774 1791
@@ -1780,17 +1797,24 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1780 page = realpage & chip->pagemask; 1797 page = realpage & chip->pagemask;
1781 1798
1782 while (1) { 1799 while (1) {
1783 if (ops->mode == MTD_OPS_RAW) 1800 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
1784 ret = chip->ecc.read_oob_raw(mtd, chip, page);
1785 else
1786 ret = chip->ecc.read_oob(mtd, chip, page);
1787
1788 if (ret < 0)
1789 break;
1790 1801
1791 len = min(len, readlen); 1802 len = min(len, readlen);
1792 buf = nand_transfer_oob(chip, buf, ops, len); 1803 buf = nand_transfer_oob(chip, buf, ops, len);
1793 1804
1805 if (!(chip->options & NAND_NO_READRDY)) {
1806 /*
1807 * Apply delay or wait for ready/busy pin. Do this
1808 * before the AUTOINCR check, so no problems arise if a
1809 * chip which does auto increment is marked as
1810 * NOAUTOINCR by the board driver.
1811 */
1812 if (!chip->dev_ready)
1813 udelay(chip->chip_delay);
1814 else
1815 nand_wait_ready(mtd);
1816 }
1817
1794 readlen -= len; 1818 readlen -= len;
1795 if (!readlen) 1819 if (!readlen)
1796 break; 1820 break;
@@ -1805,48 +1829,47 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1805 chip->select_chip(mtd, -1); 1829 chip->select_chip(mtd, -1);
1806 chip->select_chip(mtd, chipnr); 1830 chip->select_chip(mtd, chipnr);
1807 } 1831 }
1808 }
1809 chip->select_chip(mtd, -1);
1810
1811 ops->oobretlen = ops->ooblen - readlen;
1812 1832
1813 if (ret < 0) 1833 /* Check, if the chip supports auto page increment
1814 return ret; 1834 * or if we have hit a block boundary.
1815 1835 */
1816 if (mtd->ecc_stats.failed - stats.failed) 1836 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1817 return -EBADMSG; 1837 sndcmd = 1;
1838 }
1818 1839
1819 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1840 ops->oobretlen = ops->ooblen;
1841 return 0;
1820} 1842}
1821 1843
1822/** 1844/**
1823 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band 1845 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
1824 * @mtd: MTD device structure 1846 * @mtd: MTD device structure
1825 * @from: offset to read from 1847 * @from: offset to read from
1826 * @ops: oob operation description structure 1848 * @ops: oob operation description structure
1827 * 1849 *
1828 * NAND read data and/or out-of-band data. 1850 * NAND read data and/or out-of-band data
1829 */ 1851 */
1830static int nand_read_oob(struct mtd_info *mtd, loff_t from, 1852static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1831 struct mtd_oob_ops *ops) 1853 struct mtd_oob_ops *ops)
1832{ 1854{
1855 struct nand_chip *chip = mtd->priv;
1833 int ret = -ENOTSUPP; 1856 int ret = -ENOTSUPP;
1834 1857
1835 ops->retlen = 0; 1858 ops->retlen = 0;
1836 1859
1837 /* Do not allow reads past end of device */ 1860 /* Do not allow reads past end of device */
1838 if (ops->datbuf && (from + ops->len) > mtd->size) { 1861 if (ops->datbuf && (from + ops->len) > mtd->size) {
1839 pr_debug("%s: attempt to read beyond end of device\n", 1862 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
1840 __func__); 1863 "beyond end of device\n", __func__);
1841 return -EINVAL; 1864 return -EINVAL;
1842 } 1865 }
1843 1866
1844 nand_get_device(mtd, FL_READING); 1867 nand_get_device(chip, mtd, FL_READING);
1845 1868
1846 switch (ops->mode) { 1869 switch (ops->mode) {
1847 case MTD_OPS_PLACE_OOB: 1870 case MTD_OOB_PLACE:
1848 case MTD_OPS_AUTO_OOB: 1871 case MTD_OOB_AUTO:
1849 case MTD_OPS_RAW: 1872 case MTD_OOB_RAW:
1850 break; 1873 break;
1851 1874
1852 default: 1875 default:
@@ -1865,36 +1888,31 @@ out:
1865 1888
1866 1889
1867/** 1890/**
1868 * nand_write_page_raw - [INTERN] raw page write function 1891 * nand_write_page_raw - [Intern] raw page write function
1869 * @mtd: mtd info structure 1892 * @mtd: mtd info structure
1870 * @chip: nand chip info structure 1893 * @chip: nand chip info structure
1871 * @buf: data buffer 1894 * @buf: data buffer
1872 * @oob_required: must write chip->oob_poi to OOB
1873 * 1895 *
1874 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1896 * Not for syndrome calculating ecc controllers, which use a special oob layout
1875 */ 1897 */
1876static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1898static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1877 const uint8_t *buf, int oob_required) 1899 const uint8_t *buf)
1878{ 1900{
1879 chip->write_buf(mtd, buf, mtd->writesize); 1901 chip->write_buf(mtd, buf, mtd->writesize);
1880 if (oob_required) 1902 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1881 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1882
1883 return 0;
1884} 1903}
1885 1904
1886/** 1905/**
1887 * nand_write_page_raw_syndrome - [INTERN] raw page write function 1906 * nand_write_page_raw_syndrome - [Intern] raw page write function
1888 * @mtd: mtd info structure 1907 * @mtd: mtd info structure
1889 * @chip: nand chip info structure 1908 * @chip: nand chip info structure
1890 * @buf: data buffer 1909 * @buf: data buffer
1891 * @oob_required: must write chip->oob_poi to OOB
1892 * 1910 *
1893 * We need a special oob layout and handling even when ECC isn't checked. 1911 * We need a special oob layout and handling even when ECC isn't checked.
1894 */ 1912 */
1895static int nand_write_page_raw_syndrome(struct mtd_info *mtd, 1913static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1896 struct nand_chip *chip, 1914 struct nand_chip *chip,
1897 const uint8_t *buf, int oob_required) 1915 const uint8_t *buf)
1898{ 1916{
1899 int eccsize = chip->ecc.size; 1917 int eccsize = chip->ecc.size;
1900 int eccbytes = chip->ecc.bytes; 1918 int eccbytes = chip->ecc.bytes;
@@ -1922,18 +1940,15 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
1922 size = mtd->oobsize - (oob - chip->oob_poi); 1940 size = mtd->oobsize - (oob - chip->oob_poi);
1923 if (size) 1941 if (size)
1924 chip->write_buf(mtd, oob, size); 1942 chip->write_buf(mtd, oob, size);
1925
1926 return 0;
1927} 1943}
1928/** 1944/**
1929 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function 1945 * nand_write_page_swecc - [REPLACABLE] software ecc based page write function
1930 * @mtd: mtd info structure 1946 * @mtd: mtd info structure
1931 * @chip: nand chip info structure 1947 * @chip: nand chip info structure
1932 * @buf: data buffer 1948 * @buf: data buffer
1933 * @oob_required: must write chip->oob_poi to OOB
1934 */ 1949 */
1935static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1950static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1936 const uint8_t *buf, int oob_required) 1951 const uint8_t *buf)
1937{ 1952{
1938 int i, eccsize = chip->ecc.size; 1953 int i, eccsize = chip->ecc.size;
1939 int eccbytes = chip->ecc.bytes; 1954 int eccbytes = chip->ecc.bytes;
@@ -1942,25 +1957,24 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1942 const uint8_t *p = buf; 1957 const uint8_t *p = buf;
1943 uint32_t *eccpos = chip->ecc.layout->eccpos; 1958 uint32_t *eccpos = chip->ecc.layout->eccpos;
1944 1959
1945 /* Software ECC calculation */ 1960 /* Software ecc calculation */
1946 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1961 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1947 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1962 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1948 1963
1949 for (i = 0; i < chip->ecc.total; i++) 1964 for (i = 0; i < chip->ecc.total; i++)
1950 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1965 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1951 1966
1952 return chip->ecc.write_page_raw(mtd, chip, buf, 1); 1967 chip->ecc.write_page_raw(mtd, chip, buf);
1953} 1968}
1954 1969
1955/** 1970/**
1956 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function 1971 * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
1957 * @mtd: mtd info structure 1972 * @mtd: mtd info structure
1958 * @chip: nand chip info structure 1973 * @chip: nand chip info structure
1959 * @buf: data buffer 1974 * @buf: data buffer
1960 * @oob_required: must write chip->oob_poi to OOB
1961 */ 1975 */
1962static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1976static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1963 const uint8_t *buf, int oob_required) 1977 const uint8_t *buf)
1964{ 1978{
1965 int i, eccsize = chip->ecc.size; 1979 int i, eccsize = chip->ecc.size;
1966 int eccbytes = chip->ecc.bytes; 1980 int eccbytes = chip->ecc.bytes;
@@ -1979,23 +1993,19 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1979 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 1993 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1980 1994
1981 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1995 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1982
1983 return 0;
1984} 1996}
1985 1997
1986/** 1998/**
1987 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write 1999 * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write
1988 * @mtd: mtd info structure 2000 * @mtd: mtd info structure
1989 * @chip: nand chip info structure 2001 * @chip: nand chip info structure
1990 * @buf: data buffer 2002 * @buf: data buffer
1991 * @oob_required: must write chip->oob_poi to OOB
1992 * 2003 *
1993 * The hw generator calculates the error syndrome automatically. Therefore we 2004 * The hw generator calculates the error syndrome automatically. Therefor
1994 * need a special oob layout and handling. 2005 * we need a special oob layout and handling.
1995 */ 2006 */
1996static int nand_write_page_syndrome(struct mtd_info *mtd, 2007static void nand_write_page_syndrome(struct mtd_info *mtd,
1997 struct nand_chip *chip, 2008 struct nand_chip *chip, const uint8_t *buf)
1998 const uint8_t *buf, int oob_required)
1999{ 2009{
2000 int i, eccsize = chip->ecc.size; 2010 int i, eccsize = chip->ecc.size;
2001 int eccbytes = chip->ecc.bytes; 2011 int eccbytes = chip->ecc.bytes;
@@ -2027,39 +2037,32 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
2027 i = mtd->oobsize - (oob - chip->oob_poi); 2037 i = mtd->oobsize - (oob - chip->oob_poi);
2028 if (i) 2038 if (i)
2029 chip->write_buf(mtd, oob, i); 2039 chip->write_buf(mtd, oob, i);
2030
2031 return 0;
2032} 2040}
2033 2041
2034/** 2042/**
2035 * nand_write_page - [REPLACEABLE] write one page 2043 * nand_write_page - [REPLACEABLE] write one page
2036 * @mtd: MTD device structure 2044 * @mtd: MTD device structure
2037 * @chip: NAND chip descriptor 2045 * @chip: NAND chip descriptor
2038 * @buf: the data to write 2046 * @buf: the data to write
2039 * @oob_required: must write chip->oob_poi to OOB 2047 * @page: page number to write
2040 * @page: page number to write 2048 * @cached: cached programming
2041 * @cached: cached programming 2049 * @raw: use _raw version of write_page
2042 * @raw: use _raw version of write_page
2043 */ 2050 */
2044static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2051static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2045 const uint8_t *buf, int oob_required, int page, 2052 const uint8_t *buf, int page, int cached, int raw)
2046 int cached, int raw)
2047{ 2053{
2048 int status; 2054 int status;
2049 2055
2050 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 2056 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2051 2057
2052 if (unlikely(raw)) 2058 if (unlikely(raw))
2053 status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required); 2059 chip->ecc.write_page_raw(mtd, chip, buf);
2054 else 2060 else
2055 status = chip->ecc.write_page(mtd, chip, buf, oob_required); 2061 chip->ecc.write_page(mtd, chip, buf);
2056
2057 if (status < 0)
2058 return status;
2059 2062
2060 /* 2063 /*
2061 * Cached progamming disabled for now. Not sure if it's worth the 2064 * Cached progamming disabled for now, Not sure if its worth the
2062 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s). 2065 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
2063 */ 2066 */
2064 cached = 0; 2067 cached = 0;
2065 2068
@@ -2069,7 +2072,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2069 status = chip->waitfunc(mtd, chip); 2072 status = chip->waitfunc(mtd, chip);
2070 /* 2073 /*
2071 * See if operation failed and additional status checks are 2074 * See if operation failed and additional status checks are
2072 * available. 2075 * available
2073 */ 2076 */
2074 if ((status & NAND_STATUS_FAIL) && (chip->errstat)) 2077 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2075 status = chip->errstat(mtd, chip, FL_WRITING, status, 2078 status = chip->errstat(mtd, chip, FL_WRITING, status,
@@ -2082,15 +2085,22 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2082 status = chip->waitfunc(mtd, chip); 2085 status = chip->waitfunc(mtd, chip);
2083 } 2086 }
2084 2087
2088#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
2089 /* Send command to read back the data */
2090 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
2091
2092 if (chip->verify_buf(mtd, buf, mtd->writesize))
2093 return -EIO;
2094#endif
2085 return 0; 2095 return 0;
2086} 2096}
2087 2097
2088/** 2098/**
2089 * nand_fill_oob - [INTERN] Transfer client buffer to oob 2099 * nand_fill_oob - [Internal] Transfer client buffer to oob
2090 * @mtd: MTD device structure 2100 * @mtd: MTD device structure
2091 * @oob: oob data buffer 2101 * @oob: oob data buffer
2092 * @len: oob data write length 2102 * @len: oob data write length
2093 * @ops: oob ops structure 2103 * @ops: oob ops structure
2094 */ 2104 */
2095static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, 2105static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2096 struct mtd_oob_ops *ops) 2106 struct mtd_oob_ops *ops)
@@ -2105,18 +2115,18 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2105 2115
2106 switch (ops->mode) { 2116 switch (ops->mode) {
2107 2117
2108 case MTD_OPS_PLACE_OOB: 2118 case MTD_OOB_PLACE:
2109 case MTD_OPS_RAW: 2119 case MTD_OOB_RAW:
2110 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 2120 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2111 return oob + len; 2121 return oob + len;
2112 2122
2113 case MTD_OPS_AUTO_OOB: { 2123 case MTD_OOB_AUTO: {
2114 struct nand_oobfree *free = chip->ecc.layout->oobfree; 2124 struct nand_oobfree *free = chip->ecc.layout->oobfree;
2115 uint32_t boffs = 0, woffs = ops->ooboffs; 2125 uint32_t boffs = 0, woffs = ops->ooboffs;
2116 size_t bytes = 0; 2126 size_t bytes = 0;
2117 2127
2118 for (; free->length && len; free++, len -= bytes) { 2128 for (; free->length && len; free++, len -= bytes) {
2119 /* Write request not from offset 0? */ 2129 /* Write request not from offset 0 ? */
2120 if (unlikely(woffs)) { 2130 if (unlikely(woffs)) {
2121 if (woffs >= free->length) { 2131 if (woffs >= free->length) {
2122 woffs -= free->length; 2132 woffs -= free->length;
@@ -2144,12 +2154,12 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2144#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) 2154#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2145 2155
2146/** 2156/**
2147 * nand_do_write_ops - [INTERN] NAND write with ECC 2157 * nand_do_write_ops - [Internal] NAND write with ECC
2148 * @mtd: MTD device structure 2158 * @mtd: MTD device structure
2149 * @to: offset to write to 2159 * @to: offset to write to
2150 * @ops: oob operations description structure 2160 * @ops: oob operations description structure
2151 * 2161 *
2152 * NAND write with ECC. 2162 * NAND write with ECC
2153 */ 2163 */
2154static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, 2164static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2155 struct mtd_oob_ops *ops) 2165 struct mtd_oob_ops *ops)
@@ -2159,22 +2169,21 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2159 uint32_t writelen = ops->len; 2169 uint32_t writelen = ops->len;
2160 2170
2161 uint32_t oobwritelen = ops->ooblen; 2171 uint32_t oobwritelen = ops->ooblen;
2162 uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ? 2172 uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
2163 mtd->oobavail : mtd->oobsize; 2173 mtd->oobavail : mtd->oobsize;
2164 2174
2165 uint8_t *oob = ops->oobbuf; 2175 uint8_t *oob = ops->oobbuf;
2166 uint8_t *buf = ops->datbuf; 2176 uint8_t *buf = ops->datbuf;
2167 int ret, subpage; 2177 int ret, subpage;
2168 int oob_required = oob ? 1 : 0;
2169 2178
2170 ops->retlen = 0; 2179 ops->retlen = 0;
2171 if (!writelen) 2180 if (!writelen)
2172 return 0; 2181 return 0;
2173 2182
2174 /* Reject writes, which are not page aligned */ 2183 /* reject writes, which are not page aligned */
2175 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 2184 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2176 pr_notice("%s: attempt to write non page aligned data\n", 2185 printk(KERN_NOTICE "%s: Attempt to write not "
2177 __func__); 2186 "page aligned data\n", __func__);
2178 return -EINVAL; 2187 return -EINVAL;
2179 } 2188 }
2180 2189
@@ -2188,10 +2197,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2188 chip->select_chip(mtd, chipnr); 2197 chip->select_chip(mtd, chipnr);
2189 2198
2190 /* Check, if it is write protected */ 2199 /* Check, if it is write protected */
2191 if (nand_check_wp(mtd)) { 2200 if (nand_check_wp(mtd))
2192 ret = -EIO; 2201 return -EIO;
2193 goto err_out;
2194 }
2195 2202
2196 realpage = (int)(to >> chip->page_shift); 2203 realpage = (int)(to >> chip->page_shift);
2197 page = realpage & chip->pagemask; 2204 page = realpage & chip->pagemask;
@@ -2203,17 +2210,15 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2203 chip->pagebuf = -1; 2210 chip->pagebuf = -1;
2204 2211
2205 /* Don't allow multipage oob writes with offset */ 2212 /* Don't allow multipage oob writes with offset */
2206 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) { 2213 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
2207 ret = -EINVAL; 2214 return -EINVAL;
2208 goto err_out;
2209 }
2210 2215
2211 while (1) { 2216 while (1) {
2212 int bytes = mtd->writesize; 2217 int bytes = mtd->writesize;
2213 int cached = writelen > bytes && page != blockmask; 2218 int cached = writelen > bytes && page != blockmask;
2214 uint8_t *wbuf = buf; 2219 uint8_t *wbuf = buf;
2215 2220
2216 /* Partial page write? */ 2221 /* Partial page write ? */
2217 if (unlikely(column || writelen < (mtd->writesize - 1))) { 2222 if (unlikely(column || writelen < (mtd->writesize - 1))) {
2218 cached = 0; 2223 cached = 0;
2219 bytes = min_t(int, bytes - column, (int) writelen); 2224 bytes = min_t(int, bytes - column, (int) writelen);
@@ -2232,8 +2237,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2232 memset(chip->oob_poi, 0xff, mtd->oobsize); 2237 memset(chip->oob_poi, 0xff, mtd->oobsize);
2233 } 2238 }
2234 2239
2235 ret = chip->write_page(mtd, chip, wbuf, oob_required, page, 2240 ret = chip->write_page(mtd, chip, wbuf, page, cached,
2236 cached, (ops->mode == MTD_OPS_RAW)); 2241 (ops->mode == MTD_OOB_RAW));
2237 if (ret) 2242 if (ret)
2238 break; 2243 break;
2239 2244
@@ -2257,19 +2262,16 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2257 ops->retlen = ops->len - writelen; 2262 ops->retlen = ops->len - writelen;
2258 if (unlikely(oob)) 2263 if (unlikely(oob))
2259 ops->oobretlen = ops->ooblen; 2264 ops->oobretlen = ops->ooblen;
2260
2261err_out:
2262 chip->select_chip(mtd, -1);
2263 return ret; 2265 return ret;
2264} 2266}
2265 2267
2266/** 2268/**
2267 * panic_nand_write - [MTD Interface] NAND write with ECC 2269 * panic_nand_write - [MTD Interface] NAND write with ECC
2268 * @mtd: MTD device structure 2270 * @mtd: MTD device structure
2269 * @to: offset to write to 2271 * @to: offset to write to
2270 * @len: number of bytes to write 2272 * @len: number of bytes to write
2271 * @retlen: pointer to variable to store the number of written bytes 2273 * @retlen: pointer to variable to store the number of written bytes
2272 * @buf: the data to write 2274 * @buf: the data to write
2273 * 2275 *
2274 * NAND write with ECC. Used when performing writes in interrupt context, this 2276 * NAND write with ECC. Used when performing writes in interrupt context, this
2275 * may for example be called by mtdoops when writing an oops while in panic. 2277 * may for example be called by mtdoops when writing an oops while in panic.
@@ -2278,60 +2280,74 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2278 size_t *retlen, const uint8_t *buf) 2280 size_t *retlen, const uint8_t *buf)
2279{ 2281{
2280 struct nand_chip *chip = mtd->priv; 2282 struct nand_chip *chip = mtd->priv;
2281 struct mtd_oob_ops ops;
2282 int ret; 2283 int ret;
2283 2284
2284 /* Wait for the device to get ready */ 2285 /* Do not allow reads past end of device */
2286 if ((to + len) > mtd->size)
2287 return -EINVAL;
2288 if (!len)
2289 return 0;
2290
2291 /* Wait for the device to get ready. */
2285 panic_nand_wait(mtd, chip, 400); 2292 panic_nand_wait(mtd, chip, 400);
2286 2293
2287 /* Grab the device */ 2294 /* Grab the device. */
2288 panic_nand_get_device(chip, mtd, FL_WRITING); 2295 panic_nand_get_device(chip, mtd, FL_WRITING);
2289 2296
2290 ops.len = len; 2297 chip->ops.len = len;
2291 ops.datbuf = (uint8_t *)buf; 2298 chip->ops.datbuf = (uint8_t *)buf;
2292 ops.oobbuf = NULL; 2299 chip->ops.oobbuf = NULL;
2293 ops.mode = MTD_OPS_PLACE_OOB;
2294 2300
2295 ret = nand_do_write_ops(mtd, to, &ops); 2301 ret = nand_do_write_ops(mtd, to, &chip->ops);
2296 2302
2297 *retlen = ops.retlen; 2303 *retlen = chip->ops.retlen;
2298 return ret; 2304 return ret;
2299} 2305}
2300 2306
2301/** 2307/**
2302 * nand_write - [MTD Interface] NAND write with ECC 2308 * nand_write - [MTD Interface] NAND write with ECC
2303 * @mtd: MTD device structure 2309 * @mtd: MTD device structure
2304 * @to: offset to write to 2310 * @to: offset to write to
2305 * @len: number of bytes to write 2311 * @len: number of bytes to write
2306 * @retlen: pointer to variable to store the number of written bytes 2312 * @retlen: pointer to variable to store the number of written bytes
2307 * @buf: the data to write 2313 * @buf: the data to write
2308 * 2314 *
2309 * NAND write with ECC. 2315 * NAND write with ECC
2310 */ 2316 */
2311static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, 2317static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2312 size_t *retlen, const uint8_t *buf) 2318 size_t *retlen, const uint8_t *buf)
2313{ 2319{
2314 struct mtd_oob_ops ops; 2320 struct nand_chip *chip = mtd->priv;
2315 int ret; 2321 int ret;
2316 2322
2317 nand_get_device(mtd, FL_WRITING); 2323 /* Do not allow reads past end of device */
2318 ops.len = len; 2324 if ((to + len) > mtd->size)
2319 ops.datbuf = (uint8_t *)buf; 2325 return -EINVAL;
2320 ops.oobbuf = NULL; 2326 if (!len)
2321 ops.mode = MTD_OPS_PLACE_OOB; 2327 return 0;
2322 ret = nand_do_write_ops(mtd, to, &ops); 2328
2323 *retlen = ops.retlen; 2329 nand_get_device(chip, mtd, FL_WRITING);
2330
2331 chip->ops.len = len;
2332 chip->ops.datbuf = (uint8_t *)buf;
2333 chip->ops.oobbuf = NULL;
2334
2335 ret = nand_do_write_ops(mtd, to, &chip->ops);
2336
2337 *retlen = chip->ops.retlen;
2338
2324 nand_release_device(mtd); 2339 nand_release_device(mtd);
2340
2325 return ret; 2341 return ret;
2326} 2342}
2327 2343
2328/** 2344/**
2329 * nand_do_write_oob - [MTD Interface] NAND write out-of-band 2345 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
2330 * @mtd: MTD device structure 2346 * @mtd: MTD device structure
2331 * @to: offset to write to 2347 * @to: offset to write to
2332 * @ops: oob operation description structure 2348 * @ops: oob operation description structure
2333 * 2349 *
2334 * NAND write out-of-band. 2350 * NAND write out-of-band
2335 */ 2351 */
2336static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, 2352static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2337 struct mtd_oob_ops *ops) 2353 struct mtd_oob_ops *ops)
@@ -2339,24 +2355,24 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2339 int chipnr, page, status, len; 2355 int chipnr, page, status, len;
2340 struct nand_chip *chip = mtd->priv; 2356 struct nand_chip *chip = mtd->priv;
2341 2357
2342 pr_debug("%s: to = 0x%08x, len = %i\n", 2358 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
2343 __func__, (unsigned int)to, (int)ops->ooblen); 2359 __func__, (unsigned int)to, (int)ops->ooblen);
2344 2360
2345 if (ops->mode == MTD_OPS_AUTO_OOB) 2361 if (ops->mode == MTD_OOB_AUTO)
2346 len = chip->ecc.layout->oobavail; 2362 len = chip->ecc.layout->oobavail;
2347 else 2363 else
2348 len = mtd->oobsize; 2364 len = mtd->oobsize;
2349 2365
2350 /* Do not allow write past end of page */ 2366 /* Do not allow write past end of page */
2351 if ((ops->ooboffs + ops->ooblen) > len) { 2367 if ((ops->ooboffs + ops->ooblen) > len) {
2352 pr_debug("%s: attempt to write past end of page\n", 2368 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
2353 __func__); 2369 "past end of page\n", __func__);
2354 return -EINVAL; 2370 return -EINVAL;
2355 } 2371 }
2356 2372
2357 if (unlikely(ops->ooboffs >= len)) { 2373 if (unlikely(ops->ooboffs >= len)) {
2358 pr_debug("%s: attempt to start write outside oob\n", 2374 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
2359 __func__); 2375 "write outside oob\n", __func__);
2360 return -EINVAL; 2376 return -EINVAL;
2361 } 2377 }
2362 2378
@@ -2365,8 +2381,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2365 ops->ooboffs + ops->ooblen > 2381 ops->ooboffs + ops->ooblen >
2366 ((mtd->size >> chip->page_shift) - 2382 ((mtd->size >> chip->page_shift) -
2367 (to >> chip->page_shift)) * len)) { 2383 (to >> chip->page_shift)) * len)) {
2368 pr_debug("%s: attempt to write beyond end of device\n", 2384 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
2369 __func__); 2385 "end of device\n", __func__);
2370 return -EINVAL; 2386 return -EINVAL;
2371 } 2387 }
2372 2388
@@ -2385,23 +2401,15 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2385 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2401 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2386 2402
2387 /* Check, if it is write protected */ 2403 /* Check, if it is write protected */
2388 if (nand_check_wp(mtd)) { 2404 if (nand_check_wp(mtd))
2389 chip->select_chip(mtd, -1);
2390 return -EROFS; 2405 return -EROFS;
2391 }
2392 2406
2393 /* Invalidate the page cache, if we write to the cached page */ 2407 /* Invalidate the page cache, if we write to the cached page */
2394 if (page == chip->pagebuf) 2408 if (page == chip->pagebuf)
2395 chip->pagebuf = -1; 2409 chip->pagebuf = -1;
2396 2410
2397 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops); 2411 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2398 2412 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2399 if (ops->mode == MTD_OPS_RAW)
2400 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2401 else
2402 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2403
2404 chip->select_chip(mtd, -1);
2405 2413
2406 if (status) 2414 if (status)
2407 return status; 2415 return status;
@@ -2413,30 +2421,31 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2413 2421
2414/** 2422/**
2415 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 2423 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2416 * @mtd: MTD device structure 2424 * @mtd: MTD device structure
2417 * @to: offset to write to 2425 * @to: offset to write to
2418 * @ops: oob operation description structure 2426 * @ops: oob operation description structure
2419 */ 2427 */
2420static int nand_write_oob(struct mtd_info *mtd, loff_t to, 2428static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2421 struct mtd_oob_ops *ops) 2429 struct mtd_oob_ops *ops)
2422{ 2430{
2431 struct nand_chip *chip = mtd->priv;
2423 int ret = -ENOTSUPP; 2432 int ret = -ENOTSUPP;
2424 2433
2425 ops->retlen = 0; 2434 ops->retlen = 0;
2426 2435
2427 /* Do not allow writes past end of device */ 2436 /* Do not allow writes past end of device */
2428 if (ops->datbuf && (to + ops->len) > mtd->size) { 2437 if (ops->datbuf && (to + ops->len) > mtd->size) {
2429 pr_debug("%s: attempt to write beyond end of device\n", 2438 DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
2430 __func__); 2439 "end of device\n", __func__);
2431 return -EINVAL; 2440 return -EINVAL;
2432 } 2441 }
2433 2442
2434 nand_get_device(mtd, FL_WRITING); 2443 nand_get_device(chip, mtd, FL_WRITING);
2435 2444
2436 switch (ops->mode) { 2445 switch (ops->mode) {
2437 case MTD_OPS_PLACE_OOB: 2446 case MTD_OOB_PLACE:
2438 case MTD_OPS_AUTO_OOB: 2447 case MTD_OOB_AUTO:
2439 case MTD_OPS_RAW: 2448 case MTD_OOB_RAW:
2440 break; 2449 break;
2441 2450
2442 default: 2451 default:
@@ -2454,11 +2463,11 @@ out:
2454} 2463}
2455 2464
2456/** 2465/**
2457 * single_erase_cmd - [GENERIC] NAND standard block erase command function 2466 * single_erease_cmd - [GENERIC] NAND standard block erase command function
2458 * @mtd: MTD device structure 2467 * @mtd: MTD device structure
2459 * @page: the page address of the block which will be erased 2468 * @page: the page address of the block which will be erased
2460 * 2469 *
2461 * Standard erase command for NAND chips. 2470 * Standard erase command for NAND chips
2462 */ 2471 */
2463static void single_erase_cmd(struct mtd_info *mtd, int page) 2472static void single_erase_cmd(struct mtd_info *mtd, int page)
2464{ 2473{
@@ -2469,11 +2478,12 @@ static void single_erase_cmd(struct mtd_info *mtd, int page)
2469} 2478}
2470 2479
2471/** 2480/**
2472 * multi_erase_cmd - [GENERIC] AND specific block erase command function 2481 * multi_erease_cmd - [GENERIC] AND specific block erase command function
2473 * @mtd: MTD device structure 2482 * @mtd: MTD device structure
2474 * @page: the page address of the block which will be erased 2483 * @page: the page address of the block which will be erased
2475 * 2484 *
2476 * AND multi block erase command function. Erase 4 consecutive blocks. 2485 * AND multi block erase command function
2486 * Erase 4 consecutive blocks
2477 */ 2487 */
2478static void multi_erase_cmd(struct mtd_info *mtd, int page) 2488static void multi_erase_cmd(struct mtd_info *mtd, int page)
2479{ 2489{
@@ -2488,10 +2498,10 @@ static void multi_erase_cmd(struct mtd_info *mtd, int page)
2488 2498
2489/** 2499/**
2490 * nand_erase - [MTD Interface] erase block(s) 2500 * nand_erase - [MTD Interface] erase block(s)
2491 * @mtd: MTD device structure 2501 * @mtd: MTD device structure
2492 * @instr: erase instruction 2502 * @instr: erase instruction
2493 * 2503 *
2494 * Erase one ore more blocks. 2504 * Erase one ore more blocks
2495 */ 2505 */
2496static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 2506static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2497{ 2507{
@@ -2500,12 +2510,12 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2500 2510
2501#define BBT_PAGE_MASK 0xffffff3f 2511#define BBT_PAGE_MASK 0xffffff3f
2502/** 2512/**
2503 * nand_erase_nand - [INTERN] erase block(s) 2513 * nand_erase_nand - [Internal] erase block(s)
2504 * @mtd: MTD device structure 2514 * @mtd: MTD device structure
2505 * @instr: erase instruction 2515 * @instr: erase instruction
2506 * @allowbbt: allow erasing the bbt area 2516 * @allowbbt: allow erasing the bbt area
2507 * 2517 *
2508 * Erase one ore more blocks. 2518 * Erase one ore more blocks
2509 */ 2519 */
2510int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 2520int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2511 int allowbbt) 2521 int allowbbt)
@@ -2516,15 +2526,17 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2516 unsigned int bbt_masked_page = 0xffffffff; 2526 unsigned int bbt_masked_page = 0xffffffff;
2517 loff_t len; 2527 loff_t len;
2518 2528
2519 pr_debug("%s: start = 0x%012llx, len = %llu\n", 2529 DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
2520 __func__, (unsigned long long)instr->addr, 2530 __func__, (unsigned long long)instr->addr,
2521 (unsigned long long)instr->len); 2531 (unsigned long long)instr->len);
2522 2532
2523 if (check_offs_len(mtd, instr->addr, instr->len)) 2533 if (check_offs_len(mtd, instr->addr, instr->len))
2524 return -EINVAL; 2534 return -EINVAL;
2525 2535
2536 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2537
2526 /* Grab the lock and see if the device is available */ 2538 /* Grab the lock and see if the device is available */
2527 nand_get_device(mtd, FL_ERASING); 2539 nand_get_device(chip, mtd, FL_ERASING);
2528 2540
2529 /* Shift to get first page */ 2541 /* Shift to get first page */
2530 page = (int)(instr->addr >> chip->page_shift); 2542 page = (int)(instr->addr >> chip->page_shift);
@@ -2538,8 +2550,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2538 2550
2539 /* Check, if it is write protected */ 2551 /* Check, if it is write protected */
2540 if (nand_check_wp(mtd)) { 2552 if (nand_check_wp(mtd)) {
2541 pr_debug("%s: device is write protected!\n", 2553 DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
2542 __func__); 2554 __func__);
2543 instr->state = MTD_ERASE_FAILED; 2555 instr->state = MTD_ERASE_FAILED;
2544 goto erase_exit; 2556 goto erase_exit;
2545 } 2557 }
@@ -2548,7 +2560,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2548 * If BBT requires refresh, set the BBT page mask to see if the BBT 2560 * If BBT requires refresh, set the BBT page mask to see if the BBT
2549 * should be rewritten. Otherwise the mask is set to 0xffffffff which 2561 * should be rewritten. Otherwise the mask is set to 0xffffffff which
2550 * can not be matched. This is also done when the bbt is actually 2562 * can not be matched. This is also done when the bbt is actually
2551 * erased to avoid recursive updates. 2563 * erased to avoid recusrsive updates
2552 */ 2564 */
2553 if (chip->options & BBT_AUTO_REFRESH && !allowbbt) 2565 if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
2554 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; 2566 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
@@ -2559,18 +2571,20 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2559 instr->state = MTD_ERASING; 2571 instr->state = MTD_ERASING;
2560 2572
2561 while (len) { 2573 while (len) {
2562 /* Check if we have a bad block, we do not erase bad blocks! */ 2574 /*
2575 * heck if we have a bad block, we do not erase bad blocks !
2576 */
2563 if (nand_block_checkbad(mtd, ((loff_t) page) << 2577 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2564 chip->page_shift, 0, allowbbt)) { 2578 chip->page_shift, 0, allowbbt)) {
2565 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", 2579 printk(KERN_WARNING "%s: attempt to erase a bad block "
2566 __func__, page); 2580 "at page 0x%08x\n", __func__, page);
2567 instr->state = MTD_ERASE_FAILED; 2581 instr->state = MTD_ERASE_FAILED;
2568 goto erase_exit; 2582 goto erase_exit;
2569 } 2583 }
2570 2584
2571 /* 2585 /*
2572 * Invalidate the page cache, if we erase the block which 2586 * Invalidate the page cache, if we erase the block which
2573 * contains the current cached page. 2587 * contains the current cached page
2574 */ 2588 */
2575 if (page <= chip->pagebuf && chip->pagebuf < 2589 if (page <= chip->pagebuf && chip->pagebuf <
2576 (page + pages_per_block)) 2590 (page + pages_per_block))
@@ -2590,8 +2604,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2590 2604
2591 /* See if block erase succeeded */ 2605 /* See if block erase succeeded */
2592 if (status & NAND_STATUS_FAIL) { 2606 if (status & NAND_STATUS_FAIL) {
2593 pr_debug("%s: failed erase, page 0x%08x\n", 2607 DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
2594 __func__, page); 2608 "page 0x%08x\n", __func__, page);
2595 instr->state = MTD_ERASE_FAILED; 2609 instr->state = MTD_ERASE_FAILED;
2596 instr->fail_addr = 2610 instr->fail_addr =
2597 ((loff_t)page << chip->page_shift); 2611 ((loff_t)page << chip->page_shift);
@@ -2600,7 +2614,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2600 2614
2601 /* 2615 /*
2602 * If BBT requires refresh, set the BBT rewrite flag to the 2616 * If BBT requires refresh, set the BBT rewrite flag to the
2603 * page being erased. 2617 * page being erased
2604 */ 2618 */
2605 if (bbt_masked_page != 0xffffffff && 2619 if (bbt_masked_page != 0xffffffff &&
2606 (page & BBT_PAGE_MASK) == bbt_masked_page) 2620 (page & BBT_PAGE_MASK) == bbt_masked_page)
@@ -2619,7 +2633,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2619 2633
2620 /* 2634 /*
2621 * If BBT requires refresh and BBT-PERCHIP, set the BBT 2635 * If BBT requires refresh and BBT-PERCHIP, set the BBT
2622 * page mask to see if this BBT should be rewritten. 2636 * page mask to see if this BBT should be rewritten
2623 */ 2637 */
2624 if (bbt_masked_page != 0xffffffff && 2638 if (bbt_masked_page != 0xffffffff &&
2625 (chip->bbt_td->options & NAND_BBT_PERCHIP)) 2639 (chip->bbt_td->options & NAND_BBT_PERCHIP))
@@ -2634,7 +2648,6 @@ erase_exit:
2634 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2648 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
2635 2649
2636 /* Deselect and wake up anyone waiting on the device */ 2650 /* Deselect and wake up anyone waiting on the device */
2637 chip->select_chip(mtd, -1);
2638 nand_release_device(mtd); 2651 nand_release_device(mtd);
2639 2652
2640 /* Do call back function */ 2653 /* Do call back function */
@@ -2643,7 +2656,7 @@ erase_exit:
2643 2656
2644 /* 2657 /*
2645 * If BBT requires refresh and erase was successful, rewrite any 2658 * If BBT requires refresh and erase was successful, rewrite any
2646 * selected bad block tables. 2659 * selected bad block tables
2647 */ 2660 */
2648 if (bbt_masked_page == 0xffffffff || ret) 2661 if (bbt_masked_page == 0xffffffff || ret)
2649 return ret; 2662 return ret;
@@ -2651,10 +2664,10 @@ erase_exit:
2651 for (chipnr = 0; chipnr < chip->numchips; chipnr++) { 2664 for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
2652 if (!rewrite_bbt[chipnr]) 2665 if (!rewrite_bbt[chipnr])
2653 continue; 2666 continue;
2654 /* Update the BBT for chip */ 2667 /* update the BBT for chip */
2655 pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n", 2668 DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
2656 __func__, chipnr, rewrite_bbt[chipnr], 2669 "(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
2657 chip->bbt_td->pages[chipnr]); 2670 rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
2658 nand_update_bbt(mtd, rewrite_bbt[chipnr]); 2671 nand_update_bbt(mtd, rewrite_bbt[chipnr]);
2659 } 2672 }
2660 2673
@@ -2664,34 +2677,40 @@ erase_exit:
2664 2677
2665/** 2678/**
2666 * nand_sync - [MTD Interface] sync 2679 * nand_sync - [MTD Interface] sync
2667 * @mtd: MTD device structure 2680 * @mtd: MTD device structure
2668 * 2681 *
2669 * Sync is actually a wait for chip ready function. 2682 * Sync is actually a wait for chip ready function
2670 */ 2683 */
2671static void nand_sync(struct mtd_info *mtd) 2684static void nand_sync(struct mtd_info *mtd)
2672{ 2685{
2673 pr_debug("%s: called\n", __func__); 2686 struct nand_chip *chip = mtd->priv;
2687
2688 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
2674 2689
2675 /* Grab the lock and see if the device is available */ 2690 /* Grab the lock and see if the device is available */
2676 nand_get_device(mtd, FL_SYNCING); 2691 nand_get_device(chip, mtd, FL_SYNCING);
2677 /* Release it and go back */ 2692 /* Release it and go back */
2678 nand_release_device(mtd); 2693 nand_release_device(mtd);
2679} 2694}
2680 2695
2681/** 2696/**
2682 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 2697 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
2683 * @mtd: MTD device structure 2698 * @mtd: MTD device structure
2684 * @offs: offset relative to mtd start 2699 * @offs: offset relative to mtd start
2685 */ 2700 */
2686static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 2701static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2687{ 2702{
2703 /* Check for invalid offset */
2704 if (offs > mtd->size)
2705 return -EINVAL;
2706
2688 return nand_block_checkbad(mtd, offs, 1, 0); 2707 return nand_block_checkbad(mtd, offs, 1, 0);
2689} 2708}
2690 2709
2691/** 2710/**
2692 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 2711 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
2693 * @mtd: MTD device structure 2712 * @mtd: MTD device structure
2694 * @ofs: offset relative to mtd start 2713 * @ofs: offset relative to mtd start
2695 */ 2714 */
2696static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) 2715static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2697{ 2716{
@@ -2700,7 +2719,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2700 2719
2701 ret = nand_block_isbad(mtd, ofs); 2720 ret = nand_block_isbad(mtd, ofs);
2702 if (ret) { 2721 if (ret) {
2703 /* If it was bad already, return success and do nothing */ 2722 /* If it was bad already, return success and do nothing. */
2704 if (ret > 0) 2723 if (ret > 0)
2705 return 0; 2724 return 0;
2706 return ret; 2725 return ret;
@@ -2710,61 +2729,19 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2710} 2729}
2711 2730
2712/** 2731/**
2713 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
2714 * @mtd: MTD device structure
2715 * @chip: nand chip info structure
2716 * @addr: feature address.
2717 * @subfeature_param: the subfeature parameters, a four bytes array.
2718 */
2719static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
2720 int addr, uint8_t *subfeature_param)
2721{
2722 int status;
2723
2724 if (!chip->onfi_version)
2725 return -EINVAL;
2726
2727 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
2728 chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
2729 status = chip->waitfunc(mtd, chip);
2730 if (status & NAND_STATUS_FAIL)
2731 return -EIO;
2732 return 0;
2733}
2734
2735/**
2736 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
2737 * @mtd: MTD device structure
2738 * @chip: nand chip info structure
2739 * @addr: feature address.
2740 * @subfeature_param: the subfeature parameters, a four bytes array.
2741 */
2742static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
2743 int addr, uint8_t *subfeature_param)
2744{
2745 if (!chip->onfi_version)
2746 return -EINVAL;
2747
2748 /* clear the sub feature parameters */
2749 memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
2750
2751 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
2752 chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
2753 return 0;
2754}
2755
2756/**
2757 * nand_suspend - [MTD Interface] Suspend the NAND flash 2732 * nand_suspend - [MTD Interface] Suspend the NAND flash
2758 * @mtd: MTD device structure 2733 * @mtd: MTD device structure
2759 */ 2734 */
2760static int nand_suspend(struct mtd_info *mtd) 2735static int nand_suspend(struct mtd_info *mtd)
2761{ 2736{
2762 return nand_get_device(mtd, FL_PM_SUSPENDED); 2737 struct nand_chip *chip = mtd->priv;
2738
2739 return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
2763} 2740}
2764 2741
2765/** 2742/**
2766 * nand_resume - [MTD Interface] Resume the NAND flash 2743 * nand_resume - [MTD Interface] Resume the NAND flash
2767 * @mtd: MTD device structure 2744 * @mtd: MTD device structure
2768 */ 2745 */
2769static void nand_resume(struct mtd_info *mtd) 2746static void nand_resume(struct mtd_info *mtd)
2770{ 2747{
@@ -2773,11 +2750,13 @@ static void nand_resume(struct mtd_info *mtd)
2773 if (chip->state == FL_PM_SUSPENDED) 2750 if (chip->state == FL_PM_SUSPENDED)
2774 nand_release_device(mtd); 2751 nand_release_device(mtd);
2775 else 2752 else
2776 pr_err("%s called for a chip which is not in suspended state\n", 2753 printk(KERN_ERR "%s called for a chip which is not "
2777 __func__); 2754 "in suspended state\n", __func__);
2778} 2755}
2779 2756
2780/* Set default functions */ 2757/*
2758 * Set default functions
2759 */
2781static void nand_set_defaults(struct nand_chip *chip, int busw) 2760static void nand_set_defaults(struct nand_chip *chip, int busw)
2782{ 2761{
2783 /* check for proper chip_delay setup, set 20us if not */ 2762 /* check for proper chip_delay setup, set 20us if not */
@@ -2806,6 +2785,8 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2806 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; 2785 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2807 if (!chip->read_buf) 2786 if (!chip->read_buf)
2808 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf; 2787 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2788 if (!chip->verify_buf)
2789 chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
2809 if (!chip->scan_bbt) 2790 if (!chip->scan_bbt)
2810 chip->scan_bbt = nand_default_bbt; 2791 chip->scan_bbt = nand_default_bbt;
2811 2792
@@ -2817,21 +2798,23 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2817 2798
2818} 2799}
2819 2800
2820/* Sanitize ONFI strings so we can safely print them */ 2801/*
2802 * sanitize ONFI strings so we can safely print them
2803 */
2821static void sanitize_string(uint8_t *s, size_t len) 2804static void sanitize_string(uint8_t *s, size_t len)
2822{ 2805{
2823 ssize_t i; 2806 ssize_t i;
2824 2807
2825 /* Null terminate */ 2808 /* null terminate */
2826 s[len - 1] = 0; 2809 s[len - 1] = 0;
2827 2810
2828 /* Remove non printable chars */ 2811 /* remove non printable chars */
2829 for (i = 0; i < len - 1; i++) { 2812 for (i = 0; i < len - 1; i++) {
2830 if (s[i] < ' ' || s[i] > 127) 2813 if (s[i] < ' ' || s[i] > 127)
2831 s[i] = '?'; 2814 s[i] = '?';
2832 } 2815 }
2833 2816
2834 /* Remove trailing spaces */ 2817 /* remove trailing spaces */
2835 strim(s); 2818 strim(s);
2836} 2819}
2837 2820
@@ -2848,29 +2831,28 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
2848} 2831}
2849 2832
2850/* 2833/*
2851 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. 2834 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
2852 */ 2835 */
2853static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, 2836static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2854 int *busw) 2837 int busw)
2855{ 2838{
2856 struct nand_onfi_params *p = &chip->onfi_params; 2839 struct nand_onfi_params *p = &chip->onfi_params;
2857 int i; 2840 int i;
2858 int val; 2841 int val;
2859 2842
2860 /* ONFI need to be probed in 8 bits mode */ 2843 /* try ONFI for unknow chip or LP */
2861 WARN_ON(chip->options & NAND_BUSWIDTH_16);
2862 /* Try ONFI for unknown chip or LP */
2863 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2844 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
2864 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || 2845 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
2865 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') 2846 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
2866 return 0; 2847 return 0;
2867 2848
2849 printk(KERN_INFO "ONFI flash detected\n");
2868 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2850 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
2869 for (i = 0; i < 3; i++) { 2851 for (i = 0; i < 3; i++) {
2870 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); 2852 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
2871 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == 2853 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
2872 le16_to_cpu(p->crc)) { 2854 le16_to_cpu(p->crc)) {
2873 pr_info("ONFI param page %d valid\n", i); 2855 printk(KERN_INFO "ONFI param page %d valid\n", i);
2874 break; 2856 break;
2875 } 2857 }
2876 } 2858 }
@@ -2878,7 +2860,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2878 if (i == 3) 2860 if (i == 3)
2879 return 0; 2861 return 0;
2880 2862
2881 /* Check version */ 2863 /* check version */
2882 val = le16_to_cpu(p->revision); 2864 val = le16_to_cpu(p->revision);
2883 if (val & (1 << 5)) 2865 if (val & (1 << 5))
2884 chip->onfi_version = 23; 2866 chip->onfi_version = 23;
@@ -2894,7 +2876,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2894 chip->onfi_version = 0; 2876 chip->onfi_version = 0;
2895 2877
2896 if (!chip->onfi_version) { 2878 if (!chip->onfi_version) {
2897 pr_info("%s: unsupported ONFI version: %d\n", __func__, val); 2879 printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
2880 __func__, val);
2898 return 0; 2881 return 0;
2899 } 2882 }
2900 2883
@@ -2905,259 +2888,20 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2905 mtd->writesize = le32_to_cpu(p->byte_per_page); 2888 mtd->writesize = le32_to_cpu(p->byte_per_page);
2906 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2889 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2907 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2890 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2908 chip->chipsize = le32_to_cpu(p->blocks_per_lun); 2891 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
2909 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count; 2892 busw = 0;
2910 *busw = 0;
2911 if (le16_to_cpu(p->features) & 1) 2893 if (le16_to_cpu(p->features) & 1)
2912 *busw = NAND_BUSWIDTH_16; 2894 busw = NAND_BUSWIDTH_16;
2913 2895
2914 pr_info("ONFI flash detected\n"); 2896 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2915 return 1; 2897 chip->options |= (NAND_NO_READRDY |
2916} 2898 NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
2917 2899
2918/*
2919 * nand_id_has_period - Check if an ID string has a given wraparound period
2920 * @id_data: the ID string
2921 * @arrlen: the length of the @id_data array
2922 * @period: the period of repitition
2923 *
2924 * Check if an ID string is repeated within a given sequence of bytes at
2925 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
2926 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
2927 * if the repetition has a period of @period; otherwise, returns zero.
2928 */
2929static int nand_id_has_period(u8 *id_data, int arrlen, int period)
2930{
2931 int i, j;
2932 for (i = 0; i < period; i++)
2933 for (j = i + period; j < arrlen; j += period)
2934 if (id_data[i] != id_data[j])
2935 return 0;
2936 return 1; 2900 return 1;
2937} 2901}
2938 2902
2939/* 2903/*
2940 * nand_id_len - Get the length of an ID string returned by CMD_READID 2904 * Get the flash and manufacturer id and lookup if the type is supported
2941 * @id_data: the ID string
2942 * @arrlen: the length of the @id_data array
2943
2944 * Returns the length of the ID string, according to known wraparound/trailing
2945 * zero patterns. If no pattern exists, returns the length of the array.
2946 */
2947static int nand_id_len(u8 *id_data, int arrlen)
2948{
2949 int last_nonzero, period;
2950
2951 /* Find last non-zero byte */
2952 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
2953 if (id_data[last_nonzero])
2954 break;
2955
2956 /* All zeros */
2957 if (last_nonzero < 0)
2958 return 0;
2959
2960 /* Calculate wraparound period */
2961 for (period = 1; period < arrlen; period++)
2962 if (nand_id_has_period(id_data, arrlen, period))
2963 break;
2964
2965 /* There's a repeated pattern */
2966 if (period < arrlen)
2967 return period;
2968
2969 /* There are trailing zeros */
2970 if (last_nonzero < arrlen - 1)
2971 return last_nonzero + 1;
2972
2973 /* No pattern detected */
2974 return arrlen;
2975}
2976
2977/*
2978 * Many new NAND share similar device ID codes, which represent the size of the
2979 * chip. The rest of the parameters must be decoded according to generic or
2980 * manufacturer-specific "extended ID" decoding patterns.
2981 */
2982static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
2983 u8 id_data[8], int *busw)
2984{
2985 int extid, id_len;
2986 /* The 3rd id byte holds MLC / multichip data */
2987 chip->cellinfo = id_data[2];
2988 /* The 4th id byte is the important one */
2989 extid = id_data[3];
2990
2991 id_len = nand_id_len(id_data, 8);
2992
2993 /*
2994 * Field definitions are in the following datasheets:
2995 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
2996 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
2997 * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
2998 *
2999 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
3000 * ID to decide what to do.
3001 */
3002 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
3003 (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3004 id_data[5] != 0x00) {
3005 /* Calc pagesize */
3006 mtd->writesize = 2048 << (extid & 0x03);
3007 extid >>= 2;
3008 /* Calc oobsize */
3009 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3010 case 1:
3011 mtd->oobsize = 128;
3012 break;
3013 case 2:
3014 mtd->oobsize = 218;
3015 break;
3016 case 3:
3017 mtd->oobsize = 400;
3018 break;
3019 case 4:
3020 mtd->oobsize = 436;
3021 break;
3022 case 5:
3023 mtd->oobsize = 512;
3024 break;
3025 case 6:
3026 default: /* Other cases are "reserved" (unknown) */
3027 mtd->oobsize = 640;
3028 break;
3029 }
3030 extid >>= 2;
3031 /* Calc blocksize */
3032 mtd->erasesize = (128 * 1024) <<
3033 (((extid >> 1) & 0x04) | (extid & 0x03));
3034 *busw = 0;
3035 } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
3036 (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
3037 unsigned int tmp;
3038
3039 /* Calc pagesize */
3040 mtd->writesize = 2048 << (extid & 0x03);
3041 extid >>= 2;
3042 /* Calc oobsize */
3043 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3044 case 0:
3045 mtd->oobsize = 128;
3046 break;
3047 case 1:
3048 mtd->oobsize = 224;
3049 break;
3050 case 2:
3051 mtd->oobsize = 448;
3052 break;
3053 case 3:
3054 mtd->oobsize = 64;
3055 break;
3056 case 4:
3057 mtd->oobsize = 32;
3058 break;
3059 case 5:
3060 mtd->oobsize = 16;
3061 break;
3062 default:
3063 mtd->oobsize = 640;
3064 break;
3065 }
3066 extid >>= 2;
3067 /* Calc blocksize */
3068 tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
3069 if (tmp < 0x03)
3070 mtd->erasesize = (128 * 1024) << tmp;
3071 else if (tmp == 0x03)
3072 mtd->erasesize = 768 * 1024;
3073 else
3074 mtd->erasesize = (64 * 1024) << tmp;
3075 *busw = 0;
3076 } else {
3077 /* Calc pagesize */
3078 mtd->writesize = 1024 << (extid & 0x03);
3079 extid >>= 2;
3080 /* Calc oobsize */
3081 mtd->oobsize = (8 << (extid & 0x01)) *
3082 (mtd->writesize >> 9);
3083 extid >>= 2;
3084 /* Calc blocksize. Blocksize is multiples of 64KiB */
3085 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3086 extid >>= 2;
3087 /* Get buswidth information */
3088 *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3089 }
3090}
3091
3092/*
3093 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3094 * decodes a matching ID table entry and assigns the MTD size parameters for
3095 * the chip.
3096 */
3097static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
3098 struct nand_flash_dev *type, u8 id_data[8],
3099 int *busw)
3100{
3101 int maf_id = id_data[0];
3102
3103 mtd->erasesize = type->erasesize;
3104 mtd->writesize = type->pagesize;
3105 mtd->oobsize = mtd->writesize / 32;
3106 *busw = type->options & NAND_BUSWIDTH_16;
3107
3108 /*
3109 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3110 * some Spansion chips have erasesize that conflicts with size
3111 * listed in nand_ids table.
3112 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3113 */
3114 if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
3115 && id_data[6] == 0x00 && id_data[7] == 0x00
3116 && mtd->writesize == 512) {
3117 mtd->erasesize = 128 * 1024;
3118 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3119 }
3120}
3121
3122/*
3123 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3124 * heuristic patterns using various detected parameters (e.g., manufacturer,
3125 * page size, cell-type information).
3126 */
3127static void nand_decode_bbm_options(struct mtd_info *mtd,
3128 struct nand_chip *chip, u8 id_data[8])
3129{
3130 int maf_id = id_data[0];
3131
3132 /* Set the bad block position */
3133 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3134 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3135 else
3136 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3137
3138 /*
3139 * Bad block marker is stored in the last page of each block on Samsung
3140 * and Hynix MLC devices; stored in first two pages of each block on
3141 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
3142 * AMD/Spansion, and Macronix. All others scan only the first page.
3143 */
3144 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3145 (maf_id == NAND_MFR_SAMSUNG ||
3146 maf_id == NAND_MFR_HYNIX))
3147 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
3148 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3149 (maf_id == NAND_MFR_SAMSUNG ||
3150 maf_id == NAND_MFR_HYNIX ||
3151 maf_id == NAND_MFR_TOSHIBA ||
3152 maf_id == NAND_MFR_AMD ||
3153 maf_id == NAND_MFR_MACRONIX)) ||
3154 (mtd->writesize == 2048 &&
3155 maf_id == NAND_MFR_MICRON))
3156 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
3157}
3158
3159/*
3160 * Get the flash and manufacturer id and lookup if the type is supported.
3161 */ 2905 */
3162static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2906static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3163 struct nand_chip *chip, 2907 struct nand_chip *chip,
@@ -3167,13 +2911,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3167{ 2911{
3168 int i, maf_idx; 2912 int i, maf_idx;
3169 u8 id_data[8]; 2913 u8 id_data[8];
2914 int ret;
3170 2915
3171 /* Select the device */ 2916 /* Select the device */
3172 chip->select_chip(mtd, 0); 2917 chip->select_chip(mtd, 0);
3173 2918
3174 /* 2919 /*
3175 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 2920 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3176 * after power-up. 2921 * after power-up
3177 */ 2922 */
3178 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 2923 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
3179 2924
@@ -3184,8 +2929,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3184 *maf_id = chip->read_byte(mtd); 2929 *maf_id = chip->read_byte(mtd);
3185 *dev_id = chip->read_byte(mtd); 2930 *dev_id = chip->read_byte(mtd);
3186 2931
3187 /* 2932 /* Try again to make sure, as some systems the bus-hold or other
3188 * Try again to make sure, as some systems the bus-hold or other
3189 * interface concerns can cause random data which looks like a 2933 * interface concerns can cause random data which looks like a
3190 * possibly credible NAND flash to appear. If the two results do 2934 * possibly credible NAND flash to appear. If the two results do
3191 * not match, ignore the device completely. 2935 * not match, ignore the device completely.
@@ -3193,14 +2937,13 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3193 2937
3194 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 2938 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3195 2939
3196 /* Read entire ID string */ 2940 for (i = 0; i < 2; i++)
3197 for (i = 0; i < 8; i++)
3198 id_data[i] = chip->read_byte(mtd); 2941 id_data[i] = chip->read_byte(mtd);
3199 2942
3200 if (id_data[0] != *maf_id || id_data[1] != *dev_id) { 2943 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
3201 pr_info("%s: second ID read did not match " 2944 printk(KERN_INFO "%s: second ID read did not match "
3202 "%02x,%02x against %02x,%02x\n", __func__, 2945 "%02x,%02x against %02x,%02x\n", __func__,
3203 *maf_id, *dev_id, id_data[0], id_data[1]); 2946 *maf_id, *dev_id, id_data[0], id_data[1]);
3204 return ERR_PTR(-ENODEV); 2947 return ERR_PTR(-ENODEV);
3205 } 2948 }
3206 2949
@@ -3214,10 +2957,18 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3214 chip->onfi_version = 0; 2957 chip->onfi_version = 0;
3215 if (!type->name || !type->pagesize) { 2958 if (!type->name || !type->pagesize) {
3216 /* Check is chip is ONFI compliant */ 2959 /* Check is chip is ONFI compliant */
3217 if (nand_flash_detect_onfi(mtd, chip, &busw)) 2960 ret = nand_flash_detect_onfi(mtd, chip, busw);
2961 if (ret)
3218 goto ident_done; 2962 goto ident_done;
3219 } 2963 }
3220 2964
2965 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2966
2967 /* Read entire ID string */
2968
2969 for (i = 0; i < 8; i++)
2970 id_data[i] = chip->read_byte(mtd);
2971
3221 if (!type->name) 2972 if (!type->name)
3222 return ERR_PTR(-ENODEV); 2973 return ERR_PTR(-ENODEV);
3223 2974
@@ -3227,54 +2978,125 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3227 chip->chipsize = (uint64_t)type->chipsize << 20; 2978 chip->chipsize = (uint64_t)type->chipsize << 20;
3228 2979
3229 if (!type->pagesize && chip->init_size) { 2980 if (!type->pagesize && chip->init_size) {
3230 /* Set the pagesize, oobsize, erasesize by the driver */ 2981 /* set the pagesize, oobsize, erasesize by the driver*/
3231 busw = chip->init_size(mtd, chip, id_data); 2982 busw = chip->init_size(mtd, chip, id_data);
3232 } else if (!type->pagesize) { 2983 } else if (!type->pagesize) {
3233 /* Decode parameters from extended ID */ 2984 int extid;
3234 nand_decode_ext_id(mtd, chip, id_data, &busw); 2985 /* The 3rd id byte holds MLC / multichip data */
2986 chip->cellinfo = id_data[2];
2987 /* The 4th id byte is the important one */
2988 extid = id_data[3];
2989
2990 /*
2991 * Field definitions are in the following datasheets:
2992 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
2993 * New style (6 byte ID): Samsung K9GBG08U0M (p.40)
2994 *
2995 * Check for wraparound + Samsung ID + nonzero 6th byte
2996 * to decide what to do.
2997 */
2998 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
2999 id_data[0] == NAND_MFR_SAMSUNG &&
3000 (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3001 id_data[5] != 0x00) {
3002 /* Calc pagesize */
3003 mtd->writesize = 2048 << (extid & 0x03);
3004 extid >>= 2;
3005 /* Calc oobsize */
3006 switch (extid & 0x03) {
3007 case 1:
3008 mtd->oobsize = 128;
3009 break;
3010 case 2:
3011 mtd->oobsize = 218;
3012 break;
3013 case 3:
3014 mtd->oobsize = 400;
3015 break;
3016 default:
3017 mtd->oobsize = 436;
3018 break;
3019 }
3020 extid >>= 2;
3021 /* Calc blocksize */
3022 mtd->erasesize = (128 * 1024) <<
3023 (((extid >> 1) & 0x04) | (extid & 0x03));
3024 busw = 0;
3025 } else {
3026 /* Calc pagesize */
3027 mtd->writesize = 1024 << (extid & 0x03);
3028 extid >>= 2;
3029 /* Calc oobsize */
3030 mtd->oobsize = (8 << (extid & 0x01)) *
3031 (mtd->writesize >> 9);
3032 extid >>= 2;
3033 /* Calc blocksize. Blocksize is multiples of 64KiB */
3034 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3035 extid >>= 2;
3036 /* Get buswidth information */
3037 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3038 }
3235 } else { 3039 } else {
3236 nand_decode_id(mtd, chip, type, id_data, &busw); 3040 /*
3041 * Old devices have chip data hardcoded in the device id table
3042 */
3043 mtd->erasesize = type->erasesize;
3044 mtd->writesize = type->pagesize;
3045 mtd->oobsize = mtd->writesize / 32;
3046 busw = type->options & NAND_BUSWIDTH_16;
3047
3048 /*
3049 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3050 * some Spansion chips have erasesize that conflicts with size
3051 * listed in nand_ids table
3052 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3053 */
3054 if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
3055 id_data[5] == 0x00 && id_data[6] == 0x00 &&
3056 id_data[7] == 0x00 && mtd->writesize == 512) {
3057 mtd->erasesize = 128 * 1024;
3058 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3059 }
3237 } 3060 }
3238 /* Get chip options */ 3061 /* Get chip options, preserve non chip based options */
3239 chip->options |= type->options; 3062 chip->options &= ~NAND_CHIPOPTIONS_MSK;
3063 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
3240 3064
3241 /* 3065 /* Check if chip is a not a samsung device. Do not clear the
3242 * Check if chip is not a Samsung device. Do not clear the 3066 * options for chips which are not having an extended id.
3243 * options for chips which do not have an extended id.
3244 */ 3067 */
3245 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) 3068 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
3246 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 3069 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
3247ident_done: 3070ident_done:
3248 3071
3072 /*
3073 * Set chip as a default. Board drivers can override it, if necessary
3074 */
3075 chip->options |= NAND_NO_AUTOINCR;
3076
3249 /* Try to identify manufacturer */ 3077 /* Try to identify manufacturer */
3250 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { 3078 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
3251 if (nand_manuf_ids[maf_idx].id == *maf_id) 3079 if (nand_manuf_ids[maf_idx].id == *maf_id)
3252 break; 3080 break;
3253 } 3081 }
3254 3082
3255 if (chip->options & NAND_BUSWIDTH_AUTO) { 3083 /*
3256 WARN_ON(chip->options & NAND_BUSWIDTH_16); 3084 * Check, if buswidth is correct. Hardware drivers should set
3257 chip->options |= busw; 3085 * chip correct !
3258 nand_set_defaults(chip, busw); 3086 */
3259 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { 3087 if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3260 /* 3088 printk(KERN_INFO "NAND device: Manufacturer ID:"
3261 * Check, if buswidth is correct. Hardware drivers should set 3089 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
3262 * chip correct! 3090 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
3263 */ 3091 printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
3264 pr_info("NAND device: Manufacturer ID:" 3092 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
3265 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, 3093 busw ? 16 : 8);
3266 *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
3267 pr_warn("NAND bus width %d instead %d bit\n",
3268 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
3269 busw ? 16 : 8);
3270 return ERR_PTR(-EINVAL); 3094 return ERR_PTR(-EINVAL);
3271 } 3095 }
3272 3096
3273 nand_decode_bbm_options(mtd, chip, id_data);
3274
3275 /* Calculate the address shift from the page size */ 3097 /* Calculate the address shift from the page size */
3276 chip->page_shift = ffs(mtd->writesize) - 1; 3098 chip->page_shift = ffs(mtd->writesize) - 1;
3277 /* Convert chipsize to number of pages per chip -1 */ 3099 /* Convert chipsize to number of pages per chip -1. */
3278 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; 3100 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
3279 3101
3280 chip->bbt_erase_shift = chip->phys_erase_shift = 3102 chip->bbt_erase_shift = chip->phys_erase_shift =
@@ -3288,33 +3110,69 @@ ident_done:
3288 3110
3289 chip->badblockbits = 8; 3111 chip->badblockbits = 8;
3290 3112
3113 /* Set the bad block position */
3114 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
3115 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3116 else
3117 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3118
3119 /*
3120 * Bad block marker is stored in the last page of each block
3121 * on Samsung and Hynix MLC devices; stored in first two pages
3122 * of each block on Micron devices with 2KiB pages and on
3123 * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
3124 * only the first page.
3125 */
3126 if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3127 (*maf_id == NAND_MFR_SAMSUNG ||
3128 *maf_id == NAND_MFR_HYNIX))
3129 chip->options |= NAND_BBT_SCANLASTPAGE;
3130 else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
3131 (*maf_id == NAND_MFR_SAMSUNG ||
3132 *maf_id == NAND_MFR_HYNIX ||
3133 *maf_id == NAND_MFR_TOSHIBA ||
3134 *maf_id == NAND_MFR_AMD)) ||
3135 (mtd->writesize == 2048 &&
3136 *maf_id == NAND_MFR_MICRON))
3137 chip->options |= NAND_BBT_SCAN2NDPAGE;
3138
3139 /*
3140 * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6
3141 */
3142 if (!(busw & NAND_BUSWIDTH_16) &&
3143 *maf_id == NAND_MFR_STMICRO &&
3144 mtd->writesize == 2048) {
3145 chip->options |= NAND_BBT_SCANBYTE1AND6;
3146 chip->badblockpos = 0;
3147 }
3148
3291 /* Check for AND chips with 4 page planes */ 3149 /* Check for AND chips with 4 page planes */
3292 if (chip->options & NAND_4PAGE_ARRAY) 3150 if (chip->options & NAND_4PAGE_ARRAY)
3293 chip->erase_cmd = multi_erase_cmd; 3151 chip->erase_cmd = multi_erase_cmd;
3294 else 3152 else
3295 chip->erase_cmd = single_erase_cmd; 3153 chip->erase_cmd = single_erase_cmd;
3296 3154
3297 /* Do not replace user supplied command function! */ 3155 /* Do not replace user supplied command function ! */
3298 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3156 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3299 chip->cmdfunc = nand_command_lp; 3157 chip->cmdfunc = nand_command_lp;
3300 3158
3301 pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)," 3159 /* TODO onfi flash name */
3302 " %dMiB, page size: %d, OOB size: %d\n", 3160 printk(KERN_INFO "NAND device: Manufacturer ID:"
3303 *maf_id, *dev_id, nand_manuf_ids[maf_idx].name, 3161 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
3304 chip->onfi_version ? chip->onfi_params.model : type->name, 3162 nand_manuf_ids[maf_idx].name,
3305 (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize); 3163 chip->onfi_version ? chip->onfi_params.model : type->name);
3306 3164
3307 return type; 3165 return type;
3308} 3166}
3309 3167
3310/** 3168/**
3311 * nand_scan_ident - [NAND Interface] Scan for the NAND device 3169 * nand_scan_ident - [NAND Interface] Scan for the NAND device
3312 * @mtd: MTD device structure 3170 * @mtd: MTD device structure
3313 * @maxchips: number of chips to scan for 3171 * @maxchips: Number of chips to scan for
3314 * @table: alternative NAND ID table 3172 * @table: Alternative NAND ID table
3315 * 3173 *
3316 * This is the first phase of the normal nand_scan() function. It reads the 3174 * This is the first phase of the normal nand_scan() function. It
3317 * flash ID and sets up MTD fields accordingly. 3175 * reads the flash ID and sets up MTD fields accordingly.
3318 * 3176 *
3319 * The mtd->owner field must be set to the module of the caller. 3177 * The mtd->owner field must be set to the module of the caller.
3320 */ 3178 */
@@ -3336,13 +3194,11 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3336 3194
3337 if (IS_ERR(type)) { 3195 if (IS_ERR(type)) {
3338 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) 3196 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
3339 pr_warn("No NAND device found\n"); 3197 printk(KERN_WARNING "No NAND device found.\n");
3340 chip->select_chip(mtd, -1); 3198 chip->select_chip(mtd, -1);
3341 return PTR_ERR(type); 3199 return PTR_ERR(type);
3342 } 3200 }
3343 3201
3344 chip->select_chip(mtd, -1);
3345
3346 /* Check for a chip array */ 3202 /* Check for a chip array */
3347 for (i = 1; i < maxchips; i++) { 3203 for (i = 1; i < maxchips; i++) {
3348 chip->select_chip(mtd, i); 3204 chip->select_chip(mtd, i);
@@ -3352,14 +3208,11 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3352 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 3208 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3353 /* Read manufacturer and device IDs */ 3209 /* Read manufacturer and device IDs */
3354 if (nand_maf_id != chip->read_byte(mtd) || 3210 if (nand_maf_id != chip->read_byte(mtd) ||
3355 nand_dev_id != chip->read_byte(mtd)) { 3211 nand_dev_id != chip->read_byte(mtd))
3356 chip->select_chip(mtd, -1);
3357 break; 3212 break;
3358 }
3359 chip->select_chip(mtd, -1);
3360 } 3213 }
3361 if (i > 1) 3214 if (i > 1)
3362 pr_info("%d NAND chips detected\n", i); 3215 printk(KERN_INFO "%d NAND chips detected\n", i);
3363 3216
3364 /* Store the number of chips and calc total size for mtd */ 3217 /* Store the number of chips and calc total size for mtd */
3365 chip->numchips = i; 3218 chip->numchips = i;
@@ -3369,24 +3222,58 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3369} 3222}
3370EXPORT_SYMBOL(nand_scan_ident); 3223EXPORT_SYMBOL(nand_scan_ident);
3371 3224
3225static void nand_panic_wait(struct mtd_info *mtd)
3226{
3227 struct nand_chip *chip = mtd->priv;
3228 int i;
3229
3230 if (chip->state != FL_READY)
3231 for (i = 0; i < 40; i++) {
3232 if (chip->dev_ready(mtd))
3233 break;
3234 mdelay(10);
3235 }
3236 chip->state = FL_READY;
3237}
3238
3239static int nand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
3240 size_t *retlen, const u_char *buf)
3241{
3242 struct nand_chip *chip = mtd->priv;
3243 int ret;
3244
3245 /* Do not allow reads past end of device */
3246 if ((to + len) > mtd->size)
3247 return -EINVAL;
3248 if (!len)
3249 return 0;
3250
3251 nand_panic_wait(mtd);
3252
3253 chip->ops.len = len;
3254 chip->ops.datbuf = (uint8_t *)buf;
3255 chip->ops.oobbuf = NULL;
3256
3257 ret = nand_do_write_ops(mtd, to, &chip->ops);
3258
3259 *retlen = chip->ops.retlen;
3260 return ret;
3261}
3262
3372 3263
3373/** 3264/**
3374 * nand_scan_tail - [NAND Interface] Scan for the NAND device 3265 * nand_scan_tail - [NAND Interface] Scan for the NAND device
3375 * @mtd: MTD device structure 3266 * @mtd: MTD device structure
3376 * 3267 *
3377 * This is the second phase of the normal nand_scan() function. It fills out 3268 * This is the second phase of the normal nand_scan() function. It
3378 * all the uninitialized function pointers with the defaults and scans for a 3269 * fills out all the uninitialized function pointers with the defaults
3379 * bad block table if appropriate. 3270 * and scans for a bad block table if appropriate.
3380 */ 3271 */
3381int nand_scan_tail(struct mtd_info *mtd) 3272int nand_scan_tail(struct mtd_info *mtd)
3382{ 3273{
3383 int i; 3274 int i;
3384 struct nand_chip *chip = mtd->priv; 3275 struct nand_chip *chip = mtd->priv;
3385 3276
3386 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
3387 BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
3388 !(chip->bbt_options & NAND_BBT_USE_FLASH));
3389
3390 if (!(chip->options & NAND_OWN_BUFFERS)) 3277 if (!(chip->options & NAND_OWN_BUFFERS))
3391 chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL); 3278 chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
3392 if (!chip->buffers) 3279 if (!chip->buffers)
@@ -3396,7 +3283,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3396 chip->oob_poi = chip->buffers->databuf + mtd->writesize; 3283 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
3397 3284
3398 /* 3285 /*
3399 * If no default placement scheme is given, select an appropriate one. 3286 * If no default placement scheme is given, select an appropriate one
3400 */ 3287 */
3401 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { 3288 if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
3402 switch (mtd->oobsize) { 3289 switch (mtd->oobsize) {
@@ -3413,8 +3300,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3413 chip->ecc.layout = &nand_oob_128; 3300 chip->ecc.layout = &nand_oob_128;
3414 break; 3301 break;
3415 default: 3302 default:
3416 pr_warn("No oob scheme defined for oobsize %d\n", 3303 printk(KERN_WARNING "No oob scheme defined for "
3417 mtd->oobsize); 3304 "oobsize %d\n", mtd->oobsize);
3418 BUG(); 3305 BUG();
3419 } 3306 }
3420 } 3307 }
@@ -3422,14 +3309,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3422 if (!chip->write_page) 3309 if (!chip->write_page)
3423 chip->write_page = nand_write_page; 3310 chip->write_page = nand_write_page;
3424 3311
3425 /* set for ONFI nand */
3426 if (!chip->onfi_set_features)
3427 chip->onfi_set_features = nand_onfi_set_features;
3428 if (!chip->onfi_get_features)
3429 chip->onfi_get_features = nand_onfi_get_features;
3430
3431 /* 3312 /*
3432 * Check ECC mode, default to software if 3byte/512byte hardware ECC is 3313 * check ECC mode, default to software if 3byte/512byte hardware ECC is
3433 * selected and we have 256 byte pagesize fallback to software ECC 3314 * selected and we have 256 byte pagesize fallback to software ECC
3434 */ 3315 */
3435 3316
@@ -3438,15 +3319,15 @@ int nand_scan_tail(struct mtd_info *mtd)
3438 /* Similar to NAND_ECC_HW, but a separate read_page handle */ 3319 /* Similar to NAND_ECC_HW, but a separate read_page handle */
3439 if (!chip->ecc.calculate || !chip->ecc.correct || 3320 if (!chip->ecc.calculate || !chip->ecc.correct ||
3440 !chip->ecc.hwctl) { 3321 !chip->ecc.hwctl) {
3441 pr_warn("No ECC functions supplied; " 3322 printk(KERN_WARNING "No ECC functions supplied; "
3442 "hardware ECC not possible\n"); 3323 "Hardware ECC not possible\n");
3443 BUG(); 3324 BUG();
3444 } 3325 }
3445 if (!chip->ecc.read_page) 3326 if (!chip->ecc.read_page)
3446 chip->ecc.read_page = nand_read_page_hwecc_oob_first; 3327 chip->ecc.read_page = nand_read_page_hwecc_oob_first;
3447 3328
3448 case NAND_ECC_HW: 3329 case NAND_ECC_HW:
3449 /* Use standard hwecc read page function? */ 3330 /* Use standard hwecc read page function ? */
3450 if (!chip->ecc.read_page) 3331 if (!chip->ecc.read_page)
3451 chip->ecc.read_page = nand_read_page_hwecc; 3332 chip->ecc.read_page = nand_read_page_hwecc;
3452 if (!chip->ecc.write_page) 3333 if (!chip->ecc.write_page)
@@ -3467,11 +3348,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3467 chip->ecc.read_page == nand_read_page_hwecc || 3348 chip->ecc.read_page == nand_read_page_hwecc ||
3468 !chip->ecc.write_page || 3349 !chip->ecc.write_page ||
3469 chip->ecc.write_page == nand_write_page_hwecc)) { 3350 chip->ecc.write_page == nand_write_page_hwecc)) {
3470 pr_warn("No ECC functions supplied; " 3351 printk(KERN_WARNING "No ECC functions supplied; "
3471 "hardware ECC not possible\n"); 3352 "Hardware ECC not possible\n");
3472 BUG(); 3353 BUG();
3473 } 3354 }
3474 /* Use standard syndrome read/write page function? */ 3355 /* Use standard syndrome read/write page function ? */
3475 if (!chip->ecc.read_page) 3356 if (!chip->ecc.read_page)
3476 chip->ecc.read_page = nand_read_page_syndrome; 3357 chip->ecc.read_page = nand_read_page_syndrome;
3477 if (!chip->ecc.write_page) 3358 if (!chip->ecc.write_page)
@@ -3485,16 +3366,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3485 if (!chip->ecc.write_oob) 3366 if (!chip->ecc.write_oob)
3486 chip->ecc.write_oob = nand_write_oob_syndrome; 3367 chip->ecc.write_oob = nand_write_oob_syndrome;
3487 3368
3488 if (mtd->writesize >= chip->ecc.size) { 3369 if (mtd->writesize >= chip->ecc.size)
3489 if (!chip->ecc.strength) {
3490 pr_warn("Driver must set ecc.strength when using hardware ECC\n");
3491 BUG();
3492 }
3493 break; 3370 break;
3494 } 3371 printk(KERN_WARNING "%d byte HW ECC not possible on "
3495 pr_warn("%d byte HW ECC not possible on " 3372 "%d byte page size, fallback to SW ECC\n",
3496 "%d byte page size, fallback to SW ECC\n", 3373 chip->ecc.size, mtd->writesize);
3497 chip->ecc.size, mtd->writesize);
3498 chip->ecc.mode = NAND_ECC_SOFT; 3374 chip->ecc.mode = NAND_ECC_SOFT;
3499 3375
3500 case NAND_ECC_SOFT: 3376 case NAND_ECC_SOFT:
@@ -3510,12 +3386,11 @@ int nand_scan_tail(struct mtd_info *mtd)
3510 if (!chip->ecc.size) 3386 if (!chip->ecc.size)
3511 chip->ecc.size = 256; 3387 chip->ecc.size = 256;
3512 chip->ecc.bytes = 3; 3388 chip->ecc.bytes = 3;
3513 chip->ecc.strength = 1;
3514 break; 3389 break;
3515 3390
3516 case NAND_ECC_SOFT_BCH: 3391 case NAND_ECC_SOFT_BCH:
3517 if (!mtd_nand_has_bch()) { 3392 if (!mtd_nand_has_bch()) {
3518 pr_warn("CONFIG_MTD_ECC_BCH not enabled\n"); 3393 printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n");
3519 BUG(); 3394 BUG();
3520 } 3395 }
3521 chip->ecc.calculate = nand_bch_calculate_ecc; 3396 chip->ecc.calculate = nand_bch_calculate_ecc;
@@ -3530,8 +3405,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3530 /* 3405 /*
3531 * Board driver should supply ecc.size and ecc.bytes values to 3406 * Board driver should supply ecc.size and ecc.bytes values to
3532 * select how many bits are correctable; see nand_bch_init() 3407 * select how many bits are correctable; see nand_bch_init()
3533 * for details. Otherwise, default to 4 bits for large page 3408 * for details.
3534 * devices. 3409 * Otherwise, default to 4 bits for large page devices
3535 */ 3410 */
3536 if (!chip->ecc.size && (mtd->oobsize >= 64)) { 3411 if (!chip->ecc.size && (mtd->oobsize >= 64)) {
3537 chip->ecc.size = 512; 3412 chip->ecc.size = 512;
@@ -3542,16 +3417,14 @@ int nand_scan_tail(struct mtd_info *mtd)
3542 chip->ecc.bytes, 3417 chip->ecc.bytes,
3543 &chip->ecc.layout); 3418 &chip->ecc.layout);
3544 if (!chip->ecc.priv) { 3419 if (!chip->ecc.priv) {
3545 pr_warn("BCH ECC initialization failed!\n"); 3420 printk(KERN_WARNING "BCH ECC initialization failed!\n");
3546 BUG(); 3421 BUG();
3547 } 3422 }
3548 chip->ecc.strength =
3549 chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
3550 break; 3423 break;
3551 3424
3552 case NAND_ECC_NONE: 3425 case NAND_ECC_NONE:
3553 pr_warn("NAND_ECC_NONE selected by board driver. " 3426 printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
3554 "This is not recommended!\n"); 3427 "This is not recommended !!\n");
3555 chip->ecc.read_page = nand_read_page_raw; 3428 chip->ecc.read_page = nand_read_page_raw;
3556 chip->ecc.write_page = nand_write_page_raw; 3429 chip->ecc.write_page = nand_write_page_raw;
3557 chip->ecc.read_oob = nand_read_oob_std; 3430 chip->ecc.read_oob = nand_read_oob_std;
@@ -3560,23 +3433,17 @@ int nand_scan_tail(struct mtd_info *mtd)
3560 chip->ecc.write_oob = nand_write_oob_std; 3433 chip->ecc.write_oob = nand_write_oob_std;
3561 chip->ecc.size = mtd->writesize; 3434 chip->ecc.size = mtd->writesize;
3562 chip->ecc.bytes = 0; 3435 chip->ecc.bytes = 0;
3563 chip->ecc.strength = 0;
3564 break; 3436 break;
3565 3437
3566 default: 3438 default:
3567 pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode); 3439 printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
3440 chip->ecc.mode);
3568 BUG(); 3441 BUG();
3569 } 3442 }
3570 3443
3571 /* For many systems, the standard OOB write also works for raw */
3572 if (!chip->ecc.read_oob_raw)
3573 chip->ecc.read_oob_raw = chip->ecc.read_oob;
3574 if (!chip->ecc.write_oob_raw)
3575 chip->ecc.write_oob_raw = chip->ecc.write_oob;
3576
3577 /* 3444 /*
3578 * The number of bytes available for a client to place data into 3445 * The number of bytes available for a client to place data into
3579 * the out of band area. 3446 * the out of band area
3580 */ 3447 */
3581 chip->ecc.layout->oobavail = 0; 3448 chip->ecc.layout->oobavail = 0;
3582 for (i = 0; chip->ecc.layout->oobfree[i].length 3449 for (i = 0; chip->ecc.layout->oobfree[i].length
@@ -3587,16 +3454,19 @@ int nand_scan_tail(struct mtd_info *mtd)
3587 3454
3588 /* 3455 /*
3589 * Set the number of read / write steps for one page depending on ECC 3456 * Set the number of read / write steps for one page depending on ECC
3590 * mode. 3457 * mode
3591 */ 3458 */
3592 chip->ecc.steps = mtd->writesize / chip->ecc.size; 3459 chip->ecc.steps = mtd->writesize / chip->ecc.size;
3593 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { 3460 if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
3594 pr_warn("Invalid ECC parameters\n"); 3461 printk(KERN_WARNING "Invalid ecc parameters\n");
3595 BUG(); 3462 BUG();
3596 } 3463 }
3597 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; 3464 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
3598 3465
3599 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ 3466 /*
3467 * Allow subpage writes up to ecc.steps. Not possible for MLC
3468 * FLASH.
3469 */
3600 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && 3470 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3601 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { 3471 !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
3602 switch (chip->ecc.steps) { 3472 switch (chip->ecc.steps) {
@@ -3615,44 +3485,36 @@ int nand_scan_tail(struct mtd_info *mtd)
3615 /* Initialize state */ 3485 /* Initialize state */
3616 chip->state = FL_READY; 3486 chip->state = FL_READY;
3617 3487
3488 /* De-select the device */
3489 chip->select_chip(mtd, -1);
3490
3618 /* Invalidate the pagebuffer reference */ 3491 /* Invalidate the pagebuffer reference */
3619 chip->pagebuf = -1; 3492 chip->pagebuf = -1;
3620 3493
3621 /* Large page NAND with SOFT_ECC should support subpage reads */
3622 if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
3623 chip->options |= NAND_SUBPAGE_READ;
3624
3625 /* Fill in remaining MTD driver data */ 3494 /* Fill in remaining MTD driver data */
3626 mtd->type = MTD_NANDFLASH; 3495 mtd->type = MTD_NANDFLASH;
3627 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : 3496 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
3628 MTD_CAP_NANDFLASH; 3497 MTD_CAP_NANDFLASH;
3629 mtd->_erase = nand_erase; 3498 mtd->erase = nand_erase;
3630 mtd->_point = NULL; 3499 mtd->point = NULL;
3631 mtd->_unpoint = NULL; 3500 mtd->unpoint = NULL;
3632 mtd->_read = nand_read; 3501 mtd->read = nand_read;
3633 mtd->_write = nand_write; 3502 mtd->write = nand_write;
3634 mtd->_panic_write = panic_nand_write; 3503 mtd->panic_write = panic_nand_write;
3635 mtd->_read_oob = nand_read_oob; 3504 mtd->read_oob = nand_read_oob;
3636 mtd->_write_oob = nand_write_oob; 3505 mtd->write_oob = nand_write_oob;
3637 mtd->_sync = nand_sync; 3506 mtd->panic_write = nand_panic_write;
3638 mtd->_lock = NULL; 3507 mtd->sync = nand_sync;
3639 mtd->_unlock = NULL; 3508 mtd->lock = NULL;
3640 mtd->_suspend = nand_suspend; 3509 mtd->unlock = NULL;
3641 mtd->_resume = nand_resume; 3510 mtd->suspend = nand_suspend;
3642 mtd->_block_isbad = nand_block_isbad; 3511 mtd->resume = nand_resume;
3643 mtd->_block_markbad = nand_block_markbad; 3512 mtd->block_isbad = nand_block_isbad;
3513 mtd->block_markbad = nand_block_markbad;
3644 mtd->writebufsize = mtd->writesize; 3514 mtd->writebufsize = mtd->writesize;
3645 3515
3646 /* propagate ecc info to mtd_info */ 3516 /* propagate ecc.layout to mtd_info */
3647 mtd->ecclayout = chip->ecc.layout; 3517 mtd->ecclayout = chip->ecc.layout;
3648 mtd->ecc_strength = chip->ecc.strength;
3649 /*
3650 * Initialize bitflip_threshold to its default prior scan_bbt() call.
3651 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
3652 * properly set.
3653 */
3654 if (!mtd->bitflip_threshold)
3655 mtd->bitflip_threshold = mtd->ecc_strength;
3656 3518
3657 /* Check, if we should skip the bad block table scan */ 3519 /* Check, if we should skip the bad block table scan */
3658 if (chip->options & NAND_SKIP_BBTSCAN) 3520 if (chip->options & NAND_SKIP_BBTSCAN)
@@ -3663,11 +3525,9 @@ int nand_scan_tail(struct mtd_info *mtd)
3663} 3525}
3664EXPORT_SYMBOL(nand_scan_tail); 3526EXPORT_SYMBOL(nand_scan_tail);
3665 3527
3666/* 3528/* is_module_text_address() isn't exported, and it's mostly a pointless
3667 * is_module_text_address() isn't exported, and it's mostly a pointless
3668 * test if this is a module _anyway_ -- they'd have to try _really_ hard 3529 * test if this is a module _anyway_ -- they'd have to try _really_ hard
3669 * to call us from in-kernel code if the core NAND support is modular. 3530 * to call us from in-kernel code if the core NAND support is modular. */
3670 */
3671#ifdef MODULE 3531#ifdef MODULE
3672#define caller_is_module() (1) 3532#define caller_is_module() (1)
3673#else 3533#else
@@ -3677,13 +3537,15 @@ EXPORT_SYMBOL(nand_scan_tail);
3677 3537
3678/** 3538/**
3679 * nand_scan - [NAND Interface] Scan for the NAND device 3539 * nand_scan - [NAND Interface] Scan for the NAND device
3680 * @mtd: MTD device structure 3540 * @mtd: MTD device structure
3681 * @maxchips: number of chips to scan for 3541 * @maxchips: Number of chips to scan for
3542 *
3543 * This fills out all the uninitialized function pointers
3544 * with the defaults.
3545 * The flash ID is read and the mtd/chip structures are
3546 * filled with the appropriate values.
3547 * The mtd->owner field must be set to the module of the caller
3682 * 3548 *
3683 * This fills out all the uninitialized function pointers with the defaults.
3684 * The flash ID is read and the mtd/chip structures are filled with the
3685 * appropriate values. The mtd->owner field must be set to the module of the
3686 * caller.
3687 */ 3549 */
3688int nand_scan(struct mtd_info *mtd, int maxchips) 3550int nand_scan(struct mtd_info *mtd, int maxchips)
3689{ 3551{
@@ -3691,7 +3553,8 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
3691 3553
3692 /* Many callers got this wrong, so check for it for a while... */ 3554 /* Many callers got this wrong, so check for it for a while... */
3693 if (!mtd->owner && caller_is_module()) { 3555 if (!mtd->owner && caller_is_module()) {
3694 pr_crit("%s called with NULL mtd->owner!\n", __func__); 3556 printk(KERN_CRIT "%s called with NULL mtd->owner!\n",
3557 __func__);
3695 BUG(); 3558 BUG();
3696 } 3559 }
3697 3560
@@ -3704,8 +3567,8 @@ EXPORT_SYMBOL(nand_scan);
3704 3567
3705/** 3568/**
3706 * nand_release - [NAND Interface] Free resources held by the NAND device 3569 * nand_release - [NAND Interface] Free resources held by the NAND device
3707 * @mtd: MTD device structure 3570 * @mtd: MTD device structure
3708 */ 3571*/
3709void nand_release(struct mtd_info *mtd) 3572void nand_release(struct mtd_info *mtd)
3710{ 3573{
3711 struct nand_chip *chip = mtd->priv; 3574 struct nand_chip *chip = mtd->priv;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 916d6e9c0ab..ccbeaa1e4a8 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -4,7 +4,7 @@
4 * Overview: 4 * Overview:
5 * Bad block table support for the NAND driver 5 * Bad block table support for the NAND driver
6 * 6 *
7 * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de) 7 * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -14,7 +14,7 @@
14 * 14 *
15 * When nand_scan_bbt is called, then it tries to find the bad block table 15 * When nand_scan_bbt is called, then it tries to find the bad block table
16 * depending on the options in the BBT descriptor(s). If no flash based BBT 16 * depending on the options in the BBT descriptor(s). If no flash based BBT
17 * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory 17 * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory
18 * marked good / bad blocks. This information is used to create a memory BBT. 18 * marked good / bad blocks. This information is used to create a memory BBT.
19 * Once a new bad block is discovered then the "factory" information is updated 19 * Once a new bad block is discovered then the "factory" information is updated
20 * on the device. 20 * on the device.
@@ -22,7 +22,7 @@
22 * BBT on flash. If a BBT is found then the contents are read and the memory 22 * BBT on flash. If a BBT is found then the contents are read and the memory
23 * based BBT is created. If a mirrored BBT is selected then the mirror is 23 * based BBT is created. If a mirrored BBT is selected then the mirror is
24 * searched too and the versions are compared. If the mirror has a greater 24 * searched too and the versions are compared. If the mirror has a greater
25 * version number, then the mirror BBT is used to build the memory based BBT. 25 * version number than the mirror BBT is used to build the memory based BBT.
26 * If the tables are not versioned, then we "or" the bad block information. 26 * If the tables are not versioned, then we "or" the bad block information.
27 * If one of the BBTs is out of date or does not exist it is (re)created. 27 * If one of the BBTs is out of date or does not exist it is (re)created.
28 * If no BBT exists at all then the device is scanned for factory marked 28 * If no BBT exists at all then the device is scanned for factory marked
@@ -36,9 +36,9 @@
36 * The table is marked in the OOB area with an ident pattern and a version 36 * The table is marked in the OOB area with an ident pattern and a version
37 * number which indicates which of both tables is more up to date. If the NAND 37 * number which indicates which of both tables is more up to date. If the NAND
38 * controller needs the complete OOB area for the ECC information then the 38 * controller needs the complete OOB area for the ECC information then the
39 * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of 39 * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern
40 * course): it moves the ident pattern and the version byte into the data area 40 * and the version byte into the data area and the OOB area will remain
41 * and the OOB area will remain untouched. 41 * untouched.
42 * 42 *
43 * The table uses 2 bits per block 43 * The table uses 2 bits per block
44 * 11b: block is good 44 * 11b: block is good
@@ -62,82 +62,126 @@
62#include <linux/slab.h> 62#include <linux/slab.h>
63#include <linux/types.h> 63#include <linux/types.h>
64#include <linux/mtd/mtd.h> 64#include <linux/mtd/mtd.h>
65#include <linux/mtd/bbm.h>
66#include <linux/mtd/nand.h> 65#include <linux/mtd/nand.h>
67#include <linux/mtd/nand_ecc.h> 66#include <linux/mtd/nand_ecc.h>
68#include <linux/bitops.h> 67#include <linux/bitops.h>
69#include <linux/delay.h> 68#include <linux/delay.h>
70#include <linux/vmalloc.h> 69#include <linux/vmalloc.h>
71#include <linux/export.h>
72#include <linux/string.h>
73 70
74static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) 71static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
75{ 72{
76 if (memcmp(buf, td->pattern, td->len)) 73 int ret;
77 return -1; 74
78 return 0; 75 ret = memcmp(buf, td->pattern, td->len);
76 if (!ret)
77 return ret;
78 return -1;
79} 79}
80 80
81/** 81/**
82 * check_pattern - [GENERIC] check if a pattern is in the buffer 82 * check_pattern - [GENERIC] check if a pattern is in the buffer
83 * @buf: the buffer to search 83 * @buf: the buffer to search
84 * @len: the length of buffer to search 84 * @len: the length of buffer to search
85 * @paglen: the pagelength 85 * @paglen: the pagelength
86 * @td: search pattern descriptor 86 * @td: search pattern descriptor
87 * 87 *
88 * Check for a pattern at the given place. Used to search bad block tables and 88 * Check for a pattern at the given place. Used to search bad block
89 * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if 89 * tables and good / bad block identifiers.
90 * all bytes except the pattern area contain 0xff. 90 * If the SCAN_EMPTY option is set then check, if all bytes except the
91 */ 91 * pattern area contain 0xff
92 *
93*/
92static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) 94static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
93{ 95{
94 int end = 0; 96 int i, end = 0;
95 uint8_t *p = buf; 97 uint8_t *p = buf;
96 98
97 if (td->options & NAND_BBT_NO_OOB) 99 if (td->options & NAND_BBT_NO_OOB)
98 return check_pattern_no_oob(buf, td); 100 return check_pattern_no_oob(buf, td);
99 101
100 end = paglen + td->offs; 102 end = paglen + td->offs;
101 if (td->options & NAND_BBT_SCANEMPTY) 103 if (td->options & NAND_BBT_SCANEMPTY) {
102 if (memchr_inv(p, 0xff, end)) 104 for (i = 0; i < end; i++) {
103 return -1; 105 if (p[i] != 0xff)
106 return -1;
107 }
108 }
104 p += end; 109 p += end;
105 110
106 /* Compare the pattern */ 111 /* Compare the pattern */
107 if (memcmp(p, td->pattern, td->len)) 112 for (i = 0; i < td->len; i++) {
108 return -1; 113 if (p[i] != td->pattern[i])
114 return -1;
115 }
116
117 /* Check both positions 1 and 6 for pattern? */
118 if (td->options & NAND_BBT_SCANBYTE1AND6) {
119 if (td->options & NAND_BBT_SCANEMPTY) {
120 p += td->len;
121 end += NAND_SMALL_BADBLOCK_POS - td->offs;
122 /* Check region between positions 1 and 6 */
123 for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
124 i++) {
125 if (*p++ != 0xff)
126 return -1;
127 }
128 }
129 else {
130 p += NAND_SMALL_BADBLOCK_POS - td->offs;
131 }
132 /* Compare the pattern */
133 for (i = 0; i < td->len; i++) {
134 if (p[i] != td->pattern[i])
135 return -1;
136 }
137 }
109 138
110 if (td->options & NAND_BBT_SCANEMPTY) { 139 if (td->options & NAND_BBT_SCANEMPTY) {
111 p += td->len; 140 p += td->len;
112 end += td->len; 141 end += td->len;
113 if (memchr_inv(p, 0xff, len - end)) 142 for (i = end; i < len; i++) {
114 return -1; 143 if (*p++ != 0xff)
144 return -1;
145 }
115 } 146 }
116 return 0; 147 return 0;
117} 148}
118 149
119/** 150/**
120 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 151 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
121 * @buf: the buffer to search 152 * @buf: the buffer to search
122 * @td: search pattern descriptor 153 * @td: search pattern descriptor
123 * 154 *
124 * Check for a pattern at the given place. Used to search bad block tables and 155 * Check for a pattern at the given place. Used to search bad block
125 * good / bad block identifiers. Same as check_pattern, but no optional empty 156 * tables and good / bad block identifiers. Same as check_pattern, but
126 * check. 157 * no optional empty check
127 */ 158 *
159*/
128static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) 160static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
129{ 161{
162 int i;
163 uint8_t *p = buf;
164
130 /* Compare the pattern */ 165 /* Compare the pattern */
131 if (memcmp(buf + td->offs, td->pattern, td->len)) 166 for (i = 0; i < td->len; i++) {
132 return -1; 167 if (p[td->offs + i] != td->pattern[i])
168 return -1;
169 }
170 /* Need to check location 1 AND 6? */
171 if (td->options & NAND_BBT_SCANBYTE1AND6) {
172 for (i = 0; i < td->len; i++) {
173 if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
174 return -1;
175 }
176 }
133 return 0; 177 return 0;
134} 178}
135 179
136/** 180/**
137 * add_marker_len - compute the length of the marker in data area 181 * add_marker_len - compute the length of the marker in data area
138 * @td: BBT descriptor used for computation 182 * @td: BBT descriptor used for computation
139 * 183 *
140 * The length will be 0 if the marker is located in OOB area. 184 * The length will be 0 if the markeris located in OOB area.
141 */ 185 */
142static u32 add_marker_len(struct nand_bbt_descr *td) 186static u32 add_marker_len(struct nand_bbt_descr *td)
143{ 187{
@@ -154,33 +198,34 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
154 198
155/** 199/**
156 * read_bbt - [GENERIC] Read the bad block table starting from page 200 * read_bbt - [GENERIC] Read the bad block table starting from page
157 * @mtd: MTD device structure 201 * @mtd: MTD device structure
158 * @buf: temporary buffer 202 * @buf: temporary buffer
159 * @page: the starting page 203 * @page: the starting page
160 * @num: the number of bbt descriptors to read 204 * @num: the number of bbt descriptors to read
161 * @td: the bbt describtion table 205 * @td: the bbt describtion table
162 * @offs: offset in the memory table 206 * @offs: offset in the memory table
163 * 207 *
164 * Read the bad block table starting from page. 208 * Read the bad block table starting from page.
209 *
165 */ 210 */
166static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, 211static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
167 struct nand_bbt_descr *td, int offs) 212 struct nand_bbt_descr *td, int offs)
168{ 213{
169 int res, ret = 0, i, j, act = 0; 214 int res, i, j, act = 0;
170 struct nand_chip *this = mtd->priv; 215 struct nand_chip *this = mtd->priv;
171 size_t retlen, len, totlen; 216 size_t retlen, len, totlen;
172 loff_t from; 217 loff_t from;
173 int bits = td->options & NAND_BBT_NRBITS_MSK; 218 int bits = td->options & NAND_BBT_NRBITS_MSK;
174 uint8_t msk = (uint8_t)((1 << bits) - 1); 219 uint8_t msk = (uint8_t) ((1 << bits) - 1);
175 u32 marker_len; 220 u32 marker_len;
176 int reserved_block_code = td->reserved_block_code; 221 int reserved_block_code = td->reserved_block_code;
177 222
178 totlen = (num * bits) >> 3; 223 totlen = (num * bits) >> 3;
179 marker_len = add_marker_len(td); 224 marker_len = add_marker_len(td);
180 from = ((loff_t)page) << this->page_shift; 225 from = ((loff_t) page) << this->page_shift;
181 226
182 while (totlen) { 227 while (totlen) {
183 len = min(totlen, (size_t)(1 << this->bbt_erase_shift)); 228 len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
184 if (marker_len) { 229 if (marker_len) {
185 /* 230 /*
186 * In case the BBT marker is not in the OOB area it 231 * In case the BBT marker is not in the OOB area it
@@ -190,20 +235,13 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
190 from += marker_len; 235 from += marker_len;
191 marker_len = 0; 236 marker_len = 0;
192 } 237 }
193 res = mtd_read(mtd, from, len, &retlen, buf); 238 res = mtd->read(mtd, from, len, &retlen, buf);
194 if (res < 0) { 239 if (res < 0) {
195 if (mtd_is_eccerr(res)) { 240 if (retlen != len) {
196 pr_info("nand_bbt: ECC error in BBT at " 241 printk(KERN_INFO "nand_bbt: Error reading bad block table\n");
197 "0x%012llx\n", from & ~mtd->writesize);
198 return res;
199 } else if (mtd_is_bitflip(res)) {
200 pr_info("nand_bbt: corrected error in BBT at "
201 "0x%012llx\n", from & ~mtd->writesize);
202 ret = res;
203 } else {
204 pr_info("nand_bbt: error reading BBT\n");
205 return res; 242 return res;
206 } 243 }
244 printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
207 } 245 }
208 246
209 /* Analyse data */ 247 /* Analyse data */
@@ -214,19 +252,17 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
214 if (tmp == msk) 252 if (tmp == msk)
215 continue; 253 continue;
216 if (reserved_block_code && (tmp == reserved_block_code)) { 254 if (reserved_block_code && (tmp == reserved_block_code)) {
217 pr_info("nand_read_bbt: reserved block at 0x%012llx\n", 255 printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
218 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 256 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
219 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 257 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
220 mtd->ecc_stats.bbtblocks++; 258 mtd->ecc_stats.bbtblocks++;
221 continue; 259 continue;
222 } 260 }
223 /* 261 /* Leave it for now, if its matured we can move this
224 * Leave it for now, if it's matured we can 262 * message to MTD_DEBUG_LEVEL0 */
225 * move this message to pr_debug. 263 printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
226 */ 264 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
227 pr_info("nand_read_bbt: bad block at 0x%012llx\n", 265 /* Factory marked bad or worn out ? */
228 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
229 /* Factory marked bad or worn out? */
230 if (tmp == 0) 266 if (tmp == 0)
231 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); 267 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
232 else 268 else
@@ -237,20 +273,20 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
237 totlen -= len; 273 totlen -= len;
238 from += len; 274 from += len;
239 } 275 }
240 return ret; 276 return 0;
241} 277}
242 278
243/** 279/**
244 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page 280 * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
245 * @mtd: MTD device structure 281 * @mtd: MTD device structure
246 * @buf: temporary buffer 282 * @buf: temporary buffer
247 * @td: descriptor for the bad block table 283 * @td: descriptor for the bad block table
248 * @chip: read the table for a specific chip, -1 read all chips; applies only if 284 * @chip: read the table for a specific chip, -1 read all chips.
249 * NAND_BBT_PERCHIP option is set 285 * Applies only if NAND_BBT_PERCHIP option is set
250 * 286 *
251 * Read the bad block table for all chips starting at a given page. We assume 287 * Read the bad block table for all chips starting at a given page
252 * that the bbt bits are in consecutive order. 288 * We assume that the bbt bits are in consecutive order.
253 */ 289*/
254static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) 290static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
255{ 291{
256 struct nand_chip *this = mtd->priv; 292 struct nand_chip *this = mtd->priv;
@@ -276,8 +312,10 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
276 return 0; 312 return 0;
277} 313}
278 314
279/* BBT marker is in the first page, no OOB */ 315/*
280static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 316 * BBT marker is in the first page, no OOB.
317 */
318static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
281 struct nand_bbt_descr *td) 319 struct nand_bbt_descr *td)
282{ 320{
283 size_t retlen; 321 size_t retlen;
@@ -287,73 +325,70 @@ static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
287 if (td->options & NAND_BBT_VERSION) 325 if (td->options & NAND_BBT_VERSION)
288 len++; 326 len++;
289 327
290 return mtd_read(mtd, offs, len, &retlen, buf); 328 return mtd->read(mtd, offs, len, &retlen, buf);
291} 329}
292 330
293/** 331/*
294 * scan_read_oob - [GENERIC] Scan data+OOB region to buffer 332 * Scan read raw data from flash
295 * @mtd: MTD device structure
296 * @buf: temporary buffer
297 * @offs: offset at which to scan
298 * @len: length of data region to read
299 *
300 * Scan read data from data+OOB. May traverse multiple pages, interleaving
301 * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
302 * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
303 */ 333 */
304static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 334static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
305 size_t len) 335 size_t len)
306{ 336{
307 struct mtd_oob_ops ops; 337 struct mtd_oob_ops ops;
308 int res, ret = 0; 338 int res;
309 339
310 ops.mode = MTD_OPS_PLACE_OOB; 340 ops.mode = MTD_OOB_RAW;
311 ops.ooboffs = 0; 341 ops.ooboffs = 0;
312 ops.ooblen = mtd->oobsize; 342 ops.ooblen = mtd->oobsize;
313 343
344
314 while (len > 0) { 345 while (len > 0) {
315 ops.datbuf = buf; 346 if (len <= mtd->writesize) {
316 ops.len = min(len, (size_t)mtd->writesize); 347 ops.oobbuf = buf + len;
317 ops.oobbuf = buf + ops.len; 348 ops.datbuf = buf;
349 ops.len = len;
350 return mtd->read_oob(mtd, offs, &ops);
351 } else {
352 ops.oobbuf = buf + mtd->writesize;
353 ops.datbuf = buf;
354 ops.len = mtd->writesize;
355 res = mtd->read_oob(mtd, offs, &ops);
318 356
319 res = mtd_read_oob(mtd, offs, &ops); 357 if (res)
320 if (res) {
321 if (!mtd_is_bitflip_or_eccerr(res))
322 return res; 358 return res;
323 else if (mtd_is_eccerr(res) || !ret)
324 ret = res;
325 } 359 }
326 360
327 buf += mtd->oobsize + mtd->writesize; 361 buf += mtd->oobsize + mtd->writesize;
328 len -= mtd->writesize; 362 len -= mtd->writesize;
329 offs += mtd->writesize;
330 } 363 }
331 return ret; 364 return 0;
332} 365}
333 366
334static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs, 367static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
335 size_t len, struct nand_bbt_descr *td) 368 size_t len, struct nand_bbt_descr *td)
336{ 369{
337 if (td->options & NAND_BBT_NO_OOB) 370 if (td->options & NAND_BBT_NO_OOB)
338 return scan_read_data(mtd, buf, offs, td); 371 return scan_read_raw_data(mtd, buf, offs, td);
339 else 372 else
340 return scan_read_oob(mtd, buf, offs, len); 373 return scan_read_raw_oob(mtd, buf, offs, len);
341} 374}
342 375
343/* Scan write data with oob to flash */ 376/*
377 * Scan write data with oob to flash
378 */
344static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, 379static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
345 uint8_t *buf, uint8_t *oob) 380 uint8_t *buf, uint8_t *oob)
346{ 381{
347 struct mtd_oob_ops ops; 382 struct mtd_oob_ops ops;
348 383
349 ops.mode = MTD_OPS_PLACE_OOB; 384 ops.mode = MTD_OOB_PLACE;
350 ops.ooboffs = 0; 385 ops.ooboffs = 0;
351 ops.ooblen = mtd->oobsize; 386 ops.ooblen = mtd->oobsize;
352 ops.datbuf = buf; 387 ops.datbuf = buf;
353 ops.oobbuf = oob; 388 ops.oobbuf = oob;
354 ops.len = len; 389 ops.len = len;
355 390
356 return mtd_write_oob(mtd, offs, &ops); 391 return mtd->write_oob(mtd, offs, &ops);
357} 392}
358 393
359static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td) 394static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -367,60 +402,65 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
367 402
368/** 403/**
369 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page 404 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
370 * @mtd: MTD device structure 405 * @mtd: MTD device structure
371 * @buf: temporary buffer 406 * @buf: temporary buffer
372 * @td: descriptor for the bad block table 407 * @td: descriptor for the bad block table
373 * @md: descriptor for the bad block table mirror 408 * @md: descriptor for the bad block table mirror
374 * 409 *
375 * Read the bad block table(s) for all chips starting at a given page. We 410 * Read the bad block table(s) for all chips starting at a given page
376 * assume that the bbt bits are in consecutive order. 411 * We assume that the bbt bits are in consecutive order.
377 */ 412 *
378static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, 413*/
379 struct nand_bbt_descr *td, struct nand_bbt_descr *md) 414static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
415 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
380{ 416{
381 struct nand_chip *this = mtd->priv; 417 struct nand_chip *this = mtd->priv;
382 418
383 /* Read the primary version, if available */ 419 /* Read the primary version, if available */
384 if (td->options & NAND_BBT_VERSION) { 420 if (td->options & NAND_BBT_VERSION) {
385 scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift, 421 scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
386 mtd->writesize, td); 422 mtd->writesize, td);
387 td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; 423 td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
388 pr_info("Bad block table at page %d, version 0x%02X\n", 424 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
389 td->pages[0], td->version[0]); 425 td->pages[0], td->version[0]);
390 } 426 }
391 427
392 /* Read the mirror version, if available */ 428 /* Read the mirror version, if available */
393 if (md && (md->options & NAND_BBT_VERSION)) { 429 if (md && (md->options & NAND_BBT_VERSION)) {
394 scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift, 430 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
395 mtd->writesize, md); 431 mtd->writesize, td);
396 md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; 432 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
397 pr_info("Bad block table at page %d, version 0x%02X\n", 433 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
398 md->pages[0], md->version[0]); 434 md->pages[0], md->version[0]);
399 } 435 }
436 return 1;
400} 437}
401 438
402/* Scan a given block full */ 439/*
440 * Scan a given block full
441 */
403static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, 442static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
404 loff_t offs, uint8_t *buf, size_t readlen, 443 loff_t offs, uint8_t *buf, size_t readlen,
405 int scanlen, int numpages) 444 int scanlen, int len)
406{ 445{
407 int ret, j; 446 int ret, j;
408 447
409 ret = scan_read_oob(mtd, buf, offs, readlen); 448 ret = scan_read_raw_oob(mtd, buf, offs, readlen);
410 /* Ignore ECC errors when checking for BBM */ 449 if (ret)
411 if (ret && !mtd_is_bitflip_or_eccerr(ret))
412 return ret; 450 return ret;
413 451
414 for (j = 0; j < numpages; j++, buf += scanlen) { 452 for (j = 0; j < len; j++, buf += scanlen) {
415 if (check_pattern(buf, scanlen, mtd->writesize, bd)) 453 if (check_pattern(buf, scanlen, mtd->writesize, bd))
416 return 1; 454 return 1;
417 } 455 }
418 return 0; 456 return 0;
419} 457}
420 458
421/* Scan a given block partially */ 459/*
460 * Scan a given block partially
461 */
422static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, 462static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
423 loff_t offs, uint8_t *buf, int numpages) 463 loff_t offs, uint8_t *buf, int len)
424{ 464{
425 struct mtd_oob_ops ops; 465 struct mtd_oob_ops ops;
426 int j, ret; 466 int j, ret;
@@ -429,16 +469,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
429 ops.oobbuf = buf; 469 ops.oobbuf = buf;
430 ops.ooboffs = 0; 470 ops.ooboffs = 0;
431 ops.datbuf = NULL; 471 ops.datbuf = NULL;
432 ops.mode = MTD_OPS_PLACE_OOB; 472 ops.mode = MTD_OOB_PLACE;
433 473
434 for (j = 0; j < numpages; j++) { 474 for (j = 0; j < len; j++) {
435 /* 475 /*
436 * Read the full oob until read_oob is fixed to handle single 476 * Read the full oob until read_oob is fixed to
437 * byte reads for 16 bit buswidth. 477 * handle single byte reads for 16 bit
478 * buswidth
438 */ 479 */
439 ret = mtd_read_oob(mtd, offs, &ops); 480 ret = mtd->read_oob(mtd, offs, &ops);
440 /* Ignore ECC errors when checking for BBM */ 481 if (ret)
441 if (ret && !mtd_is_bitflip_or_eccerr(ret))
442 return ret; 482 return ret;
443 483
444 if (check_short_pattern(buf, bd)) 484 if (check_short_pattern(buf, bd))
@@ -451,32 +491,32 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
451 491
452/** 492/**
453 * create_bbt - [GENERIC] Create a bad block table by scanning the device 493 * create_bbt - [GENERIC] Create a bad block table by scanning the device
454 * @mtd: MTD device structure 494 * @mtd: MTD device structure
455 * @buf: temporary buffer 495 * @buf: temporary buffer
456 * @bd: descriptor for the good/bad block search pattern 496 * @bd: descriptor for the good/bad block search pattern
457 * @chip: create the table for a specific chip, -1 read all chips; applies only 497 * @chip: create the table for a specific chip, -1 read all chips.
458 * if NAND_BBT_PERCHIP option is set 498 * Applies only if NAND_BBT_PERCHIP option is set
459 * 499 *
460 * Create a bad block table by scanning the device for the given good/bad block 500 * Create a bad block table by scanning the device
461 * identify pattern. 501 * for the given good/bad block identify pattern
462 */ 502 */
463static int create_bbt(struct mtd_info *mtd, uint8_t *buf, 503static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
464 struct nand_bbt_descr *bd, int chip) 504 struct nand_bbt_descr *bd, int chip)
465{ 505{
466 struct nand_chip *this = mtd->priv; 506 struct nand_chip *this = mtd->priv;
467 int i, numblocks, numpages, scanlen; 507 int i, numblocks, len, scanlen;
468 int startblock; 508 int startblock;
469 loff_t from; 509 loff_t from;
470 size_t readlen; 510 size_t readlen;
471 511
472 pr_info("Scanning device for bad blocks\n"); 512 printk(KERN_INFO "Scanning device for bad blocks\n");
473 513
474 if (bd->options & NAND_BBT_SCANALLPAGES) 514 if (bd->options & NAND_BBT_SCANALLPAGES)
475 numpages = 1 << (this->bbt_erase_shift - this->page_shift); 515 len = 1 << (this->bbt_erase_shift - this->page_shift);
476 else if (bd->options & NAND_BBT_SCAN2NDPAGE) 516 else if (bd->options & NAND_BBT_SCAN2NDPAGE)
477 numpages = 2; 517 len = 2;
478 else 518 else
479 numpages = 1; 519 len = 1;
480 520
481 if (!(bd->options & NAND_BBT_SCANEMPTY)) { 521 if (!(bd->options & NAND_BBT_SCANEMPTY)) {
482 /* We need only read few bytes from the OOB area */ 522 /* We need only read few bytes from the OOB area */
@@ -485,20 +525,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
485 } else { 525 } else {
486 /* Full page content should be read */ 526 /* Full page content should be read */
487 scanlen = mtd->writesize + mtd->oobsize; 527 scanlen = mtd->writesize + mtd->oobsize;
488 readlen = numpages * mtd->writesize; 528 readlen = len * mtd->writesize;
489 } 529 }
490 530
491 if (chip == -1) { 531 if (chip == -1) {
492 /* 532 /* Note that numblocks is 2 * (real numblocks) here, see i+=2
493 * Note that numblocks is 2 * (real numblocks) here, see i+=2 533 * below as it makes shifting and masking less painful */
494 * below as it makes shifting and masking less painful
495 */
496 numblocks = mtd->size >> (this->bbt_erase_shift - 1); 534 numblocks = mtd->size >> (this->bbt_erase_shift - 1);
497 startblock = 0; 535 startblock = 0;
498 from = 0; 536 from = 0;
499 } else { 537 } else {
500 if (chip >= this->numchips) { 538 if (chip >= this->numchips) {
501 pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n", 539 printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
502 chip + 1, this->numchips); 540 chip + 1, this->numchips);
503 return -EINVAL; 541 return -EINVAL;
504 } 542 }
@@ -508,8 +546,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
508 from = (loff_t)startblock << (this->bbt_erase_shift - 1); 546 from = (loff_t)startblock << (this->bbt_erase_shift - 1);
509 } 547 }
510 548
511 if (this->bbt_options & NAND_BBT_SCANLASTPAGE) 549 if (this->options & NAND_BBT_SCANLASTPAGE)
512 from += mtd->erasesize - (mtd->writesize * numpages); 550 from += mtd->erasesize - (mtd->writesize * len);
513 551
514 for (i = startblock; i < numblocks;) { 552 for (i = startblock; i < numblocks;) {
515 int ret; 553 int ret;
@@ -518,17 +556,17 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
518 556
519 if (bd->options & NAND_BBT_SCANALLPAGES) 557 if (bd->options & NAND_BBT_SCANALLPAGES)
520 ret = scan_block_full(mtd, bd, from, buf, readlen, 558 ret = scan_block_full(mtd, bd, from, buf, readlen,
521 scanlen, numpages); 559 scanlen, len);
522 else 560 else
523 ret = scan_block_fast(mtd, bd, from, buf, numpages); 561 ret = scan_block_fast(mtd, bd, from, buf, len);
524 562
525 if (ret < 0) 563 if (ret < 0)
526 return ret; 564 return ret;
527 565
528 if (ret) { 566 if (ret) {
529 this->bbt[i >> 3] |= 0x03 << (i & 0x6); 567 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
530 pr_warn("Bad eraseblock %d at 0x%012llx\n", 568 printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
531 i >> 1, (unsigned long long)from); 569 i >> 1, (unsigned long long)from);
532 mtd->ecc_stats.badblocks++; 570 mtd->ecc_stats.badblocks++;
533 } 571 }
534 572
@@ -540,18 +578,20 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
540 578
541/** 579/**
542 * search_bbt - [GENERIC] scan the device for a specific bad block table 580 * search_bbt - [GENERIC] scan the device for a specific bad block table
543 * @mtd: MTD device structure 581 * @mtd: MTD device structure
544 * @buf: temporary buffer 582 * @buf: temporary buffer
545 * @td: descriptor for the bad block table 583 * @td: descriptor for the bad block table
546 * 584 *
547 * Read the bad block table by searching for a given ident pattern. Search is 585 * Read the bad block table by searching for a given ident pattern.
548 * preformed either from the beginning up or from the end of the device 586 * Search is preformed either from the beginning up or from the end of
549 * downwards. The search starts always at the start of a block. If the option 587 * the device downwards. The search starts always at the start of a
550 * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains 588 * block.
551 * the bad block information of this chip. This is necessary to provide support 589 * If the option NAND_BBT_PERCHIP is given, each chip is searched
552 * for certain DOC devices. 590 * for a bbt, which contains the bad block information of this chip.
591 * This is necessary to provide support for certain DOC devices.
553 * 592 *
554 * The bbt ident pattern resides in the oob area of the first page in a block. 593 * The bbt ident pattern resides in the oob area of the first page
594 * in a block.
555 */ 595 */
556static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) 596static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
557{ 597{
@@ -562,7 +602,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
562 int bbtblocks; 602 int bbtblocks;
563 int blocktopage = this->bbt_erase_shift - this->page_shift; 603 int blocktopage = this->bbt_erase_shift - this->page_shift;
564 604
565 /* Search direction top -> down? */ 605 /* Search direction top -> down ? */
566 if (td->options & NAND_BBT_LASTBLOCK) { 606 if (td->options & NAND_BBT_LASTBLOCK) {
567 startblock = (mtd->size >> this->bbt_erase_shift) - 1; 607 startblock = (mtd->size >> this->bbt_erase_shift) - 1;
568 dir = -1; 608 dir = -1;
@@ -571,7 +611,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
571 dir = 1; 611 dir = 1;
572 } 612 }
573 613
574 /* Do we have a bbt per chip? */ 614 /* Do we have a bbt per chip ? */
575 if (td->options & NAND_BBT_PERCHIP) { 615 if (td->options & NAND_BBT_PERCHIP) {
576 chips = this->numchips; 616 chips = this->numchips;
577 bbtblocks = this->chipsize >> this->bbt_erase_shift; 617 bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -595,7 +635,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
595 loff_t offs = (loff_t)actblock << this->bbt_erase_shift; 635 loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
596 636
597 /* Read first page */ 637 /* Read first page */
598 scan_read(mtd, buf, offs, mtd->writesize, td); 638 scan_read_raw(mtd, buf, offs, mtd->writesize, td);
599 if (!check_pattern(buf, scanlen, mtd->writesize, td)) { 639 if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
600 td->pages[i] = actblock << blocktopage; 640 td->pages[i] = actblock << blocktopage;
601 if (td->options & NAND_BBT_VERSION) { 641 if (td->options & NAND_BBT_VERSION) {
@@ -610,26 +650,24 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
610 /* Check, if we found a bbt for each requested chip */ 650 /* Check, if we found a bbt for each requested chip */
611 for (i = 0; i < chips; i++) { 651 for (i = 0; i < chips; i++) {
612 if (td->pages[i] == -1) 652 if (td->pages[i] == -1)
613 pr_warn("Bad block table not found for chip %d\n", i); 653 printk(KERN_WARNING "Bad block table not found for chip %d\n", i);
614 else 654 else
615 pr_info("Bad block table found at page %d, version " 655 printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i],
616 "0x%02X\n", td->pages[i], td->version[i]); 656 td->version[i]);
617 } 657 }
618 return 0; 658 return 0;
619} 659}
620 660
621/** 661/**
622 * search_read_bbts - [GENERIC] scan the device for bad block table(s) 662 * search_read_bbts - [GENERIC] scan the device for bad block table(s)
623 * @mtd: MTD device structure 663 * @mtd: MTD device structure
624 * @buf: temporary buffer 664 * @buf: temporary buffer
625 * @td: descriptor for the bad block table 665 * @td: descriptor for the bad block table
626 * @md: descriptor for the bad block table mirror 666 * @md: descriptor for the bad block table mirror
627 * 667 *
628 * Search and read the bad block table(s). 668 * Search and read the bad block table(s)
629 */ 669*/
630static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf, 670static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
631 struct nand_bbt_descr *td,
632 struct nand_bbt_descr *md)
633{ 671{
634 /* Search the primary table */ 672 /* Search the primary table */
635 search_bbt(mtd, buf, td); 673 search_bbt(mtd, buf, td);
@@ -637,18 +675,23 @@ static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
637 /* Search the mirror table */ 675 /* Search the mirror table */
638 if (md) 676 if (md)
639 search_bbt(mtd, buf, md); 677 search_bbt(mtd, buf, md);
678
679 /* Force result check */
680 return 1;
640} 681}
641 682
642/** 683/**
643 * write_bbt - [GENERIC] (Re)write the bad block table 684 * write_bbt - [GENERIC] (Re)write the bad block table
644 * @mtd: MTD device structure
645 * @buf: temporary buffer
646 * @td: descriptor for the bad block table
647 * @md: descriptor for the bad block table mirror
648 * @chipsel: selector for a specific chip, -1 for all
649 * 685 *
650 * (Re)write the bad block table. 686 * @mtd: MTD device structure
651 */ 687 * @buf: temporary buffer
688 * @td: descriptor for the bad block table
689 * @md: descriptor for the bad block table mirror
690 * @chipsel: selector for a specific chip, -1 for all
691 *
692 * (Re)write the bad block table
693 *
694*/
652static int write_bbt(struct mtd_info *mtd, uint8_t *buf, 695static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
653 struct nand_bbt_descr *td, struct nand_bbt_descr *md, 696 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
654 int chipsel) 697 int chipsel)
@@ -667,14 +710,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
667 ops.ooblen = mtd->oobsize; 710 ops.ooblen = mtd->oobsize;
668 ops.ooboffs = 0; 711 ops.ooboffs = 0;
669 ops.datbuf = NULL; 712 ops.datbuf = NULL;
670 ops.mode = MTD_OPS_PLACE_OOB; 713 ops.mode = MTD_OOB_PLACE;
671 714
672 if (!rcode) 715 if (!rcode)
673 rcode = 0xff; 716 rcode = 0xff;
674 /* Write bad block table per chip rather than per device? */ 717 /* Write bad block table per chip rather than per device ? */
675 if (td->options & NAND_BBT_PERCHIP) { 718 if (td->options & NAND_BBT_PERCHIP) {
676 numblocks = (int)(this->chipsize >> this->bbt_erase_shift); 719 numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
677 /* Full device write or specific chip? */ 720 /* Full device write or specific chip ? */
678 if (chipsel == -1) { 721 if (chipsel == -1) {
679 nrchips = this->numchips; 722 nrchips = this->numchips;
680 } else { 723 } else {
@@ -688,8 +731,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
688 731
689 /* Loop through the chips */ 732 /* Loop through the chips */
690 for (; chip < nrchips; chip++) { 733 for (; chip < nrchips; chip++) {
691 /* 734
692 * There was already a version of the table, reuse the page 735 /* There was already a version of the table, reuse the page
693 * This applies for absolute placement too, as we have the 736 * This applies for absolute placement too, as we have the
694 * page nr. in td->pages. 737 * page nr. in td->pages.
695 */ 738 */
@@ -698,10 +741,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
698 goto write; 741 goto write;
699 } 742 }
700 743
701 /* 744 /* Automatic placement of the bad block table */
702 * Automatic placement of the bad block table. Search direction 745 /* Search direction top -> down ? */
703 * top -> down?
704 */
705 if (td->options & NAND_BBT_LASTBLOCK) { 746 if (td->options & NAND_BBT_LASTBLOCK) {
706 startblock = numblocks * (chip + 1) - 1; 747 startblock = numblocks * (chip + 1) - 1;
707 dir = -1; 748 dir = -1;
@@ -725,7 +766,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
725 if (!md || md->pages[chip] != page) 766 if (!md || md->pages[chip] != page)
726 goto write; 767 goto write;
727 } 768 }
728 pr_err("No space left to write bad block table\n"); 769 printk(KERN_ERR "No space left to write bad block table\n");
729 return -ENOSPC; 770 return -ENOSPC;
730 write: 771 write:
731 772
@@ -750,27 +791,29 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
750 791
751 bbtoffs = chip * (numblocks >> 2); 792 bbtoffs = chip * (numblocks >> 2);
752 793
753 to = ((loff_t)page) << this->page_shift; 794 to = ((loff_t) page) << this->page_shift;
754 795
755 /* Must we save the block contents? */ 796 /* Must we save the block contents ? */
756 if (td->options & NAND_BBT_SAVECONTENT) { 797 if (td->options & NAND_BBT_SAVECONTENT) {
757 /* Make it block aligned */ 798 /* Make it block aligned */
758 to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1)); 799 to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
759 len = 1 << this->bbt_erase_shift; 800 len = 1 << this->bbt_erase_shift;
760 res = mtd_read(mtd, to, len, &retlen, buf); 801 res = mtd->read(mtd, to, len, &retlen, buf);
761 if (res < 0) { 802 if (res < 0) {
762 if (retlen != len) { 803 if (retlen != len) {
763 pr_info("nand_bbt: error reading block " 804 printk(KERN_INFO "nand_bbt: Error "
764 "for writing the bad block table\n"); 805 "reading block for writing "
806 "the bad block table\n");
765 return res; 807 return res;
766 } 808 }
767 pr_warn("nand_bbt: ECC error while reading " 809 printk(KERN_WARNING "nand_bbt: ECC error "
768 "block for writing bad block table\n"); 810 "while reading block for writing "
811 "bad block table\n");
769 } 812 }
770 /* Read oob data */ 813 /* Read oob data */
771 ops.ooblen = (len >> this->page_shift) * mtd->oobsize; 814 ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
772 ops.oobbuf = &buf[len]; 815 ops.oobbuf = &buf[len];
773 res = mtd_read_oob(mtd, to + mtd->writesize, &ops); 816 res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
774 if (res < 0 || ops.oobretlen != ops.ooblen) 817 if (res < 0 || ops.oobretlen != ops.ooblen)
775 goto outerr; 818 goto outerr;
776 819
@@ -778,19 +821,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
778 pageoffs = page - (int)(to >> this->page_shift); 821 pageoffs = page - (int)(to >> this->page_shift);
779 offs = pageoffs << this->page_shift; 822 offs = pageoffs << this->page_shift;
780 /* Preset the bbt area with 0xff */ 823 /* Preset the bbt area with 0xff */
781 memset(&buf[offs], 0xff, (size_t)(numblocks >> sft)); 824 memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
782 ooboffs = len + (pageoffs * mtd->oobsize); 825 ooboffs = len + (pageoffs * mtd->oobsize);
783 826
784 } else if (td->options & NAND_BBT_NO_OOB) { 827 } else if (td->options & NAND_BBT_NO_OOB) {
785 ooboffs = 0; 828 ooboffs = 0;
786 offs = td->len; 829 offs = td->len;
787 /* The version byte */ 830 /* the version byte */
788 if (td->options & NAND_BBT_VERSION) 831 if (td->options & NAND_BBT_VERSION)
789 offs++; 832 offs++;
790 /* Calc length */ 833 /* Calc length */
791 len = (size_t)(numblocks >> sft); 834 len = (size_t) (numblocks >> sft);
792 len += offs; 835 len += offs;
793 /* Make it page aligned! */ 836 /* Make it page aligned ! */
794 len = ALIGN(len, mtd->writesize); 837 len = ALIGN(len, mtd->writesize);
795 /* Preset the buffer with 0xff */ 838 /* Preset the buffer with 0xff */
796 memset(buf, 0xff, len); 839 memset(buf, 0xff, len);
@@ -798,8 +841,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
798 memcpy(buf, td->pattern, td->len); 841 memcpy(buf, td->pattern, td->len);
799 } else { 842 } else {
800 /* Calc length */ 843 /* Calc length */
801 len = (size_t)(numblocks >> sft); 844 len = (size_t) (numblocks >> sft);
802 /* Make it page aligned! */ 845 /* Make it page aligned ! */
803 len = ALIGN(len, mtd->writesize); 846 len = ALIGN(len, mtd->writesize);
804 /* Preset the buffer with 0xff */ 847 /* Preset the buffer with 0xff */
805 memset(buf, 0xff, len + 848 memset(buf, 0xff, len +
@@ -813,13 +856,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
813 if (td->options & NAND_BBT_VERSION) 856 if (td->options & NAND_BBT_VERSION)
814 buf[ooboffs + td->veroffs] = td->version[chip]; 857 buf[ooboffs + td->veroffs] = td->version[chip];
815 858
816 /* Walk through the memory table */ 859 /* walk through the memory table */
817 for (i = 0; i < numblocks;) { 860 for (i = 0; i < numblocks;) {
818 uint8_t dat; 861 uint8_t dat;
819 dat = this->bbt[bbtoffs + (i >> 2)]; 862 dat = this->bbt[bbtoffs + (i >> 2)];
820 for (j = 0; j < 4; j++, i++) { 863 for (j = 0; j < 4; j++, i++) {
821 int sftcnt = (i << (3 - sft)) & sftmsk; 864 int sftcnt = (i << (3 - sft)) & sftmsk;
822 /* Do not store the reserved bbt blocks! */ 865 /* Do not store the reserved bbt blocks ! */
823 buf[offs + (i >> sft)] &= 866 buf[offs + (i >> sft)] &=
824 ~(msk[dat & 0x03] << sftcnt); 867 ~(msk[dat & 0x03] << sftcnt);
825 dat >>= 2; 868 dat >>= 2;
@@ -840,8 +883,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
840 if (res < 0) 883 if (res < 0)
841 goto outerr; 884 goto outerr;
842 885
843 pr_info("Bad block table written to 0x%012llx, version 0x%02X\n", 886 printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
844 (unsigned long long)to, td->version[chip]); 887 "0x%02X\n", (unsigned long long)to, td->version[chip]);
845 888
846 /* Mark it as used */ 889 /* Mark it as used */
847 td->pages[chip] = page; 890 td->pages[chip] = page;
@@ -849,18 +892,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
849 return 0; 892 return 0;
850 893
851 outerr: 894 outerr:
852 pr_warn("nand_bbt: error while writing bad block table %d\n", res); 895 printk(KERN_WARNING
896 "nand_bbt: Error while writing bad block table %d\n", res);
853 return res; 897 return res;
854} 898}
855 899
856/** 900/**
857 * nand_memory_bbt - [GENERIC] create a memory based bad block table 901 * nand_memory_bbt - [GENERIC] create a memory based bad block table
858 * @mtd: MTD device structure 902 * @mtd: MTD device structure
859 * @bd: descriptor for the good/bad block search pattern 903 * @bd: descriptor for the good/bad block search pattern
860 * 904 *
861 * The function creates a memory based bbt by scanning the device for 905 * The function creates a memory based bbt by scanning the device
862 * manufacturer / software marked good / bad blocks. 906 * for manufacturer / software marked good / bad blocks
863 */ 907*/
864static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 908static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
865{ 909{
866 struct nand_chip *this = mtd->priv; 910 struct nand_chip *this = mtd->priv;
@@ -871,24 +915,25 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
871 915
872/** 916/**
873 * check_create - [GENERIC] create and write bbt(s) if necessary 917 * check_create - [GENERIC] create and write bbt(s) if necessary
874 * @mtd: MTD device structure 918 * @mtd: MTD device structure
875 * @buf: temporary buffer 919 * @buf: temporary buffer
876 * @bd: descriptor for the good/bad block search pattern 920 * @bd: descriptor for the good/bad block search pattern
877 * 921 *
878 * The function checks the results of the previous call to read_bbt and creates 922 * The function checks the results of the previous call to read_bbt
879 * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found 923 * and creates / updates the bbt(s) if necessary
880 * for the chip/device. Update is necessary if one of the tables is missing or 924 * Creation is necessary if no bbt was found for the chip/device
881 * the version nr. of one table is less than the other. 925 * Update is necessary if one of the tables is missing or the
882 */ 926 * version nr. of one table is less than the other
927*/
883static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) 928static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
884{ 929{
885 int i, chips, writeops, create, chipsel, res, res2; 930 int i, chips, writeops, chipsel, res;
886 struct nand_chip *this = mtd->priv; 931 struct nand_chip *this = mtd->priv;
887 struct nand_bbt_descr *td = this->bbt_td; 932 struct nand_bbt_descr *td = this->bbt_td;
888 struct nand_bbt_descr *md = this->bbt_md; 933 struct nand_bbt_descr *md = this->bbt_md;
889 struct nand_bbt_descr *rd, *rd2; 934 struct nand_bbt_descr *rd, *rd2;
890 935
891 /* Do we have a bbt per chip? */ 936 /* Do we have a bbt per chip ? */
892 if (td->options & NAND_BBT_PERCHIP) 937 if (td->options & NAND_BBT_PERCHIP)
893 chips = this->numchips; 938 chips = this->numchips;
894 else 939 else
@@ -896,98 +941,86 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
896 941
897 for (i = 0; i < chips; i++) { 942 for (i = 0; i < chips; i++) {
898 writeops = 0; 943 writeops = 0;
899 create = 0;
900 rd = NULL; 944 rd = NULL;
901 rd2 = NULL; 945 rd2 = NULL;
902 res = res2 = 0; 946 /* Per chip or per device ? */
903 /* Per chip or per device? */
904 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; 947 chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
905 /* Mirrored table available? */ 948 /* Mirrored table available ? */
906 if (md) { 949 if (md) {
907 if (td->pages[i] == -1 && md->pages[i] == -1) { 950 if (td->pages[i] == -1 && md->pages[i] == -1) {
908 create = 1;
909 writeops = 0x03; 951 writeops = 0x03;
910 } else if (td->pages[i] == -1) { 952 goto create;
953 }
954
955 if (td->pages[i] == -1) {
911 rd = md; 956 rd = md;
912 writeops = 0x01; 957 td->version[i] = md->version[i];
913 } else if (md->pages[i] == -1) { 958 writeops = 1;
959 goto writecheck;
960 }
961
962 if (md->pages[i] == -1) {
914 rd = td; 963 rd = td;
915 writeops = 0x02; 964 md->version[i] = td->version[i];
916 } else if (td->version[i] == md->version[i]) { 965 writeops = 2;
966 goto writecheck;
967 }
968
969 if (td->version[i] == md->version[i]) {
917 rd = td; 970 rd = td;
918 if (!(td->options & NAND_BBT_VERSION)) 971 if (!(td->options & NAND_BBT_VERSION))
919 rd2 = md; 972 rd2 = md;
920 } else if (((int8_t)(td->version[i] - md->version[i])) > 0) { 973 goto writecheck;
974 }
975
976 if (((int8_t) (td->version[i] - md->version[i])) > 0) {
921 rd = td; 977 rd = td;
922 writeops = 0x02; 978 md->version[i] = td->version[i];
979 writeops = 2;
923 } else { 980 } else {
924 rd = md; 981 rd = md;
925 writeops = 0x01; 982 td->version[i] = md->version[i];
983 writeops = 1;
926 } 984 }
985
986 goto writecheck;
987
927 } else { 988 } else {
928 if (td->pages[i] == -1) { 989 if (td->pages[i] == -1) {
929 create = 1;
930 writeops = 0x01; 990 writeops = 0x01;
931 } else { 991 goto create;
932 rd = td;
933 }
934 }
935
936 if (create) {
937 /* Create the bad block table by scanning the device? */
938 if (!(td->options & NAND_BBT_CREATE))
939 continue;
940
941 /* Create the table in memory by scanning the chip(s) */
942 if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
943 create_bbt(mtd, buf, bd, chipsel);
944
945 td->version[i] = 1;
946 if (md)
947 md->version[i] = 1;
948 }
949
950 /* Read back first? */
951 if (rd) {
952 res = read_abs_bbt(mtd, buf, rd, chipsel);
953 if (mtd_is_eccerr(res)) {
954 /* Mark table as invalid */
955 rd->pages[i] = -1;
956 rd->version[i] = 0;
957 i--;
958 continue;
959 } 992 }
993 rd = td;
994 goto writecheck;
960 } 995 }
961 /* If they weren't versioned, read both */ 996 create:
962 if (rd2) { 997 /* Create the bad block table by scanning the device ? */
963 res2 = read_abs_bbt(mtd, buf, rd2, chipsel); 998 if (!(td->options & NAND_BBT_CREATE))
964 if (mtd_is_eccerr(res2)) { 999 continue;
965 /* Mark table as invalid */
966 rd2->pages[i] = -1;
967 rd2->version[i] = 0;
968 i--;
969 continue;
970 }
971 }
972
973 /* Scrub the flash table(s)? */
974 if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
975 writeops = 0x03;
976
977 /* Update version numbers before writing */
978 if (md) {
979 td->version[i] = max(td->version[i], md->version[i]);
980 md->version[i] = td->version[i];
981 }
982 1000
983 /* Write the bad block table to the device? */ 1001 /* Create the table in memory by scanning the chip(s) */
1002 if (!(this->options & NAND_CREATE_EMPTY_BBT))
1003 create_bbt(mtd, buf, bd, chipsel);
1004
1005 td->version[i] = 1;
1006 if (md)
1007 md->version[i] = 1;
1008 writecheck:
1009 /* read back first ? */
1010 if (rd)
1011 read_abs_bbt(mtd, buf, rd, chipsel);
1012 /* If they weren't versioned, read both. */
1013 if (rd2)
1014 read_abs_bbt(mtd, buf, rd2, chipsel);
1015
1016 /* Write the bad block table to the device ? */
984 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 1017 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
985 res = write_bbt(mtd, buf, td, md, chipsel); 1018 res = write_bbt(mtd, buf, td, md, chipsel);
986 if (res < 0) 1019 if (res < 0)
987 return res; 1020 return res;
988 } 1021 }
989 1022
990 /* Write the mirror bad block table to the device? */ 1023 /* Write the mirror bad block table to the device ? */
991 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 1024 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
992 res = write_bbt(mtd, buf, md, td, chipsel); 1025 res = write_bbt(mtd, buf, md, td, chipsel);
993 if (res < 0) 1026 if (res < 0)
@@ -999,19 +1032,20 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
999 1032
1000/** 1033/**
1001 * mark_bbt_regions - [GENERIC] mark the bad block table regions 1034 * mark_bbt_regions - [GENERIC] mark the bad block table regions
1002 * @mtd: MTD device structure 1035 * @mtd: MTD device structure
1003 * @td: bad block table descriptor 1036 * @td: bad block table descriptor
1004 * 1037 *
1005 * The bad block table regions are marked as "bad" to prevent accidental 1038 * The bad block table regions are marked as "bad" to prevent
1006 * erasures / writes. The regions are identified by the mark 0x02. 1039 * accidental erasures / writes. The regions are identified by
1007 */ 1040 * the mark 0x02.
1041*/
1008static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) 1042static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1009{ 1043{
1010 struct nand_chip *this = mtd->priv; 1044 struct nand_chip *this = mtd->priv;
1011 int i, j, chips, block, nrblocks, update; 1045 int i, j, chips, block, nrblocks, update;
1012 uint8_t oldval, newval; 1046 uint8_t oldval, newval;
1013 1047
1014 /* Do we have a bbt per chip? */ 1048 /* Do we have a bbt per chip ? */
1015 if (td->options & NAND_BBT_PERCHIP) { 1049 if (td->options & NAND_BBT_PERCHIP) {
1016 chips = this->numchips; 1050 chips = this->numchips;
1017 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); 1051 nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -1048,11 +1082,9 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1048 update = 1; 1082 update = 1;
1049 block += 2; 1083 block += 2;
1050 } 1084 }
1051 /* 1085 /* If we want reserved blocks to be recorded to flash, and some
1052 * If we want reserved blocks to be recorded to flash, and some 1086 new ones have been marked, then we need to update the stored
1053 * new ones have been marked, then we need to update the stored 1087 bbts. This should only happen once. */
1054 * bbts. This should only happen once.
1055 */
1056 if (update && td->reserved_block_code) 1088 if (update && td->reserved_block_code)
1057 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); 1089 nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
1058 } 1090 }
@@ -1060,8 +1092,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1060 1092
1061/** 1093/**
1062 * verify_bbt_descr - verify the bad block description 1094 * verify_bbt_descr - verify the bad block description
1063 * @mtd: MTD device structure 1095 * @mtd: MTD device structure
1064 * @bd: the table to verify 1096 * @bd: the table to verify
1065 * 1097 *
1066 * This functions performs a few sanity checks on the bad block description 1098 * This functions performs a few sanity checks on the bad block description
1067 * table. 1099 * table.
@@ -1079,16 +1111,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1079 pattern_len = bd->len; 1111 pattern_len = bd->len;
1080 bits = bd->options & NAND_BBT_NRBITS_MSK; 1112 bits = bd->options & NAND_BBT_NRBITS_MSK;
1081 1113
1082 BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) && 1114 BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
1083 !(this->bbt_options & NAND_BBT_USE_FLASH)); 1115 !(this->options & NAND_USE_FLASH_BBT));
1084 BUG_ON(!bits); 1116 BUG_ON(!bits);
1085 1117
1086 if (bd->options & NAND_BBT_VERSION) 1118 if (bd->options & NAND_BBT_VERSION)
1087 pattern_len++; 1119 pattern_len++;
1088 1120
1089 if (bd->options & NAND_BBT_NO_OOB) { 1121 if (bd->options & NAND_BBT_NO_OOB) {
1090 BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH)); 1122 BUG_ON(!(this->options & NAND_USE_FLASH_BBT));
1091 BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB)); 1123 BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB));
1092 BUG_ON(bd->offs); 1124 BUG_ON(bd->offs);
1093 if (bd->options & NAND_BBT_VERSION) 1125 if (bd->options & NAND_BBT_VERSION)
1094 BUG_ON(bd->veroffs != bd->len); 1126 BUG_ON(bd->veroffs != bd->len);
@@ -1108,16 +1140,18 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1108 1140
1109/** 1141/**
1110 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) 1142 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
1111 * @mtd: MTD device structure 1143 * @mtd: MTD device structure
1112 * @bd: descriptor for the good/bad block search pattern 1144 * @bd: descriptor for the good/bad block search pattern
1113 * 1145 *
1114 * The function checks, if a bad block table(s) is/are already available. If 1146 * The function checks, if a bad block table(s) is/are already
1115 * not it scans the device for manufacturer marked good / bad blocks and writes 1147 * available. If not it scans the device for manufacturer
1116 * the bad block table(s) to the selected place. 1148 * marked good / bad blocks and writes the bad block table(s) to
1149 * the selected place.
1117 * 1150 *
1118 * The bad block table memory is allocated here. It must be freed by calling 1151 * The bad block table memory is allocated here. It must be freed
1119 * the nand_free_bbt function. 1152 * by calling the nand_free_bbt function.
1120 */ 1153 *
1154*/
1121int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) 1155int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1122{ 1156{
1123 struct nand_chip *this = mtd->priv; 1157 struct nand_chip *this = mtd->priv;
@@ -1127,21 +1161,19 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1127 struct nand_bbt_descr *md = this->bbt_md; 1161 struct nand_bbt_descr *md = this->bbt_md;
1128 1162
1129 len = mtd->size >> (this->bbt_erase_shift + 2); 1163 len = mtd->size >> (this->bbt_erase_shift + 2);
1130 /* 1164 /* Allocate memory (2bit per block) and clear the memory bad block table */
1131 * Allocate memory (2bit per block) and clear the memory bad block
1132 * table.
1133 */
1134 this->bbt = kzalloc(len, GFP_KERNEL); 1165 this->bbt = kzalloc(len, GFP_KERNEL);
1135 if (!this->bbt) 1166 if (!this->bbt) {
1167 printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
1136 return -ENOMEM; 1168 return -ENOMEM;
1169 }
1137 1170
1138 /* 1171 /* If no primary table decriptor is given, scan the device
1139 * If no primary table decriptor is given, scan the device to build a 1172 * to build a memory based bad block table
1140 * memory based bad block table.
1141 */ 1173 */
1142 if (!td) { 1174 if (!td) {
1143 if ((res = nand_memory_bbt(mtd, bd))) { 1175 if ((res = nand_memory_bbt(mtd, bd))) {
1144 pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n"); 1176 printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
1145 kfree(this->bbt); 1177 kfree(this->bbt);
1146 this->bbt = NULL; 1178 this->bbt = NULL;
1147 } 1179 }
@@ -1155,20 +1187,22 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1155 len += (len >> this->page_shift) * mtd->oobsize; 1187 len += (len >> this->page_shift) * mtd->oobsize;
1156 buf = vmalloc(len); 1188 buf = vmalloc(len);
1157 if (!buf) { 1189 if (!buf) {
1190 printk(KERN_ERR "nand_bbt: Out of memory\n");
1158 kfree(this->bbt); 1191 kfree(this->bbt);
1159 this->bbt = NULL; 1192 this->bbt = NULL;
1160 return -ENOMEM; 1193 return -ENOMEM;
1161 } 1194 }
1162 1195
1163 /* Is the bbt at a given page? */ 1196 /* Is the bbt at a given page ? */
1164 if (td->options & NAND_BBT_ABSPAGE) { 1197 if (td->options & NAND_BBT_ABSPAGE) {
1165 read_abs_bbts(mtd, buf, td, md); 1198 res = read_abs_bbts(mtd, buf, td, md);
1166 } else { 1199 } else {
1167 /* Search the bad block table using a pattern in oob */ 1200 /* Search the bad block table using a pattern in oob */
1168 search_read_bbts(mtd, buf, td, md); 1201 res = search_read_bbts(mtd, buf, td, md);
1169 } 1202 }
1170 1203
1171 res = check_create(mtd, buf, bd); 1204 if (res)
1205 res = check_create(mtd, buf, bd);
1172 1206
1173 /* Prevent the bbt regions from erasing / writing */ 1207 /* Prevent the bbt regions from erasing / writing */
1174 mark_bbt_region(mtd, td); 1208 mark_bbt_region(mtd, td);
@@ -1181,15 +1215,15 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
1181 1215
1182/** 1216/**
1183 * nand_update_bbt - [NAND Interface] update bad block table(s) 1217 * nand_update_bbt - [NAND Interface] update bad block table(s)
1184 * @mtd: MTD device structure 1218 * @mtd: MTD device structure
1185 * @offs: the offset of the newly marked block 1219 * @offs: the offset of the newly marked block
1186 * 1220 *
1187 * The function updates the bad block table(s). 1221 * The function updates the bad block table(s)
1188 */ 1222*/
1189int nand_update_bbt(struct mtd_info *mtd, loff_t offs) 1223int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1190{ 1224{
1191 struct nand_chip *this = mtd->priv; 1225 struct nand_chip *this = mtd->priv;
1192 int len, res = 0; 1226 int len, res = 0, writeops = 0;
1193 int chip, chipsel; 1227 int chip, chipsel;
1194 uint8_t *buf; 1228 uint8_t *buf;
1195 struct nand_bbt_descr *td = this->bbt_td; 1229 struct nand_bbt_descr *td = this->bbt_td;
@@ -1202,10 +1236,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1202 len = (1 << this->bbt_erase_shift); 1236 len = (1 << this->bbt_erase_shift);
1203 len += (len >> this->page_shift) * mtd->oobsize; 1237 len += (len >> this->page_shift) * mtd->oobsize;
1204 buf = kmalloc(len, GFP_KERNEL); 1238 buf = kmalloc(len, GFP_KERNEL);
1205 if (!buf) 1239 if (!buf) {
1240 printk(KERN_ERR "nand_update_bbt: Out of memory\n");
1206 return -ENOMEM; 1241 return -ENOMEM;
1242 }
1207 1243
1208 /* Do we have a bbt per chip? */ 1244 writeops = md != NULL ? 0x03 : 0x01;
1245
1246 /* Do we have a bbt per chip ? */
1209 if (td->options & NAND_BBT_PERCHIP) { 1247 if (td->options & NAND_BBT_PERCHIP) {
1210 chip = (int)(offs >> this->chip_shift); 1248 chip = (int)(offs >> this->chip_shift);
1211 chipsel = chip; 1249 chipsel = chip;
@@ -1218,14 +1256,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1218 if (md) 1256 if (md)
1219 md->version[chip]++; 1257 md->version[chip]++;
1220 1258
1221 /* Write the bad block table to the device? */ 1259 /* Write the bad block table to the device ? */
1222 if (td->options & NAND_BBT_WRITE) { 1260 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
1223 res = write_bbt(mtd, buf, td, md, chipsel); 1261 res = write_bbt(mtd, buf, td, md, chipsel);
1224 if (res < 0) 1262 if (res < 0)
1225 goto out; 1263 goto out;
1226 } 1264 }
1227 /* Write the mirror bad block table to the device? */ 1265 /* Write the mirror bad block table to the device ? */
1228 if (md && (md->options & NAND_BBT_WRITE)) { 1266 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
1229 res = write_bbt(mtd, buf, md, td, chipsel); 1267 res = write_bbt(mtd, buf, md, td, chipsel);
1230 } 1268 }
1231 1269
@@ -1234,10 +1272,8 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
1234 return res; 1272 return res;
1235} 1273}
1236 1274
1237/* 1275/* Define some generic bad / good block scan pattern which are used
1238 * Define some generic bad / good block scan pattern which are used 1276 * while scanning a device for factory marked good / bad blocks. */
1239 * while scanning a device for factory marked good / bad blocks.
1240 */
1241static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 1277static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
1242 1278
1243static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; 1279static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
@@ -1249,7 +1285,8 @@ static struct nand_bbt_descr agand_flashbased = {
1249 .pattern = scan_agand_pattern 1285 .pattern = scan_agand_pattern
1250}; 1286};
1251 1287
1252/* Generic flash bbt descriptors */ 1288/* Generic flash bbt decriptors
1289*/
1253static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; 1290static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1254static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; 1291static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1255 1292
@@ -1259,7 +1296,7 @@ static struct nand_bbt_descr bbt_main_descr = {
1259 .offs = 8, 1296 .offs = 8,
1260 .len = 4, 1297 .len = 4,
1261 .veroffs = 12, 1298 .veroffs = 12,
1262 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, 1299 .maxblocks = 4,
1263 .pattern = bbt_pattern 1300 .pattern = bbt_pattern
1264}; 1301};
1265 1302
@@ -1269,51 +1306,55 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1269 .offs = 8, 1306 .offs = 8,
1270 .len = 4, 1307 .len = 4,
1271 .veroffs = 12, 1308 .veroffs = 12,
1272 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, 1309 .maxblocks = 4,
1273 .pattern = mirror_pattern 1310 .pattern = mirror_pattern
1274}; 1311};
1275 1312
1276static struct nand_bbt_descr bbt_main_no_oob_descr = { 1313static struct nand_bbt_descr bbt_main_no_bbt_descr = {
1277 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 1314 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1278 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP 1315 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
1279 | NAND_BBT_NO_OOB, 1316 | NAND_BBT_NO_OOB,
1280 .len = 4, 1317 .len = 4,
1281 .veroffs = 4, 1318 .veroffs = 4,
1282 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, 1319 .maxblocks = 4,
1283 .pattern = bbt_pattern 1320 .pattern = bbt_pattern
1284}; 1321};
1285 1322
1286static struct nand_bbt_descr bbt_mirror_no_oob_descr = { 1323static struct nand_bbt_descr bbt_mirror_no_bbt_descr = {
1287 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 1324 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1288 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP 1325 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
1289 | NAND_BBT_NO_OOB, 1326 | NAND_BBT_NO_OOB,
1290 .len = 4, 1327 .len = 4,
1291 .veroffs = 4, 1328 .veroffs = 4,
1292 .maxblocks = NAND_BBT_SCAN_MAXBLOCKS, 1329 .maxblocks = 4,
1293 .pattern = mirror_pattern 1330 .pattern = mirror_pattern
1294}; 1331};
1295 1332
1296#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB) 1333#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
1334 NAND_BBT_SCANBYTE1AND6)
1297/** 1335/**
1298 * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure 1336 * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure
1299 * @this: NAND chip to create descriptor for 1337 * @this: NAND chip to create descriptor for
1300 * 1338 *
1301 * This function allocates and initializes a nand_bbt_descr for BBM detection 1339 * This function allocates and initializes a nand_bbt_descr for BBM detection
1302 * based on the properties of @this. The new descriptor is stored in 1340 * based on the properties of "this". The new descriptor is stored in
1303 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when 1341 * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
1304 * passed to this function. 1342 * passed to this function.
1343 *
1305 */ 1344 */
1306static int nand_create_badblock_pattern(struct nand_chip *this) 1345static int nand_create_default_bbt_descr(struct nand_chip *this)
1307{ 1346{
1308 struct nand_bbt_descr *bd; 1347 struct nand_bbt_descr *bd;
1309 if (this->badblock_pattern) { 1348 if (this->badblock_pattern) {
1310 pr_warn("Bad block pattern already allocated; not replacing\n"); 1349 printk(KERN_WARNING "BBT descr already allocated; not replacing.\n");
1311 return -EINVAL; 1350 return -EINVAL;
1312 } 1351 }
1313 bd = kzalloc(sizeof(*bd), GFP_KERNEL); 1352 bd = kzalloc(sizeof(*bd), GFP_KERNEL);
1314 if (!bd) 1353 if (!bd) {
1354 printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
1315 return -ENOMEM; 1355 return -ENOMEM;
1316 bd->options = this->bbt_options & BADBLOCK_SCAN_MASK; 1356 }
1357 bd->options = this->options & BBT_SCAN_OPTIONS;
1317 bd->offs = this->badblockpos; 1358 bd->offs = this->badblockpos;
1318 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; 1359 bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
1319 bd->pattern = scan_ff_pattern; 1360 bd->pattern = scan_ff_pattern;
@@ -1324,20 +1365,22 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
1324 1365
1325/** 1366/**
1326 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device 1367 * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
1327 * @mtd: MTD device structure 1368 * @mtd: MTD device structure
1328 * 1369 *
1329 * This function selects the default bad block table support for the device and 1370 * This function selects the default bad block table
1330 * calls the nand_scan_bbt function. 1371 * support for the device and calls the nand_scan_bbt function
1331 */ 1372 *
1373*/
1332int nand_default_bbt(struct mtd_info *mtd) 1374int nand_default_bbt(struct mtd_info *mtd)
1333{ 1375{
1334 struct nand_chip *this = mtd->priv; 1376 struct nand_chip *this = mtd->priv;
1335 1377
1336 /* 1378 /* Default for AG-AND. We must use a flash based
1337 * Default for AG-AND. We must use a flash based bad block table as the 1379 * bad block table as the devices have factory marked
1338 * devices have factory marked _good_ blocks. Erasing those blocks 1380 * _good_ blocks. Erasing those blocks leads to loss
1339 * leads to loss of the good / bad information, so we _must_ store this 1381 * of the good / bad information, so we _must_ store
1340 * information in a good / bad table during startup. 1382 * this information in a good / bad table during
1383 * startup
1341 */ 1384 */
1342 if (this->options & NAND_IS_AND) { 1385 if (this->options & NAND_IS_AND) {
1343 /* Use the default pattern descriptors */ 1386 /* Use the default pattern descriptors */
@@ -1345,17 +1388,17 @@ int nand_default_bbt(struct mtd_info *mtd)
1345 this->bbt_td = &bbt_main_descr; 1388 this->bbt_td = &bbt_main_descr;
1346 this->bbt_md = &bbt_mirror_descr; 1389 this->bbt_md = &bbt_mirror_descr;
1347 } 1390 }
1348 this->bbt_options |= NAND_BBT_USE_FLASH; 1391 this->options |= NAND_USE_FLASH_BBT;
1349 return nand_scan_bbt(mtd, &agand_flashbased); 1392 return nand_scan_bbt(mtd, &agand_flashbased);
1350 } 1393 }
1351 1394
1352 /* Is a flash based bad block table requested? */ 1395 /* Is a flash based bad block table requested ? */
1353 if (this->bbt_options & NAND_BBT_USE_FLASH) { 1396 if (this->options & NAND_USE_FLASH_BBT) {
1354 /* Use the default pattern descriptors */ 1397 /* Use the default pattern descriptors */
1355 if (!this->bbt_td) { 1398 if (!this->bbt_td) {
1356 if (this->bbt_options & NAND_BBT_NO_OOB) { 1399 if (this->options & NAND_USE_FLASH_BBT_NO_OOB) {
1357 this->bbt_td = &bbt_main_no_oob_descr; 1400 this->bbt_td = &bbt_main_no_bbt_descr;
1358 this->bbt_md = &bbt_mirror_no_oob_descr; 1401 this->bbt_md = &bbt_mirror_no_bbt_descr;
1359 } else { 1402 } else {
1360 this->bbt_td = &bbt_main_descr; 1403 this->bbt_td = &bbt_main_descr;
1361 this->bbt_md = &bbt_mirror_descr; 1404 this->bbt_md = &bbt_mirror_descr;
@@ -1367,17 +1410,18 @@ int nand_default_bbt(struct mtd_info *mtd)
1367 } 1410 }
1368 1411
1369 if (!this->badblock_pattern) 1412 if (!this->badblock_pattern)
1370 nand_create_badblock_pattern(this); 1413 nand_create_default_bbt_descr(this);
1371 1414
1372 return nand_scan_bbt(mtd, this->badblock_pattern); 1415 return nand_scan_bbt(mtd, this->badblock_pattern);
1373} 1416}
1374 1417
1375/** 1418/**
1376 * nand_isbad_bbt - [NAND Interface] Check if a block is bad 1419 * nand_isbad_bbt - [NAND Interface] Check if a block is bad
1377 * @mtd: MTD device structure 1420 * @mtd: MTD device structure
1378 * @offs: offset in the device 1421 * @offs: offset in the device
1379 * @allowbbt: allow access to bad block table region 1422 * @allowbbt: allow access to bad block table region
1380 */ 1423 *
1424*/
1381int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) 1425int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1382{ 1426{
1383 struct nand_chip *this = mtd->priv; 1427 struct nand_chip *this = mtd->priv;
@@ -1388,9 +1432,8 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1388 block = (int)(offs >> (this->bbt_erase_shift - 1)); 1432 block = (int)(offs >> (this->bbt_erase_shift - 1));
1389 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; 1433 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
1390 1434
1391 pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: " 1435 DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
1392 "(block %d) 0x%02x\n", 1436 (unsigned int)offs, block >> 1, res);
1393 (unsigned int)offs, block >> 1, res);
1394 1437
1395 switch ((int)res) { 1438 switch ((int)res) {
1396 case 0x00: 1439 case 0x00:
@@ -1405,4 +1448,3 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1405 1448
1406EXPORT_SYMBOL(nand_scan_bbt); 1449EXPORT_SYMBOL(nand_scan_bbt);
1407EXPORT_SYMBOL(nand_default_bbt); 1450EXPORT_SYMBOL(nand_default_bbt);
1408EXPORT_SYMBOL_GPL(nand_update_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 3803e0bba23..0f931e75711 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -93,8 +93,8 @@ int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); 93 buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
94 /* else error in ecc, no action needed */ 94 /* else error in ecc, no action needed */
95 95
96 pr_debug("%s: corrected bitflip %u\n", __func__, 96 DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n",
97 errloc[i]); 97 __func__, errloc[i]);
98 } 98 }
99 } else if (count < 0) { 99 } else if (count < 0) {
100 printk(KERN_ERR "ecc unrecoverable error\n"); 100 printk(KERN_ERR "ecc unrecoverable error\n");
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index b7cfe0d3712..271b8e735e8 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -110,7 +110,7 @@ static const char bitsperbyte[256] = {
110 110
111/* 111/*
112 * addressbits is a lookup table to filter out the bits from the xor-ed 112 * addressbits is a lookup table to filter out the bits from the xor-ed
113 * ECC data that identify the faulty location. 113 * ecc data that identify the faulty location.
114 * this is only used for repairing parity 114 * this is only used for repairing parity
115 * see the comments in nand_correct_data for more details 115 * see the comments in nand_correct_data for more details
116 */ 116 */
@@ -153,7 +153,7 @@ static const char addressbits[256] = {
153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte 153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
154 * block 154 * block
155 * @buf: input buffer with raw data 155 * @buf: input buffer with raw data
156 * @eccsize: data bytes per ECC step (256 or 512) 156 * @eccsize: data bytes per ecc step (256 or 512)
157 * @code: output buffer with ECC 157 * @code: output buffer with ECC
158 */ 158 */
159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, 159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
@@ -348,7 +348,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
348 rp17 = (par ^ rp16) & 0xff; 348 rp17 = (par ^ rp16) & 0xff;
349 349
350 /* 350 /*
351 * Finally calculate the ECC bits. 351 * Finally calculate the ecc bits.
352 * Again here it might seem that there are performance optimisations 352 * Again here it might seem that there are performance optimisations
353 * possible, but benchmarks showed that on the system this is developed 353 * possible, but benchmarks showed that on the system this is developed
354 * the code below is the fastest 354 * the code below is the fastest
@@ -436,7 +436,7 @@ EXPORT_SYMBOL(nand_calculate_ecc);
436 * @buf: raw data read from the chip 436 * @buf: raw data read from the chip
437 * @read_ecc: ECC from the chip 437 * @read_ecc: ECC from the chip
438 * @calc_ecc: the ECC calculated from raw data 438 * @calc_ecc: the ECC calculated from raw data
439 * @eccsize: data bytes per ECC step (256 or 512) 439 * @eccsize: data bytes per ecc step (256 or 512)
440 * 440 *
441 * Detect and correct a 1 bit error for eccsize byte block 441 * Detect and correct a 1 bit error for eccsize byte block
442 */ 442 */
@@ -505,7 +505,7 @@ int __nand_correct_data(unsigned char *buf,
505 } 505 }
506 /* count nr of bits; use table lookup, faster than calculating it */ 506 /* count nr of bits; use table lookup, faster than calculating it */
507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) 507 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
508 return 1; /* error in ECC data; no action needed */ 508 return 1; /* error in ecc data; no action needed */
509 509
510 printk(KERN_ERR "uncorrectable error : "); 510 printk(KERN_ERR "uncorrectable error : ");
511 return -1; 511 return -1;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index e3aa2748a6e..00cf1b0d605 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,15 +70,14 @@ struct nand_flash_dev nand_flash_ids[] = {
70 * These are the new chips with large page size. The pagesize and the 70 * These are the new chips with large page size. The pagesize and the
71 * erasesize is determined from the extended id bytes 71 * erasesize is determined from the extended id bytes
72 */ 72 */
73#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS 73#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) 74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
75 75
76 /* 512 Megabit */ 76 /*512 Megabit */
77 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS}, 77 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
78 {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS}, 78 {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
79 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS}, 79 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
80 {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS}, 80 {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
81 {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
82 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16}, 81 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
83 {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16}, 82 {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
84 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16}, 83 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
@@ -157,7 +156,9 @@ struct nand_flash_dev nand_flash_ids[] = {
157 * writes possible, but not implemented now 156 * writes possible, but not implemented now
158 */ 157 */
159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, 158 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
160 NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, 159 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
160 BBT_AUTO_REFRESH
161 },
161 162
162 {NULL,} 163 {NULL,}
163}; 164};
@@ -174,9 +175,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
174 {NAND_MFR_STMICRO, "ST Micro"}, 175 {NAND_MFR_STMICRO, "ST Micro"},
175 {NAND_MFR_HYNIX, "Hynix"}, 176 {NAND_MFR_HYNIX, "Hynix"},
176 {NAND_MFR_MICRON, "Micron"}, 177 {NAND_MFR_MICRON, "Micron"},
177 {NAND_MFR_AMD, "AMD/Spansion"}, 178 {NAND_MFR_AMD, "AMD"},
178 {NAND_MFR_MACRONIX, "Macronix"},
179 {NAND_MFR_EON, "Eon"},
180 {0x0, "Unknown"} 179 {0x0, "Unknown"}
181}; 180};
182 181
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 818b65c85d1..357e8c5252a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,7 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <linux/math64.h> 31#include <asm/div64.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/errno.h> 33#include <linux/errno.h>
34#include <linux/string.h> 34#include <linux/string.h>
@@ -42,8 +42,6 @@
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/fs.h> 43#include <linux/fs.h>
44#include <linux/pagemap.h> 44#include <linux/pagemap.h>
45#include <linux/seq_file.h>
46#include <linux/debugfs.h>
47 45
48/* Default simulator parameters values */ 46/* Default simulator parameters values */
49#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ 47#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -107,6 +105,7 @@ static char *weakblocks = NULL;
107static char *weakpages = NULL; 105static char *weakpages = NULL;
108static unsigned int bitflips = 0; 106static unsigned int bitflips = 0;
109static char *gravepages = NULL; 107static char *gravepages = NULL;
108static unsigned int rptwear = 0;
110static unsigned int overridesize = 0; 109static unsigned int overridesize = 0;
111static char *cache_file = NULL; 110static char *cache_file = NULL;
112static unsigned int bbt; 111static unsigned int bbt;
@@ -131,6 +130,7 @@ module_param(weakblocks, charp, 0400);
131module_param(weakpages, charp, 0400); 130module_param(weakpages, charp, 0400);
132module_param(bitflips, uint, 0400); 131module_param(bitflips, uint, 0400);
133module_param(gravepages, charp, 0400); 132module_param(gravepages, charp, 0400);
133module_param(rptwear, uint, 0400);
134module_param(overridesize, uint, 0400); 134module_param(overridesize, uint, 0400);
135module_param(cache_file, charp, 0400); 135module_param(cache_file, charp, 0400);
136module_param(bbt, uint, 0400); 136module_param(bbt, uint, 0400);
@@ -162,6 +162,7 @@ MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (z
162MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]" 162MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
163 " separated by commas e.g. 1401:2 means page 1401" 163 " separated by commas e.g. 1401:2 means page 1401"
164 " can be read only twice before failing"); 164 " can be read only twice before failing");
165MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
165MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. " 166MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
166 "The size is specified in erase blocks and as the exponent of a power of two" 167 "The size is specified in erase blocks and as the exponent of a power of two"
167 " e.g. 5 means a size of 32 erase blocks"); 168 " e.g. 5 means a size of 32 erase blocks");
@@ -267,6 +268,7 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
267#define OPT_PAGE512 0x00000002 /* 512-byte page chips */ 268#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
268#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ 269#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
269#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ 270#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
271#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
270#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 272#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
271#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ 273#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
272#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ 274#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -285,11 +287,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
285/* Maximum page cache pages needed to read or write a NAND page to the cache_file */ 287/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
286#define NS_MAX_HELD_PAGES 16 288#define NS_MAX_HELD_PAGES 16
287 289
288struct nandsim_debug_info {
289 struct dentry *dfs_root;
290 struct dentry *dfs_wear_report;
291};
292
293/* 290/*
294 * A union to represent flash memory contents and flash buffer. 291 * A union to represent flash memory contents and flash buffer.
295 */ 292 */
@@ -369,8 +366,6 @@ struct nandsim {
369 void *file_buf; 366 void *file_buf;
370 struct page *held_pages[NS_MAX_HELD_PAGES]; 367 struct page *held_pages[NS_MAX_HELD_PAGES];
371 int held_cnt; 368 int held_cnt;
372
373 struct nandsim_debug_info dbg;
374}; 369};
375 370
376/* 371/*
@@ -448,122 +443,12 @@ static LIST_HEAD(grave_pages);
448static unsigned long *erase_block_wear = NULL; 443static unsigned long *erase_block_wear = NULL;
449static unsigned int wear_eb_count = 0; 444static unsigned int wear_eb_count = 0;
450static unsigned long total_wear = 0; 445static unsigned long total_wear = 0;
446static unsigned int rptwear_cnt = 0;
451 447
452/* MTD structure for NAND controller */ 448/* MTD structure for NAND controller */
453static struct mtd_info *nsmtd; 449static struct mtd_info *nsmtd;
454 450
455static int nandsim_debugfs_show(struct seq_file *m, void *private) 451static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
456{
457 unsigned long wmin = -1, wmax = 0, avg;
458 unsigned long deciles[10], decile_max[10], tot = 0;
459 unsigned int i;
460
461 /* Calc wear stats */
462 for (i = 0; i < wear_eb_count; ++i) {
463 unsigned long wear = erase_block_wear[i];
464 if (wear < wmin)
465 wmin = wear;
466 if (wear > wmax)
467 wmax = wear;
468 tot += wear;
469 }
470
471 for (i = 0; i < 9; ++i) {
472 deciles[i] = 0;
473 decile_max[i] = (wmax * (i + 1) + 5) / 10;
474 }
475 deciles[9] = 0;
476 decile_max[9] = wmax;
477 for (i = 0; i < wear_eb_count; ++i) {
478 int d;
479 unsigned long wear = erase_block_wear[i];
480 for (d = 0; d < 10; ++d)
481 if (wear <= decile_max[d]) {
482 deciles[d] += 1;
483 break;
484 }
485 }
486 avg = tot / wear_eb_count;
487
488 /* Output wear report */
489 seq_printf(m, "Total numbers of erases: %lu\n", tot);
490 seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
491 seq_printf(m, "Average number of erases: %lu\n", avg);
492 seq_printf(m, "Maximum number of erases: %lu\n", wmax);
493 seq_printf(m, "Minimum number of erases: %lu\n", wmin);
494 for (i = 0; i < 10; ++i) {
495 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
496 if (from > decile_max[i])
497 continue;
498 seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
499 from,
500 decile_max[i],
501 deciles[i]);
502 }
503
504 return 0;
505}
506
507static int nandsim_debugfs_open(struct inode *inode, struct file *file)
508{
509 return single_open(file, nandsim_debugfs_show, inode->i_private);
510}
511
512static const struct file_operations dfs_fops = {
513 .open = nandsim_debugfs_open,
514 .read = seq_read,
515 .llseek = seq_lseek,
516 .release = single_release,
517};
518
519/**
520 * nandsim_debugfs_create - initialize debugfs
521 * @dev: nandsim device description object
522 *
523 * This function creates all debugfs files for UBI device @ubi. Returns zero in
524 * case of success and a negative error code in case of failure.
525 */
526static int nandsim_debugfs_create(struct nandsim *dev)
527{
528 struct nandsim_debug_info *dbg = &dev->dbg;
529 struct dentry *dent;
530 int err;
531
532 if (!IS_ENABLED(CONFIG_DEBUG_FS))
533 return 0;
534
535 dent = debugfs_create_dir("nandsim", NULL);
536 if (IS_ERR_OR_NULL(dent)) {
537 int err = dent ? -ENODEV : PTR_ERR(dent);
538
539 NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
540 err);
541 return err;
542 }
543 dbg->dfs_root = dent;
544
545 dent = debugfs_create_file("wear_report", S_IRUSR,
546 dbg->dfs_root, dev, &dfs_fops);
547 if (IS_ERR_OR_NULL(dent))
548 goto out_remove;
549 dbg->dfs_wear_report = dent;
550
551 return 0;
552
553out_remove:
554 debugfs_remove_recursive(dbg->dfs_root);
555 err = dent ? PTR_ERR(dent) : -ENODEV;
556 return err;
557}
558
559/**
560 * nandsim_debugfs_remove - destroy all debugfs files
561 */
562static void nandsim_debugfs_remove(struct nandsim *ns)
563{
564 if (IS_ENABLED(CONFIG_DEBUG_FS))
565 debugfs_remove_recursive(ns->dbg.dfs_root);
566}
567 452
568/* 453/*
569 * Allocate array of page pointers, create slab allocation for an array 454 * Allocate array of page pointers, create slab allocation for an array
@@ -662,6 +547,12 @@ static char *get_partition_name(int i)
662 return kstrdup(buf, GFP_KERNEL); 547 return kstrdup(buf, GFP_KERNEL);
663} 548}
664 549
550static uint64_t divide(uint64_t n, uint32_t d)
551{
552 do_div(n, d);
553 return n;
554}
555
665/* 556/*
666 * Initialize the nandsim structure. 557 * Initialize the nandsim structure.
667 * 558 *
@@ -690,7 +581,7 @@ static int init_nandsim(struct mtd_info *mtd)
690 ns->geom.oobsz = mtd->oobsize; 581 ns->geom.oobsz = mtd->oobsize;
691 ns->geom.secsz = mtd->erasesize; 582 ns->geom.secsz = mtd->erasesize;
692 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz; 583 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
693 ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz); 584 ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
694 ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz; 585 ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
695 ns->geom.secshift = ffs(ns->geom.secsz) - 1; 586 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
696 ns->geom.pgshift = chip->page_shift; 587 ns->geom.pgshift = chip->page_shift;
@@ -703,7 +594,7 @@ static int init_nandsim(struct mtd_info *mtd)
703 ns->options |= OPT_PAGE256; 594 ns->options |= OPT_PAGE256;
704 } 595 }
705 else if (ns->geom.pgsz == 512) { 596 else if (ns->geom.pgsz == 512) {
706 ns->options |= OPT_PAGE512; 597 ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
707 if (ns->busw == 8) 598 if (ns->busw == 8)
708 ns->options |= OPT_PAGE512_8BIT; 599 ns->options |= OPT_PAGE512_8BIT;
709 } else if (ns->geom.pgsz == 2048) { 600 } else if (ns->geom.pgsz == 2048) {
@@ -772,6 +663,8 @@ static int init_nandsim(struct mtd_info *mtd)
772 for (i = 0; nand_flash_ids[i].name != NULL; i++) { 663 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
773 if (second_id_byte != nand_flash_ids[i].id) 664 if (second_id_byte != nand_flash_ids[i].id)
774 continue; 665 continue;
666 if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
667 ns->options |= OPT_AUTOINCR;
775 } 668 }
776 669
777 if (ns->busw == 16) 670 if (ns->busw == 16)
@@ -844,7 +737,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
844 return -EINVAL; 737 return -EINVAL;
845 } 738 }
846 offset = erase_block_no * ns->geom.secsz; 739 offset = erase_block_no * ns->geom.secsz;
847 if (mtd_block_markbad(mtd, offset)) { 740 if (mtd->block_markbad(mtd, offset)) {
848 NS_ERR("invalid badblocks.\n"); 741 NS_ERR("invalid badblocks.\n");
849 return -EINVAL; 742 return -EINVAL;
850 } 743 }
@@ -1029,7 +922,9 @@ static int setup_wear_reporting(struct mtd_info *mtd)
1029{ 922{
1030 size_t mem; 923 size_t mem;
1031 924
1032 wear_eb_count = div_u64(mtd->size, mtd->erasesize); 925 if (!rptwear)
926 return 0;
927 wear_eb_count = divide(mtd->size, mtd->erasesize);
1033 mem = wear_eb_count * sizeof(unsigned long); 928 mem = wear_eb_count * sizeof(unsigned long);
1034 if (mem / sizeof(unsigned long) != wear_eb_count) { 929 if (mem / sizeof(unsigned long) != wear_eb_count) {
1035 NS_ERR("Too many erase blocks for wear reporting\n"); 930 NS_ERR("Too many erase blocks for wear reporting\n");
@@ -1045,18 +940,64 @@ static int setup_wear_reporting(struct mtd_info *mtd)
1045 940
1046static void update_wear(unsigned int erase_block_no) 941static void update_wear(unsigned int erase_block_no)
1047{ 942{
943 unsigned long wmin = -1, wmax = 0, avg;
944 unsigned long deciles[10], decile_max[10], tot = 0;
945 unsigned int i;
946
1048 if (!erase_block_wear) 947 if (!erase_block_wear)
1049 return; 948 return;
1050 total_wear += 1; 949 total_wear += 1;
1051 /*
1052 * TODO: Notify this through a debugfs entry,
1053 * instead of showing an error message.
1054 */
1055 if (total_wear == 0) 950 if (total_wear == 0)
1056 NS_ERR("Erase counter total overflow\n"); 951 NS_ERR("Erase counter total overflow\n");
1057 erase_block_wear[erase_block_no] += 1; 952 erase_block_wear[erase_block_no] += 1;
1058 if (erase_block_wear[erase_block_no] == 0) 953 if (erase_block_wear[erase_block_no] == 0)
1059 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no); 954 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
955 rptwear_cnt += 1;
956 if (rptwear_cnt < rptwear)
957 return;
958 rptwear_cnt = 0;
959 /* Calc wear stats */
960 for (i = 0; i < wear_eb_count; ++i) {
961 unsigned long wear = erase_block_wear[i];
962 if (wear < wmin)
963 wmin = wear;
964 if (wear > wmax)
965 wmax = wear;
966 tot += wear;
967 }
968 for (i = 0; i < 9; ++i) {
969 deciles[i] = 0;
970 decile_max[i] = (wmax * (i + 1) + 5) / 10;
971 }
972 deciles[9] = 0;
973 decile_max[9] = wmax;
974 for (i = 0; i < wear_eb_count; ++i) {
975 int d;
976 unsigned long wear = erase_block_wear[i];
977 for (d = 0; d < 10; ++d)
978 if (wear <= decile_max[d]) {
979 deciles[d] += 1;
980 break;
981 }
982 }
983 avg = tot / wear_eb_count;
984 /* Output wear report */
985 NS_INFO("*** Wear Report ***\n");
986 NS_INFO("Total numbers of erases: %lu\n", tot);
987 NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
988 NS_INFO("Average number of erases: %lu\n", avg);
989 NS_INFO("Maximum number of erases: %lu\n", wmax);
990 NS_INFO("Minimum number of erases: %lu\n", wmin);
991 for (i = 0; i < 10; ++i) {
992 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
993 if (from > decile_max[i])
994 continue;
995 NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
996 from,
997 decile_max[i],
998 deciles[i]);
999 }
1000 NS_INFO("*** End of Wear Report ***\n");
1060} 1001}
1061 1002
1062/* 1003/*
@@ -1467,7 +1408,10 @@ int do_read_error(struct nandsim *ns, int num)
1467 unsigned int page_no = ns->regs.row; 1408 unsigned int page_no = ns->regs.row;
1468 1409
1469 if (read_error(page_no)) { 1410 if (read_error(page_no)) {
1470 prandom_bytes(ns->buf.byte, num); 1411 int i;
1412 memset(ns->buf.byte, 0xFF, num);
1413 for (i = 0; i < num; ++i)
1414 ns->buf.byte[i] = random32();
1471 NS_WARN("simulating read error in page %u\n", page_no); 1415 NS_WARN("simulating read error in page %u\n", page_no);
1472 return 1; 1416 return 1;
1473 } 1417 }
@@ -1992,8 +1936,20 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
1992 if (ns->regs.count == ns->regs.num) { 1936 if (ns->regs.count == ns->regs.num) {
1993 NS_DBG("read_byte: all bytes were read\n"); 1937 NS_DBG("read_byte: all bytes were read\n");
1994 1938
1995 if (NS_STATE(ns->nxstate) == STATE_READY) 1939 /*
1940 * The OPT_AUTOINCR allows to read next consecutive pages without
1941 * new read operation cycle.
1942 */
1943 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1944 ns->regs.count = 0;
1945 if (ns->regs.row + 1 < ns->geom.pgnum)
1946 ns->regs.row += 1;
1947 NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
1948 do_state_action(ns, ACTION_CPY);
1949 }
1950 else if (NS_STATE(ns->nxstate) == STATE_READY)
1996 switch_state(ns); 1951 switch_state(ns);
1952
1997 } 1953 }
1998 1954
1999 return outb; 1955 return outb;
@@ -2247,13 +2203,33 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2247 ns->regs.count += len; 2203 ns->regs.count += len;
2248 2204
2249 if (ns->regs.count == ns->regs.num) { 2205 if (ns->regs.count == ns->regs.num) {
2250 if (NS_STATE(ns->nxstate) == STATE_READY) 2206 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
2207 ns->regs.count = 0;
2208 if (ns->regs.row + 1 < ns->geom.pgnum)
2209 ns->regs.row += 1;
2210 NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
2211 do_state_action(ns, ACTION_CPY);
2212 }
2213 else if (NS_STATE(ns->nxstate) == STATE_READY)
2251 switch_state(ns); 2214 switch_state(ns);
2252 } 2215 }
2253 2216
2254 return; 2217 return;
2255} 2218}
2256 2219
2220static int ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
2221{
2222 ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
2223
2224 if (!memcmp(buf, &ns_verify_buf[0], len)) {
2225 NS_DBG("verify_buf: the buffer is OK\n");
2226 return 0;
2227 } else {
2228 NS_DBG("verify_buf: the buffer is wrong\n");
2229 return -EFAULT;
2230 }
2231}
2232
2257/* 2233/*
2258 * Module initialization function 2234 * Module initialization function
2259 */ 2235 */
@@ -2288,6 +2264,7 @@ static int __init ns_init_module(void)
2288 chip->dev_ready = ns_device_ready; 2264 chip->dev_ready = ns_device_ready;
2289 chip->write_buf = ns_nand_write_buf; 2265 chip->write_buf = ns_nand_write_buf;
2290 chip->read_buf = ns_nand_read_buf; 2266 chip->read_buf = ns_nand_read_buf;
2267 chip->verify_buf = ns_nand_verify_buf;
2291 chip->read_word = ns_nand_read_word; 2268 chip->read_word = ns_nand_read_word;
2292 chip->ecc.mode = NAND_ECC_SOFT; 2269 chip->ecc.mode = NAND_ECC_SOFT;
2293 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ 2270 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2296,9 +2273,9 @@ static int __init ns_init_module(void)
2296 2273
2297 switch (bbt) { 2274 switch (bbt) {
2298 case 2: 2275 case 2:
2299 chip->bbt_options |= NAND_BBT_NO_OOB; 2276 chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
2300 case 1: 2277 case 1:
2301 chip->bbt_options |= NAND_BBT_USE_FLASH; 2278 chip->options |= NAND_USE_FLASH_BBT;
2302 case 0: 2279 case 0:
2303 break; 2280 break;
2304 default: 2281 default:
@@ -2384,7 +2361,6 @@ static int __init ns_init_module(void)
2384 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; 2361 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2385 if (new_size >> overridesize != nsmtd->erasesize) { 2362 if (new_size >> overridesize != nsmtd->erasesize) {
2386 NS_ERR("overridesize is too big\n"); 2363 NS_ERR("overridesize is too big\n");
2387 retval = -EINVAL;
2388 goto err_exit; 2364 goto err_exit;
2389 } 2365 }
2390 /* N.B. This relies on nand_scan not doing anything with the size before we change it */ 2366 /* N.B. This relies on nand_scan not doing anything with the size before we change it */
@@ -2397,9 +2373,6 @@ static int __init ns_init_module(void)
2397 if ((retval = setup_wear_reporting(nsmtd)) != 0) 2373 if ((retval = setup_wear_reporting(nsmtd)) != 0)
2398 goto err_exit; 2374 goto err_exit;
2399 2375
2400 if ((retval = nandsim_debugfs_create(nand)) != 0)
2401 goto err_exit;
2402
2403 if ((retval = init_nandsim(nsmtd)) != 0) 2376 if ((retval = init_nandsim(nsmtd)) != 0)
2404 goto err_exit; 2377 goto err_exit;
2405 2378
@@ -2439,7 +2412,6 @@ static void __exit ns_cleanup_module(void)
2439 struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv; 2412 struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
2440 int i; 2413 int i;
2441 2414
2442 nandsim_debugfs_remove(ns);
2443 free_nandsim(ns); /* Free nandsim private resources */ 2415 free_nandsim(ns); /* Free nandsim private resources */
2444 nand_release(nsmtd); /* Unregister driver */ 2416 nand_release(nsmtd); /* Unregister driver */
2445 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) 2417 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 8e148f1478f..ea2dea8a9c8 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -42,6 +42,7 @@ struct ndfc_controller {
42 struct nand_chip chip; 42 struct nand_chip chip;
43 int chip_select; 43 int chip_select;
44 struct nand_hw_control ndfc_control; 44 struct nand_hw_control ndfc_control;
45 struct mtd_partition *parts;
45}; 46};
46 47
47static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS]; 48static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -140,15 +141,31 @@ static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
140 out_be32(ndfc->ndfcbase + NDFC_DATA, *p++); 141 out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
141} 142}
142 143
144static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
145{
146 struct nand_chip *chip = mtd->priv;
147 struct ndfc_controller *ndfc = chip->priv;
148 uint32_t *p = (uint32_t *) buf;
149
150 for(;len > 0; len -= 4)
151 if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
152 return -EFAULT;
153 return 0;
154}
155
143/* 156/*
144 * Initialize chip structure 157 * Initialize chip structure
145 */ 158 */
146static int ndfc_chip_init(struct ndfc_controller *ndfc, 159static int ndfc_chip_init(struct ndfc_controller *ndfc,
147 struct device_node *node) 160 struct device_node *node)
148{ 161{
162#ifdef CONFIG_MTD_CMDLINE_PARTS
163 static const char *part_types[] = { "cmdlinepart", NULL };
164#else
165 static const char *part_types[] = { NULL };
166#endif
149 struct device_node *flash_np; 167 struct device_node *flash_np;
150 struct nand_chip *chip = &ndfc->chip; 168 struct nand_chip *chip = &ndfc->chip;
151 struct mtd_part_parser_data ppdata;
152 int ret; 169 int ret;
153 170
154 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; 171 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
@@ -160,13 +177,13 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
160 chip->controller = &ndfc->ndfc_control; 177 chip->controller = &ndfc->ndfc_control;
161 chip->read_buf = ndfc_read_buf; 178 chip->read_buf = ndfc_read_buf;
162 chip->write_buf = ndfc_write_buf; 179 chip->write_buf = ndfc_write_buf;
180 chip->verify_buf = ndfc_verify_buf;
163 chip->ecc.correct = nand_correct_data; 181 chip->ecc.correct = nand_correct_data;
164 chip->ecc.hwctl = ndfc_enable_hwecc; 182 chip->ecc.hwctl = ndfc_enable_hwecc;
165 chip->ecc.calculate = ndfc_calculate_ecc; 183 chip->ecc.calculate = ndfc_calculate_ecc;
166 chip->ecc.mode = NAND_ECC_HW; 184 chip->ecc.mode = NAND_ECC_HW;
167 chip->ecc.size = 256; 185 chip->ecc.size = 256;
168 chip->ecc.bytes = 3; 186 chip->ecc.bytes = 3;
169 chip->ecc.strength = 1;
170 chip->priv = ndfc; 187 chip->priv = ndfc;
171 188
172 ndfc->mtd.priv = chip; 189 ndfc->mtd.priv = chip;
@@ -176,7 +193,6 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
176 if (!flash_np) 193 if (!flash_np)
177 return -ENODEV; 194 return -ENODEV;
178 195
179 ppdata.of_node = flash_np;
180 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", 196 ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
181 dev_name(&ndfc->ofdev->dev), flash_np->name); 197 dev_name(&ndfc->ofdev->dev), flash_np->name);
182 if (!ndfc->mtd.name) { 198 if (!ndfc->mtd.name) {
@@ -188,7 +204,18 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
188 if (ret) 204 if (ret)
189 goto err; 205 goto err;
190 206
191 ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0); 207 ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
208 if (ret < 0)
209 goto err;
210
211 if (ret == 0) {
212 ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
213 &ndfc->parts);
214 if (ret < 0)
215 goto err;
216 }
217
218 ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
192 219
193err: 220err:
194 of_node_put(flash_np); 221 of_node_put(flash_np);
@@ -197,7 +224,7 @@ err:
197 return ret; 224 return ret;
198} 225}
199 226
200static int ndfc_probe(struct platform_device *ofdev) 227static int __devinit ndfc_probe(struct platform_device *ofdev)
201{ 228{
202 struct ndfc_controller *ndfc; 229 struct ndfc_controller *ndfc;
203 const __be32 *reg; 230 const __be32 *reg;
@@ -256,12 +283,11 @@ static int ndfc_probe(struct platform_device *ofdev)
256 return 0; 283 return 0;
257} 284}
258 285
259static int ndfc_remove(struct platform_device *ofdev) 286static int __devexit ndfc_remove(struct platform_device *ofdev)
260{ 287{
261 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); 288 struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
262 289
263 nand_release(&ndfc->mtd); 290 nand_release(&ndfc->mtd);
264 kfree(ndfc->mtd.name);
265 291
266 return 0; 292 return 0;
267} 293}
@@ -279,10 +305,21 @@ static struct platform_driver ndfc_driver = {
279 .of_match_table = ndfc_match, 305 .of_match_table = ndfc_match,
280 }, 306 },
281 .probe = ndfc_probe, 307 .probe = ndfc_probe,
282 .remove = ndfc_remove, 308 .remove = __devexit_p(ndfc_remove),
283}; 309};
284 310
285module_platform_driver(ndfc_driver); 311static int __init ndfc_nand_init(void)
312{
313 return platform_driver_register(&ndfc_driver);
314}
315
316static void __exit ndfc_nand_exit(void)
317{
318 platform_driver_unregister(&ndfc_driver);
319}
320
321module_init(ndfc_nand_init);
322module_exit(ndfc_nand_exit);
286 323
287MODULE_LICENSE("GPL"); 324MODULE_LICENSE("GPL");
288MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 325MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index a6191198d25..9c30a0b0317 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -112,6 +112,22 @@ static void nuc900_nand_write_buf(struct mtd_info *mtd,
112 write_data_reg(nand, buf[i]); 112 write_data_reg(nand, buf[i]);
113} 113}
114 114
115static int nuc900_verify_buf(struct mtd_info *mtd,
116 const unsigned char *buf, int len)
117{
118 int i;
119 struct nuc900_nand *nand;
120
121 nand = container_of(mtd, struct nuc900_nand, mtd);
122
123 for (i = 0; i < len; i++) {
124 if (buf[i] != (unsigned char)read_data_reg(nand))
125 return -EFAULT;
126 }
127
128 return 0;
129}
130
115static int nuc900_check_rb(struct nuc900_nand *nand) 131static int nuc900_check_rb(struct nuc900_nand *nand)
116{ 132{
117 unsigned int val; 133 unsigned int val;
@@ -246,7 +262,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
246 spin_unlock(&nand->lock); 262 spin_unlock(&nand->lock);
247} 263}
248 264
249static int nuc900_nand_probe(struct platform_device *pdev) 265static int __devinit nuc900_nand_probe(struct platform_device *pdev)
250{ 266{
251 struct nuc900_nand *nuc900_nand; 267 struct nuc900_nand *nuc900_nand;
252 struct nand_chip *chip; 268 struct nand_chip *chip;
@@ -276,6 +292,7 @@ static int nuc900_nand_probe(struct platform_device *pdev)
276 chip->read_byte = nuc900_nand_read_byte; 292 chip->read_byte = nuc900_nand_read_byte;
277 chip->write_buf = nuc900_nand_write_buf; 293 chip->write_buf = nuc900_nand_write_buf;
278 chip->read_buf = nuc900_nand_read_buf; 294 chip->read_buf = nuc900_nand_read_buf;
295 chip->verify_buf = nuc900_verify_buf;
279 chip->chip_delay = 50; 296 chip->chip_delay = 50;
280 chip->options = 0; 297 chip->options = 0;
281 chip->ecc.mode = NAND_ECC_SOFT; 298 chip->ecc.mode = NAND_ECC_SOFT;
@@ -317,12 +334,11 @@ fail1: kfree(nuc900_nand);
317 return retval; 334 return retval;
318} 335}
319 336
320static int nuc900_nand_remove(struct platform_device *pdev) 337static int __devexit nuc900_nand_remove(struct platform_device *pdev)
321{ 338{
322 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 339 struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
323 struct resource *res; 340 struct resource *res;
324 341
325 nand_release(&nuc900_nand->mtd);
326 iounmap(nuc900_nand->reg); 342 iounmap(nuc900_nand->reg);
327 343
328 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -340,14 +356,25 @@ static int nuc900_nand_remove(struct platform_device *pdev)
340 356
341static struct platform_driver nuc900_nand_driver = { 357static struct platform_driver nuc900_nand_driver = {
342 .probe = nuc900_nand_probe, 358 .probe = nuc900_nand_probe,
343 .remove = nuc900_nand_remove, 359 .remove = __devexit_p(nuc900_nand_remove),
344 .driver = { 360 .driver = {
345 .name = "nuc900-fmi", 361 .name = "nuc900-fmi",
346 .owner = THIS_MODULE, 362 .owner = THIS_MODULE,
347 }, 363 },
348}; 364};
349 365
350module_platform_driver(nuc900_nand_driver); 366static int __init nuc900_nand_init(void)
367{
368 return platform_driver_register(&nuc900_nand_driver);
369}
370
371static void __exit nuc900_nand_exit(void)
372{
373 platform_driver_unregister(&nuc900_nand_driver);
374}
375
376module_init(nuc900_nand_init);
377module_exit(nuc900_nand_exit);
351 378
352MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 379MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
353MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!"); 380MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0002d5e94f0..0db2c0e7656 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,25 +9,20 @@
9 */ 9 */
10 10
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/dmaengine.h>
13#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
14#include <linux/delay.h> 13#include <linux/delay.h>
15#include <linux/module.h>
16#include <linux/interrupt.h> 14#include <linux/interrupt.h>
17#include <linux/jiffies.h> 15#include <linux/jiffies.h>
18#include <linux/sched.h> 16#include <linux/sched.h>
19#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h> 18#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
22#include <linux/omap-dma.h>
23#include <linux/io.h> 20#include <linux/io.h>
24#include <linux/slab.h> 21#include <linux/slab.h>
25 22
26#ifdef CONFIG_MTD_NAND_OMAP_BCH 23#include <plat/dma.h>
27#include <linux/bch.h> 24#include <plat/gpmc.h>
28#endif 25#include <plat/nand.h>
29
30#include <linux/platform_data/mtd-nand-omap2.h>
31 26
32#define DRIVER_NAME "omap2-nand" 27#define DRIVER_NAME "omap2-nand"
33#define OMAP_NAND_TIMEOUT_MS 5000 28#define OMAP_NAND_TIMEOUT_MS 5000
@@ -99,23 +94,7 @@
99#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 94#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
100#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 95#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
101 96
102#define PREFETCH_CONFIG1_CS_SHIFT 24 97static const char *part_probes[] = { "cmdlinepart", NULL };
103#define ECC_CONFIG_CS_SHIFT 1
104#define CS_MASK 0x7
105#define ENABLE_PREFETCH (0x1 << 7)
106#define DMA_MPU_MODE_SHIFT 2
107#define ECCSIZE0_SHIFT 12
108#define ECCSIZE1_SHIFT 22
109#define ECC1RESULTSIZE 0x1
110#define ECCCLEAR 0x100
111#define ECC1 0x1
112#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
113#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
114#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
115#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
116#define STATUS_BUFF_EMPTY 0x00000001
117
118#define OMAP24XX_DMA_GPMC 4
119 98
120/* oob info generated runtime depending on ecc algorithm and layout selected */ 99/* oob info generated runtime depending on ecc algorithm and layout selected */
121static struct nand_ecclayout omap_oobinfo; 100static struct nand_ecclayout omap_oobinfo;
@@ -135,88 +114,24 @@ struct omap_nand_info {
135 struct nand_hw_control controller; 114 struct nand_hw_control controller;
136 struct omap_nand_platform_data *pdata; 115 struct omap_nand_platform_data *pdata;
137 struct mtd_info mtd; 116 struct mtd_info mtd;
117 struct mtd_partition *parts;
138 struct nand_chip nand; 118 struct nand_chip nand;
139 struct platform_device *pdev; 119 struct platform_device *pdev;
140 120
141 int gpmc_cs; 121 int gpmc_cs;
142 unsigned long phys_base; 122 unsigned long phys_base;
143 unsigned long mem_size;
144 struct completion comp; 123 struct completion comp;
145 struct dma_chan *dma; 124 int dma_ch;
146 int gpmc_irq_fifo; 125 int gpmc_irq;
147 int gpmc_irq_count;
148 enum { 126 enum {
149 OMAP_NAND_IO_READ = 0, /* read */ 127 OMAP_NAND_IO_READ = 0, /* read */
150 OMAP_NAND_IO_WRITE, /* write */ 128 OMAP_NAND_IO_WRITE, /* write */
151 } iomode; 129 } iomode;
152 u_char *buf; 130 u_char *buf;
153 int buf_len; 131 int buf_len;
154 struct gpmc_nand_regs reg;
155
156#ifdef CONFIG_MTD_NAND_OMAP_BCH
157 struct bch_control *bch;
158 struct nand_ecclayout ecclayout;
159#endif
160}; 132};
161 133
162/** 134/**
163 * omap_prefetch_enable - configures and starts prefetch transfer
164 * @cs: cs (chip select) number
165 * @fifo_th: fifo threshold to be used for read/ write
166 * @dma_mode: dma mode enable (1) or disable (0)
167 * @u32_count: number of bytes to be transferred
168 * @is_write: prefetch read(0) or write post(1) mode
169 */
170static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
171 unsigned int u32_count, int is_write, struct omap_nand_info *info)
172{
173 u32 val;
174
175 if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
176 return -1;
177
178 if (readl(info->reg.gpmc_prefetch_control))
179 return -EBUSY;
180
181 /* Set the amount of bytes to be prefetched */
182 writel(u32_count, info->reg.gpmc_prefetch_config2);
183
184 /* Set dma/mpu mode, the prefetch read / post write and
185 * enable the engine. Set which cs is has requested for.
186 */
187 val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
188 PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
189 (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
190 writel(val, info->reg.gpmc_prefetch_config1);
191
192 /* Start the prefetch engine */
193 writel(0x1, info->reg.gpmc_prefetch_control);
194
195 return 0;
196}
197
198/**
199 * omap_prefetch_reset - disables and stops the prefetch engine
200 */
201static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
202{
203 u32 config1;
204
205 /* check if the same module/cs is trying to reset */
206 config1 = readl(info->reg.gpmc_prefetch_config1);
207 if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
208 return -EINVAL;
209
210 /* Stop the PFPW engine */
211 writel(0x0, info->reg.gpmc_prefetch_control);
212
213 /* Reset/disable the PFPW engine */
214 writel(0x0, info->reg.gpmc_prefetch_config1);
215
216 return 0;
217}
218
219/**
220 * omap_hwcontrol - hardware specific access to control-lines 135 * omap_hwcontrol - hardware specific access to control-lines
221 * @mtd: MTD device structure 136 * @mtd: MTD device structure
222 * @cmd: command to device 137 * @cmd: command to device
@@ -234,13 +149,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
234 149
235 if (cmd != NAND_CMD_NONE) { 150 if (cmd != NAND_CMD_NONE) {
236 if (ctrl & NAND_CLE) 151 if (ctrl & NAND_CLE)
237 writeb(cmd, info->reg.gpmc_nand_command); 152 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
238 153
239 else if (ctrl & NAND_ALE) 154 else if (ctrl & NAND_ALE)
240 writeb(cmd, info->reg.gpmc_nand_address); 155 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
241 156
242 else /* NAND_NCE */ 157 else /* NAND_NCE */
243 writeb(cmd, info->reg.gpmc_nand_data); 158 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
244 } 159 }
245} 160}
246 161
@@ -274,8 +189,7 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
274 iowrite8(*p++, info->nand.IO_ADDR_W); 189 iowrite8(*p++, info->nand.IO_ADDR_W);
275 /* wait until buffer is available for write */ 190 /* wait until buffer is available for write */
276 do { 191 do {
277 status = readl(info->reg.gpmc_status) & 192 status = gpmc_read_status(GPMC_STATUS_BUFFER);
278 STATUS_BUFF_EMPTY;
279 } while (!status); 193 } while (!status);
280 } 194 }
281} 195}
@@ -312,8 +226,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
312 iowrite16(*p++, info->nand.IO_ADDR_W); 226 iowrite16(*p++, info->nand.IO_ADDR_W);
313 /* wait until buffer is available for write */ 227 /* wait until buffer is available for write */
314 do { 228 do {
315 status = readl(info->reg.gpmc_status) & 229 status = gpmc_read_status(GPMC_STATUS_BUFFER);
316 STATUS_BUFF_EMPTY;
317 } while (!status); 230 } while (!status);
318 } 231 }
319} 232}
@@ -343,8 +256,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
343 } 256 }
344 257
345 /* configure and start prefetch transfer */ 258 /* configure and start prefetch transfer */
346 ret = omap_prefetch_enable(info->gpmc_cs, 259 ret = gpmc_prefetch_enable(info->gpmc_cs,
347 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info); 260 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
348 if (ret) { 261 if (ret) {
349 /* PFPW engine is busy, use cpu copy method */ 262 /* PFPW engine is busy, use cpu copy method */
350 if (info->nand.options & NAND_BUSWIDTH_16) 263 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -353,15 +266,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
353 omap_read_buf8(mtd, (u_char *)p, len); 266 omap_read_buf8(mtd, (u_char *)p, len);
354 } else { 267 } else {
355 do { 268 do {
356 r_count = readl(info->reg.gpmc_prefetch_status); 269 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
357 r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
358 r_count = r_count >> 2; 270 r_count = r_count >> 2;
359 ioread32_rep(info->nand.IO_ADDR_R, p, r_count); 271 ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
360 p += r_count; 272 p += r_count;
361 len -= r_count << 2; 273 len -= r_count << 2;
362 } while (len); 274 } while (len);
363 /* disable and stop the PFPW engine */ 275 /* disable and stop the PFPW engine */
364 omap_prefetch_reset(info->gpmc_cs, info); 276 gpmc_prefetch_reset(info->gpmc_cs);
365 } 277 }
366} 278}
367 279
@@ -380,7 +292,6 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
380 int i = 0, ret = 0; 292 int i = 0, ret = 0;
381 u16 *p = (u16 *)buf; 293 u16 *p = (u16 *)buf;
382 unsigned long tim, limit; 294 unsigned long tim, limit;
383 u32 val;
384 295
385 /* take care of subpage writes */ 296 /* take care of subpage writes */
386 if (len % 2 != 0) { 297 if (len % 2 != 0) {
@@ -390,8 +301,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
390 } 301 }
391 302
392 /* configure and start prefetch transfer */ 303 /* configure and start prefetch transfer */
393 ret = omap_prefetch_enable(info->gpmc_cs, 304 ret = gpmc_prefetch_enable(info->gpmc_cs,
394 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info); 305 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
395 if (ret) { 306 if (ret) {
396 /* PFPW engine is busy, use cpu copy method */ 307 /* PFPW engine is busy, use cpu copy method */
397 if (info->nand.options & NAND_BUSWIDTH_16) 308 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -400,8 +311,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
400 omap_write_buf8(mtd, (u_char *)p, len); 311 omap_write_buf8(mtd, (u_char *)p, len);
401 } else { 312 } else {
402 while (len) { 313 while (len) {
403 w_count = readl(info->reg.gpmc_prefetch_status); 314 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
404 w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
405 w_count = w_count >> 1; 315 w_count = w_count >> 1;
406 for (i = 0; (i < w_count) && len; i++, len -= 2) 316 for (i = 0; (i < w_count) && len; i++, len -= 2)
407 iowrite16(*p++, info->nand.IO_ADDR_W); 317 iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -410,28 +320,27 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
410 tim = 0; 320 tim = 0;
411 limit = (loops_per_jiffy * 321 limit = (loops_per_jiffy *
412 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 322 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
413 do { 323 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
414 cpu_relax(); 324 cpu_relax();
415 val = readl(info->reg.gpmc_prefetch_status);
416 val = PREFETCH_STATUS_COUNT(val);
417 } while (val && (tim++ < limit));
418 325
419 /* disable and stop the PFPW engine */ 326 /* disable and stop the PFPW engine */
420 omap_prefetch_reset(info->gpmc_cs, info); 327 gpmc_prefetch_reset(info->gpmc_cs);
421 } 328 }
422} 329}
423 330
424/* 331/*
425 * omap_nand_dma_callback: callback on the completion of dma transfer 332 * omap_nand_dma_cb: callback on the completion of dma transfer
333 * @lch: logical channel
334 * @ch_satuts: channel status
426 * @data: pointer to completion data structure 335 * @data: pointer to completion data structure
427 */ 336 */
428static void omap_nand_dma_callback(void *data) 337static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
429{ 338{
430 complete((struct completion *) data); 339 complete((struct completion *) data);
431} 340}
432 341
433/* 342/*
434 * omap_nand_dma_transfer: configure and start dma transfer 343 * omap_nand_dma_transfer: configer and start dma transfer
435 * @mtd: MTD device structure 344 * @mtd: MTD device structure
436 * @addr: virtual address in RAM of source/destination 345 * @addr: virtual address in RAM of source/destination
437 * @len: number of data bytes to be transferred 346 * @len: number of data bytes to be transferred
@@ -442,14 +351,17 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
442{ 351{
443 struct omap_nand_info *info = container_of(mtd, 352 struct omap_nand_info *info = container_of(mtd,
444 struct omap_nand_info, mtd); 353 struct omap_nand_info, mtd);
445 struct dma_async_tx_descriptor *tx;
446 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : 354 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
447 DMA_FROM_DEVICE; 355 DMA_FROM_DEVICE;
448 struct scatterlist sg; 356 dma_addr_t dma_addr;
449 unsigned long tim, limit;
450 unsigned n;
451 int ret; 357 int ret;
452 u32 val; 358 unsigned long tim, limit;
359
360 /* The fifo depth is 64 bytes max.
361 * But configure the FIFO-threahold to 32 to get a sync at each frame
362 * and frame length is 32 bytes.
363 */
364 int buf_len = len >> 6;
453 365
454 if (addr >= high_memory) { 366 if (addr >= high_memory) {
455 struct page *p1; 367 struct page *p1;
@@ -463,53 +375,54 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
463 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 375 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
464 } 376 }
465 377
466 sg_init_one(&sg, addr, len); 378 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
467 n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); 379 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
468 if (n == 0) {
469 dev_err(&info->pdev->dev, 380 dev_err(&info->pdev->dev,
470 "Couldn't DMA map a %d byte buffer\n", len); 381 "Couldn't DMA map a %d byte buffer\n", len);
471 goto out_copy; 382 goto out_copy;
472 } 383 }
473 384
474 tx = dmaengine_prep_slave_sg(info->dma, &sg, n, 385 if (is_write) {
475 is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 386 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
476 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 387 info->phys_base, 0, 0);
477 if (!tx) 388 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
478 goto out_copy_unmap; 389 dma_addr, 0, 0);
479 390 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
480 tx->callback = omap_nand_dma_callback; 391 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
481 tx->callback_param = &info->comp; 392 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
482 dmaengine_submit(tx); 393 } else {
483 394 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
395 info->phys_base, 0, 0);
396 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
397 dma_addr, 0, 0);
398 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
399 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
400 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
401 }
484 /* configure and start prefetch transfer */ 402 /* configure and start prefetch transfer */
485 ret = omap_prefetch_enable(info->gpmc_cs, 403 ret = gpmc_prefetch_enable(info->gpmc_cs,
486 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info); 404 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
487 if (ret) 405 if (ret)
488 /* PFPW engine is busy, use cpu copy method */ 406 /* PFPW engine is busy, use cpu copy method */
489 goto out_copy_unmap; 407 goto out_copy;
490 408
491 init_completion(&info->comp); 409 init_completion(&info->comp);
492 dma_async_issue_pending(info->dma); 410
411 omap_start_dma(info->dma_ch);
493 412
494 /* setup and start DMA using dma_addr */ 413 /* setup and start DMA using dma_addr */
495 wait_for_completion(&info->comp); 414 wait_for_completion(&info->comp);
496 tim = 0; 415 tim = 0;
497 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 416 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
498 417 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
499 do {
500 cpu_relax(); 418 cpu_relax();
501 val = readl(info->reg.gpmc_prefetch_status);
502 val = PREFETCH_STATUS_COUNT(val);
503 } while (val && (tim++ < limit));
504 419
505 /* disable and stop the PFPW engine */ 420 /* disable and stop the PFPW engine */
506 omap_prefetch_reset(info->gpmc_cs, info); 421 gpmc_prefetch_reset(info->gpmc_cs);
507 422
508 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); 423 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
509 return 0; 424 return 0;
510 425
511out_copy_unmap:
512 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
513out_copy: 426out_copy:
514 if (info->nand.options & NAND_BUSWIDTH_16) 427 if (info->nand.options & NAND_BUSWIDTH_16)
515 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 428 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -552,7 +465,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
552} 465}
553 466
554/* 467/*
555 * omap_nand_irq - GPMC irq handler 468 * omap_nand_irq - GMPC irq handler
556 * @this_irq: gpmc irq number 469 * @this_irq: gpmc irq number
557 * @dev: omap_nand_info structure pointer is passed here 470 * @dev: omap_nand_info structure pointer is passed here
558 */ 471 */
@@ -560,12 +473,13 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
560{ 473{
561 struct omap_nand_info *info = (struct omap_nand_info *) dev; 474 struct omap_nand_info *info = (struct omap_nand_info *) dev;
562 u32 bytes; 475 u32 bytes;
476 u32 irq_stat;
563 477
564 bytes = readl(info->reg.gpmc_prefetch_status); 478 irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
565 bytes = PREFETCH_STATUS_FIFO_CNT(bytes); 479 bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
566 bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */ 480 bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
567 if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */ 481 if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
568 if (this_irq == info->gpmc_irq_count) 482 if (irq_stat & 0x2)
569 goto done; 483 goto done;
570 484
571 if (info->buf_len && (info->buf_len < bytes)) 485 if (info->buf_len && (info->buf_len < bytes))
@@ -582,17 +496,20 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
582 (u32 *)info->buf, bytes >> 2); 496 (u32 *)info->buf, bytes >> 2);
583 info->buf = info->buf + bytes; 497 info->buf = info->buf + bytes;
584 498
585 if (this_irq == info->gpmc_irq_count) 499 if (irq_stat & 0x2)
586 goto done; 500 goto done;
587 } 501 }
502 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
588 503
589 return IRQ_HANDLED; 504 return IRQ_HANDLED;
590 505
591done: 506done:
592 complete(&info->comp); 507 complete(&info->comp);
508 /* disable irq */
509 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
593 510
594 disable_irq_nosync(info->gpmc_irq_fifo); 511 /* clear status */
595 disable_irq_nosync(info->gpmc_irq_count); 512 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
596 513
597 return IRQ_HANDLED; 514 return IRQ_HANDLED;
598} 515}
@@ -619,22 +536,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
619 init_completion(&info->comp); 536 init_completion(&info->comp);
620 537
621 /* configure and start prefetch transfer */ 538 /* configure and start prefetch transfer */
622 ret = omap_prefetch_enable(info->gpmc_cs, 539 ret = gpmc_prefetch_enable(info->gpmc_cs,
623 PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info); 540 PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
624 if (ret) 541 if (ret)
625 /* PFPW engine is busy, use cpu copy method */ 542 /* PFPW engine is busy, use cpu copy method */
626 goto out_copy; 543 goto out_copy;
627 544
628 info->buf_len = len; 545 info->buf_len = len;
629 546 /* enable irq */
630 enable_irq(info->gpmc_irq_count); 547 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
631 enable_irq(info->gpmc_irq_fifo); 548 (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
632 549
633 /* waiting for read to complete */ 550 /* waiting for read to complete */
634 wait_for_completion(&info->comp); 551 wait_for_completion(&info->comp);
635 552
636 /* disable and stop the PFPW engine */ 553 /* disable and stop the PFPW engine */
637 omap_prefetch_reset(info->gpmc_cs, info); 554 gpmc_prefetch_reset(info->gpmc_cs);
638 return; 555 return;
639 556
640out_copy: 557out_copy:
@@ -657,7 +574,6 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
657 struct omap_nand_info, mtd); 574 struct omap_nand_info, mtd);
658 int ret = 0; 575 int ret = 0;
659 unsigned long tim, limit; 576 unsigned long tim, limit;
660 u32 val;
661 577
662 if (len <= mtd->oobsize) { 578 if (len <= mtd->oobsize) {
663 omap_write_buf_pref(mtd, buf, len); 579 omap_write_buf_pref(mtd, buf, len);
@@ -669,31 +585,27 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
669 init_completion(&info->comp); 585 init_completion(&info->comp);
670 586
671 /* configure and start prefetch transfer : size=24 */ 587 /* configure and start prefetch transfer : size=24 */
672 ret = omap_prefetch_enable(info->gpmc_cs, 588 ret = gpmc_prefetch_enable(info->gpmc_cs,
673 (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info); 589 (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
674 if (ret) 590 if (ret)
675 /* PFPW engine is busy, use cpu copy method */ 591 /* PFPW engine is busy, use cpu copy method */
676 goto out_copy; 592 goto out_copy;
677 593
678 info->buf_len = len; 594 info->buf_len = len;
679 595 /* enable irq */
680 enable_irq(info->gpmc_irq_count); 596 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
681 enable_irq(info->gpmc_irq_fifo); 597 (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
682 598
683 /* waiting for write to complete */ 599 /* waiting for write to complete */
684 wait_for_completion(&info->comp); 600 wait_for_completion(&info->comp);
685
686 /* wait for data to flushed-out before reset the prefetch */ 601 /* wait for data to flushed-out before reset the prefetch */
687 tim = 0; 602 tim = 0;
688 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 603 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
689 do { 604 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
690 val = readl(info->reg.gpmc_prefetch_status);
691 val = PREFETCH_STATUS_COUNT(val);
692 cpu_relax(); 605 cpu_relax();
693 } while (val && (tim++ < limit));
694 606
695 /* disable and stop the PFPW engine */ 607 /* disable and stop the PFPW engine */
696 omap_prefetch_reset(info->gpmc_cs, info); 608 gpmc_prefetch_reset(info->gpmc_cs);
697 return; 609 return;
698 610
699out_copy: 611out_copy:
@@ -704,6 +616,27 @@ out_copy:
704} 616}
705 617
706/** 618/**
619 * omap_verify_buf - Verify chip data against buffer
620 * @mtd: MTD device structure
621 * @buf: buffer containing the data to compare
622 * @len: number of bytes to compare
623 */
624static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
625{
626 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
627 mtd);
628 u16 *p = (u16 *) buf;
629
630 len >>= 1;
631 while (len--) {
632 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
633 return -EFAULT;
634 }
635
636 return 0;
637}
638
639/**
707 * gen_true_ecc - This function will generate true ECC value 640 * gen_true_ecc - This function will generate true ECC value
708 * @ecc_buf: buffer to store ecc code 641 * @ecc_buf: buffer to store ecc code
709 * 642 *
@@ -811,12 +744,12 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
811 744
812 case 1: 745 case 1:
813 /* Uncorrectable error */ 746 /* Uncorrectable error */
814 pr_debug("ECC UNCORRECTED_ERROR 1\n"); 747 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
815 return -1; 748 return -1;
816 749
817 case 11: 750 case 11:
818 /* UN-Correctable error */ 751 /* UN-Correctable error */
819 pr_debug("ECC UNCORRECTED_ERROR B\n"); 752 DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
820 return -1; 753 return -1;
821 754
822 case 12: 755 case 12:
@@ -833,8 +766,8 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
833 766
834 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1]; 767 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
835 768
836 pr_debug("Correcting single bit ECC error at offset: " 769 DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
837 "%d, bit: %d\n", find_byte, find_bit); 770 "offset: %d, bit: %d\n", find_byte, find_bit);
838 771
839 page_data[find_byte] ^= (1 << find_bit); 772 page_data[find_byte] ^= (1 << find_bit);
840 773
@@ -846,7 +779,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
846 ecc_data2[2] == 0) 779 ecc_data2[2] == 0)
847 return 0; 780 return 0;
848 } 781 }
849 pr_debug("UNCORRECTED_ERROR default\n"); 782 DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
850 return -1; 783 return -1;
851 } 784 }
852} 785}
@@ -912,20 +845,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
912{ 845{
913 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 846 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
914 mtd); 847 mtd);
915 u32 val; 848 return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
916
917 val = readl(info->reg.gpmc_ecc_config);
918 if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
919 return -EINVAL;
920
921 /* read ecc result */
922 val = readl(info->reg.gpmc_ecc1_result);
923 *ecc_code++ = val; /* P128e, ..., P1e */
924 *ecc_code++ = val >> 16; /* P128o, ..., P1o */
925 /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
926 *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
927
928 return 0;
929} 849}
930 850
931/** 851/**
@@ -939,34 +859,8 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
939 mtd); 859 mtd);
940 struct nand_chip *chip = mtd->priv; 860 struct nand_chip *chip = mtd->priv;
941 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; 861 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
942 u32 val;
943
944 /* clear ecc and enable bits */
945 val = ECCCLEAR | ECC1;
946 writel(val, info->reg.gpmc_ecc_control);
947
948 /* program ecc and result sizes */
949 val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
950 ECC1RESULTSIZE);
951 writel(val, info->reg.gpmc_ecc_size_config);
952
953 switch (mode) {
954 case NAND_ECC_READ:
955 case NAND_ECC_WRITE:
956 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
957 break;
958 case NAND_ECC_READSYN:
959 writel(ECCCLEAR, info->reg.gpmc_ecc_control);
960 break;
961 default:
962 dev_info(&info->pdev->dev,
963 "error: unrecognized Mode[%d]!\n", mode);
964 break;
965 }
966 862
967 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */ 863 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
968 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
969 writel(val, info->reg.gpmc_ecc_config);
970} 864}
971 865
972/** 866/**
@@ -987,22 +881,21 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
987 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 881 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
988 mtd); 882 mtd);
989 unsigned long timeo = jiffies; 883 unsigned long timeo = jiffies;
990 int status, state = this->state; 884 int status = NAND_STATUS_FAIL, state = this->state;
991 885
992 if (state == FL_ERASING) 886 if (state == FL_ERASING)
993 timeo += (HZ * 400) / 1000; 887 timeo += (HZ * 400) / 1000;
994 else 888 else
995 timeo += (HZ * 20) / 1000; 889 timeo += (HZ * 20) / 1000;
996 890
997 writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command); 891 gpmc_nand_write(info->gpmc_cs,
892 GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
998 while (time_before(jiffies, timeo)) { 893 while (time_before(jiffies, timeo)) {
999 status = readb(info->reg.gpmc_nand_data); 894 status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
1000 if (status & NAND_STATUS_READY) 895 if (status & NAND_STATUS_READY)
1001 break; 896 break;
1002 cond_resched(); 897 cond_resched();
1003 } 898 }
1004
1005 status = readb(info->reg.gpmc_nand_data);
1006 return status; 899 return status;
1007} 900}
1008 901
@@ -1016,322 +909,30 @@ static int omap_dev_ready(struct mtd_info *mtd)
1016 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 909 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1017 mtd); 910 mtd);
1018 911
1019 val = readl(info->reg.gpmc_status); 912 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
1020
1021 if ((val & 0x100) == 0x100) { 913 if ((val & 0x100) == 0x100) {
1022 return 1; 914 /* Clear IRQ Interrupt */
915 val |= 0x100;
916 val &= ~(0x0);
917 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
1023 } else { 918 } else {
1024 return 0; 919 unsigned int cnt = 0;
1025 } 920 while (cnt++ < 0x1FF) {
1026} 921 if ((val & 0x100) == 0x100)
1027 922 return 0;
1028#ifdef CONFIG_MTD_NAND_OMAP_BCH 923 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
1029
1030/**
1031 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
1032 * @mtd: MTD device structure
1033 * @mode: Read/Write mode
1034 */
1035static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1036{
1037 int nerrors;
1038 unsigned int dev_width, nsectors;
1039 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1040 mtd);
1041 struct nand_chip *chip = mtd->priv;
1042 u32 val;
1043
1044 nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
1045 dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1046 nsectors = 1;
1047 /*
1048 * Program GPMC to perform correction on one 512-byte sector at a time.
1049 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
1050 * gives a slight (5%) performance gain (but requires additional code).
1051 */
1052
1053 writel(ECC1, info->reg.gpmc_ecc_control);
1054
1055 /*
1056 * When using BCH, sector size is hardcoded to 512 bytes.
1057 * Here we are using wrapping mode 6 both for reading and writing, with:
1058 * size0 = 0 (no additional protected byte in spare area)
1059 * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1060 */
1061 val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
1062 writel(val, info->reg.gpmc_ecc_size_config);
1063
1064 /* BCH configuration */
1065 val = ((1 << 16) | /* enable BCH */
1066 (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
1067 (0x06 << 8) | /* wrap mode = 6 */
1068 (dev_width << 7) | /* bus width */
1069 (((nsectors-1) & 0x7) << 4) | /* number of sectors */
1070 (info->gpmc_cs << 1) | /* ECC CS */
1071 (0x1)); /* enable ECC */
1072
1073 writel(val, info->reg.gpmc_ecc_config);
1074
1075 /* clear ecc and enable bits */
1076 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
1077}
1078
1079/**
1080 * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
1081 * @mtd: MTD device structure
1082 * @dat: The pointer to data on which ecc is computed
1083 * @ecc_code: The ecc_code buffer
1084 */
1085static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
1086 u_char *ecc_code)
1087{
1088 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1089 mtd);
1090 unsigned long nsectors, val1, val2;
1091 int i;
1092
1093 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1094
1095 for (i = 0; i < nsectors; i++) {
1096
1097 /* Read hw-computed remainder */
1098 val1 = readl(info->reg.gpmc_bch_result0[i]);
1099 val2 = readl(info->reg.gpmc_bch_result1[i]);
1100
1101 /*
1102 * Add constant polynomial to remainder, in order to get an ecc
1103 * sequence of 0xFFs for a buffer filled with 0xFFs; and
1104 * left-justify the resulting polynomial.
1105 */
1106 *ecc_code++ = 0x28 ^ ((val2 >> 12) & 0xFF);
1107 *ecc_code++ = 0x13 ^ ((val2 >> 4) & 0xFF);
1108 *ecc_code++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
1109 *ecc_code++ = 0x39 ^ ((val1 >> 20) & 0xFF);
1110 *ecc_code++ = 0x96 ^ ((val1 >> 12) & 0xFF);
1111 *ecc_code++ = 0xac ^ ((val1 >> 4) & 0xFF);
1112 *ecc_code++ = 0x7f ^ ((val1 & 0xF) << 4);
1113 }
1114
1115 return 0;
1116}
1117
1118/**
1119 * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
1120 * @mtd: MTD device structure
1121 * @dat: The pointer to data on which ecc is computed
1122 * @ecc_code: The ecc_code buffer
1123 */
1124static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
1125 u_char *ecc_code)
1126{
1127 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1128 mtd);
1129 unsigned long nsectors, val1, val2, val3, val4;
1130 int i;
1131
1132 nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1133
1134 for (i = 0; i < nsectors; i++) {
1135
1136 /* Read hw-computed remainder */
1137 val1 = readl(info->reg.gpmc_bch_result0[i]);
1138 val2 = readl(info->reg.gpmc_bch_result1[i]);
1139 val3 = readl(info->reg.gpmc_bch_result2[i]);
1140 val4 = readl(info->reg.gpmc_bch_result3[i]);
1141
1142 /*
1143 * Add constant polynomial to remainder, in order to get an ecc
1144 * sequence of 0xFFs for a buffer filled with 0xFFs.
1145 */
1146 *ecc_code++ = 0xef ^ (val4 & 0xFF);
1147 *ecc_code++ = 0x51 ^ ((val3 >> 24) & 0xFF);
1148 *ecc_code++ = 0x2e ^ ((val3 >> 16) & 0xFF);
1149 *ecc_code++ = 0x09 ^ ((val3 >> 8) & 0xFF);
1150 *ecc_code++ = 0xed ^ (val3 & 0xFF);
1151 *ecc_code++ = 0x93 ^ ((val2 >> 24) & 0xFF);
1152 *ecc_code++ = 0x9a ^ ((val2 >> 16) & 0xFF);
1153 *ecc_code++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
1154 *ecc_code++ = 0x97 ^ (val2 & 0xFF);
1155 *ecc_code++ = 0x79 ^ ((val1 >> 24) & 0xFF);
1156 *ecc_code++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
1157 *ecc_code++ = 0x24 ^ ((val1 >> 8) & 0xFF);
1158 *ecc_code++ = 0xb5 ^ (val1 & 0xFF);
1159 }
1160
1161 return 0;
1162}
1163
1164/**
1165 * omap3_correct_data_bch - Decode received data and correct errors
1166 * @mtd: MTD device structure
1167 * @data: page data
1168 * @read_ecc: ecc read from nand flash
1169 * @calc_ecc: ecc read from HW ECC registers
1170 */
1171static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
1172 u_char *read_ecc, u_char *calc_ecc)
1173{
1174 int i, count;
1175 /* cannot correct more than 8 errors */
1176 unsigned int errloc[8];
1177 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1178 mtd);
1179
1180 count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
1181 errloc);
1182 if (count > 0) {
1183 /* correct errors */
1184 for (i = 0; i < count; i++) {
1185 /* correct data only, not ecc bytes */
1186 if (errloc[i] < 8*512)
1187 data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
1188 pr_debug("corrected bitflip %u\n", errloc[i]);
1189 } 924 }
1190 } else if (count < 0) {
1191 pr_err("ecc unrecoverable error\n");
1192 }
1193 return count;
1194}
1195
1196/**
1197 * omap3_free_bch - Release BCH ecc resources
1198 * @mtd: MTD device structure
1199 */
1200static void omap3_free_bch(struct mtd_info *mtd)
1201{
1202 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1203 mtd);
1204 if (info->bch) {
1205 free_bch(info->bch);
1206 info->bch = NULL;
1207 }
1208}
1209
1210/**
1211 * omap3_init_bch - Initialize BCH ECC
1212 * @mtd: MTD device structure
1213 * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
1214 */
1215static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1216{
1217 int max_errors;
1218 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1219 mtd);
1220#ifdef CONFIG_MTD_NAND_OMAP_BCH8
1221 const int hw_errors = 8;
1222#else
1223 const int hw_errors = 4;
1224#endif
1225 info->bch = NULL;
1226
1227 max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
1228 if (max_errors != hw_errors) {
1229 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
1230 max_errors, hw_errors);
1231 goto fail;
1232 }
1233
1234 /* software bch library is only used to detect and locate errors */
1235 info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
1236 if (!info->bch)
1237 goto fail;
1238
1239 info->nand.ecc.size = 512;
1240 info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
1241 info->nand.ecc.correct = omap3_correct_data_bch;
1242 info->nand.ecc.mode = NAND_ECC_HW;
1243
1244 /*
1245 * The number of corrected errors in an ecc block that will trigger
1246 * block scrubbing defaults to the ecc strength (4 or 8).
1247 * Set mtd->bitflip_threshold here to define a custom threshold.
1248 */
1249
1250 if (max_errors == 8) {
1251 info->nand.ecc.strength = 8;
1252 info->nand.ecc.bytes = 13;
1253 info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
1254 } else {
1255 info->nand.ecc.strength = 4;
1256 info->nand.ecc.bytes = 7;
1257 info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
1258 }
1259
1260 pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
1261 return 0;
1262fail:
1263 omap3_free_bch(mtd);
1264 return -1;
1265}
1266
1267/**
1268 * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
1269 * @mtd: MTD device structure
1270 */
1271static int omap3_init_bch_tail(struct mtd_info *mtd)
1272{
1273 int i, steps;
1274 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1275 mtd);
1276 struct nand_ecclayout *layout = &info->ecclayout;
1277
1278 /* build oob layout */
1279 steps = mtd->writesize/info->nand.ecc.size;
1280 layout->eccbytes = steps*info->nand.ecc.bytes;
1281
1282 /* do not bother creating special oob layouts for small page devices */
1283 if (mtd->oobsize < 64) {
1284 pr_err("BCH ecc is not supported on small page devices\n");
1285 goto fail;
1286 }
1287
1288 /* reserve 2 bytes for bad block marker */
1289 if (layout->eccbytes+2 > mtd->oobsize) {
1290 pr_err("no oob layout available for oobsize %d eccbytes %u\n",
1291 mtd->oobsize, layout->eccbytes);
1292 goto fail;
1293 } 925 }
1294 926
1295 /* put ecc bytes at oob tail */ 927 return 1;
1296 for (i = 0; i < layout->eccbytes; i++)
1297 layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
1298
1299 layout->oobfree[0].offset = 2;
1300 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
1301 info->nand.ecc.layout = layout;
1302
1303 if (!(info->nand.options & NAND_BUSWIDTH_16))
1304 info->nand.badblock_pattern = &bb_descrip_flashbased;
1305 return 0;
1306fail:
1307 omap3_free_bch(mtd);
1308 return -1;
1309} 928}
1310 929
1311#else 930static int __devinit omap_nand_probe(struct platform_device *pdev)
1312static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1313{
1314 pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
1315 return -1;
1316}
1317static int omap3_init_bch_tail(struct mtd_info *mtd)
1318{
1319 return -1;
1320}
1321static void omap3_free_bch(struct mtd_info *mtd)
1322{
1323}
1324#endif /* CONFIG_MTD_NAND_OMAP_BCH */
1325
1326static int omap_nand_probe(struct platform_device *pdev)
1327{ 931{
1328 struct omap_nand_info *info; 932 struct omap_nand_info *info;
1329 struct omap_nand_platform_data *pdata; 933 struct omap_nand_platform_data *pdata;
1330 int err; 934 int err;
1331 int i, offset; 935 int i, offset;
1332 dma_cap_mask_t mask;
1333 unsigned sig;
1334 struct resource *res;
1335 936
1336 pdata = pdev->dev.platform_data; 937 pdata = pdev->dev.platform_data;
1337 if (pdata == NULL) { 938 if (pdata == NULL) {
@@ -1351,7 +952,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1351 info->pdev = pdev; 952 info->pdev = pdev;
1352 953
1353 info->gpmc_cs = pdata->cs; 954 info->gpmc_cs = pdata->cs;
1354 info->reg = pdata->reg; 955 info->phys_base = pdata->phys_base;
1355 956
1356 info->mtd.priv = &info->nand; 957 info->mtd.priv = &info->nand;
1357 info->mtd.name = dev_name(&pdev->dev); 958 info->mtd.name = dev_name(&pdev->dev);
@@ -1360,23 +961,16 @@ static int omap_nand_probe(struct platform_device *pdev)
1360 info->nand.options = pdata->devsize; 961 info->nand.options = pdata->devsize;
1361 info->nand.options |= NAND_SKIP_BBTSCAN; 962 info->nand.options |= NAND_SKIP_BBTSCAN;
1362 963
1363 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 964 /* NAND write protect off */
1364 if (res == NULL) { 965 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
1365 err = -EINVAL;
1366 dev_err(&pdev->dev, "error getting memory resource\n");
1367 goto out_free_info;
1368 }
1369
1370 info->phys_base = res->start;
1371 info->mem_size = resource_size(res);
1372 966
1373 if (!request_mem_region(info->phys_base, info->mem_size, 967 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
1374 pdev->dev.driver->name)) { 968 pdev->dev.driver->name)) {
1375 err = -EBUSY; 969 err = -EBUSY;
1376 goto out_free_info; 970 goto out_free_info;
1377 } 971 }
1378 972
1379 info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size); 973 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
1380 if (!info->nand.IO_ADDR_R) { 974 if (!info->nand.IO_ADDR_R) {
1381 err = -ENOMEM; 975 err = -ENOMEM;
1382 goto out_release_mem_region; 976 goto out_release_mem_region;
@@ -1389,8 +983,8 @@ static int omap_nand_probe(struct platform_device *pdev)
1389 983
1390 /* 984 /*
1391 * If RDY/BSY line is connected to OMAP then use the omap ready 985 * If RDY/BSY line is connected to OMAP then use the omap ready
1392 * function and the generic nand_wait function which reads the status 986 * funcrtion and the generic nand_wait function which reads the status
1393 * register after monitoring the RDY/BSY line. Otherwise use a standard 987 * register after monitoring the RDY/BSY line.Otherwise use a standard
1394 * chip delay which is slightly more than tR (AC Timing) of the NAND 988 * chip delay which is slightly more than tR (AC Timing) of the NAND
1395 * device and read status register until you get a failure or success 989 * device and read status register until you get a failure or success
1396 */ 990 */
@@ -1419,69 +1013,35 @@ static int omap_nand_probe(struct platform_device *pdev)
1419 break; 1013 break;
1420 1014
1421 case NAND_OMAP_PREFETCH_DMA: 1015 case NAND_OMAP_PREFETCH_DMA:
1422 dma_cap_zero(mask); 1016 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
1423 dma_cap_set(DMA_SLAVE, mask); 1017 omap_nand_dma_cb, &info->comp, &info->dma_ch);
1424 sig = OMAP24XX_DMA_GPMC; 1018 if (err < 0) {
1425 info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); 1019 info->dma_ch = -1;
1426 if (!info->dma) { 1020 dev_err(&pdev->dev, "DMA request failed!\n");
1427 dev_err(&pdev->dev, "DMA engine request failed\n");
1428 err = -ENXIO;
1429 goto out_release_mem_region; 1021 goto out_release_mem_region;
1430 } else { 1022 } else {
1431 struct dma_slave_config cfg; 1023 omap_set_dma_dest_burst_mode(info->dma_ch,
1432 1024 OMAP_DMA_DATA_BURST_16);
1433 memset(&cfg, 0, sizeof(cfg)); 1025 omap_set_dma_src_burst_mode(info->dma_ch,
1434 cfg.src_addr = info->phys_base; 1026 OMAP_DMA_DATA_BURST_16);
1435 cfg.dst_addr = info->phys_base; 1027
1436 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1437 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1438 cfg.src_maxburst = 16;
1439 cfg.dst_maxburst = 16;
1440 err = dmaengine_slave_config(info->dma, &cfg);
1441 if (err) {
1442 dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
1443 err);
1444 goto out_release_mem_region;
1445 }
1446 info->nand.read_buf = omap_read_buf_dma_pref; 1028 info->nand.read_buf = omap_read_buf_dma_pref;
1447 info->nand.write_buf = omap_write_buf_dma_pref; 1029 info->nand.write_buf = omap_write_buf_dma_pref;
1448 } 1030 }
1449 break; 1031 break;
1450 1032
1451 case NAND_OMAP_PREFETCH_IRQ: 1033 case NAND_OMAP_PREFETCH_IRQ:
1452 info->gpmc_irq_fifo = platform_get_irq(pdev, 0); 1034 err = request_irq(pdata->gpmc_irq,
1453 if (info->gpmc_irq_fifo <= 0) { 1035 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
1454 dev_err(&pdev->dev, "error getting fifo irq\n");
1455 err = -ENODEV;
1456 goto out_release_mem_region;
1457 }
1458 err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
1459 IRQF_SHARED, "gpmc-nand-fifo", info);
1460 if (err) {
1461 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1462 info->gpmc_irq_fifo, err);
1463 info->gpmc_irq_fifo = 0;
1464 goto out_release_mem_region;
1465 }
1466
1467 info->gpmc_irq_count = platform_get_irq(pdev, 1);
1468 if (info->gpmc_irq_count <= 0) {
1469 dev_err(&pdev->dev, "error getting count irq\n");
1470 err = -ENODEV;
1471 goto out_release_mem_region;
1472 }
1473 err = request_irq(info->gpmc_irq_count, omap_nand_irq,
1474 IRQF_SHARED, "gpmc-nand-count", info);
1475 if (err) { 1036 if (err) {
1476 dev_err(&pdev->dev, "requesting irq(%d) error:%d", 1037 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1477 info->gpmc_irq_count, err); 1038 pdata->gpmc_irq, err);
1478 info->gpmc_irq_count = 0;
1479 goto out_release_mem_region; 1039 goto out_release_mem_region;
1040 } else {
1041 info->gpmc_irq = pdata->gpmc_irq;
1042 info->nand.read_buf = omap_read_buf_irq_pref;
1043 info->nand.write_buf = omap_write_buf_irq_pref;
1480 } 1044 }
1481
1482 info->nand.read_buf = omap_read_buf_irq_pref;
1483 info->nand.write_buf = omap_write_buf_irq_pref;
1484
1485 break; 1045 break;
1486 1046
1487 default: 1047 default:
@@ -1491,25 +1051,19 @@ static int omap_nand_probe(struct platform_device *pdev)
1491 goto out_release_mem_region; 1051 goto out_release_mem_region;
1492 } 1052 }
1493 1053
1494 /* select the ecc type */ 1054 info->nand.verify_buf = omap_verify_buf;
1055
1056 /* selsect the ecc type */
1495 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) 1057 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1496 info->nand.ecc.mode = NAND_ECC_SOFT; 1058 info->nand.ecc.mode = NAND_ECC_SOFT;
1497 else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || 1059 else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
1498 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { 1060 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
1499 info->nand.ecc.bytes = 3; 1061 info->nand.ecc.bytes = 3;
1500 info->nand.ecc.size = 512; 1062 info->nand.ecc.size = 512;
1501 info->nand.ecc.strength = 1;
1502 info->nand.ecc.calculate = omap_calculate_ecc; 1063 info->nand.ecc.calculate = omap_calculate_ecc;
1503 info->nand.ecc.hwctl = omap_enable_hwecc; 1064 info->nand.ecc.hwctl = omap_enable_hwecc;
1504 info->nand.ecc.correct = omap_correct_data; 1065 info->nand.ecc.correct = omap_correct_data;
1505 info->nand.ecc.mode = NAND_ECC_HW; 1066 info->nand.ecc.mode = NAND_ECC_HW;
1506 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1507 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1508 err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
1509 if (err) {
1510 err = -EINVAL;
1511 goto out_release_mem_region;
1512 }
1513 } 1067 }
1514 1068
1515 /* DIP switches on some boards change between 8 and 16 bit 1069 /* DIP switches on some boards change between 8 and 16 bit
@@ -1541,14 +1095,6 @@ static int omap_nand_probe(struct platform_device *pdev)
1541 (offset + omap_oobinfo.eccbytes); 1095 (offset + omap_oobinfo.eccbytes);
1542 1096
1543 info->nand.ecc.layout = &omap_oobinfo; 1097 info->nand.ecc.layout = &omap_oobinfo;
1544 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1545 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1546 /* build OOB layout for BCH ECC correction */
1547 err = omap3_init_bch_tail(&info->mtd);
1548 if (err) {
1549 err = -EINVAL;
1550 goto out_release_mem_region;
1551 }
1552 } 1098 }
1553 1099
1554 /* second phase scan */ 1100 /* second phase scan */
@@ -1557,21 +1103,20 @@ static int omap_nand_probe(struct platform_device *pdev)
1557 goto out_release_mem_region; 1103 goto out_release_mem_region;
1558 } 1104 }
1559 1105
1560 mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts, 1106 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1561 pdata->nr_parts); 1107 if (err > 0)
1108 mtd_device_register(&info->mtd, info->parts, err);
1109 else if (pdata->parts)
1110 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
1111 else
1112 mtd_device_register(&info->mtd, NULL, 0);
1562 1113
1563 platform_set_drvdata(pdev, &info->mtd); 1114 platform_set_drvdata(pdev, &info->mtd);
1564 1115
1565 return 0; 1116 return 0;
1566 1117
1567out_release_mem_region: 1118out_release_mem_region:
1568 if (info->dma) 1119 release_mem_region(info->phys_base, NAND_IO_SIZE);
1569 dma_release_channel(info->dma);
1570 if (info->gpmc_irq_count > 0)
1571 free_irq(info->gpmc_irq_count, info);
1572 if (info->gpmc_irq_fifo > 0)
1573 free_irq(info->gpmc_irq_fifo, info);
1574 release_mem_region(info->phys_base, info->mem_size);
1575out_free_info: 1120out_free_info:
1576 kfree(info); 1121 kfree(info);
1577 1122
@@ -1583,22 +1128,18 @@ static int omap_nand_remove(struct platform_device *pdev)
1583 struct mtd_info *mtd = platform_get_drvdata(pdev); 1128 struct mtd_info *mtd = platform_get_drvdata(pdev);
1584 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1129 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1585 mtd); 1130 mtd);
1586 omap3_free_bch(&info->mtd);
1587 1131
1588 platform_set_drvdata(pdev, NULL); 1132 platform_set_drvdata(pdev, NULL);
1589 if (info->dma) 1133 if (info->dma_ch != -1)
1590 dma_release_channel(info->dma); 1134 omap_free_dma(info->dma_ch);
1591 1135
1592 if (info->gpmc_irq_count > 0) 1136 if (info->gpmc_irq)
1593 free_irq(info->gpmc_irq_count, info); 1137 free_irq(info->gpmc_irq, info);
1594 if (info->gpmc_irq_fifo > 0)
1595 free_irq(info->gpmc_irq_fifo, info);
1596 1138
1597 /* Release NAND device, its internal structures and partitions */ 1139 /* Release NAND device, its internal structures and partitions */
1598 nand_release(&info->mtd); 1140 nand_release(&info->mtd);
1599 iounmap(info->nand.IO_ADDR_R); 1141 iounmap(info->nand.IO_ADDR_R);
1600 release_mem_region(info->phys_base, info->mem_size); 1142 kfree(&info->mtd);
1601 kfree(info);
1602 return 0; 1143 return 0;
1603} 1144}
1604 1145
@@ -1611,7 +1152,20 @@ static struct platform_driver omap_nand_driver = {
1611 }, 1152 },
1612}; 1153};
1613 1154
1614module_platform_driver(omap_nand_driver); 1155static int __init omap_nand_init(void)
1156{
1157 pr_info("%s driver initializing\n", DRIVER_NAME);
1158
1159 return platform_driver_register(&omap_nand_driver);
1160}
1161
1162static void __exit omap_nand_exit(void)
1163{
1164 platform_driver_unregister(&omap_nand_driver);
1165}
1166
1167module_init(omap_nand_init);
1168module_exit(omap_nand_exit);
1615 1169
1616MODULE_ALIAS("platform:" DRIVER_NAME); 1170MODULE_ALIAS("platform:" DRIVER_NAME);
1617MODULE_LICENSE("GPL"); 1171MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index cd72b9299f6..7794d0680f9 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -13,15 +13,15 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/of.h>
17#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h> 17#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
20#include <linux/clk.h>
21#include <linux/err.h>
22#include <asm/io.h> 19#include <asm/io.h>
23#include <asm/sizes.h> 20#include <asm/sizes.h>
24#include <linux/platform_data/mtd-orion_nand.h> 21#include <mach/hardware.h>
22#include <plat/orion_nand.h>
23
24static const char *part_probes[] = { "cmdlinepart", NULL };
25 25
26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
27{ 27{
@@ -76,14 +76,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
76static int __init orion_nand_probe(struct platform_device *pdev) 76static int __init orion_nand_probe(struct platform_device *pdev)
77{ 77{
78 struct mtd_info *mtd; 78 struct mtd_info *mtd;
79 struct mtd_part_parser_data ppdata = {};
80 struct nand_chip *nc; 79 struct nand_chip *nc;
81 struct orion_nand_data *board; 80 struct orion_nand_data *board;
82 struct resource *res; 81 struct resource *res;
83 struct clk *clk;
84 void __iomem *io_base; 82 void __iomem *io_base;
85 int ret = 0; 83 int ret = 0;
86 u32 val = 0; 84 struct mtd_partition *partitions = NULL;
85 int num_part = 0;
87 86
88 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 87 nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
89 if (!nc) { 88 if (!nc) {
@@ -106,32 +105,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
106 goto no_res; 105 goto no_res;
107 } 106 }
108 107
109 if (pdev->dev.of_node) { 108 board = pdev->dev.platform_data;
110 board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
111 GFP_KERNEL);
112 if (!board) {
113 printk(KERN_ERR "orion_nand: failed to allocate board structure.\n");
114 ret = -ENOMEM;
115 goto no_res;
116 }
117 if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
118 board->cle = (u8)val;
119 else
120 board->cle = 0;
121 if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
122 board->ale = (u8)val;
123 else
124 board->ale = 1;
125 if (!of_property_read_u32(pdev->dev.of_node,
126 "bank-width", &val))
127 board->width = (u8)val * 8;
128 else
129 board->width = 8;
130 if (!of_property_read_u32(pdev->dev.of_node,
131 "chip-delay", &val))
132 board->chip_delay = (u8)val;
133 } else
134 board = pdev->dev.platform_data;
135 109
136 mtd->priv = nc; 110 mtd->priv = nc;
137 mtd->owner = THIS_MODULE; 111 mtd->owner = THIS_MODULE;
@@ -145,10 +119,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
145 if (board->chip_delay) 119 if (board->chip_delay)
146 nc->chip_delay = board->chip_delay; 120 nc->chip_delay = board->chip_delay;
147 121
148 WARN(board->width > 16,
149 "%d bit bus width out of range",
150 board->width);
151
152 if (board->width == 16) 122 if (board->width == 16)
153 nc->options |= NAND_BUSWIDTH_16; 123 nc->options |= NAND_BUSWIDTH_16;
154 124
@@ -157,23 +127,22 @@ static int __init orion_nand_probe(struct platform_device *pdev)
157 127
158 platform_set_drvdata(pdev, mtd); 128 platform_set_drvdata(pdev, mtd);
159 129
160 /* Not all platforms can gate the clock, so it is not
161 an error if the clock does not exists. */
162 clk = clk_get(&pdev->dev, NULL);
163 if (!IS_ERR(clk)) {
164 clk_prepare_enable(clk);
165 clk_put(clk);
166 }
167
168 if (nand_scan(mtd, 1)) { 130 if (nand_scan(mtd, 1)) {
169 ret = -ENXIO; 131 ret = -ENXIO;
170 goto no_dev; 132 goto no_dev;
171 } 133 }
172 134
135#ifdef CONFIG_MTD_CMDLINE_PARTS
173 mtd->name = "orion_nand"; 136 mtd->name = "orion_nand";
174 ppdata.of_node = pdev->dev.of_node; 137 num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
175 ret = mtd_device_parse_register(mtd, NULL, &ppdata, 138#endif
176 board->parts, board->nr_parts); 139 /* If cmdline partitions have been passed, let them be used */
140 if (num_part <= 0) {
141 num_part = board->nr_parts;
142 partitions = board->parts;
143 }
144
145 ret = mtd_device_register(mtd, partitions, num_part);
177 if (ret) { 146 if (ret) {
178 nand_release(mtd); 147 nand_release(mtd);
179 goto no_dev; 148 goto no_dev;
@@ -182,10 +151,6 @@ static int __init orion_nand_probe(struct platform_device *pdev)
182 return 0; 151 return 0;
183 152
184no_dev: 153no_dev:
185 if (!IS_ERR(clk)) {
186 clk_disable_unprepare(clk);
187 clk_put(clk);
188 }
189 platform_set_drvdata(pdev, NULL); 154 platform_set_drvdata(pdev, NULL);
190 iounmap(io_base); 155 iounmap(io_base);
191no_res: 156no_res:
@@ -194,11 +159,10 @@ no_res:
194 return ret; 159 return ret;
195} 160}
196 161
197static int orion_nand_remove(struct platform_device *pdev) 162static int __devexit orion_nand_remove(struct platform_device *pdev)
198{ 163{
199 struct mtd_info *mtd = platform_get_drvdata(pdev); 164 struct mtd_info *mtd = platform_get_drvdata(pdev);
200 struct nand_chip *nc = mtd->priv; 165 struct nand_chip *nc = mtd->priv;
201 struct clk *clk;
202 166
203 nand_release(mtd); 167 nand_release(mtd);
204 168
@@ -206,28 +170,14 @@ static int orion_nand_remove(struct platform_device *pdev)
206 170
207 kfree(nc); 171 kfree(nc);
208 172
209 clk = clk_get(&pdev->dev, NULL);
210 if (!IS_ERR(clk)) {
211 clk_disable_unprepare(clk);
212 clk_put(clk);
213 }
214
215 return 0; 173 return 0;
216} 174}
217 175
218#ifdef CONFIG_OF
219static struct of_device_id orion_nand_of_match_table[] = {
220 { .compatible = "marvell,orion-nand", },
221 {},
222};
223#endif
224
225static struct platform_driver orion_nand_driver = { 176static struct platform_driver orion_nand_driver = {
226 .remove = orion_nand_remove, 177 .remove = __devexit_p(orion_nand_remove),
227 .driver = { 178 .driver = {
228 .name = "orion_nand", 179 .name = "orion_nand",
229 .owner = THIS_MODULE, 180 .owner = THIS_MODULE,
230 .of_match_table = of_match_ptr(orion_nand_of_match_table),
231 }, 181 },
232}; 182};
233 183
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 5a67082c07e..b1aa41b8a4e 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,7 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); 89 return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
90} 90}
91 91
92static int pasemi_nand_probe(struct platform_device *ofdev) 92static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
93{ 93{
94 struct pci_dev *pdev; 94 struct pci_dev *pdev;
95 struct device_node *np = ofdev->dev.of_node; 95 struct device_node *np = ofdev->dev.of_node;
@@ -155,7 +155,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
155 chip->ecc.mode = NAND_ECC_SOFT; 155 chip->ecc.mode = NAND_ECC_SOFT;
156 156
157 /* Enable the following for a flash based bad block table */ 157 /* Enable the following for a flash based bad block table */
158 chip->bbt_options = NAND_BBT_USE_FLASH; 158 chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
159 159
160 /* Scan to find existence of the device */ 160 /* Scan to find existence of the device */
161 if (nand_scan(pasemi_nand_mtd, 1)) { 161 if (nand_scan(pasemi_nand_mtd, 1)) {
@@ -184,7 +184,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
184 return err; 184 return err;
185} 185}
186 186
187static int pasemi_nand_remove(struct platform_device *ofdev) 187static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
188{ 188{
189 struct nand_chip *chip; 189 struct nand_chip *chip;
190 190
@@ -229,7 +229,17 @@ static struct platform_driver pasemi_nand_driver =
229 .remove = pasemi_nand_remove, 229 .remove = pasemi_nand_remove,
230}; 230};
231 231
232module_platform_driver(pasemi_nand_driver); 232static int __init pasemi_nand_init(void)
233{
234 return platform_driver_register(&pasemi_nand_driver);
235}
236module_init(pasemi_nand_init);
237
238static void __exit pasemi_nand_exit(void)
239{
240 platform_driver_unregister(&pasemi_nand_driver);
241}
242module_exit(pasemi_nand_exit);
233 243
234MODULE_LICENSE("GPL"); 244MODULE_LICENSE("GPL");
235MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); 245MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index c004566a9ad..633c04bf76f 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,27 +21,20 @@ struct plat_nand_data {
21 struct nand_chip chip; 21 struct nand_chip chip;
22 struct mtd_info mtd; 22 struct mtd_info mtd;
23 void __iomem *io_base; 23 void __iomem *io_base;
24 int nr_parts;
25 struct mtd_partition *parts;
24}; 26};
25 27
26static const char *part_probe_types[] = { "cmdlinepart", NULL };
27
28/* 28/*
29 * Probe for the NAND device. 29 * Probe for the NAND device.
30 */ 30 */
31static int plat_nand_probe(struct platform_device *pdev) 31static int __devinit plat_nand_probe(struct platform_device *pdev)
32{ 32{
33 struct platform_nand_data *pdata = pdev->dev.platform_data; 33 struct platform_nand_data *pdata = pdev->dev.platform_data;
34 struct mtd_part_parser_data ppdata;
35 struct plat_nand_data *data; 34 struct plat_nand_data *data;
36 struct resource *res; 35 struct resource *res;
37 const char **part_types;
38 int err = 0; 36 int err = 0;
39 37
40 if (!pdata) {
41 dev_err(&pdev->dev, "platform_nand_data is missing\n");
42 return -EINVAL;
43 }
44
45 if (pdata->chip.nr_chips < 1) { 38 if (pdata->chip.nr_chips < 1) {
46 dev_err(&pdev->dev, "invalid number of chips specified\n"); 39 dev_err(&pdev->dev, "invalid number of chips specified\n");
47 return -EINVAL; 40 return -EINVAL;
@@ -84,10 +77,8 @@ static int plat_nand_probe(struct platform_device *pdev)
84 data->chip.select_chip = pdata->ctrl.select_chip; 77 data->chip.select_chip = pdata->ctrl.select_chip;
85 data->chip.write_buf = pdata->ctrl.write_buf; 78 data->chip.write_buf = pdata->ctrl.write_buf;
86 data->chip.read_buf = pdata->ctrl.read_buf; 79 data->chip.read_buf = pdata->ctrl.read_buf;
87 data->chip.read_byte = pdata->ctrl.read_byte;
88 data->chip.chip_delay = pdata->chip.chip_delay; 80 data->chip.chip_delay = pdata->chip.chip_delay;
89 data->chip.options |= pdata->chip.options; 81 data->chip.options |= pdata->chip.options;
90 data->chip.bbt_options |= pdata->chip.bbt_options;
91 82
92 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; 83 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
93 data->chip.ecc.layout = pdata->chip.ecclayout; 84 data->chip.ecc.layout = pdata->chip.ecclayout;
@@ -108,12 +99,23 @@ static int plat_nand_probe(struct platform_device *pdev)
108 goto out; 99 goto out;
109 } 100 }
110 101
111 part_types = pdata->chip.part_probe_types ? : part_probe_types; 102 if (pdata->chip.part_probe_types) {
112 103 err = parse_mtd_partitions(&data->mtd,
113 ppdata.of_node = pdev->dev.of_node; 104 pdata->chip.part_probe_types,
114 err = mtd_device_parse_register(&data->mtd, part_types, &ppdata, 105 &data->parts, 0);
115 pdata->chip.partitions, 106 if (err > 0) {
116 pdata->chip.nr_partitions); 107 mtd_device_register(&data->mtd, data->parts, err);
108 return 0;
109 }
110 }
111 if (pdata->chip.set_parts)
112 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
113 if (pdata->chip.partitions) {
114 data->parts = pdata->chip.partitions;
115 err = mtd_device_register(&data->mtd, data->parts,
116 pdata->chip.nr_partitions);
117 } else
118 err = mtd_device_register(&data->mtd, NULL, 0);
117 119
118 if (!err) 120 if (!err)
119 return err; 121 return err;
@@ -134,7 +136,7 @@ out_free:
134/* 136/*
135 * Remove a NAND device. 137 * Remove a NAND device.
136 */ 138 */
137static int plat_nand_remove(struct platform_device *pdev) 139static int __devexit plat_nand_remove(struct platform_device *pdev)
138{ 140{
139 struct plat_nand_data *data = platform_get_drvdata(pdev); 141 struct plat_nand_data *data = platform_get_drvdata(pdev);
140 struct platform_nand_data *pdata = pdev->dev.platform_data; 142 struct platform_nand_data *pdata = pdev->dev.platform_data;
@@ -143,6 +145,8 @@ static int plat_nand_remove(struct platform_device *pdev)
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 145 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 146
145 nand_release(&data->mtd); 147 nand_release(&data->mtd);
148 if (data->parts && data->parts != pdata->chip.partitions)
149 kfree(data->parts);
146 if (pdata->ctrl.remove) 150 if (pdata->ctrl.remove)
147 pdata->ctrl.remove(pdev); 151 pdata->ctrl.remove(pdev);
148 iounmap(data->io_base); 152 iounmap(data->io_base);
@@ -152,23 +156,27 @@ static int plat_nand_remove(struct platform_device *pdev)
152 return 0; 156 return 0;
153} 157}
154 158
155static const struct of_device_id plat_nand_match[] = {
156 { .compatible = "gen_nand" },
157 {},
158};
159MODULE_DEVICE_TABLE(of, plat_nand_match);
160
161static struct platform_driver plat_nand_driver = { 159static struct platform_driver plat_nand_driver = {
162 .probe = plat_nand_probe, 160 .probe = plat_nand_probe,
163 .remove = plat_nand_remove, 161 .remove = __devexit_p(plat_nand_remove),
164 .driver = { 162 .driver = {
165 .name = "gen_nand", 163 .name = "gen_nand",
166 .owner = THIS_MODULE, 164 .owner = THIS_MODULE,
167 .of_match_table = plat_nand_match,
168 }, 165 },
169}; 166};
170 167
171module_platform_driver(plat_nand_driver); 168static int __init plat_nand_init(void)
169{
170 return platform_driver_register(&plat_nand_driver);
171}
172
173static void __exit plat_nand_exit(void)
174{
175 platform_driver_unregister(&plat_nand_driver);
176}
177
178module_init(plat_nand_init);
179module_exit(plat_nand_exit);
172 180
173MODULE_LICENSE("GPL"); 181MODULE_LICENSE("GPL");
174MODULE_AUTHOR("Vitaly Wool"); 182MODULE_AUTHOR("Vitaly Wool");
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 0ddd90e5788..3bbb796b451 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -99,6 +99,8 @@ static struct mtd_partition partition_info_evb[] = {
99 99
100#define NUM_PARTITIONS 1 100#define NUM_PARTITIONS 1
101 101
102extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
103
102/* 104/*
103 * hardware specific access to control-lines 105 * hardware specific access to control-lines
104 */ 106 */
@@ -185,12 +187,18 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo)
185} 187}
186#endif 188#endif
187 189
190const char *part_probes[] = { "cmdlinepart", NULL };
191const char *part_probes_evb[] = { "cmdlinepart", NULL };
192
188/* 193/*
189 * Main initialization routine 194 * Main initialization routine
190 */ 195 */
191static int __init ppchameleonevb_init(void) 196static int __init ppchameleonevb_init(void)
192{ 197{
193 struct nand_chip *this; 198 struct nand_chip *this;
199 const char *part_type = 0;
200 int mtd_parts_nb = 0;
201 struct mtd_partition *mtd_parts = 0;
194 void __iomem *ppchameleon_fio_base; 202 void __iomem *ppchameleon_fio_base;
195 void __iomem *ppchameleonevb_fio_base; 203 void __iomem *ppchameleonevb_fio_base;
196 204
@@ -273,12 +281,24 @@ static int __init ppchameleonevb_init(void)
273#endif 281#endif
274 282
275 ppchameleon_mtd->name = "ppchameleon-nand"; 283 ppchameleon_mtd->name = "ppchameleon-nand";
284 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
285 if (mtd_parts_nb > 0)
286 part_type = "command line";
287 else
288 mtd_parts_nb = 0;
289
290 if (mtd_parts_nb == 0) {
291 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
292 mtd_parts = partition_info_me;
293 else
294 mtd_parts = partition_info_hi;
295 mtd_parts_nb = NUM_PARTITIONS;
296 part_type = "static";
297 }
276 298
277 /* Register the partitions */ 299 /* Register the partitions */
278 mtd_device_parse_register(ppchameleon_mtd, NULL, NULL, 300 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
279 ppchameleon_mtd->size == NAND_SMALL_SIZE ? 301 mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
280 partition_info_me : partition_info_hi,
281 NUM_PARTITIONS);
282 302
283 nand_evb_init: 303 nand_evb_init:
284 /**************************** 304 /****************************
@@ -362,12 +382,21 @@ static int __init ppchameleonevb_init(void)
362 } 382 }
363 383
364 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 384 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
385 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
386 if (mtd_parts_nb > 0)
387 part_type = "command line";
388 else
389 mtd_parts_nb = 0;
390
391 if (mtd_parts_nb == 0) {
392 mtd_parts = partition_info_evb;
393 mtd_parts_nb = NUM_PARTITIONS;
394 part_type = "static";
395 }
365 396
366 /* Register the partitions */ 397 /* Register the partitions */
367 mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL, 398 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
368 ppchameleon_mtd->size == NAND_SMALL_SIZE ? 399 mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
369 partition_info_me : partition_info_hi,
370 NUM_PARTITIONS);
371 400
372 /* Return happy */ 401 /* Return happy */
373 return 0; 402 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 37ee75c7bac..30689cc2b3c 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -22,11 +22,9 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/of.h>
26#include <linux/of_device.h>
27 25
28#include <mach/dma.h> 26#include <mach/dma.h>
29#include <linux/platform_data/mtd-nand-pxa3xx.h> 27#include <plat/pxa3xx_nand.h>
30 28
31#define CHIP_DELAY_TIMEOUT (2 * HZ/10) 29#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
32#define NAND_STOP_DELAY (2 * HZ/50) 30#define NAND_STOP_DELAY (2 * HZ/50)
@@ -112,7 +110,6 @@ enum {
112 110
113enum { 111enum {
114 STATE_IDLE = 0, 112 STATE_IDLE = 0,
115 STATE_PREPARED,
116 STATE_CMD_HANDLE, 113 STATE_CMD_HANDLE,
117 STATE_DMA_READING, 114 STATE_DMA_READING,
118 STATE_DMA_WRITING, 115 STATE_DMA_WRITING,
@@ -123,40 +120,21 @@ enum {
123 STATE_READY, 120 STATE_READY,
124}; 121};
125 122
126struct pxa3xx_nand_host {
127 struct nand_chip chip;
128 struct pxa3xx_nand_cmdset *cmdset;
129 struct mtd_info *mtd;
130 void *info_data;
131
132 /* page size of attached chip */
133 unsigned int page_size;
134 int use_ecc;
135 int cs;
136
137 /* calculated from pxa3xx_nand_flash data */
138 unsigned int col_addr_cycles;
139 unsigned int row_addr_cycles;
140 size_t read_id_bytes;
141
142 /* cached register value */
143 uint32_t reg_ndcr;
144 uint32_t ndtr0cs0;
145 uint32_t ndtr1cs0;
146};
147
148struct pxa3xx_nand_info { 123struct pxa3xx_nand_info {
124 struct nand_chip nand_chip;
125
149 struct nand_hw_control controller; 126 struct nand_hw_control controller;
150 struct platform_device *pdev; 127 struct platform_device *pdev;
128 struct pxa3xx_nand_cmdset *cmdset;
151 129
152 struct clk *clk; 130 struct clk *clk;
153 void __iomem *mmio_base; 131 void __iomem *mmio_base;
154 unsigned long mmio_phys; 132 unsigned long mmio_phys;
155 struct completion cmd_complete;
156 133
157 unsigned int buf_start; 134 unsigned int buf_start;
158 unsigned int buf_count; 135 unsigned int buf_count;
159 136
137 struct mtd_info *mtd;
160 /* DMA information */ 138 /* DMA information */
161 int drcmr_dat; 139 int drcmr_dat;
162 int drcmr_cmd; 140 int drcmr_cmd;
@@ -164,30 +142,47 @@ struct pxa3xx_nand_info {
164 unsigned char *data_buff; 142 unsigned char *data_buff;
165 unsigned char *oob_buff; 143 unsigned char *oob_buff;
166 dma_addr_t data_buff_phys; 144 dma_addr_t data_buff_phys;
145 size_t data_buff_size;
167 int data_dma_ch; 146 int data_dma_ch;
168 struct pxa_dma_desc *data_desc; 147 struct pxa_dma_desc *data_desc;
169 dma_addr_t data_desc_addr; 148 dma_addr_t data_desc_addr;
170 149
171 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT]; 150 uint32_t reg_ndcr;
151
152 /* saved column/page_addr during CMD_SEQIN */
153 int seqin_column;
154 int seqin_page_addr;
155
156 /* relate to the command */
172 unsigned int state; 157 unsigned int state;
173 158
174 int cs;
175 int use_ecc; /* use HW ECC ? */ 159 int use_ecc; /* use HW ECC ? */
176 int use_dma; /* use DMA ? */ 160 int use_dma; /* use DMA ? */
177 int is_ready; 161 int is_ready;
178 162
179 unsigned int page_size; /* page size of attached chip */ 163 unsigned int page_size; /* page size of attached chip */
180 unsigned int data_size; /* data size in FIFO */ 164 unsigned int data_size; /* data size in FIFO */
181 unsigned int oob_size;
182 int retcode; 165 int retcode;
166 struct completion cmd_complete;
183 167
184 /* generated NDCBx register values */ 168 /* generated NDCBx register values */
185 uint32_t ndcb0; 169 uint32_t ndcb0;
186 uint32_t ndcb1; 170 uint32_t ndcb1;
187 uint32_t ndcb2; 171 uint32_t ndcb2;
172
173 /* timing calcuted from setting */
174 uint32_t ndtr0cs0;
175 uint32_t ndtr1cs0;
176
177 /* calculated from pxa3xx_nand_flash data */
178 size_t oob_size;
179 size_t read_id_bytes;
180
181 unsigned int col_addr_cycles;
182 unsigned int row_addr_cycles;
188}; 183};
189 184
190static bool use_dma = 1; 185static int use_dma = 1;
191module_param(use_dma, bool, 0444); 186module_param(use_dma, bool, 0444);
192MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW"); 187MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
193 188
@@ -230,7 +225,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
230/* Define a default flash type setting serve as flash detecting only */ 225/* Define a default flash type setting serve as flash detecting only */
231#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) 226#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
232 227
233const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL}; 228const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
234 229
235#define NDTR0_tCH(c) (min((c), 7) << 19) 230#define NDTR0_tCH(c) (min((c), 7) << 19)
236#define NDTR0_tCS(c) (min((c), 7) << 16) 231#define NDTR0_tCS(c) (min((c), 7) << 16)
@@ -246,10 +241,9 @@ const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
246/* convert nano-seconds to nand flash controller clock cycles */ 241/* convert nano-seconds to nand flash controller clock cycles */
247#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 242#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
248 243
249static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, 244static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
250 const struct pxa3xx_nand_timing *t) 245 const struct pxa3xx_nand_timing *t)
251{ 246{
252 struct pxa3xx_nand_info *info = host->info_data;
253 unsigned long nand_clk = clk_get_rate(info->clk); 247 unsigned long nand_clk = clk_get_rate(info->clk);
254 uint32_t ndtr0, ndtr1; 248 uint32_t ndtr0, ndtr1;
255 249
@@ -264,24 +258,23 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
264 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | 258 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
265 NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); 259 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
266 260
267 host->ndtr0cs0 = ndtr0; 261 info->ndtr0cs0 = ndtr0;
268 host->ndtr1cs0 = ndtr1; 262 info->ndtr1cs0 = ndtr1;
269 nand_writel(info, NDTR0CS0, ndtr0); 263 nand_writel(info, NDTR0CS0, ndtr0);
270 nand_writel(info, NDTR1CS0, ndtr1); 264 nand_writel(info, NDTR1CS0, ndtr1);
271} 265}
272 266
273static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) 267static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
274{ 268{
275 struct pxa3xx_nand_host *host = info->host[info->cs]; 269 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
276 int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
277 270
278 info->data_size = host->page_size; 271 info->data_size = info->page_size;
279 if (!oob_enable) { 272 if (!oob_enable) {
280 info->oob_size = 0; 273 info->oob_size = 0;
281 return; 274 return;
282 } 275 }
283 276
284 switch (host->page_size) { 277 switch (info->page_size) {
285 case 2048: 278 case 2048:
286 info->oob_size = (info->use_ecc) ? 40 : 64; 279 info->oob_size = (info->use_ecc) ? 40 : 64;
287 break; 280 break;
@@ -299,10 +292,9 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
299 */ 292 */
300static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) 293static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
301{ 294{
302 struct pxa3xx_nand_host *host = info->host[info->cs];
303 uint32_t ndcr; 295 uint32_t ndcr;
304 296
305 ndcr = host->reg_ndcr; 297 ndcr = info->reg_ndcr;
306 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; 298 ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
307 ndcr |= info->use_dma ? NDCR_DMA_EN : 0; 299 ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
308 ndcr |= NDCR_ND_RUN; 300 ndcr |= NDCR_ND_RUN;
@@ -367,7 +359,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
367 DIV_ROUND_UP(info->oob_size, 4)); 359 DIV_ROUND_UP(info->oob_size, 4));
368 break; 360 break;
369 default: 361 default:
370 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 362 printk(KERN_ERR "%s: invalid state %d\n", __func__,
371 info->state); 363 info->state);
372 BUG(); 364 BUG();
373 } 365 }
@@ -393,7 +385,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
393 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; 385 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
394 break; 386 break;
395 default: 387 default:
396 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 388 printk(KERN_ERR "%s: invalid state %d\n", __func__,
397 info->state); 389 info->state);
398 BUG(); 390 BUG();
399 } 391 }
@@ -424,15 +416,6 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
424{ 416{
425 struct pxa3xx_nand_info *info = devid; 417 struct pxa3xx_nand_info *info = devid;
426 unsigned int status, is_completed = 0; 418 unsigned int status, is_completed = 0;
427 unsigned int ready, cmd_done;
428
429 if (info->cs == 0) {
430 ready = NDSR_FLASH_RDY;
431 cmd_done = NDSR_CS0_CMDD;
432 } else {
433 ready = NDSR_RDY;
434 cmd_done = NDSR_CS1_CMDD;
435 }
436 419
437 status = nand_readl(info, NDSR); 420 status = nand_readl(info, NDSR);
438 421
@@ -454,11 +437,11 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
454 handle_data_pio(info); 437 handle_data_pio(info);
455 } 438 }
456 } 439 }
457 if (status & cmd_done) { 440 if (status & NDSR_CS0_CMDD) {
458 info->state = STATE_CMD_DONE; 441 info->state = STATE_CMD_DONE;
459 is_completed = 1; 442 is_completed = 1;
460 } 443 }
461 if (status & ready) { 444 if (status & NDSR_FLASH_RDY) {
462 info->is_ready = 1; 445 info->is_ready = 1;
463 info->state = STATE_READY; 446 info->state = STATE_READY;
464 } 447 }
@@ -480,6 +463,12 @@ NORMAL_IRQ_EXIT:
480 return IRQ_HANDLED; 463 return IRQ_HANDLED;
481} 464}
482 465
466static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
467{
468 struct pxa3xx_nand_info *info = mtd->priv;
469 return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
470}
471
483static inline int is_buf_blank(uint8_t *buf, size_t len) 472static inline int is_buf_blank(uint8_t *buf, size_t len)
484{ 473{
485 for (; len > 0; len--) 474 for (; len > 0; len--)
@@ -492,12 +481,10 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
492 uint16_t column, int page_addr) 481 uint16_t column, int page_addr)
493{ 482{
494 uint16_t cmd; 483 uint16_t cmd;
495 int addr_cycle, exec_cmd; 484 int addr_cycle, exec_cmd, ndcb0;
496 struct pxa3xx_nand_host *host; 485 struct mtd_info *mtd = info->mtd;
497 struct mtd_info *mtd;
498 486
499 host = info->host[info->cs]; 487 ndcb0 = 0;
500 mtd = host->mtd;
501 addr_cycle = 0; 488 addr_cycle = 0;
502 exec_cmd = 1; 489 exec_cmd = 1;
503 490
@@ -508,10 +495,6 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
508 info->use_ecc = 0; 495 info->use_ecc = 0;
509 info->is_ready = 0; 496 info->is_ready = 0;
510 info->retcode = ERR_NONE; 497 info->retcode = ERR_NONE;
511 if (info->cs != 0)
512 info->ndcb0 = NDCB0_CSEL;
513 else
514 info->ndcb0 = 0;
515 498
516 switch (command) { 499 switch (command) {
517 case NAND_CMD_READ0: 500 case NAND_CMD_READ0:
@@ -529,19 +512,20 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
529 break; 512 break;
530 } 513 }
531 514
532 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles 515 info->ndcb0 = ndcb0;
533 + host->col_addr_cycles); 516 addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
517 + info->col_addr_cycles);
534 518
535 switch (command) { 519 switch (command) {
536 case NAND_CMD_READOOB: 520 case NAND_CMD_READOOB:
537 case NAND_CMD_READ0: 521 case NAND_CMD_READ0:
538 cmd = host->cmdset->read1; 522 cmd = info->cmdset->read1;
539 if (command == NAND_CMD_READOOB) 523 if (command == NAND_CMD_READOOB)
540 info->buf_start = mtd->writesize + column; 524 info->buf_start = mtd->writesize + column;
541 else 525 else
542 info->buf_start = column; 526 info->buf_start = column;
543 527
544 if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) 528 if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
545 info->ndcb0 |= NDCB0_CMD_TYPE(0) 529 info->ndcb0 |= NDCB0_CMD_TYPE(0)
546 | addr_cycle 530 | addr_cycle
547 | (cmd & NDCB0_CMD1_MASK); 531 | (cmd & NDCB0_CMD1_MASK);
@@ -553,7 +537,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
553 537
554 case NAND_CMD_SEQIN: 538 case NAND_CMD_SEQIN:
555 /* small page addr setting */ 539 /* small page addr setting */
556 if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { 540 if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
557 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) 541 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
558 | (column & 0xFF); 542 | (column & 0xFF);
559 543
@@ -580,7 +564,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
580 break; 564 break;
581 } 565 }
582 566
583 cmd = host->cmdset->program; 567 cmd = info->cmdset->program;
584 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 568 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
585 | NDCB0_AUTO_RS 569 | NDCB0_AUTO_RS
586 | NDCB0_ST_ROW_EN 570 | NDCB0_ST_ROW_EN
@@ -590,8 +574,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
590 break; 574 break;
591 575
592 case NAND_CMD_READID: 576 case NAND_CMD_READID:
593 cmd = host->cmdset->read_id; 577 cmd = info->cmdset->read_id;
594 info->buf_count = host->read_id_bytes; 578 info->buf_count = info->read_id_bytes;
595 info->ndcb0 |= NDCB0_CMD_TYPE(3) 579 info->ndcb0 |= NDCB0_CMD_TYPE(3)
596 | NDCB0_ADDR_CYC(1) 580 | NDCB0_ADDR_CYC(1)
597 | cmd; 581 | cmd;
@@ -599,7 +583,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
599 info->data_size = 8; 583 info->data_size = 8;
600 break; 584 break;
601 case NAND_CMD_STATUS: 585 case NAND_CMD_STATUS:
602 cmd = host->cmdset->read_status; 586 cmd = info->cmdset->read_status;
603 info->buf_count = 1; 587 info->buf_count = 1;
604 info->ndcb0 |= NDCB0_CMD_TYPE(4) 588 info->ndcb0 |= NDCB0_CMD_TYPE(4)
605 | NDCB0_ADDR_CYC(1) 589 | NDCB0_ADDR_CYC(1)
@@ -609,7 +593,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
609 break; 593 break;
610 594
611 case NAND_CMD_ERASE1: 595 case NAND_CMD_ERASE1:
612 cmd = host->cmdset->erase; 596 cmd = info->cmdset->erase;
613 info->ndcb0 |= NDCB0_CMD_TYPE(2) 597 info->ndcb0 |= NDCB0_CMD_TYPE(2)
614 | NDCB0_AUTO_RS 598 | NDCB0_AUTO_RS
615 | NDCB0_ADDR_CYC(3) 599 | NDCB0_ADDR_CYC(3)
@@ -620,7 +604,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
620 604
621 break; 605 break;
622 case NAND_CMD_RESET: 606 case NAND_CMD_RESET:
623 cmd = host->cmdset->reset; 607 cmd = info->cmdset->reset;
624 info->ndcb0 |= NDCB0_CMD_TYPE(5) 608 info->ndcb0 |= NDCB0_CMD_TYPE(5)
625 | cmd; 609 | cmd;
626 610
@@ -632,8 +616,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
632 616
633 default: 617 default:
634 exec_cmd = 0; 618 exec_cmd = 0;
635 dev_err(&info->pdev->dev, "non-supported command %x\n", 619 printk(KERN_ERR "pxa3xx-nand: non-supported"
636 command); 620 " command %x\n", command);
637 break; 621 break;
638 } 622 }
639 623
@@ -643,8 +627,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
643static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, 627static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
644 int column, int page_addr) 628 int column, int page_addr)
645{ 629{
646 struct pxa3xx_nand_host *host = mtd->priv; 630 struct pxa3xx_nand_info *info = mtd->priv;
647 struct pxa3xx_nand_info *info = host->info_data;
648 int ret, exec_cmd; 631 int ret, exec_cmd;
649 632
650 /* 633 /*
@@ -652,21 +635,9 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
652 * "byte" address into a "word" address appropriate 635 * "byte" address into a "word" address appropriate
653 * for indexing a word-oriented device 636 * for indexing a word-oriented device
654 */ 637 */
655 if (host->reg_ndcr & NDCR_DWIDTH_M) 638 if (info->reg_ndcr & NDCR_DWIDTH_M)
656 column /= 2; 639 column /= 2;
657 640
658 /*
659 * There may be different NAND chip hooked to
660 * different chip select, so check whether
661 * chip select has been changed, if yes, reset the timing
662 */
663 if (info->cs != host->cs) {
664 info->cs = host->cs;
665 nand_writel(info, NDTR0CS0, host->ndtr0cs0);
666 nand_writel(info, NDTR1CS0, host->ndtr1cs0);
667 }
668
669 info->state = STATE_PREPARED;
670 exec_cmd = prepare_command_pool(info, command, column, page_addr); 641 exec_cmd = prepare_command_pool(info, command, column, page_addr);
671 if (exec_cmd) { 642 if (exec_cmd) {
672 init_completion(&info->cmd_complete); 643 init_completion(&info->cmd_complete);
@@ -675,29 +646,25 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
675 ret = wait_for_completion_timeout(&info->cmd_complete, 646 ret = wait_for_completion_timeout(&info->cmd_complete,
676 CHIP_DELAY_TIMEOUT); 647 CHIP_DELAY_TIMEOUT);
677 if (!ret) { 648 if (!ret) {
678 dev_err(&info->pdev->dev, "Wait time out!!!\n"); 649 printk(KERN_ERR "Wait time out!!!\n");
679 /* Stop State Machine for next command cycle */ 650 /* Stop State Machine for next command cycle */
680 pxa3xx_nand_stop(info); 651 pxa3xx_nand_stop(info);
681 } 652 }
653 info->state = STATE_IDLE;
682 } 654 }
683 info->state = STATE_IDLE;
684} 655}
685 656
686static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 657static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
687 struct nand_chip *chip, const uint8_t *buf, int oob_required) 658 struct nand_chip *chip, const uint8_t *buf)
688{ 659{
689 chip->write_buf(mtd, buf, mtd->writesize); 660 chip->write_buf(mtd, buf, mtd->writesize);
690 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 661 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
691
692 return 0;
693} 662}
694 663
695static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 664static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
696 struct nand_chip *chip, uint8_t *buf, int oob_required, 665 struct nand_chip *chip, uint8_t *buf, int page)
697 int page)
698{ 666{
699 struct pxa3xx_nand_host *host = mtd->priv; 667 struct pxa3xx_nand_info *info = mtd->priv;
700 struct pxa3xx_nand_info *info = host->info_data;
701 668
702 chip->read_buf(mtd, buf, mtd->writesize); 669 chip->read_buf(mtd, buf, mtd->writesize);
703 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 670 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -728,8 +695,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
728 695
729static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) 696static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
730{ 697{
731 struct pxa3xx_nand_host *host = mtd->priv; 698 struct pxa3xx_nand_info *info = mtd->priv;
732 struct pxa3xx_nand_info *info = host->info_data;
733 char retval = 0xFF; 699 char retval = 0xFF;
734 700
735 if (info->buf_start < info->buf_count) 701 if (info->buf_start < info->buf_count)
@@ -741,8 +707,7 @@ static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
741 707
742static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) 708static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
743{ 709{
744 struct pxa3xx_nand_host *host = mtd->priv; 710 struct pxa3xx_nand_info *info = mtd->priv;
745 struct pxa3xx_nand_info *info = host->info_data;
746 u16 retval = 0xFFFF; 711 u16 retval = 0xFFFF;
747 712
748 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { 713 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
@@ -754,8 +719,7 @@ static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
754 719
755static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 720static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
756{ 721{
757 struct pxa3xx_nand_host *host = mtd->priv; 722 struct pxa3xx_nand_info *info = mtd->priv;
758 struct pxa3xx_nand_info *info = host->info_data;
759 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 723 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
760 724
761 memcpy(buf, info->data_buff + info->buf_start, real_len); 725 memcpy(buf, info->data_buff + info->buf_start, real_len);
@@ -765,14 +729,19 @@ static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
765static void pxa3xx_nand_write_buf(struct mtd_info *mtd, 729static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
766 const uint8_t *buf, int len) 730 const uint8_t *buf, int len)
767{ 731{
768 struct pxa3xx_nand_host *host = mtd->priv; 732 struct pxa3xx_nand_info *info = mtd->priv;
769 struct pxa3xx_nand_info *info = host->info_data;
770 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 733 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
771 734
772 memcpy(info->data_buff + info->buf_start, buf, real_len); 735 memcpy(info->data_buff + info->buf_start, buf, real_len);
773 info->buf_start += real_len; 736 info->buf_start += real_len;
774} 737}
775 738
739static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
740 const uint8_t *buf, int len)
741{
742 return 0;
743}
744
776static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) 745static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
777{ 746{
778 return; 747 return;
@@ -780,8 +749,7 @@ static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
780 749
781static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) 750static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
782{ 751{
783 struct pxa3xx_nand_host *host = mtd->priv; 752 struct pxa3xx_nand_info *info = mtd->priv;
784 struct pxa3xx_nand_info *info = host->info_data;
785 753
786 /* pxa3xx_nand_send_command has waited for command complete */ 754 /* pxa3xx_nand_send_command has waited for command complete */
787 if (this->state == FL_WRITING || this->state == FL_ERASING) { 755 if (this->state == FL_WRITING || this->state == FL_ERASING) {
@@ -804,70 +772,54 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
804{ 772{
805 struct platform_device *pdev = info->pdev; 773 struct platform_device *pdev = info->pdev;
806 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 774 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
807 struct pxa3xx_nand_host *host = info->host[info->cs];
808 uint32_t ndcr = 0x0; /* enable all interrupts */ 775 uint32_t ndcr = 0x0; /* enable all interrupts */
809 776
810 if (f->page_size != 2048 && f->page_size != 512) { 777 if (f->page_size != 2048 && f->page_size != 512)
811 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
812 return -EINVAL; 778 return -EINVAL;
813 }
814 779
815 if (f->flash_width != 16 && f->flash_width != 8) { 780 if (f->flash_width != 16 && f->flash_width != 8)
816 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
817 return -EINVAL; 781 return -EINVAL;
818 }
819 782
820 /* calculate flash information */ 783 /* calculate flash information */
821 host->cmdset = &default_cmdset; 784 info->cmdset = &default_cmdset;
822 host->page_size = f->page_size; 785 info->page_size = f->page_size;
823 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 786 info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
824 787
825 /* calculate addressing information */ 788 /* calculate addressing information */
826 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 789 info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
827 790
828 if (f->num_blocks * f->page_per_block > 65536) 791 if (f->num_blocks * f->page_per_block > 65536)
829 host->row_addr_cycles = 3; 792 info->row_addr_cycles = 3;
830 else 793 else
831 host->row_addr_cycles = 2; 794 info->row_addr_cycles = 2;
832 795
833 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; 796 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
834 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; 797 ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
835 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; 798 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
836 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; 799 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
837 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; 800 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
838 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; 801 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
839 802
840 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes); 803 ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
841 ndcr |= NDCR_SPARE_EN; /* enable spare by default */ 804 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
842 805
843 host->reg_ndcr = ndcr; 806 info->reg_ndcr = ndcr;
844 807
845 pxa3xx_nand_set_timing(host, f->timing); 808 pxa3xx_nand_set_timing(info, f->timing);
846 return 0; 809 return 0;
847} 810}
848 811
849static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) 812static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
850{ 813{
851 /*
852 * We set 0 by hard coding here, for we don't support keep_config
853 * when there is more than one chip attached to the controller
854 */
855 struct pxa3xx_nand_host *host = info->host[0];
856 uint32_t ndcr = nand_readl(info, NDCR); 814 uint32_t ndcr = nand_readl(info, NDCR);
815 info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
816 /* set info fields needed to read id */
817 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
818 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
819 info->cmdset = &default_cmdset;
857 820
858 if (ndcr & NDCR_PAGE_SZ) { 821 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
859 host->page_size = 2048; 822 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
860 host->read_id_bytes = 4;
861 } else {
862 host->page_size = 512;
863 host->read_id_bytes = 2;
864 }
865
866 host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
867 host->cmdset = &default_cmdset;
868
869 host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
870 host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
871 823
872 return 0; 824 return 0;
873} 825}
@@ -897,6 +849,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
897 return -ENOMEM; 849 return -ENOMEM;
898 } 850 }
899 851
852 info->data_buff_size = MAX_BUFF_SIZE;
900 info->data_desc = (void *)info->data_buff + data_desc_offset; 853 info->data_desc = (void *)info->data_buff + data_desc_offset;
901 info->data_desc_addr = info->data_buff_phys + data_desc_offset; 854 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
902 855
@@ -904,7 +857,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
904 pxa3xx_nand_data_dma_irq, info); 857 pxa3xx_nand_data_dma_irq, info);
905 if (info->data_dma_ch < 0) { 858 if (info->data_dma_ch < 0) {
906 dev_err(&pdev->dev, "failed to request data dma\n"); 859 dev_err(&pdev->dev, "failed to request data dma\n");
907 dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, 860 dma_free_coherent(&pdev->dev, info->data_buff_size,
908 info->data_buff, info->data_buff_phys); 861 info->data_buff, info->data_buff_phys);
909 return info->data_dma_ch; 862 return info->data_dma_ch;
910 } 863 }
@@ -914,25 +867,21 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
914 867
915static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) 868static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
916{ 869{
917 struct mtd_info *mtd; 870 struct mtd_info *mtd = info->mtd;
918 int ret; 871 struct nand_chip *chip = mtd->priv;
919 mtd = info->host[info->cs]->mtd;
920 /* use the common timing to make a try */
921 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
922 if (ret)
923 return ret;
924 872
925 pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); 873 /* use the common timing to make a try */
874 pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
875 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
926 if (info->is_ready) 876 if (info->is_ready)
877 return 1;
878 else
927 return 0; 879 return 0;
928
929 return -ENODEV;
930} 880}
931 881
932static int pxa3xx_nand_scan(struct mtd_info *mtd) 882static int pxa3xx_nand_scan(struct mtd_info *mtd)
933{ 883{
934 struct pxa3xx_nand_host *host = mtd->priv; 884 struct pxa3xx_nand_info *info = mtd->priv;
935 struct pxa3xx_nand_info *info = host->info_data;
936 struct platform_device *pdev = info->pdev; 885 struct platform_device *pdev = info->pdev;
937 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; 886 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
938 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL; 887 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
@@ -946,20 +895,22 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
946 goto KEEP_CONFIG; 895 goto KEEP_CONFIG;
947 896
948 ret = pxa3xx_nand_sensing(info); 897 ret = pxa3xx_nand_sensing(info);
949 if (ret) { 898 if (!ret) {
950 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", 899 kfree(mtd);
951 info->cs); 900 info->mtd = NULL;
901 printk(KERN_INFO "There is no nand chip on cs 0!\n");
952 902
953 return ret; 903 return -EINVAL;
954 } 904 }
955 905
956 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); 906 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
957 id = *((uint16_t *)(info->data_buff)); 907 id = *((uint16_t *)(info->data_buff));
958 if (id != 0) 908 if (id != 0)
959 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id); 909 printk(KERN_INFO "Detect a flash id %x\n", id);
960 else { 910 else {
961 dev_warn(&info->pdev->dev, 911 kfree(mtd);
962 "Read out ID 0, potential timing set wrong!!\n"); 912 info->mtd = NULL;
913 printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
963 914
964 return -EINVAL; 915 return -EINVAL;
965 } 916 }
@@ -977,17 +928,14 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
977 } 928 }
978 929
979 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { 930 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
980 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n"); 931 kfree(mtd);
932 info->mtd = NULL;
933 printk(KERN_ERR "ERROR!! flash not defined!!!\n");
981 934
982 return -EINVAL; 935 return -EINVAL;
983 } 936 }
984 937
985 ret = pxa3xx_nand_config_flash(info, f); 938 pxa3xx_nand_config_flash(info, f);
986 if (ret) {
987 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
988 return ret;
989 }
990
991 pxa3xx_flash_ids[0].name = f->name; 939 pxa3xx_flash_ids[0].name = f->name;
992 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; 940 pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
993 pxa3xx_flash_ids[0].pagesize = f->page_size; 941 pxa3xx_flash_ids[0].pagesize = f->page_size;
@@ -999,73 +947,61 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
999 pxa3xx_flash_ids[1].name = NULL; 947 pxa3xx_flash_ids[1].name = NULL;
1000 def = pxa3xx_flash_ids; 948 def = pxa3xx_flash_ids;
1001KEEP_CONFIG: 949KEEP_CONFIG:
1002 chip->ecc.mode = NAND_ECC_HW;
1003 chip->ecc.size = host->page_size;
1004 chip->ecc.strength = 1;
1005
1006 if (host->reg_ndcr & NDCR_DWIDTH_M)
1007 chip->options |= NAND_BUSWIDTH_16;
1008
1009 if (nand_scan_ident(mtd, 1, def)) 950 if (nand_scan_ident(mtd, 1, def))
1010 return -ENODEV; 951 return -ENODEV;
1011 /* calculate addressing information */ 952 /* calculate addressing information */
1012 if (mtd->writesize >= 2048) 953 info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
1013 host->col_addr_cycles = 2;
1014 else
1015 host->col_addr_cycles = 1;
1016
1017 info->oob_buff = info->data_buff + mtd->writesize; 954 info->oob_buff = info->data_buff + mtd->writesize;
1018 if ((mtd->size >> chip->page_shift) > 65536) 955 if ((mtd->size >> chip->page_shift) > 65536)
1019 host->row_addr_cycles = 3; 956 info->row_addr_cycles = 3;
1020 else 957 else
1021 host->row_addr_cycles = 2; 958 info->row_addr_cycles = 2;
1022
1023 mtd->name = mtd_names[0]; 959 mtd->name = mtd_names[0];
960 chip->ecc.mode = NAND_ECC_HW;
961 chip->ecc.size = info->page_size;
962
963 chip->options = (info->reg_ndcr & NDCR_DWIDTH_M) ? NAND_BUSWIDTH_16 : 0;
964 chip->options |= NAND_NO_AUTOINCR;
965 chip->options |= NAND_NO_READRDY;
966
1024 return nand_scan_tail(mtd); 967 return nand_scan_tail(mtd);
1025} 968}
1026 969
1027static int alloc_nand_resource(struct platform_device *pdev) 970static
971struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
1028{ 972{
1029 struct pxa3xx_nand_platform_data *pdata;
1030 struct pxa3xx_nand_info *info; 973 struct pxa3xx_nand_info *info;
1031 struct pxa3xx_nand_host *host; 974 struct nand_chip *chip;
1032 struct nand_chip *chip = NULL;
1033 struct mtd_info *mtd; 975 struct mtd_info *mtd;
1034 struct resource *r; 976 struct resource *r;
1035 int ret, irq, cs; 977 int ret, irq;
1036 978
1037 pdata = pdev->dev.platform_data; 979 mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
1038 info = kzalloc(sizeof(*info) + (sizeof(*mtd) + 980 GFP_KERNEL);
1039 sizeof(*host)) * pdata->num_cs, GFP_KERNEL); 981 if (!mtd) {
1040 if (!info) {
1041 dev_err(&pdev->dev, "failed to allocate memory\n"); 982 dev_err(&pdev->dev, "failed to allocate memory\n");
1042 return -ENOMEM; 983 return NULL;
1043 } 984 }
1044 985
986 info = (struct pxa3xx_nand_info *)(&mtd[1]);
987 chip = (struct nand_chip *)(&mtd[1]);
1045 info->pdev = pdev; 988 info->pdev = pdev;
1046 for (cs = 0; cs < pdata->num_cs; cs++) { 989 info->mtd = mtd;
1047 mtd = (struct mtd_info *)((unsigned int)&info[1] + 990 mtd->priv = info;
1048 (sizeof(*mtd) + sizeof(*host)) * cs); 991 mtd->owner = THIS_MODULE;
1049 chip = (struct nand_chip *)(&mtd[1]); 992
1050 host = (struct pxa3xx_nand_host *)chip; 993 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1051 info->host[cs] = host; 994 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1052 host->mtd = mtd; 995 chip->controller = &info->controller;
1053 host->cs = cs; 996 chip->waitfunc = pxa3xx_nand_waitfunc;
1054 host->info_data = info; 997 chip->select_chip = pxa3xx_nand_select_chip;
1055 mtd->priv = host; 998 chip->dev_ready = pxa3xx_nand_dev_ready;
1056 mtd->owner = THIS_MODULE; 999 chip->cmdfunc = pxa3xx_nand_cmdfunc;
1057 1000 chip->read_word = pxa3xx_nand_read_word;
1058 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; 1001 chip->read_byte = pxa3xx_nand_read_byte;
1059 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; 1002 chip->read_buf = pxa3xx_nand_read_buf;
1060 chip->controller = &info->controller; 1003 chip->write_buf = pxa3xx_nand_write_buf;
1061 chip->waitfunc = pxa3xx_nand_waitfunc; 1004 chip->verify_buf = pxa3xx_nand_verify_buf;
1062 chip->select_chip = pxa3xx_nand_select_chip;
1063 chip->cmdfunc = pxa3xx_nand_cmdfunc;
1064 chip->read_word = pxa3xx_nand_read_word;
1065 chip->read_byte = pxa3xx_nand_read_byte;
1066 chip->read_buf = pxa3xx_nand_read_buf;
1067 chip->write_buf = pxa3xx_nand_write_buf;
1068 }
1069 1005
1070 spin_lock_init(&chip->controller->lock); 1006 spin_lock_init(&chip->controller->lock);
1071 init_waitqueue_head(&chip->controller->wq); 1007 init_waitqueue_head(&chip->controller->wq);
@@ -1077,31 +1013,21 @@ static int alloc_nand_resource(struct platform_device *pdev)
1077 } 1013 }
1078 clk_enable(info->clk); 1014 clk_enable(info->clk);
1079 1015
1080 /* 1016 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1081 * This is a dirty hack to make this driver work from devicetree 1017 if (r == NULL) {
1082 * bindings. It can be removed once we have a prober DMA controller 1018 dev_err(&pdev->dev, "no resource defined for data DMA\n");
1083 * framework for DT. 1019 ret = -ENXIO;
1084 */ 1020 goto fail_put_clk;
1085 if (pdev->dev.of_node && cpu_is_pxa3xx()) { 1021 }
1086 info->drcmr_dat = 97; 1022 info->drcmr_dat = r->start;
1087 info->drcmr_cmd = 99;
1088 } else {
1089 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1090 if (r == NULL) {
1091 dev_err(&pdev->dev, "no resource defined for data DMA\n");
1092 ret = -ENXIO;
1093 goto fail_put_clk;
1094 }
1095 info->drcmr_dat = r->start;
1096 1023
1097 r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1024 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1098 if (r == NULL) { 1025 if (r == NULL) {
1099 dev_err(&pdev->dev, "no resource defined for command DMA\n"); 1026 dev_err(&pdev->dev, "no resource defined for command DMA\n");
1100 ret = -ENXIO; 1027 ret = -ENXIO;
1101 goto fail_put_clk; 1028 goto fail_put_clk;
1102 }
1103 info->drcmr_cmd = r->start;
1104 } 1029 }
1030 info->drcmr_cmd = r->start;
1105 1031
1106 irq = platform_get_irq(pdev, 0); 1032 irq = platform_get_irq(pdev, 0);
1107 if (irq < 0) { 1033 if (irq < 0) {
@@ -1148,13 +1074,13 @@ static int alloc_nand_resource(struct platform_device *pdev)
1148 1074
1149 platform_set_drvdata(pdev, info); 1075 platform_set_drvdata(pdev, info);
1150 1076
1151 return 0; 1077 return info;
1152 1078
1153fail_free_buf: 1079fail_free_buf:
1154 free_irq(irq, info); 1080 free_irq(irq, info);
1155 if (use_dma) { 1081 if (use_dma) {
1156 pxa_free_dma(info->data_dma_ch); 1082 pxa_free_dma(info->data_dma_ch);
1157 dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, 1083 dma_free_coherent(&pdev->dev, info->data_buff_size,
1158 info->data_buff, info->data_buff_phys); 1084 info->data_buff, info->data_buff_phys);
1159 } else 1085 } else
1160 kfree(info->data_buff); 1086 kfree(info->data_buff);
@@ -1166,21 +1092,17 @@ fail_put_clk:
1166 clk_disable(info->clk); 1092 clk_disable(info->clk);
1167 clk_put(info->clk); 1093 clk_put(info->clk);
1168fail_free_mtd: 1094fail_free_mtd:
1169 kfree(info); 1095 kfree(mtd);
1170 return ret; 1096 return NULL;
1171} 1097}
1172 1098
1173static int pxa3xx_nand_remove(struct platform_device *pdev) 1099static int pxa3xx_nand_remove(struct platform_device *pdev)
1174{ 1100{
1175 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1101 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1176 struct pxa3xx_nand_platform_data *pdata; 1102 struct mtd_info *mtd = info->mtd;
1177 struct resource *r; 1103 struct resource *r;
1178 int irq, cs; 1104 int irq;
1179 1105
1180 if (!info)
1181 return 0;
1182
1183 pdata = pdev->dev.platform_data;
1184 platform_set_drvdata(pdev, NULL); 1106 platform_set_drvdata(pdev, NULL);
1185 1107
1186 irq = platform_get_irq(pdev, 0); 1108 irq = platform_get_irq(pdev, 0);
@@ -1188,7 +1110,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1188 free_irq(irq, info); 1110 free_irq(irq, info);
1189 if (use_dma) { 1111 if (use_dma) {
1190 pxa_free_dma(info->data_dma_ch); 1112 pxa_free_dma(info->data_dma_ch);
1191 dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE, 1113 dma_free_writecombine(&pdev->dev, info->data_buff_size,
1192 info->data_buff, info->data_buff_phys); 1114 info->data_buff, info->data_buff_phys);
1193 } else 1115 } else
1194 kfree(info->data_buff); 1116 kfree(info->data_buff);
@@ -1200,60 +1122,17 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1200 clk_disable(info->clk); 1122 clk_disable(info->clk);
1201 clk_put(info->clk); 1123 clk_put(info->clk);
1202 1124
1203 for (cs = 0; cs < pdata->num_cs; cs++) 1125 if (mtd) {
1204 nand_release(info->host[cs]->mtd); 1126 mtd_device_unregister(mtd);
1205 kfree(info); 1127 kfree(mtd);
1128 }
1206 return 0; 1129 return 0;
1207} 1130}
1208 1131
1209#ifdef CONFIG_OF
1210static struct of_device_id pxa3xx_nand_dt_ids[] = {
1211 { .compatible = "marvell,pxa3xx-nand" },
1212 {}
1213};
1214MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
1215
1216static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1217{
1218 struct pxa3xx_nand_platform_data *pdata;
1219 struct device_node *np = pdev->dev.of_node;
1220 const struct of_device_id *of_id =
1221 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1222
1223 if (!of_id)
1224 return 0;
1225
1226 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1227 if (!pdata)
1228 return -ENOMEM;
1229
1230 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1231 pdata->enable_arbiter = 1;
1232 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1233 pdata->keep_config = 1;
1234 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1235
1236 pdev->dev.platform_data = pdata;
1237
1238 return 0;
1239}
1240#else
1241static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1242{
1243 return 0;
1244}
1245#endif
1246
1247static int pxa3xx_nand_probe(struct platform_device *pdev) 1132static int pxa3xx_nand_probe(struct platform_device *pdev)
1248{ 1133{
1249 struct pxa3xx_nand_platform_data *pdata; 1134 struct pxa3xx_nand_platform_data *pdata;
1250 struct mtd_part_parser_data ppdata = {};
1251 struct pxa3xx_nand_info *info; 1135 struct pxa3xx_nand_info *info;
1252 int ret, cs, probe_success;
1253
1254 ret = pxa3xx_nand_probe_dt(pdev);
1255 if (ret)
1256 return ret;
1257 1136
1258 pdata = pdev->dev.platform_data; 1137 pdata = pdev->dev.platform_data;
1259 if (!pdata) { 1138 if (!pdata) {
@@ -1261,90 +1140,52 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1261 return -ENODEV; 1140 return -ENODEV;
1262 } 1141 }
1263 1142
1264 ret = alloc_nand_resource(pdev); 1143 info = alloc_nand_resource(pdev);
1265 if (ret) { 1144 if (info == NULL)
1266 dev_err(&pdev->dev, "alloc nand resource failed\n"); 1145 return -ENOMEM;
1267 return ret; 1146
1147 if (pxa3xx_nand_scan(info->mtd)) {
1148 dev_err(&pdev->dev, "failed to scan nand\n");
1149 pxa3xx_nand_remove(pdev);
1150 return -ENODEV;
1268 } 1151 }
1269 1152
1270 info = platform_get_drvdata(pdev); 1153 if (mtd_has_cmdlinepart()) {
1271 probe_success = 0; 1154 const char *probes[] = { "cmdlinepart", NULL };
1272 for (cs = 0; cs < pdata->num_cs; cs++) { 1155 struct mtd_partition *parts;
1273 info->cs = cs; 1156 int nr_parts;
1274 ret = pxa3xx_nand_scan(info->host[cs]->mtd);
1275 if (ret) {
1276 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1277 cs);
1278 continue;
1279 }
1280 1157
1281 ppdata.of_node = pdev->dev.of_node; 1158 nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
1282 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
1283 &ppdata, pdata->parts[cs],
1284 pdata->nr_parts[cs]);
1285 if (!ret)
1286 probe_success = 1;
1287 }
1288 1159
1289 if (!probe_success) { 1160 if (nr_parts)
1290 pxa3xx_nand_remove(pdev); 1161 return mtd_device_register(info->mtd, parts, nr_parts);
1291 return -ENODEV;
1292 } 1162 }
1293 1163
1294 return 0; 1164 return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
1295} 1165}
1296 1166
1297#ifdef CONFIG_PM 1167#ifdef CONFIG_PM
1298static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) 1168static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1299{ 1169{
1300 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1170 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1301 struct pxa3xx_nand_platform_data *pdata; 1171 struct mtd_info *mtd = info->mtd;
1302 struct mtd_info *mtd;
1303 int cs;
1304 1172
1305 pdata = pdev->dev.platform_data;
1306 if (info->state) { 1173 if (info->state) {
1307 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); 1174 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1308 return -EAGAIN; 1175 return -EAGAIN;
1309 } 1176 }
1310 1177
1311 for (cs = 0; cs < pdata->num_cs; cs++) {
1312 mtd = info->host[cs]->mtd;
1313 mtd_suspend(mtd);
1314 }
1315
1316 return 0; 1178 return 0;
1317} 1179}
1318 1180
1319static int pxa3xx_nand_resume(struct platform_device *pdev) 1181static int pxa3xx_nand_resume(struct platform_device *pdev)
1320{ 1182{
1321 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1183 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1322 struct pxa3xx_nand_platform_data *pdata; 1184 struct mtd_info *mtd = info->mtd;
1323 struct mtd_info *mtd;
1324 int cs;
1325 1185
1326 pdata = pdev->dev.platform_data; 1186 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1327 /* We don't want to handle interrupt without calling mtd routine */ 1187 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1328 disable_int(info, NDCR_INT_MASK); 1188 clk_enable(info->clk);
1329
1330 /*
1331 * Directly set the chip select to a invalid value,
1332 * then the driver would reset the timing according
1333 * to current chip select at the beginning of cmdfunc
1334 */
1335 info->cs = 0xff;
1336
1337 /*
1338 * As the spec says, the NDSR would be updated to 0x1800 when
1339 * doing the nand_clk disable/enable.
1340 * To prevent it damaging state machine of the driver, clear
1341 * all status before resume
1342 */
1343 nand_writel(info, NDSR, NDSR_MASK);
1344 for (cs = 0; cs < pdata->num_cs; cs++) {
1345 mtd = info->host[cs]->mtd;
1346 mtd_resume(mtd);
1347 }
1348 1189
1349 return 0; 1190 return 0;
1350} 1191}
@@ -1356,7 +1197,6 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
1356static struct platform_driver pxa3xx_nand_driver = { 1197static struct platform_driver pxa3xx_nand_driver = {
1357 .driver = { 1198 .driver = {
1358 .name = "pxa3xx-nand", 1199 .name = "pxa3xx-nand",
1359 .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
1360 }, 1200 },
1361 .probe = pxa3xx_nand_probe, 1201 .probe = pxa3xx_nand_probe,
1362 .remove = pxa3xx_nand_remove, 1202 .remove = pxa3xx_nand_remove,
@@ -1364,7 +1204,17 @@ static struct platform_driver pxa3xx_nand_driver = {
1364 .resume = pxa3xx_nand_resume, 1204 .resume = pxa3xx_nand_resume,
1365}; 1205};
1366 1206
1367module_platform_driver(pxa3xx_nand_driver); 1207static int __init pxa3xx_nand_init(void)
1208{
1209 return platform_driver_register(&pxa3xx_nand_driver);
1210}
1211module_init(pxa3xx_nand_init);
1212
1213static void __exit pxa3xx_nand_exit(void)
1214{
1215 platform_driver_unregister(&pxa3xx_nand_driver);
1216}
1217module_exit(pxa3xx_nand_exit);
1368 1218
1369MODULE_LICENSE("GPL"); 1219MODULE_LICENSE("GPL");
1370MODULE_DESCRIPTION("PXA3xx NAND controller driver"); 1220MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 4495f8551fa..cae2e013c98 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -22,7 +22,7 @@
22#include "r852.h" 22#include "r852.h"
23 23
24 24
25static bool r852_enable_dma = 1; 25static int r852_enable_dma = 1;
26module_param(r852_enable_dma, bool, S_IRUGO); 26module_param(r852_enable_dma, bool, S_IRUGO);
27MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); 27MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
28 28
@@ -309,6 +309,27 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
309 return r852_read_reg(dev, R852_DATALINE); 309 return r852_read_reg(dev, R852_DATALINE);
310} 310}
311 311
312
313/*
314 * Readback the buffer to verify it
315 */
316int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
317{
318 struct r852_device *dev = r852_get_dev(mtd);
319
320 /* We can't be sure about anything here... */
321 if (dev->card_unstable)
322 return -1;
323
324 /* This will never happen, unless you wired up a nand chip
325 with > 512 bytes page size to the reader */
326 if (len > SM_SECTOR_SIZE)
327 return 0;
328
329 r852_read_buf(mtd, dev->tmp_buffer, len);
330 return memcmp(buf, dev->tmp_buffer, len);
331}
332
312/* 333/*
313 * Control several chip lines & send commands 334 * Control several chip lines & send commands
314 */ 335 */
@@ -518,11 +539,14 @@ exit:
518 * nand_read_oob_syndrome assumes we can send column address - we can't 539 * nand_read_oob_syndrome assumes we can send column address - we can't
519 */ 540 */
520static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 541static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
521 int page) 542 int page, int sndcmd)
522{ 543{
523 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 544 if (sndcmd) {
545 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
546 sndcmd = 0;
547 }
524 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
525 return 0; 549 return sndcmd;
526} 550}
527 551
528/* 552/*
@@ -861,12 +885,12 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
861 chip->read_byte = r852_read_byte; 885 chip->read_byte = r852_read_byte;
862 chip->read_buf = r852_read_buf; 886 chip->read_buf = r852_read_buf;
863 chip->write_buf = r852_write_buf; 887 chip->write_buf = r852_write_buf;
888 chip->verify_buf = r852_verify_buf;
864 889
865 /* ecc */ 890 /* ecc */
866 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 891 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
867 chip->ecc.size = R852_DMA_LEN; 892 chip->ecc.size = R852_DMA_LEN;
868 chip->ecc.bytes = SM_OOB_SIZE; 893 chip->ecc.bytes = SM_OOB_SIZE;
869 chip->ecc.strength = 2;
870 chip->ecc.hwctl = r852_ecc_hwctl; 894 chip->ecc.hwctl = r852_ecc_hwctl;
871 chip->ecc.calculate = r852_ecc_calculate; 895 chip->ecc.calculate = r852_ecc_calculate;
872 chip->ecc.correct = r852_ecc_correct; 896 chip->ecc.correct = r852_ecc_correct;
@@ -1003,7 +1027,7 @@ void r852_shutdown(struct pci_dev *pci_dev)
1003} 1027}
1004 1028
1005#ifdef CONFIG_PM 1029#ifdef CONFIG_PM
1006static int r852_suspend(struct device *device) 1030int r852_suspend(struct device *device)
1007{ 1031{
1008 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1032 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1009 1033
@@ -1024,7 +1048,7 @@ static int r852_suspend(struct device *device)
1024 return 0; 1048 return 0;
1025} 1049}
1026 1050
1027static int r852_resume(struct device *device) 1051int r852_resume(struct device *device)
1028{ 1052{
1029 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1053 struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
1030 1054
@@ -1068,7 +1092,7 @@ static const struct pci_device_id r852_pci_id_tbl[] = {
1068 1092
1069MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); 1093MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
1070 1094
1071static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1095SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1072 1096
1073static struct pci_driver r852_pci_driver = { 1097static struct pci_driver r852_pci_driver = {
1074 .name = DRV_NAME, 1098 .name = DRV_NAME,
@@ -1079,7 +1103,18 @@ static struct pci_driver r852_pci_driver = {
1079 .driver.pm = &r852_pm_ops, 1103 .driver.pm = &r852_pm_ops,
1080}; 1104};
1081 1105
1082module_pci_driver(r852_pci_driver); 1106static __init int r852_module_init(void)
1107{
1108 return pci_register_driver(&r852_pci_driver);
1109}
1110
1111static void __exit r852_module_exit(void)
1112{
1113 pci_unregister_driver(&r852_pci_driver);
1114}
1115
1116module_init(r852_module_init);
1117module_exit(r852_module_exit);
1083 1118
1084MODULE_LICENSE("GPL"); 1119MODULE_LICENSE("GPL");
1085MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 1120MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index e55b5cfbe14..c9f9127ff77 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -351,7 +351,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
351 return 0; 351 return 0;
352 } 352 }
353 353
354 /* Read the syndrome pattern from the FPGA and correct the bitorder */ 354 /* Read the syndrom pattern from the FPGA and correct the bitorder */
355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); 355 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
356 for (i = 0; i < 8; i++) { 356 for (i = 0; i < 8; i++) {
357 ecc[i] = bitrev8(*rs_ecc); 357 ecc[i] = bitrev8(*rs_ecc);
@@ -380,7 +380,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
380 /* Let the library code do its magic. */ 380 /* Let the library code do its magic. */
381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); 381 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
382 if (res > 0) { 382 if (res > 0) {
383 pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); 383 DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
384 } 384 }
385 return res; 385 return res;
386} 386}
@@ -444,6 +444,7 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
444 len = mtd->writesize; 444 len = mtd->writesize;
445 buf = kmalloc(len, GFP_KERNEL); 445 buf = kmalloc(len, GFP_KERNEL);
446 if (!buf) { 446 if (!buf) {
447 printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
447 er_stat = 1; 448 er_stat = 1;
448 goto out; 449 goto out;
449 } 450 }
@@ -527,7 +528,6 @@ static int __init rtc_from4_init(void)
527 this->ecc.mode = NAND_ECC_HW_SYNDROME; 528 this->ecc.mode = NAND_ECC_HW_SYNDROME;
528 this->ecc.size = 512; 529 this->ecc.size = 512;
529 this->ecc.bytes = 8; 530 this->ecc.bytes = 8;
530 this->ecc.strength = 3;
531 /* return the status of extra status and ECC checks */ 531 /* return the status of extra status and ECC checks */
532 this->errstat = rtc_from4_errstat; 532 this->errstat = rtc_from4_errstat;
533 /* set the nand_oobinfo to support FPGA H/W error detection */ 533 /* set the nand_oobinfo to support FPGA H/W error detection */
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index df954b4dcba..4405468f196 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -21,8 +21,6 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22*/ 22*/
23 23
24#define pr_fmt(fmt) "nand-s3c2410: " fmt
25
26#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG 24#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
27#define DEBUG 25#define DEBUG
28#endif 26#endif
@@ -32,7 +30,6 @@
32#include <linux/init.h> 30#include <linux/init.h>
33#include <linux/kernel.h> 31#include <linux/kernel.h>
34#include <linux/string.h> 32#include <linux/string.h>
35#include <linux/io.h>
36#include <linux/ioport.h> 33#include <linux/ioport.h>
37#include <linux/platform_device.h> 34#include <linux/platform_device.h>
38#include <linux/delay.h> 35#include <linux/delay.h>
@@ -46,8 +43,23 @@
46#include <linux/mtd/nand_ecc.h> 43#include <linux/mtd/nand_ecc.h>
47#include <linux/mtd/partitions.h> 44#include <linux/mtd/partitions.h>
48 45
46#include <asm/io.h>
47
49#include <plat/regs-nand.h> 48#include <plat/regs-nand.h>
50#include <linux/platform_data/mtd-nand-s3c2410.h> 49#include <plat/nand.h>
50
51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
52static int hardware_ecc = 1;
53#else
54static int hardware_ecc = 0;
55#endif
56
57#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
58static const int clock_stop = 1;
59#else
60static const int clock_stop = 0;
61#endif
62
51 63
52/* new oob placement block for use with hardware ecc generation 64/* new oob placement block for use with hardware ecc generation
53 */ 65 */
@@ -97,8 +109,9 @@ enum s3c_nand_clk_state {
97 * @mtds: An array of MTD instances on this controoler. 109 * @mtds: An array of MTD instances on this controoler.
98 * @platform: The platform data for this board. 110 * @platform: The platform data for this board.
99 * @device: The platform device we bound to. 111 * @device: The platform device we bound to.
112 * @area: The IO area resource that came from request_mem_region().
100 * @clk: The clock resource for this controller. 113 * @clk: The clock resource for this controller.
101 * @regs: The area mapped for the hardware registers. 114 * @regs: The area mapped for the hardware registers described by @area.
102 * @sel_reg: Pointer to the register controlling the NAND selection. 115 * @sel_reg: Pointer to the register controlling the NAND selection.
103 * @sel_bit: The bit in @sel_reg to select the NAND chip. 116 * @sel_bit: The bit in @sel_reg to select the NAND chip.
104 * @mtd_count: The number of MTDs created from this controller. 117 * @mtd_count: The number of MTDs created from this controller.
@@ -115,6 +128,7 @@ struct s3c2410_nand_info {
115 128
116 /* device info */ 129 /* device info */
117 struct device *device; 130 struct device *device;
131 struct resource *area;
118 struct clk *clk; 132 struct clk *clk;
119 void __iomem *regs; 133 void __iomem *regs;
120 void __iomem *sel_reg; 134 void __iomem *sel_reg;
@@ -155,11 +169,7 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
155 169
156static inline int allow_clk_suspend(struct s3c2410_nand_info *info) 170static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
157{ 171{
158#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP 172 return clock_stop;
159 return 1;
160#else
161 return 0;
162#endif
163} 173}
164 174
165/** 175/**
@@ -205,8 +215,7 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
205 pr_debug("result %d from %ld, %d\n", result, clk, wanted); 215 pr_debug("result %d from %ld, %d\n", result, clk, wanted);
206 216
207 if (result > max) { 217 if (result > max) {
208 pr_err("%d ns is too big for current clock rate %ld\n", 218 printk("%d ns is too big for current clock rate %ld\n", wanted, clk);
209 wanted, clk);
210 return -1; 219 return -1;
211 } 220 }
212 221
@@ -216,7 +225,7 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
216 return result; 225 return result;
217} 226}
218 227
219#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) 228#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
220 229
221/* controller setup */ 230/* controller setup */
222 231
@@ -259,8 +268,7 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
259 } 268 }
260 269
261 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", 270 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
262 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), 271 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
263 twrph1, to_ns(twrph1, clkrate));
264 272
265 switch (info->cpu_type) { 273 switch (info->cpu_type) {
266 case TYPE_S3C2410: 274 case TYPE_S3C2410:
@@ -317,13 +325,13 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
317 if (ret < 0) 325 if (ret < 0)
318 return ret; 326 return ret;
319 327
320 switch (info->cpu_type) { 328 switch (info->cpu_type) {
321 case TYPE_S3C2410: 329 case TYPE_S3C2410:
322 default: 330 default:
323 break; 331 break;
324 332
325 case TYPE_S3C2440: 333 case TYPE_S3C2440:
326 case TYPE_S3C2412: 334 case TYPE_S3C2412:
327 /* enable the controller and de-assert nFCE */ 335 /* enable the controller and de-assert nFCE */
328 336
329 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); 337 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
@@ -442,7 +450,6 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
442 450
443/* ECC handling functions */ 451/* ECC handling functions */
444 452
445#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
446static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, 453static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
447 u_char *read_ecc, u_char *calc_ecc) 454 u_char *read_ecc, u_char *calc_ecc)
448{ 455{
@@ -456,8 +463,10 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
456 diff1 = read_ecc[1] ^ calc_ecc[1]; 463 diff1 = read_ecc[1] ^ calc_ecc[1];
457 diff2 = read_ecc[2] ^ calc_ecc[2]; 464 diff2 = read_ecc[2] ^ calc_ecc[2];
458 465
459 pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n", 466 pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n",
460 __func__, 3, read_ecc, 3, calc_ecc, 467 __func__,
468 read_ecc[0], read_ecc[1], read_ecc[2],
469 calc_ecc[0], calc_ecc[1], calc_ecc[2],
461 diff0, diff1, diff2); 470 diff0, diff1, diff2);
462 471
463 if (diff0 == 0 && diff1 == 0 && diff2 == 0) 472 if (diff0 == 0 && diff1 == 0 && diff2 == 0)
@@ -537,8 +546,7 @@ static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
537 unsigned long ctrl; 546 unsigned long ctrl;
538 547
539 ctrl = readl(info->regs + S3C2440_NFCONT); 548 ctrl = readl(info->regs + S3C2440_NFCONT);
540 writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, 549 writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT);
541 info->regs + S3C2440_NFCONT);
542} 550}
543 551
544static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) 552static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
@@ -550,8 +558,7 @@ static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
550 writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); 558 writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
551} 559}
552 560
553static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 561static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
554 u_char *ecc_code)
555{ 562{
556 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 563 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
557 564
@@ -559,13 +566,13 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
559 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); 566 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
560 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); 567 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
561 568
562 pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); 569 pr_debug("%s: returning ecc %02x%02x%02x\n", __func__,
570 ecc_code[0], ecc_code[1], ecc_code[2]);
563 571
564 return 0; 572 return 0;
565} 573}
566 574
567static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 575static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
568 u_char *ecc_code)
569{ 576{
570 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 577 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
571 unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); 578 unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
@@ -574,13 +581,12 @@ static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
574 ecc_code[1] = ecc >> 8; 581 ecc_code[1] = ecc >> 8;
575 ecc_code[2] = ecc >> 16; 582 ecc_code[2] = ecc >> 16;
576 583
577 pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code); 584 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
578 585
579 return 0; 586 return 0;
580} 587}
581 588
582static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 589static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
583 u_char *ecc_code)
584{ 590{
585 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 591 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
586 unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); 592 unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
@@ -593,7 +599,6 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
593 599
594 return 0; 600 return 0;
595} 601}
596#endif
597 602
598/* over-ride the standard functions for a little more speed. We can 603/* over-ride the standard functions for a little more speed. We can
599 * use read/write block to move the data buffers to/from the controller 604 * use read/write block to move the data buffers to/from the controller
@@ -620,15 +625,13 @@ static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
620 } 625 }
621} 626}
622 627
623static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, 628static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
624 int len)
625{ 629{
626 struct nand_chip *this = mtd->priv; 630 struct nand_chip *this = mtd->priv;
627 writesb(this->IO_ADDR_W, buf, len); 631 writesb(this->IO_ADDR_W, buf, len);
628} 632}
629 633
630static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, 634static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
631 int len)
632{ 635{
633 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 636 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
634 637
@@ -672,8 +675,7 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
672 CPUFREQ_TRANSITION_NOTIFIER); 675 CPUFREQ_TRANSITION_NOTIFIER);
673} 676}
674 677
675static inline void 678static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
676s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
677{ 679{
678 cpufreq_unregister_notifier(&info->freq_transition, 680 cpufreq_unregister_notifier(&info->freq_transition,
679 CPUFREQ_TRANSITION_NOTIFIER); 681 CPUFREQ_TRANSITION_NOTIFIER);
@@ -685,8 +687,7 @@ static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
685 return 0; 687 return 0;
686} 688}
687 689
688static inline void 690static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
689s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
690{ 691{
691} 692}
692#endif 693#endif
@@ -716,28 +717,53 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
716 pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); 717 pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
717 nand_release(&ptr->mtd); 718 nand_release(&ptr->mtd);
718 } 719 }
720
721 kfree(info->mtds);
719 } 722 }
720 723
721 /* free the common resources */ 724 /* free the common resources */
722 725
723 if (!IS_ERR(info->clk)) 726 if (info->clk != NULL && !IS_ERR(info->clk)) {
724 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); 727 s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
728 clk_put(info->clk);
729 }
730
731 if (info->regs != NULL) {
732 iounmap(info->regs);
733 info->regs = NULL;
734 }
735
736 if (info->area != NULL) {
737 release_resource(info->area);
738 kfree(info->area);
739 info->area = NULL;
740 }
741
742 kfree(info);
725 743
726 return 0; 744 return 0;
727} 745}
728 746
747const char *part_probes[] = { "cmdlinepart", NULL };
729static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, 748static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
730 struct s3c2410_nand_mtd *mtd, 749 struct s3c2410_nand_mtd *mtd,
731 struct s3c2410_nand_set *set) 750 struct s3c2410_nand_set *set)
732{ 751{
733 if (set) { 752 struct mtd_partition *part_info;
734 mtd->mtd.name = set->name; 753 int nr_part = 0;
754
755 if (set == NULL)
756 return mtd_device_register(&mtd->mtd, NULL, 0);
757
758 mtd->mtd.name = set->name;
759 nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
735 760
736 return mtd_device_parse_register(&mtd->mtd, NULL, NULL, 761 if (nr_part <= 0 && set->nr_partitions > 0) {
737 set->partitions, set->nr_partitions); 762 nr_part = set->nr_partitions;
763 part_info = set->partitions;
738 } 764 }
739 765
740 return -ENODEV; 766 return mtd_device_register(&mtd->mtd, part_info, nr_part);
741} 767}
742 768
743/** 769/**
@@ -795,7 +821,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
795 dev_info(info->device, "System booted from NAND\n"); 821 dev_info(info->device, "System booted from NAND\n");
796 822
797 break; 823 break;
798 } 824 }
799 825
800 chip->IO_ADDR_R = chip->IO_ADDR_W; 826 chip->IO_ADDR_R = chip->IO_ADDR_W;
801 827
@@ -804,31 +830,31 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
804 nmtd->mtd.owner = THIS_MODULE; 830 nmtd->mtd.owner = THIS_MODULE;
805 nmtd->set = set; 831 nmtd->set = set;
806 832
807#ifdef CONFIG_MTD_NAND_S3C2410_HWECC 833 if (hardware_ecc) {
808 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
809 chip->ecc.correct = s3c2410_nand_correct_data;
810 chip->ecc.mode = NAND_ECC_HW;
811 chip->ecc.strength = 1;
812
813 switch (info->cpu_type) {
814 case TYPE_S3C2410:
815 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
816 chip->ecc.calculate = s3c2410_nand_calculate_ecc; 834 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
817 break; 835 chip->ecc.correct = s3c2410_nand_correct_data;
836 chip->ecc.mode = NAND_ECC_HW;
818 837
819 case TYPE_S3C2412: 838 switch (info->cpu_type) {
820 chip->ecc.hwctl = s3c2412_nand_enable_hwecc; 839 case TYPE_S3C2410:
821 chip->ecc.calculate = s3c2412_nand_calculate_ecc; 840 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
822 break; 841 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
842 break;
823 843
824 case TYPE_S3C2440: 844 case TYPE_S3C2412:
825 chip->ecc.hwctl = s3c2440_nand_enable_hwecc; 845 chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
826 chip->ecc.calculate = s3c2440_nand_calculate_ecc; 846 chip->ecc.calculate = s3c2412_nand_calculate_ecc;
827 break; 847 break;
848
849 case TYPE_S3C2440:
850 chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
851 chip->ecc.calculate = s3c2440_nand_calculate_ecc;
852 break;
853
854 }
855 } else {
856 chip->ecc.mode = NAND_ECC_SOFT;
828 } 857 }
829#else
830 chip->ecc.mode = NAND_ECC_SOFT;
831#endif
832 858
833 if (set->ecc_layout != NULL) 859 if (set->ecc_layout != NULL)
834 chip->ecc.layout = set->ecc_layout; 860 chip->ecc.layout = set->ecc_layout;
@@ -854,10 +880,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
854 /* If you use u-boot BBT creation code, specifying this flag will 880 /* If you use u-boot BBT creation code, specifying this flag will
855 * let the kernel fish out the BBT from the NAND, and also skip the 881 * let the kernel fish out the BBT from the NAND, and also skip the
856 * full NAND scan that can take 1/2s or so. Little things... */ 882 * full NAND scan that can take 1/2s or so. Little things... */
857 if (set->flash_bbt) { 883 if (set->flash_bbt)
858 chip->bbt_options |= NAND_BBT_USE_FLASH; 884 chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
859 chip->options |= NAND_SKIP_BBTSCAN;
860 }
861} 885}
862 886
863/** 887/**
@@ -882,7 +906,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
882 if (chip->ecc.mode != NAND_ECC_HW) 906 if (chip->ecc.mode != NAND_ECC_HW)
883 return; 907 return;
884 908
885 /* change the behaviour depending on whether we are using 909 /* change the behaviour depending on wether we are using
886 * the large or small page nand device */ 910 * the large or small page nand device */
887 911
888 if (chip->page_shift > 10) { 912 if (chip->page_shift > 10) {
@@ -905,7 +929,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
905static int s3c24xx_nand_probe(struct platform_device *pdev) 929static int s3c24xx_nand_probe(struct platform_device *pdev)
906{ 930{
907 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 931 struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
908 enum s3c_cpu_type cpu_type; 932 enum s3c_cpu_type cpu_type;
909 struct s3c2410_nand_info *info; 933 struct s3c2410_nand_info *info;
910 struct s3c2410_nand_mtd *nmtd; 934 struct s3c2410_nand_mtd *nmtd;
911 struct s3c2410_nand_set *sets; 935 struct s3c2410_nand_set *sets;
@@ -919,7 +943,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
919 943
920 pr_debug("s3c2410_nand_probe(%p)\n", pdev); 944 pr_debug("s3c2410_nand_probe(%p)\n", pdev);
921 945
922 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 946 info = kzalloc(sizeof(*info), GFP_KERNEL);
923 if (info == NULL) { 947 if (info == NULL) {
924 dev_err(&pdev->dev, "no memory for flash info\n"); 948 dev_err(&pdev->dev, "no memory for flash info\n");
925 err = -ENOMEM; 949 err = -ENOMEM;
@@ -933,7 +957,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
933 957
934 /* get the clock source and enable it */ 958 /* get the clock source and enable it */
935 959
936 info->clk = devm_clk_get(&pdev->dev, "nand"); 960 info->clk = clk_get(&pdev->dev, "nand");
937 if (IS_ERR(info->clk)) { 961 if (IS_ERR(info->clk)) {
938 dev_err(&pdev->dev, "failed to get clock\n"); 962 dev_err(&pdev->dev, "failed to get clock\n");
939 err = -ENOENT; 963 err = -ENOENT;
@@ -945,14 +969,22 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
945 /* allocate and map the resource */ 969 /* allocate and map the resource */
946 970
947 /* currently we assume we have the one resource */ 971 /* currently we assume we have the one resource */
948 res = pdev->resource; 972 res = pdev->resource;
949 size = resource_size(res); 973 size = resource_size(res);
950 974
951 info->device = &pdev->dev; 975 info->area = request_mem_region(res->start, size, pdev->name);
952 info->platform = plat; 976
953 info->cpu_type = cpu_type; 977 if (info->area == NULL) {
978 dev_err(&pdev->dev, "cannot reserve register region\n");
979 err = -ENOENT;
980 goto exit_error;
981 }
982
983 info->device = &pdev->dev;
984 info->platform = plat;
985 info->regs = ioremap(res->start, size);
986 info->cpu_type = cpu_type;
954 987
955 info->regs = devm_request_and_ioremap(&pdev->dev, res);
956 if (info->regs == NULL) { 988 if (info->regs == NULL) {
957 dev_err(&pdev->dev, "cannot reserve register region\n"); 989 dev_err(&pdev->dev, "cannot reserve register region\n");
958 err = -EIO; 990 err = -EIO;
@@ -975,7 +1007,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
975 /* allocate our information */ 1007 /* allocate our information */
976 1008
977 size = nr_sets * sizeof(*info->mtds); 1009 size = nr_sets * sizeof(*info->mtds);
978 info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); 1010 info->mtds = kzalloc(size, GFP_KERNEL);
979 if (info->mtds == NULL) { 1011 if (info->mtds == NULL) {
980 dev_err(&pdev->dev, "failed to allocate mtd storage\n"); 1012 dev_err(&pdev->dev, "failed to allocate mtd storage\n");
981 err = -ENOMEM; 1013 err = -ENOMEM;
@@ -987,8 +1019,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
987 nmtd = info->mtds; 1019 nmtd = info->mtds;
988 1020
989 for (setno = 0; setno < nr_sets; setno++, nmtd++) { 1021 for (setno = 0; setno < nr_sets; setno++, nmtd++) {
990 pr_debug("initialising set %d (%p, info %p)\n", 1022 pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info);
991 setno, nmtd, info);
992 1023
993 s3c2410_nand_init_chip(info, nmtd, sets); 1024 s3c2410_nand_init_chip(info, nmtd, sets);
994 1025
@@ -1111,7 +1142,20 @@ static struct platform_driver s3c24xx_nand_driver = {
1111 }, 1142 },
1112}; 1143};
1113 1144
1114module_platform_driver(s3c24xx_nand_driver); 1145static int __init s3c2410_nand_init(void)
1146{
1147 printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
1148
1149 return platform_driver_register(&s3c24xx_nand_driver);
1150}
1151
1152static void __exit s3c2410_nand_exit(void)
1153{
1154 platform_driver_unregister(&s3c24xx_nand_driver);
1155}
1156
1157module_init(s3c2410_nand_init);
1158module_exit(s3c2410_nand_exit);
1115 1159
1116MODULE_LICENSE("GPL"); 1160MODULE_LICENSE("GPL");
1117MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 1161MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 57b3971c9c0..93b1f74321c 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,20 +23,10 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/completion.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
28#include <linux/dmaengine.h>
29#include <linux/dma-mapping.h>
30#include <linux/interrupt.h>
31#include <linux/io.h> 27#include <linux/io.h>
32#include <linux/of.h>
33#include <linux/of_device.h>
34#include <linux/of_mtd.h>
35#include <linux/platform_device.h> 28#include <linux/platform_device.h>
36#include <linux/pm_runtime.h>
37#include <linux/sh_dma.h>
38#include <linux/slab.h> 29#include <linux/slab.h>
39#include <linux/string.h>
40 30
41#include <linux/mtd/mtd.h> 31#include <linux/mtd/mtd.h>
42#include <linux/mtd/nand.h> 32#include <linux/mtd/nand.h>
@@ -52,17 +42,11 @@ static struct nand_ecclayout flctl_4secc_oob_16 = {
52}; 42};
53 43
54static struct nand_ecclayout flctl_4secc_oob_64 = { 44static struct nand_ecclayout flctl_4secc_oob_64 = {
55 .eccbytes = 4 * 10, 45 .eccbytes = 10,
56 .eccpos = { 46 .eccpos = {48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
57 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
58 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
59 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
60 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
61 .oobfree = { 47 .oobfree = {
62 {.offset = 2, .length = 4}, 48 {.offset = 60,
63 {.offset = 16, .length = 6}, 49 . length = 4} },
64 {.offset = 32, .length = 6},
65 {.offset = 48, .length = 6} },
66}; 50};
67 51
68static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 52static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -76,15 +60,15 @@ static struct nand_bbt_descr flctl_4secc_smallpage = {
76 60
77static struct nand_bbt_descr flctl_4secc_largepage = { 61static struct nand_bbt_descr flctl_4secc_largepage = {
78 .options = NAND_BBT_SCAN2NDPAGE, 62 .options = NAND_BBT_SCAN2NDPAGE,
79 .offs = 0, 63 .offs = 58,
80 .len = 2, 64 .len = 2,
81 .pattern = scan_ff_pattern, 65 .pattern = scan_ff_pattern,
82}; 66};
83 67
84static void empty_fifo(struct sh_flctl *flctl) 68static void empty_fifo(struct sh_flctl *flctl)
85{ 69{
86 writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl)); 70 writel(0x000c0000, FLINTDMACR(flctl)); /* FIFO Clear */
87 writel(flctl->flintdmacr_base, FLINTDMACR(flctl)); 71 writel(0x00000000, FLINTDMACR(flctl)); /* Clear Error flags */
88} 72}
89 73
90static void start_translation(struct sh_flctl *flctl) 74static void start_translation(struct sh_flctl *flctl)
@@ -113,84 +97,6 @@ static void wait_completion(struct sh_flctl *flctl)
113 writeb(0x0, FLTRCR(flctl)); 97 writeb(0x0, FLTRCR(flctl));
114} 98}
115 99
116static void flctl_dma_complete(void *param)
117{
118 struct sh_flctl *flctl = param;
119
120 complete(&flctl->dma_complete);
121}
122
123static void flctl_release_dma(struct sh_flctl *flctl)
124{
125 if (flctl->chan_fifo0_rx) {
126 dma_release_channel(flctl->chan_fifo0_rx);
127 flctl->chan_fifo0_rx = NULL;
128 }
129 if (flctl->chan_fifo0_tx) {
130 dma_release_channel(flctl->chan_fifo0_tx);
131 flctl->chan_fifo0_tx = NULL;
132 }
133}
134
135static void flctl_setup_dma(struct sh_flctl *flctl)
136{
137 dma_cap_mask_t mask;
138 struct dma_slave_config cfg;
139 struct platform_device *pdev = flctl->pdev;
140 struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
141 int ret;
142
143 if (!pdata)
144 return;
145
146 if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
147 return;
148
149 /* We can only either use DMA for both Tx and Rx or not use it at all */
150 dma_cap_zero(mask);
151 dma_cap_set(DMA_SLAVE, mask);
152
153 flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
154 (void *)pdata->slave_id_fifo0_tx);
155 dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
156 flctl->chan_fifo0_tx);
157
158 if (!flctl->chan_fifo0_tx)
159 return;
160
161 memset(&cfg, 0, sizeof(cfg));
162 cfg.slave_id = pdata->slave_id_fifo0_tx;
163 cfg.direction = DMA_MEM_TO_DEV;
164 cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
165 cfg.src_addr = 0;
166 ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
167 if (ret < 0)
168 goto err;
169
170 flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
171 (void *)pdata->slave_id_fifo0_rx);
172 dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
173 flctl->chan_fifo0_rx);
174
175 if (!flctl->chan_fifo0_rx)
176 goto err;
177
178 cfg.slave_id = pdata->slave_id_fifo0_rx;
179 cfg.direction = DMA_DEV_TO_MEM;
180 cfg.dst_addr = 0;
181 cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
182 ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
183 if (ret < 0)
184 goto err;
185
186 init_completion(&flctl->dma_complete);
187
188 return;
189
190err:
191 flctl_release_dma(flctl);
192}
193
194static void set_addr(struct mtd_info *mtd, int column, int page_addr) 100static void set_addr(struct mtd_info *mtd, int column, int page_addr)
195{ 101{
196 struct sh_flctl *flctl = mtd_to_flctl(mtd); 102 struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -251,56 +157,27 @@ static void wait_wfifo_ready(struct sh_flctl *flctl)
251 timeout_error(flctl, __func__); 157 timeout_error(flctl, __func__);
252} 158}
253 159
254static enum flctl_ecc_res_t wait_recfifo_ready 160static int wait_recfifo_ready(struct sh_flctl *flctl, int sector_number)
255 (struct sh_flctl *flctl, int sector_number)
256{ 161{
257 uint32_t timeout = LOOP_TIMEOUT_MAX; 162 uint32_t timeout = LOOP_TIMEOUT_MAX;
163 int checked[4];
258 void __iomem *ecc_reg[4]; 164 void __iomem *ecc_reg[4];
259 int i; 165 int i;
260 int state = FL_SUCCESS;
261 uint32_t data, size; 166 uint32_t data, size;
262 167
263 /* 168 memset(checked, 0, sizeof(checked));
264 * First this loops checks in FLDTCNTR if we are ready to read out the 169
265 * oob data. This is the case if either all went fine without errors or
266 * if the bottom part of the loop corrected the errors or marked them as
267 * uncorrectable and the controller is given time to push the data into
268 * the FIFO.
269 */
270 while (timeout--) { 170 while (timeout--) {
271 /* check if all is ok and we can read out the OOB */
272 size = readl(FLDTCNTR(flctl)) >> 24; 171 size = readl(FLDTCNTR(flctl)) >> 24;
273 if ((size & 0xFF) == 4) 172 if (size & 0xFF)
274 return state; 173 return 0; /* success */
275
276 /* check if a correction code has been calculated */
277 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
278 /*
279 * either we wait for the fifo to be filled or a
280 * correction pattern is being generated
281 */
282 udelay(1);
283 continue;
284 }
285 174
286 /* check for an uncorrectable error */ 175 if (readl(FL4ECCCR(flctl)) & _4ECCFA)
287 if (readl(FL4ECCCR(flctl)) & _4ECCFA) { 176 return 1; /* can't correct */
288 /* check if we face a non-empty page */
289 for (i = 0; i < 512; i++) {
290 if (flctl->done_buff[i] != 0xff) {
291 state = FL_ERROR; /* can't correct */
292 break;
293 }
294 }
295
296 if (state == FL_SUCCESS)
297 dev_dbg(&flctl->pdev->dev,
298 "reading empty sector %d, ecc error ignored\n",
299 sector_number);
300 177
301 writel(0, FL4ECCCR(flctl)); 178 udelay(1);
179 if (!(readl(FL4ECCCR(flctl)) & _4ECCEND))
302 continue; 180 continue;
303 }
304 181
305 /* start error correction */ 182 /* start error correction */
306 ecc_reg[0] = FL4ECCRESULT0(flctl); 183 ecc_reg[0] = FL4ECCRESULT0(flctl);
@@ -309,26 +186,28 @@ static enum flctl_ecc_res_t wait_recfifo_ready
309 ecc_reg[3] = FL4ECCRESULT3(flctl); 186 ecc_reg[3] = FL4ECCRESULT3(flctl);
310 187
311 for (i = 0; i < 3; i++) { 188 for (i = 0; i < 3; i++) {
312 uint8_t org;
313 unsigned int index;
314
315 data = readl(ecc_reg[i]); 189 data = readl(ecc_reg[i]);
316 190 if (data != INIT_FL4ECCRESULT_VAL && !checked[i]) {
317 if (flctl->page_size) 191 uint8_t org;
318 index = (512 * sector_number) + 192 int index;
319 (data >> 16); 193
320 else 194 if (flctl->page_size)
321 index = data >> 16; 195 index = (512 * sector_number) +
322 196 (data >> 16);
323 org = flctl->done_buff[index]; 197 else
324 flctl->done_buff[index] = org ^ (data & 0xFF); 198 index = data >> 16;
199
200 org = flctl->done_buff[index];
201 flctl->done_buff[index] = org ^ (data & 0xFF);
202 checked[i] = 1;
203 }
325 } 204 }
326 state = FL_REPAIRABLE; 205
327 writel(0, FL4ECCCR(flctl)); 206 writel(0, FL4ECCCR(flctl));
328 } 207 }
329 208
330 timeout_error(flctl, __func__); 209 timeout_error(flctl, __func__);
331 return FL_TIMEOUT; /* timeout */ 210 return 1; /* timeout */
332} 211}
333 212
334static void wait_wecfifo_ready(struct sh_flctl *flctl) 213static void wait_wecfifo_ready(struct sh_flctl *flctl)
@@ -346,70 +225,6 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
346 timeout_error(flctl, __func__); 225 timeout_error(flctl, __func__);
347} 226}
348 227
349static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
350 int len, enum dma_data_direction dir)
351{
352 struct dma_async_tx_descriptor *desc = NULL;
353 struct dma_chan *chan;
354 enum dma_transfer_direction tr_dir;
355 dma_addr_t dma_addr;
356 dma_cookie_t cookie = -EINVAL;
357 uint32_t reg;
358 int ret;
359
360 if (dir == DMA_FROM_DEVICE) {
361 chan = flctl->chan_fifo0_rx;
362 tr_dir = DMA_DEV_TO_MEM;
363 } else {
364 chan = flctl->chan_fifo0_tx;
365 tr_dir = DMA_MEM_TO_DEV;
366 }
367
368 dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
369
370 if (dma_addr)
371 desc = dmaengine_prep_slave_single(chan, dma_addr, len,
372 tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
373
374 if (desc) {
375 reg = readl(FLINTDMACR(flctl));
376 reg |= DREQ0EN;
377 writel(reg, FLINTDMACR(flctl));
378
379 desc->callback = flctl_dma_complete;
380 desc->callback_param = flctl;
381 cookie = dmaengine_submit(desc);
382
383 dma_async_issue_pending(chan);
384 } else {
385 /* DMA failed, fall back to PIO */
386 flctl_release_dma(flctl);
387 dev_warn(&flctl->pdev->dev,
388 "DMA failed, falling back to PIO\n");
389 ret = -EIO;
390 goto out;
391 }
392
393 ret =
394 wait_for_completion_timeout(&flctl->dma_complete,
395 msecs_to_jiffies(3000));
396
397 if (ret <= 0) {
398 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
399 dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
400 }
401
402out:
403 reg = readl(FLINTDMACR(flctl));
404 reg &= ~DREQ0EN;
405 writel(reg, FLINTDMACR(flctl));
406
407 dma_unmap_single(chan->device->dev, dma_addr, len, dir);
408
409 /* ret > 0 is success */
410 return ret;
411}
412
413static void read_datareg(struct sh_flctl *flctl, int offset) 228static void read_datareg(struct sh_flctl *flctl, int offset)
414{ 229{
415 unsigned long data; 230 unsigned long data;
@@ -425,84 +240,50 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
425{ 240{
426 int i, len_4align; 241 int i, len_4align;
427 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; 242 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
243 void *fifo_addr = (void *)FLDTFIFO(flctl);
428 244
429 len_4align = (rlen + 3) / 4; 245 len_4align = (rlen + 3) / 4;
430 246
431 /* initiate DMA transfer */
432 if (flctl->chan_fifo0_rx && rlen >= 32 &&
433 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
434 goto convert; /* DMA success */
435
436 /* do polling transfer */
437 for (i = 0; i < len_4align; i++) { 247 for (i = 0; i < len_4align; i++) {
438 wait_rfifo_ready(flctl); 248 wait_rfifo_ready(flctl);
439 buf[i] = readl(FLDTFIFO(flctl)); 249 buf[i] = readl(fifo_addr);
440 }
441
442convert:
443 for (i = 0; i < len_4align; i++)
444 buf[i] = be32_to_cpu(buf[i]); 250 buf[i] = be32_to_cpu(buf[i]);
251 }
445} 252}
446 253
447static enum flctl_ecc_res_t read_ecfiforeg 254static int read_ecfiforeg(struct sh_flctl *flctl, uint8_t *buff, int sector)
448 (struct sh_flctl *flctl, uint8_t *buff, int sector)
449{ 255{
450 int i; 256 int i;
451 enum flctl_ecc_res_t res;
452 unsigned long *ecc_buf = (unsigned long *)buff; 257 unsigned long *ecc_buf = (unsigned long *)buff;
258 void *fifo_addr = (void *)FLECFIFO(flctl);
453 259
454 res = wait_recfifo_ready(flctl , sector); 260 for (i = 0; i < 4; i++) {
455 261 if (wait_recfifo_ready(flctl , sector))
456 if (res != FL_ERROR) { 262 return 1;
457 for (i = 0; i < 4; i++) { 263 ecc_buf[i] = readl(fifo_addr);
458 ecc_buf[i] = readl(FLECFIFO(flctl)); 264 ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
459 ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
460 }
461 } 265 }
462 266
463 return res; 267 return 0;
464} 268}
465 269
466static void write_fiforeg(struct sh_flctl *flctl, int rlen, 270static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
467 unsigned int offset)
468{ 271{
469 int i, len_4align; 272 int i, len_4align;
470 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; 273 unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
274 void *fifo_addr = (void *)FLDTFIFO(flctl);
471 275
472 len_4align = (rlen + 3) / 4; 276 len_4align = (rlen + 3) / 4;
473 for (i = 0; i < len_4align; i++) { 277 for (i = 0; i < len_4align; i++) {
474 wait_wfifo_ready(flctl); 278 wait_wfifo_ready(flctl);
475 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl)); 279 writel(cpu_to_be32(data[i]), fifo_addr);
476 }
477}
478
479static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
480 unsigned int offset)
481{
482 int i, len_4align;
483 unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
484
485 len_4align = (rlen + 3) / 4;
486
487 for (i = 0; i < len_4align; i++)
488 buf[i] = cpu_to_be32(buf[i]);
489
490 /* initiate DMA transfer */
491 if (flctl->chan_fifo0_tx && rlen >= 32 &&
492 flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
493 return; /* DMA success */
494
495 /* do polling transfer */
496 for (i = 0; i < len_4align; i++) {
497 wait_wecfifo_ready(flctl);
498 writel(buf[i], FLECFIFO(flctl));
499 } 280 }
500} 281}
501 282
502static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) 283static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
503{ 284{
504 struct sh_flctl *flctl = mtd_to_flctl(mtd); 285 struct sh_flctl *flctl = mtd_to_flctl(mtd);
505 uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT; 286 uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT;
506 uint32_t flcmdcr_val, addr_len_bytes = 0; 287 uint32_t flcmdcr_val, addr_len_bytes = 0;
507 288
508 /* Set SNAND bit if page size is 2048byte */ 289 /* Set SNAND bit if page size is 2048byte */
@@ -522,7 +303,6 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
522 break; 303 break;
523 case NAND_CMD_READ0: 304 case NAND_CMD_READ0:
524 case NAND_CMD_READOOB: 305 case NAND_CMD_READOOB:
525 case NAND_CMD_RNDOUT:
526 addr_len_bytes = flctl->rw_ADRCNT; 306 addr_len_bytes = flctl->rw_ADRCNT;
527 flcmdcr_val |= CDSRC_E; 307 flcmdcr_val |= CDSRC_E;
528 if (flctl->chip.options & NAND_BUSWIDTH_16) 308 if (flctl->chip.options & NAND_BUSWIDTH_16)
@@ -540,7 +320,6 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
540 break; 320 break;
541 case NAND_CMD_READID: 321 case NAND_CMD_READID:
542 flcmncr_val &= ~SNAND_E; 322 flcmncr_val &= ~SNAND_E;
543 flcmdcr_val |= CDSRC_E;
544 addr_len_bytes = ADRCNT_1; 323 addr_len_bytes = ADRCNT_1;
545 break; 324 break;
546 case NAND_CMD_STATUS: 325 case NAND_CMD_STATUS:
@@ -562,67 +341,75 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
562} 341}
563 342
564static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 343static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
565 uint8_t *buf, int oob_required, int page) 344 uint8_t *buf, int page)
566{ 345{
567 chip->read_buf(mtd, buf, mtd->writesize); 346 int i, eccsize = chip->ecc.size;
568 if (oob_required) 347 int eccbytes = chip->ecc.bytes;
569 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 348 int eccsteps = chip->ecc.steps;
349 uint8_t *p = buf;
350 struct sh_flctl *flctl = mtd_to_flctl(mtd);
351
352 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
353 chip->read_buf(mtd, p, eccsize);
354
355 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
356 if (flctl->hwecc_cant_correct[i])
357 mtd->ecc_stats.failed++;
358 else
359 mtd->ecc_stats.corrected += 0;
360 }
361
570 return 0; 362 return 0;
571} 363}
572 364
573static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 365static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
574 const uint8_t *buf, int oob_required) 366 const uint8_t *buf)
575{ 367{
576 chip->write_buf(mtd, buf, mtd->writesize); 368 int i, eccsize = chip->ecc.size;
577 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 369 int eccbytes = chip->ecc.bytes;
578 return 0; 370 int eccsteps = chip->ecc.steps;
371 const uint8_t *p = buf;
372
373 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
374 chip->write_buf(mtd, p, eccsize);
579} 375}
580 376
581static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr) 377static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
582{ 378{
583 struct sh_flctl *flctl = mtd_to_flctl(mtd); 379 struct sh_flctl *flctl = mtd_to_flctl(mtd);
584 int sector, page_sectors; 380 int sector, page_sectors;
585 enum flctl_ecc_res_t ecc_result;
586
587 page_sectors = flctl->page_size ? 4 : 1;
588 381
589 set_cmd_regs(mtd, NAND_CMD_READ0, 382 if (flctl->page_size)
590 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); 383 page_sectors = 4;
384 else
385 page_sectors = 1;
591 386
592 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT, 387 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
593 FLCMNCR(flctl)); 388 FLCMNCR(flctl));
594 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
595 writel(page_addr << 2, FLADR(flctl));
596 389
597 empty_fifo(flctl); 390 set_cmd_regs(mtd, NAND_CMD_READ0,
598 start_translation(flctl); 391 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
599 392
600 for (sector = 0; sector < page_sectors; sector++) { 393 for (sector = 0; sector < page_sectors; sector++) {
394 int ret;
395
396 empty_fifo(flctl);
397 writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
398 writel(page_addr << 2 | sector, FLADR(flctl));
399
400 start_translation(flctl);
601 read_fiforeg(flctl, 512, 512 * sector); 401 read_fiforeg(flctl, 512, 512 * sector);
602 402
603 ecc_result = read_ecfiforeg(flctl, 403 ret = read_ecfiforeg(flctl,
604 &flctl->done_buff[mtd->writesize + 16 * sector], 404 &flctl->done_buff[mtd->writesize + 16 * sector],
605 sector); 405 sector);
606 406
607 switch (ecc_result) { 407 if (ret)
608 case FL_REPAIRABLE: 408 flctl->hwecc_cant_correct[sector] = 1;
609 dev_info(&flctl->pdev->dev,
610 "applied ecc on page 0x%x", page_addr);
611 flctl->mtd.ecc_stats.corrected++;
612 break;
613 case FL_ERROR:
614 dev_warn(&flctl->pdev->dev,
615 "page 0x%x contains corrupted data\n",
616 page_addr);
617 flctl->mtd.ecc_stats.failed++;
618 break;
619 default:
620 ;
621 }
622 }
623
624 wait_completion(flctl);
625 409
410 writel(0x0, FL4ECCCR(flctl));
411 wait_completion(flctl);
412 }
626 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT), 413 writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
627 FLCMNCR(flctl)); 414 FLCMNCR(flctl));
628} 415}
@@ -630,20 +417,30 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
630static void execmd_read_oob(struct mtd_info *mtd, int page_addr) 417static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
631{ 418{
632 struct sh_flctl *flctl = mtd_to_flctl(mtd); 419 struct sh_flctl *flctl = mtd_to_flctl(mtd);
633 int page_sectors = flctl->page_size ? 4 : 1;
634 int i;
635 420
636 set_cmd_regs(mtd, NAND_CMD_READ0, 421 set_cmd_regs(mtd, NAND_CMD_READ0,
637 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0); 422 (NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
638 423
639 empty_fifo(flctl); 424 empty_fifo(flctl);
425 if (flctl->page_size) {
426 int i;
427 /* In case that the page size is 2k */
428 for (i = 0; i < 16 * 3; i++)
429 flctl->done_buff[i] = 0xFF;
640 430
641 for (i = 0; i < page_sectors; i++) { 431 set_addr(mtd, 3 * 528 + 512, page_addr);
642 set_addr(mtd, (512 + 16) * i + 512 , page_addr);
643 writel(16, FLDTCNTR(flctl)); 432 writel(16, FLDTCNTR(flctl));
644 433
645 start_translation(flctl); 434 start_translation(flctl);
646 read_fiforeg(flctl, 16, 16 * i); 435 read_fiforeg(flctl, 16, 16 * 3);
436 wait_completion(flctl);
437 } else {
438 /* In case that the page size is 512b */
439 set_addr(mtd, 512, page_addr);
440 writel(16, FLDTCNTR(flctl));
441
442 start_translation(flctl);
443 read_fiforeg(flctl, 16, 0);
647 wait_completion(flctl); 444 wait_completion(flctl);
648 } 445 }
649} 446}
@@ -651,26 +448,34 @@ static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
651static void execmd_write_page_sector(struct mtd_info *mtd) 448static void execmd_write_page_sector(struct mtd_info *mtd)
652{ 449{
653 struct sh_flctl *flctl = mtd_to_flctl(mtd); 450 struct sh_flctl *flctl = mtd_to_flctl(mtd);
654 int page_addr = flctl->seqin_page_addr; 451 int i, page_addr = flctl->seqin_page_addr;
655 int sector, page_sectors; 452 int sector, page_sectors;
656 453
657 page_sectors = flctl->page_size ? 4 : 1; 454 if (flctl->page_size)
455 page_sectors = 4;
456 else
457 page_sectors = 1;
458
459 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
658 460
659 set_cmd_regs(mtd, NAND_CMD_PAGEPROG, 461 set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
660 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); 462 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
661 463
662 empty_fifo(flctl);
663 writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
664 writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
665 writel(page_addr << 2, FLADR(flctl));
666 start_translation(flctl);
667
668 for (sector = 0; sector < page_sectors; sector++) { 464 for (sector = 0; sector < page_sectors; sector++) {
465 empty_fifo(flctl);
466 writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
467 writel(page_addr << 2 | sector, FLADR(flctl));
468
469 start_translation(flctl);
669 write_fiforeg(flctl, 512, 512 * sector); 470 write_fiforeg(flctl, 512, 512 * sector);
670 write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector); 471
472 for (i = 0; i < 4; i++) {
473 wait_wecfifo_ready(flctl); /* wait for write ready */
474 writel(0xFFFFFFFF, FLECFIFO(flctl));
475 }
476 wait_completion(flctl);
671 } 477 }
672 478
673 wait_completion(flctl);
674 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl)); 479 writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
675} 480}
676 481
@@ -680,12 +485,18 @@ static void execmd_write_oob(struct mtd_info *mtd)
680 int page_addr = flctl->seqin_page_addr; 485 int page_addr = flctl->seqin_page_addr;
681 int sector, page_sectors; 486 int sector, page_sectors;
682 487
683 page_sectors = flctl->page_size ? 4 : 1; 488 if (flctl->page_size) {
489 sector = 3;
490 page_sectors = 4;
491 } else {
492 sector = 0;
493 page_sectors = 1;
494 }
684 495
685 set_cmd_regs(mtd, NAND_CMD_PAGEPROG, 496 set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
686 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN); 497 (NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
687 498
688 for (sector = 0; sector < page_sectors; sector++) { 499 for (; sector < page_sectors; sector++) {
689 empty_fifo(flctl); 500 empty_fifo(flctl);
690 set_addr(mtd, sector * 528 + 512, page_addr); 501 set_addr(mtd, sector * 528 + 512, page_addr);
691 writel(16, FLDTCNTR(flctl)); /* set read size */ 502 writel(16, FLDTCNTR(flctl)); /* set read size */
@@ -702,8 +513,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
702 struct sh_flctl *flctl = mtd_to_flctl(mtd); 513 struct sh_flctl *flctl = mtd_to_flctl(mtd);
703 uint32_t read_cmd = 0; 514 uint32_t read_cmd = 0;
704 515
705 pm_runtime_get_sync(&flctl->pdev->dev);
706
707 flctl->read_bytes = 0; 516 flctl->read_bytes = 0;
708 if (command != NAND_CMD_PAGEPROG) 517 if (command != NAND_CMD_PAGEPROG)
709 flctl->index = 0; 518 flctl->index = 0;
@@ -716,6 +525,7 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
716 execmd_read_page_sector(mtd, page_addr); 525 execmd_read_page_sector(mtd, page_addr);
717 break; 526 break;
718 } 527 }
528 empty_fifo(flctl);
719 if (flctl->page_size) 529 if (flctl->page_size)
720 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) 530 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
721 | command); 531 | command);
@@ -737,6 +547,7 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
737 break; 547 break;
738 } 548 }
739 549
550 empty_fifo(flctl);
740 if (flctl->page_size) { 551 if (flctl->page_size) {
741 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) 552 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
742 | NAND_CMD_READ0); 553 | NAND_CMD_READ0);
@@ -748,35 +559,15 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
748 flctl->read_bytes = mtd->oobsize; 559 flctl->read_bytes = mtd->oobsize;
749 goto read_normal_exit; 560 goto read_normal_exit;
750 561
751 case NAND_CMD_RNDOUT:
752 if (flctl->hwecc)
753 break;
754
755 if (flctl->page_size)
756 set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
757 | command);
758 else
759 set_cmd_regs(mtd, command, command);
760
761 set_addr(mtd, column, 0);
762
763 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
764 goto read_normal_exit;
765
766 case NAND_CMD_READID: 562 case NAND_CMD_READID:
563 empty_fifo(flctl);
767 set_cmd_regs(mtd, command, command); 564 set_cmd_regs(mtd, command, command);
565 set_addr(mtd, 0, 0);
768 566
769 /* READID is always performed using an 8-bit bus */ 567 flctl->read_bytes = 4;
770 if (flctl->chip.options & NAND_BUSWIDTH_16)
771 column <<= 1;
772 set_addr(mtd, column, 0);
773
774 flctl->read_bytes = 8;
775 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ 568 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
776 empty_fifo(flctl);
777 start_translation(flctl); 569 start_translation(flctl);
778 read_fiforeg(flctl, flctl->read_bytes, 0); 570 read_datareg(flctl, 0); /* read and end */
779 wait_completion(flctl);
780 break; 571 break;
781 572
782 case NAND_CMD_ERASE1: 573 case NAND_CMD_ERASE1:
@@ -859,57 +650,29 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
859 default: 650 default:
860 break; 651 break;
861 } 652 }
862 goto runtime_exit; 653 return;
863 654
864read_normal_exit: 655read_normal_exit:
865 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ 656 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
866 empty_fifo(flctl);
867 start_translation(flctl); 657 start_translation(flctl);
868 read_fiforeg(flctl, flctl->read_bytes, 0); 658 read_fiforeg(flctl, flctl->read_bytes, 0);
869 wait_completion(flctl); 659 wait_completion(flctl);
870runtime_exit:
871 pm_runtime_put_sync(&flctl->pdev->dev);
872 return; 660 return;
873} 661}
874 662
875static void flctl_select_chip(struct mtd_info *mtd, int chipnr) 663static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
876{ 664{
877 struct sh_flctl *flctl = mtd_to_flctl(mtd); 665 struct sh_flctl *flctl = mtd_to_flctl(mtd);
878 int ret; 666 uint32_t flcmncr_val = readl(FLCMNCR(flctl));
879 667
880 switch (chipnr) { 668 switch (chipnr) {
881 case -1: 669 case -1:
882 flctl->flcmncr_base &= ~CE0_ENABLE; 670 flcmncr_val &= ~CE0_ENABLE;
883 671 writel(flcmncr_val, FLCMNCR(flctl));
884 pm_runtime_get_sync(&flctl->pdev->dev);
885 writel(flctl->flcmncr_base, FLCMNCR(flctl));
886
887 if (flctl->qos_request) {
888 dev_pm_qos_remove_request(&flctl->pm_qos);
889 flctl->qos_request = 0;
890 }
891
892 pm_runtime_put_sync(&flctl->pdev->dev);
893 break; 672 break;
894 case 0: 673 case 0:
895 flctl->flcmncr_base |= CE0_ENABLE; 674 flcmncr_val |= CE0_ENABLE;
896 675 writel(flcmncr_val, FLCMNCR(flctl));
897 if (!flctl->qos_request) {
898 ret = dev_pm_qos_add_request(&flctl->pdev->dev,
899 &flctl->pm_qos,
900 DEV_PM_QOS_LATENCY,
901 100);
902 if (ret < 0)
903 dev_err(&flctl->pdev->dev,
904 "PM QoS request failed: %d\n", ret);
905 flctl->qos_request = 1;
906 }
907
908 if (flctl->holden) {
909 pm_runtime_get_sync(&flctl->pdev->dev);
910 writel(HOLDEN, FLHOLDCR(flctl));
911 pm_runtime_put_sync(&flctl->pdev->dev);
912 }
913 break; 676 break;
914 default: 677 default:
915 BUG(); 678 BUG();
@@ -919,36 +682,57 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
919static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 682static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
920{ 683{
921 struct sh_flctl *flctl = mtd_to_flctl(mtd); 684 struct sh_flctl *flctl = mtd_to_flctl(mtd);
685 int i, index = flctl->index;
922 686
923 memcpy(&flctl->done_buff[flctl->index], buf, len); 687 for (i = 0; i < len; i++)
688 flctl->done_buff[index + i] = buf[i];
924 flctl->index += len; 689 flctl->index += len;
925} 690}
926 691
927static uint8_t flctl_read_byte(struct mtd_info *mtd) 692static uint8_t flctl_read_byte(struct mtd_info *mtd)
928{ 693{
929 struct sh_flctl *flctl = mtd_to_flctl(mtd); 694 struct sh_flctl *flctl = mtd_to_flctl(mtd);
695 int index = flctl->index;
930 uint8_t data; 696 uint8_t data;
931 697
932 data = flctl->done_buff[flctl->index]; 698 data = flctl->done_buff[index];
933 flctl->index++; 699 flctl->index++;
934 return data; 700 return data;
935} 701}
936 702
937static uint16_t flctl_read_word(struct mtd_info *mtd) 703static uint16_t flctl_read_word(struct mtd_info *mtd)
938{ 704{
939 struct sh_flctl *flctl = mtd_to_flctl(mtd); 705 struct sh_flctl *flctl = mtd_to_flctl(mtd);
940 uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index]; 706 int index = flctl->index;
707 uint16_t data;
708 uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
941 709
942 flctl->index += 2; 710 data = *buf;
943 return *buf; 711 flctl->index += 2;
712 return data;
944} 713}
945 714
946static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 715static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
947{ 716{
948 struct sh_flctl *flctl = mtd_to_flctl(mtd); 717 int i;
949 718
950 memcpy(buf, &flctl->done_buff[flctl->index], len); 719 for (i = 0; i < len; i++)
951 flctl->index += len; 720 buf[i] = flctl_read_byte(mtd);
721}
722
723static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
724{
725 int i;
726
727 for (i = 0; i < len; i++)
728 if (buf[i] != flctl_read_byte(mtd))
729 return -EFAULT;
730 return 0;
731}
732
733static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
734{
735 writel(val, FLCMNCR(flctl));
952} 736}
953 737
954static int flctl_chip_init_tail(struct mtd_info *mtd) 738static int flctl_chip_init_tail(struct mtd_info *mtd)
@@ -997,13 +781,13 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
997 781
998 chip->ecc.size = 512; 782 chip->ecc.size = 512;
999 chip->ecc.bytes = 10; 783 chip->ecc.bytes = 10;
1000 chip->ecc.strength = 4;
1001 chip->ecc.read_page = flctl_read_page_hwecc; 784 chip->ecc.read_page = flctl_read_page_hwecc;
1002 chip->ecc.write_page = flctl_write_page_hwecc; 785 chip->ecc.write_page = flctl_write_page_hwecc;
1003 chip->ecc.mode = NAND_ECC_HW; 786 chip->ecc.mode = NAND_ECC_HW;
1004 787
1005 /* 4 symbols ECC enabled */ 788 /* 4 symbols ECC enabled */
1006 flctl->flcmncr_base |= _4ECCEN; 789 writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02,
790 FLCMNCR(flctl));
1007 } else { 791 } else {
1008 chip->ecc.mode = NAND_ECC_SOFT; 792 chip->ecc.mode = NAND_ECC_SOFT;
1009 } 793 }
@@ -1011,84 +795,7 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
1011 return 0; 795 return 0;
1012} 796}
1013 797
1014static irqreturn_t flctl_handle_flste(int irq, void *dev_id) 798static int __devinit flctl_probe(struct platform_device *pdev)
1015{
1016 struct sh_flctl *flctl = dev_id;
1017
1018 dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
1019 writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
1020
1021 return IRQ_HANDLED;
1022}
1023
1024#ifdef CONFIG_OF
1025struct flctl_soc_config {
1026 unsigned long flcmncr_val;
1027 unsigned has_hwecc:1;
1028 unsigned use_holden:1;
1029};
1030
1031static struct flctl_soc_config flctl_sh7372_config = {
1032 .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
1033 .has_hwecc = 1,
1034 .use_holden = 1,
1035};
1036
1037static const struct of_device_id of_flctl_match[] = {
1038 { .compatible = "renesas,shmobile-flctl-sh7372",
1039 .data = &flctl_sh7372_config },
1040 {},
1041};
1042MODULE_DEVICE_TABLE(of, of_flctl_match);
1043
1044static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1045{
1046 const struct of_device_id *match;
1047 struct flctl_soc_config *config;
1048 struct sh_flctl_platform_data *pdata;
1049 struct device_node *dn = dev->of_node;
1050 int ret;
1051
1052 match = of_match_device(of_flctl_match, dev);
1053 if (match)
1054 config = (struct flctl_soc_config *)match->data;
1055 else {
1056 dev_err(dev, "%s: no OF configuration attached\n", __func__);
1057 return NULL;
1058 }
1059
1060 pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
1061 GFP_KERNEL);
1062 if (!pdata) {
1063 dev_err(dev, "%s: failed to allocate config data\n", __func__);
1064 return NULL;
1065 }
1066
1067 /* set SoC specific options */
1068 pdata->flcmncr_val = config->flcmncr_val;
1069 pdata->has_hwecc = config->has_hwecc;
1070 pdata->use_holden = config->use_holden;
1071
1072 /* parse user defined options */
1073 ret = of_get_nand_bus_width(dn);
1074 if (ret == 16)
1075 pdata->flcmncr_val |= SEL_16BIT;
1076 else if (ret != 8) {
1077 dev_err(dev, "%s: invalid bus width\n", __func__);
1078 return NULL;
1079 }
1080
1081 return pdata;
1082}
1083#else /* CONFIG_OF */
1084#define of_flctl_match NULL
1085static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1086{
1087 return NULL;
1088}
1089#endif /* CONFIG_OF */
1090
1091static int flctl_probe(struct platform_device *pdev)
1092{ 799{
1093 struct resource *res; 800 struct resource *res;
1094 struct sh_flctl *flctl; 801 struct sh_flctl *flctl;
@@ -1096,8 +803,12 @@ static int flctl_probe(struct platform_device *pdev)
1096 struct nand_chip *nand; 803 struct nand_chip *nand;
1097 struct sh_flctl_platform_data *pdata; 804 struct sh_flctl_platform_data *pdata;
1098 int ret = -ENXIO; 805 int ret = -ENXIO;
1099 int irq; 806
1100 struct mtd_part_parser_data ppdata = {}; 807 pdata = pdev->dev.platform_data;
808 if (pdata == NULL) {
809 dev_err(&pdev->dev, "no platform data defined\n");
810 return -EINVAL;
811 }
1101 812
1102 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL); 813 flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
1103 if (!flctl) { 814 if (!flctl) {
@@ -1108,36 +819,13 @@ static int flctl_probe(struct platform_device *pdev)
1108 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 819 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1109 if (!res) { 820 if (!res) {
1110 dev_err(&pdev->dev, "failed to get I/O memory\n"); 821 dev_err(&pdev->dev, "failed to get I/O memory\n");
1111 goto err_iomap; 822 goto err;
1112 } 823 }
1113 824
1114 flctl->reg = ioremap(res->start, resource_size(res)); 825 flctl->reg = ioremap(res->start, resource_size(res));
1115 if (flctl->reg == NULL) { 826 if (flctl->reg == NULL) {
1116 dev_err(&pdev->dev, "failed to remap I/O memory\n"); 827 dev_err(&pdev->dev, "failed to remap I/O memory\n");
1117 goto err_iomap; 828 goto err;
1118 }
1119
1120 irq = platform_get_irq(pdev, 0);
1121 if (irq < 0) {
1122 dev_err(&pdev->dev, "failed to get flste irq data\n");
1123 goto err_flste;
1124 }
1125
1126 ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
1127 if (ret) {
1128 dev_err(&pdev->dev, "request interrupt failed.\n");
1129 goto err_flste;
1130 }
1131
1132 if (pdev->dev.of_node)
1133 pdata = flctl_parse_dt(&pdev->dev);
1134 else
1135 pdata = pdev->dev.platform_data;
1136
1137 if (!pdata) {
1138 dev_err(&pdev->dev, "no setup data defined\n");
1139 ret = -EINVAL;
1140 goto err_pdata;
1141 } 829 }
1142 830
1143 platform_set_drvdata(pdev, flctl); 831 platform_set_drvdata(pdev, flctl);
@@ -1146,9 +834,10 @@ static int flctl_probe(struct platform_device *pdev)
1146 flctl_mtd->priv = nand; 834 flctl_mtd->priv = nand;
1147 flctl->pdev = pdev; 835 flctl->pdev = pdev;
1148 flctl->hwecc = pdata->has_hwecc; 836 flctl->hwecc = pdata->has_hwecc;
1149 flctl->holden = pdata->use_holden; 837
1150 flctl->flcmncr_base = pdata->flcmncr_val; 838 flctl_register_init(flctl, pdata->flcmncr_val);
1151 flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE; 839
840 nand->options = NAND_NO_AUTOINCR;
1152 841
1153 /* Set address of hardware control function */ 842 /* Set address of hardware control function */
1154 /* 20 us command delay time */ 843 /* 20 us command delay time */
@@ -1157,6 +846,7 @@ static int flctl_probe(struct platform_device *pdev)
1157 nand->read_byte = flctl_read_byte; 846 nand->read_byte = flctl_read_byte;
1158 nand->write_buf = flctl_write_buf; 847 nand->write_buf = flctl_write_buf;
1159 nand->read_buf = flctl_read_buf; 848 nand->read_buf = flctl_read_buf;
849 nand->verify_buf = flctl_verify_buf;
1160 nand->select_chip = flctl_select_chip; 850 nand->select_chip = flctl_select_chip;
1161 nand->cmdfunc = flctl_cmdfunc; 851 nand->cmdfunc = flctl_cmdfunc;
1162 852
@@ -1165,50 +855,32 @@ static int flctl_probe(struct platform_device *pdev)
1165 nand->read_word = flctl_read_word; 855 nand->read_word = flctl_read_word;
1166 } 856 }
1167 857
1168 pm_runtime_enable(&pdev->dev);
1169 pm_runtime_resume(&pdev->dev);
1170
1171 flctl_setup_dma(flctl);
1172
1173 ret = nand_scan_ident(flctl_mtd, 1, NULL); 858 ret = nand_scan_ident(flctl_mtd, 1, NULL);
1174 if (ret) 859 if (ret)
1175 goto err_chip; 860 goto err;
1176 861
1177 ret = flctl_chip_init_tail(flctl_mtd); 862 ret = flctl_chip_init_tail(flctl_mtd);
1178 if (ret) 863 if (ret)
1179 goto err_chip; 864 goto err;
1180 865
1181 ret = nand_scan_tail(flctl_mtd); 866 ret = nand_scan_tail(flctl_mtd);
1182 if (ret) 867 if (ret)
1183 goto err_chip; 868 goto err;
1184 869
1185 ppdata.of_node = pdev->dev.of_node; 870 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
1186 ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
1187 pdata->nr_parts);
1188 871
1189 return 0; 872 return 0;
1190 873
1191err_chip: 874err:
1192 flctl_release_dma(flctl);
1193 pm_runtime_disable(&pdev->dev);
1194err_pdata:
1195 free_irq(irq, flctl);
1196err_flste:
1197 iounmap(flctl->reg);
1198err_iomap:
1199 kfree(flctl); 875 kfree(flctl);
1200 return ret; 876 return ret;
1201} 877}
1202 878
1203static int flctl_remove(struct platform_device *pdev) 879static int __devexit flctl_remove(struct platform_device *pdev)
1204{ 880{
1205 struct sh_flctl *flctl = platform_get_drvdata(pdev); 881 struct sh_flctl *flctl = platform_get_drvdata(pdev);
1206 882
1207 flctl_release_dma(flctl);
1208 nand_release(&flctl->mtd); 883 nand_release(&flctl->mtd);
1209 pm_runtime_disable(&pdev->dev);
1210 free_irq(platform_get_irq(pdev, 0), flctl);
1211 iounmap(flctl->reg);
1212 kfree(flctl); 884 kfree(flctl);
1213 885
1214 return 0; 886 return 0;
@@ -1219,7 +891,6 @@ static struct platform_driver flctl_driver = {
1219 .driver = { 891 .driver = {
1220 .name = "sh_flctl", 892 .name = "sh_flctl",
1221 .owner = THIS_MODULE, 893 .owner = THIS_MODULE,
1222 .of_match_table = of_flctl_match,
1223 }, 894 },
1224}; 895};
1225 896
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 127bc427182..19e24ed089e 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,12 +103,16 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
103 return readb(sharpsl->io + ECCCNTR) != 0; 103 return readb(sharpsl->io + ECCCNTR) != 0;
104} 104}
105 105
106static const char *part_probes[] = { "cmdlinepart", NULL };
107
106/* 108/*
107 * Main initialization routine 109 * Main initialization routine
108 */ 110 */
109static int sharpsl_nand_probe(struct platform_device *pdev) 111static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
110{ 112{
111 struct nand_chip *this; 113 struct nand_chip *this;
114 struct mtd_partition *sharpsl_partition_info;
115 int nr_partitions;
112 struct resource *r; 116 struct resource *r;
113 int err = 0; 117 int err = 0;
114 struct sharpsl_nand *sharpsl; 118 struct sharpsl_nand *sharpsl;
@@ -167,7 +171,6 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
167 this->ecc.mode = NAND_ECC_HW; 171 this->ecc.mode = NAND_ECC_HW;
168 this->ecc.size = 256; 172 this->ecc.size = 256;
169 this->ecc.bytes = 3; 173 this->ecc.bytes = 3;
170 this->ecc.strength = 1;
171 this->badblock_pattern = data->badblock_pattern; 174 this->badblock_pattern = data->badblock_pattern;
172 this->ecc.layout = data->ecc_layout; 175 this->ecc.layout = data->ecc_layout;
173 this->ecc.hwctl = sharpsl_nand_enable_hwecc; 176 this->ecc.hwctl = sharpsl_nand_enable_hwecc;
@@ -181,9 +184,14 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
181 184
182 /* Register the partitions */ 185 /* Register the partitions */
183 sharpsl->mtd.name = "sharpsl-nand"; 186 sharpsl->mtd.name = "sharpsl-nand";
187 nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
188 if (nr_partitions <= 0) {
189 nr_partitions = data->nr_partitions;
190 sharpsl_partition_info = data->partitions;
191 }
184 192
185 err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL, 193 err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
186 data->partitions, data->nr_partitions); 194 nr_partitions);
187 if (err) 195 if (err)
188 goto err_add; 196 goto err_add;
189 197
@@ -205,7 +213,7 @@ err_get_res:
205/* 213/*
206 * Clean up routine 214 * Clean up routine
207 */ 215 */
208static int sharpsl_nand_remove(struct platform_device *pdev) 216static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
209{ 217{
210 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); 218 struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
211 219
@@ -228,10 +236,20 @@ static struct platform_driver sharpsl_nand_driver = {
228 .owner = THIS_MODULE, 236 .owner = THIS_MODULE,
229 }, 237 },
230 .probe = sharpsl_nand_probe, 238 .probe = sharpsl_nand_probe,
231 .remove = sharpsl_nand_remove, 239 .remove = __devexit_p(sharpsl_nand_remove),
232}; 240};
233 241
234module_platform_driver(sharpsl_nand_driver); 242static int __init sharpsl_nand_init(void)
243{
244 return platform_driver_register(&sharpsl_nand_driver);
245}
246module_init(sharpsl_nand_init);
247
248static void __exit sharpsl_nand_exit(void)
249{
250 platform_driver_unregister(&sharpsl_nand_driver);
251}
252module_exit(sharpsl_nand_exit);
235 253
236MODULE_LICENSE("GPL"); 254MODULE_LICENSE("GPL");
237MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); 255MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 082bcdcd6bc..b6332e83b28 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -8,7 +8,6 @@
8 */ 8 */
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/mtd/nand.h> 10#include <linux/mtd/nand.h>
11#include <linux/module.h>
12#include "sm_common.h" 11#include "sm_common.h"
13 12
14static struct nand_ecclayout nand_oob_sm = { 13static struct nand_ecclayout nand_oob_sm = {
@@ -48,14 +47,14 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
48 47
49 /* As long as this function is called on erase block boundaries 48 /* As long as this function is called on erase block boundaries
50 it will work correctly for 256 byte nand */ 49 it will work correctly for 256 byte nand */
51 ops.mode = MTD_OPS_PLACE_OOB; 50 ops.mode = MTD_OOB_PLACE;
52 ops.ooboffs = 0; 51 ops.ooboffs = 0;
53 ops.ooblen = mtd->oobsize; 52 ops.ooblen = mtd->oobsize;
54 ops.oobbuf = (void *)&oob; 53 ops.oobbuf = (void *)&oob;
55 ops.datbuf = NULL; 54 ops.datbuf = NULL;
56 55
57 56
58 ret = mtd_write_oob(mtd, ofs, &ops); 57 ret = mtd->write_oob(mtd, ofs, &ops);
59 if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) { 58 if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
60 printk(KERN_NOTICE 59 printk(KERN_NOTICE
61 "sm_common: can't mark sector at %i as bad\n", 60 "sm_common: can't mark sector at %i as bad\n",
@@ -94,16 +93,17 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
94 {NULL,} 93 {NULL,}
95}; 94};
96 95
96#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
97static struct nand_flash_dev nand_xd_flash_ids[] = { 97static struct nand_flash_dev nand_xd_flash_ids[] = {
98 98
99 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, 99 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
100 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, 100 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
101 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, 101 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
102 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, 102 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
103 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD}, 103 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
104 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD}, 104 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
105 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD}, 105 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
106 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD}, 106 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
107 {NULL,} 107 {NULL,}
108}; 108};
109 109
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 09dde7d2717..ca2d0555729 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -98,6 +98,24 @@ static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
98 return word; 98 return word;
99} 99}
100 100
101/**
102 * socrates_nand_verify_buf - Verify chip data against buffer
103 * @mtd: MTD device structure
104 * @buf: buffer containing the data to compare
105 * @len: number of bytes to compare
106 */
107static int socrates_nand_verify_buf(struct mtd_info *mtd, const u8 *buf,
108 int len)
109{
110 int i;
111
112 for (i = 0; i < len; i++) {
113 if (buf[i] != socrates_nand_read_byte(mtd))
114 return -EFAULT;
115 }
116 return 0;
117}
118
101/* 119/*
102 * Hardware specific access to control-lines 120 * Hardware specific access to control-lines
103 */ 121 */
@@ -137,16 +155,19 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
137 return 1; 155 return 1;
138} 156}
139 157
158static const char *part_probes[] = { "cmdlinepart", NULL };
159
140/* 160/*
141 * Probe for the NAND device. 161 * Probe for the NAND device.
142 */ 162 */
143static int socrates_nand_probe(struct platform_device *ofdev) 163static int __devinit socrates_nand_probe(struct platform_device *ofdev)
144{ 164{
145 struct socrates_nand_host *host; 165 struct socrates_nand_host *host;
146 struct mtd_info *mtd; 166 struct mtd_info *mtd;
147 struct nand_chip *nand_chip; 167 struct nand_chip *nand_chip;
148 int res; 168 int res;
149 struct mtd_part_parser_data ppdata; 169 struct mtd_partition *partitions = NULL;
170 int num_partitions = 0;
150 171
151 /* Allocate memory for the device structure (and zero it) */ 172 /* Allocate memory for the device structure (and zero it) */
152 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); 173 host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -172,7 +193,6 @@ static int socrates_nand_probe(struct platform_device *ofdev)
172 mtd->name = "socrates_nand"; 193 mtd->name = "socrates_nand";
173 mtd->owner = THIS_MODULE; 194 mtd->owner = THIS_MODULE;
174 mtd->dev.parent = &ofdev->dev; 195 mtd->dev.parent = &ofdev->dev;
175 ppdata.of_node = ofdev->dev.of_node;
176 196
177 /*should never be accessed directly */ 197 /*should never be accessed directly */
178 nand_chip->IO_ADDR_R = (void *)0xdeadbeef; 198 nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
@@ -183,6 +203,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
183 nand_chip->read_word = socrates_nand_read_word; 203 nand_chip->read_word = socrates_nand_read_word;
184 nand_chip->write_buf = socrates_nand_write_buf; 204 nand_chip->write_buf = socrates_nand_write_buf;
185 nand_chip->read_buf = socrates_nand_read_buf; 205 nand_chip->read_buf = socrates_nand_read_buf;
206 nand_chip->verify_buf = socrates_nand_verify_buf;
186 nand_chip->dev_ready = socrates_nand_device_ready; 207 nand_chip->dev_ready = socrates_nand_device_ready;
187 208
188 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */ 209 nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
@@ -204,10 +225,30 @@ static int socrates_nand_probe(struct platform_device *ofdev)
204 goto out; 225 goto out;
205 } 226 }
206 227
207 res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); 228#ifdef CONFIG_MTD_CMDLINE_PARTS
229 num_partitions = parse_mtd_partitions(mtd, part_probes,
230 &partitions, 0);
231 if (num_partitions < 0) {
232 res = num_partitions;
233 goto release;
234 }
235#endif
236
237 if (num_partitions == 0) {
238 num_partitions = of_mtd_parse_partitions(&ofdev->dev,
239 ofdev->dev.of_node,
240 &partitions);
241 if (num_partitions < 0) {
242 res = num_partitions;
243 goto release;
244 }
245 }
246
247 res = mtd_device_register(mtd, partitions, num_partitions);
208 if (!res) 248 if (!res)
209 return res; 249 return res;
210 250
251release:
211 nand_release(mtd); 252 nand_release(mtd);
212 253
213out: 254out:
@@ -220,7 +261,7 @@ out:
220/* 261/*
221 * Remove a NAND device. 262 * Remove a NAND device.
222 */ 263 */
223static int socrates_nand_remove(struct platform_device *ofdev) 264static int __devexit socrates_nand_remove(struct platform_device *ofdev)
224{ 265{
225 struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); 266 struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
226 struct mtd_info *mtd = &host->mtd; 267 struct mtd_info *mtd = &host->mtd;
@@ -251,10 +292,21 @@ static struct platform_driver socrates_nand_driver = {
251 .of_match_table = socrates_nand_match, 292 .of_match_table = socrates_nand_match,
252 }, 293 },
253 .probe = socrates_nand_probe, 294 .probe = socrates_nand_probe,
254 .remove = socrates_nand_remove, 295 .remove = __devexit_p(socrates_nand_remove),
255}; 296};
256 297
257module_platform_driver(socrates_nand_driver); 298static int __init socrates_nand_init(void)
299{
300 return platform_driver_register(&socrates_nand_driver);
301}
302
303static void __exit socrates_nand_exit(void)
304{
305 platform_driver_unregister(&socrates_nand_driver);
306}
307
308module_init(socrates_nand_init);
309module_exit(socrates_nand_exit);
258 310
259MODULE_LICENSE("GPL"); 311MODULE_LICENSE("GPL");
260MODULE_AUTHOR("Ilya Yanok"); 312MODULE_AUTHOR("Ilya Yanok");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 508e9e04b09..11e8371b568 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -121,6 +121,9 @@ struct tmio_nand {
121 121
122#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd) 122#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
123 123
124#ifdef CONFIG_MTD_CMDLINE_PARTS
125static const char *part_probes[] = { "cmdlinepart", NULL };
126#endif
124 127
125/*--------------------------------------------------------------------------*/ 128/*--------------------------------------------------------------------------*/
126 129
@@ -256,6 +259,18 @@ static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
256 tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1); 259 tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
257} 260}
258 261
262static int
263tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
264{
265 struct tmio_nand *tmio = mtd_to_tmio(mtd);
266 u16 *p = (u16 *) buf;
267
268 for (len >>= 1; len; len--)
269 if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA))
270 return -EFAULT;
271 return 0;
272}
273
259static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode) 274static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
260{ 275{
261 struct tmio_nand *tmio = mtd_to_tmio(mtd); 276 struct tmio_nand *tmio = mtd_to_tmio(mtd);
@@ -366,6 +381,8 @@ static int tmio_probe(struct platform_device *dev)
366 struct tmio_nand *tmio; 381 struct tmio_nand *tmio;
367 struct mtd_info *mtd; 382 struct mtd_info *mtd;
368 struct nand_chip *nand_chip; 383 struct nand_chip *nand_chip;
384 struct mtd_partition *parts;
385 int nbparts = 0;
369 int retval; 386 int retval;
370 387
371 if (data == NULL) 388 if (data == NULL)
@@ -412,12 +429,12 @@ static int tmio_probe(struct platform_device *dev)
412 nand_chip->read_byte = tmio_nand_read_byte; 429 nand_chip->read_byte = tmio_nand_read_byte;
413 nand_chip->write_buf = tmio_nand_write_buf; 430 nand_chip->write_buf = tmio_nand_write_buf;
414 nand_chip->read_buf = tmio_nand_read_buf; 431 nand_chip->read_buf = tmio_nand_read_buf;
432 nand_chip->verify_buf = tmio_nand_verify_buf;
415 433
416 /* set eccmode using hardware ECC */ 434 /* set eccmode using hardware ECC */
417 nand_chip->ecc.mode = NAND_ECC_HW; 435 nand_chip->ecc.mode = NAND_ECC_HW;
418 nand_chip->ecc.size = 512; 436 nand_chip->ecc.size = 512;
419 nand_chip->ecc.bytes = 6; 437 nand_chip->ecc.bytes = 6;
420 nand_chip->ecc.strength = 2;
421 nand_chip->ecc.hwctl = tmio_nand_enable_hwecc; 438 nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
422 nand_chip->ecc.calculate = tmio_nand_calculate_ecc; 439 nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
423 nand_chip->ecc.correct = tmio_nand_correct_data; 440 nand_chip->ecc.correct = tmio_nand_correct_data;
@@ -444,9 +461,15 @@ static int tmio_probe(struct platform_device *dev)
444 goto err_scan; 461 goto err_scan;
445 } 462 }
446 /* Register the partitions */ 463 /* Register the partitions */
447 retval = mtd_device_parse_register(mtd, NULL, NULL, 464#ifdef CONFIG_MTD_CMDLINE_PARTS
448 data ? data->partition : NULL, 465 nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
449 data ? data->num_partitions : 0); 466#endif
467 if (nbparts <= 0 && data) {
468 parts = data->partition;
469 nbparts = data->num_partitions;
470 }
471
472 retval = mtd_device_register(mtd, parts, nbparts);
450 if (!retval) 473 if (!retval)
451 return retval; 474 return retval;
452 475
@@ -521,7 +544,18 @@ static struct platform_driver tmio_driver = {
521 .resume = tmio_resume, 544 .resume = tmio_resume,
522}; 545};
523 546
524module_platform_driver(tmio_driver); 547static int __init tmio_init(void)
548{
549 return platform_driver_register(&tmio_driver);
550}
551
552static void __exit tmio_exit(void)
553{
554 platform_driver_unregister(&tmio_driver);
555}
556
557module_init(tmio_init);
558module_exit(tmio_exit);
525 559
526MODULE_LICENSE("GPL v2"); 560MODULE_LICENSE("GPL v2");
527MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov"); 561MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index e3d7266e256..bfba4e39a6c 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,6 +74,7 @@ struct txx9ndfmc_drvdata {
74 unsigned char hold; /* in gbusclock */ 74 unsigned char hold; /* in gbusclock */
75 unsigned char spw; /* in gbusclock */ 75 unsigned char spw; /* in gbusclock */
76 struct nand_hw_control hw_control; 76 struct nand_hw_control hw_control;
77 struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
77}; 78};
78 79
79static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) 80static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -131,6 +132,18 @@ static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
131 *buf++ = __raw_readl(ndfdtr); 132 *buf++ = __raw_readl(ndfdtr);
132} 133}
133 134
135static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf,
136 int len)
137{
138 struct platform_device *dev = mtd_to_platdev(mtd);
139 void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
140
141 while (len--)
142 if (*buf++ != (uint8_t)__raw_readl(ndfdtr))
143 return -EFAULT;
144 return 0;
145}
146
134static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd, 147static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
135 unsigned int ctrl) 148 unsigned int ctrl)
136{ 149{
@@ -274,6 +287,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
274static int __init txx9ndfmc_probe(struct platform_device *dev) 287static int __init txx9ndfmc_probe(struct platform_device *dev)
275{ 288{
276 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; 289 struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
290 static const char *probes[] = { "cmdlinepart", NULL };
277 int hold, spw; 291 int hold, spw;
278 int i; 292 int i;
279 struct txx9ndfmc_drvdata *drvdata; 293 struct txx9ndfmc_drvdata *drvdata;
@@ -286,7 +300,11 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
286 drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL); 300 drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
287 if (!drvdata) 301 if (!drvdata)
288 return -ENOMEM; 302 return -ENOMEM;
289 drvdata->base = devm_request_and_ioremap(&dev->dev, res); 303 if (!devm_request_mem_region(&dev->dev, res->start,
304 resource_size(res), dev_name(&dev->dev)))
305 return -EBUSY;
306 drvdata->base = devm_ioremap(&dev->dev, res->start,
307 resource_size(res));
290 if (!drvdata->base) 308 if (!drvdata->base)
291 return -EBUSY; 309 return -EBUSY;
292 310
@@ -315,6 +333,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
315 struct txx9ndfmc_priv *txx9_priv; 333 struct txx9ndfmc_priv *txx9_priv;
316 struct nand_chip *chip; 334 struct nand_chip *chip;
317 struct mtd_info *mtd; 335 struct mtd_info *mtd;
336 int nr_parts;
318 337
319 if (!(plat->ch_mask & (1 << i))) 338 if (!(plat->ch_mask & (1 << i)))
320 continue; 339 continue;
@@ -334,6 +353,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
334 chip->read_byte = txx9ndfmc_read_byte; 353 chip->read_byte = txx9ndfmc_read_byte;
335 chip->read_buf = txx9ndfmc_read_buf; 354 chip->read_buf = txx9ndfmc_read_buf;
336 chip->write_buf = txx9ndfmc_write_buf; 355 chip->write_buf = txx9ndfmc_write_buf;
356 chip->verify_buf = txx9ndfmc_verify_buf;
337 chip->cmd_ctrl = txx9ndfmc_cmd_ctrl; 357 chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
338 chip->dev_ready = txx9ndfmc_dev_ready; 358 chip->dev_ready = txx9ndfmc_dev_ready;
339 chip->ecc.calculate = txx9ndfmc_calculate_ecc; 359 chip->ecc.calculate = txx9ndfmc_calculate_ecc;
@@ -343,7 +363,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
343 /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */ 363 /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
344 chip->ecc.size = 256; 364 chip->ecc.size = 256;
345 chip->ecc.bytes = 3; 365 chip->ecc.bytes = 3;
346 chip->ecc.strength = 1;
347 chip->chip_delay = 100; 366 chip->chip_delay = 100;
348 chip->controller = &drvdata->hw_control; 367 chip->controller = &drvdata->hw_control;
349 368
@@ -374,7 +393,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
374 } 393 }
375 mtd->name = txx9_priv->mtdname; 394 mtd->name = txx9_priv->mtdname;
376 395
377 mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); 396 nr_parts = parse_mtd_partitions(mtd, probes,
397 &drvdata->parts[i], 0);
398 mtd_device_register(mtd, drvdata->parts[i], nr_parts);
378 drvdata->mtds[i] = mtd; 399 drvdata->mtds[i] = mtd;
379 } 400 }
380 401
@@ -400,6 +421,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
400 txx9_priv = chip->priv; 421 txx9_priv = chip->priv;
401 422
402 nand_release(mtd); 423 nand_release(mtd);
424 kfree(drvdata->parts[i]);
403 kfree(txx9_priv->mtdname); 425 kfree(txx9_priv->mtdname);
404 kfree(txx9_priv); 426 kfree(txx9_priv);
405 } 427 }
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
deleted file mode 100644
index 3f81dc8f214..00000000000
--- a/drivers/mtd/nand/xway_nand.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright © 2012 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/mtd/nand.h>
10#include <linux/of_gpio.h>
11#include <linux/of_platform.h>
12
13#include <lantiq_soc.h>
14
15/* nand registers */
16#define EBU_ADDSEL1 0x24
17#define EBU_NAND_CON 0xB0
18#define EBU_NAND_WAIT 0xB4
19#define EBU_NAND_ECC0 0xB8
20#define EBU_NAND_ECC_AC 0xBC
21
22/* nand commands */
23#define NAND_CMD_ALE (1 << 2)
24#define NAND_CMD_CLE (1 << 3)
25#define NAND_CMD_CS (1 << 4)
26#define NAND_WRITE_CMD_RESET 0xff
27#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
28#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
29#define NAND_WRITE_DATA (NAND_CMD_CS)
30#define NAND_READ_DATA (NAND_CMD_CS)
31#define NAND_WAIT_WR_C (1 << 3)
32#define NAND_WAIT_RD (0x1)
33
34/* we need to tel the ebu which addr we mapped the nand to */
35#define ADDSEL1_MASK(x) (x << 4)
36#define ADDSEL1_REGEN 1
37
38/* we need to tell the EBU that we have nand attached and set it up properly */
39#define BUSCON1_SETUP (1 << 22)
40#define BUSCON1_BCGEN_RES (0x3 << 12)
41#define BUSCON1_WAITWRC2 (2 << 8)
42#define BUSCON1_WAITRDC2 (2 << 6)
43#define BUSCON1_HOLDC1 (1 << 4)
44#define BUSCON1_RECOVC1 (1 << 2)
45#define BUSCON1_CMULT4 1
46
47#define NAND_CON_CE (1 << 20)
48#define NAND_CON_OUT_CS1 (1 << 10)
49#define NAND_CON_IN_CS1 (1 << 8)
50#define NAND_CON_PRE_P (1 << 7)
51#define NAND_CON_WP_P (1 << 6)
52#define NAND_CON_SE_P (1 << 5)
53#define NAND_CON_CS_P (1 << 4)
54#define NAND_CON_CSMUX (1 << 1)
55#define NAND_CON_NANDM 1
56
57static void xway_reset_chip(struct nand_chip *chip)
58{
59 unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
60 unsigned long flags;
61
62 nandaddr &= ~NAND_WRITE_ADDR;
63 nandaddr |= NAND_WRITE_CMD;
64
65 /* finish with a reset */
66 spin_lock_irqsave(&ebu_lock, flags);
67 writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
68 while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
69 ;
70 spin_unlock_irqrestore(&ebu_lock, flags);
71}
72
73static void xway_select_chip(struct mtd_info *mtd, int chip)
74{
75
76 switch (chip) {
77 case -1:
78 ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
79 ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
80 break;
81 case 0:
82 ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
83 ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
84 break;
85 default:
86 BUG();
87 }
88}
89
90static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
91{
92 struct nand_chip *this = mtd->priv;
93 unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
94 unsigned long flags;
95
96 if (ctrl & NAND_CTRL_CHANGE) {
97 nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
98 if (ctrl & NAND_CLE)
99 nandaddr |= NAND_WRITE_CMD;
100 else
101 nandaddr |= NAND_WRITE_ADDR;
102 this->IO_ADDR_W = (void __iomem *) nandaddr;
103 }
104
105 if (cmd != NAND_CMD_NONE) {
106 spin_lock_irqsave(&ebu_lock, flags);
107 writeb(cmd, this->IO_ADDR_W);
108 while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
109 ;
110 spin_unlock_irqrestore(&ebu_lock, flags);
111 }
112}
113
114static int xway_dev_ready(struct mtd_info *mtd)
115{
116 return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
117}
118
119static unsigned char xway_read_byte(struct mtd_info *mtd)
120{
121 struct nand_chip *this = mtd->priv;
122 unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
123 unsigned long flags;
124 int ret;
125
126 spin_lock_irqsave(&ebu_lock, flags);
127 ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
128 spin_unlock_irqrestore(&ebu_lock, flags);
129
130 return ret;
131}
132
133static int xway_nand_probe(struct platform_device *pdev)
134{
135 struct nand_chip *this = platform_get_drvdata(pdev);
136 unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
137 const __be32 *cs = of_get_property(pdev->dev.of_node,
138 "lantiq,cs", NULL);
139 u32 cs_flag = 0;
140
141 /* load our CS from the DT. Either we find a valid 1 or default to 0 */
142 if (cs && (*cs == 1))
143 cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
144
145 /* setup the EBU to run in NAND mode on our base addr */
146 ltq_ebu_w32(CPHYSADDR(nandaddr)
147 | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
148
149 ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
150 | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
151 | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
152
153 ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
154 | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
155 | cs_flag, EBU_NAND_CON);
156
157 /* finish with a reset */
158 xway_reset_chip(this);
159
160 return 0;
161}
162
163/* allow users to override the partition in DT using the cmdline */
164static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
165
166static struct platform_nand_data xway_nand_data = {
167 .chip = {
168 .nr_chips = 1,
169 .chip_delay = 30,
170 .part_probe_types = part_probes,
171 },
172 .ctrl = {
173 .probe = xway_nand_probe,
174 .cmd_ctrl = xway_cmd_ctrl,
175 .dev_ready = xway_dev_ready,
176 .select_chip = xway_select_chip,
177 .read_byte = xway_read_byte,
178 }
179};
180
181/*
182 * Try to find the node inside the DT. If it is available attach out
183 * platform_nand_data
184 */
185static int __init xway_register_nand(void)
186{
187 struct device_node *node;
188 struct platform_device *pdev;
189
190 node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
191 if (!node)
192 return -ENOENT;
193 pdev = of_find_device_by_node(node);
194 if (!pdev)
195 return -EINVAL;
196 pdev->dev.platform_data = &xway_nand_data;
197 of_node_put(node);
198 return 0;
199}
200
201subsys_initcall(xway_register_nand);
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index c5f4ebf4b38..b155666acfb 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,12 +56,21 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
57 return; 57 return;
58 58
59 pr_debug("NFTL: add_mtd for %s\n", mtd->name); 59 if (!mtd->block_isbad) {
60 printk(KERN_ERR
61"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
62"Please use the new diskonchip driver under the NAND subsystem.\n");
63 return;
64 }
65
66 DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
60 67
61 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); 68 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
62 69
63 if (!nftl) 70 if (!nftl) {
71 printk(KERN_WARNING "NFTL: out of memory for data structures\n");
64 return; 72 return;
73 }
65 74
66 nftl->mbd.mtd = mtd; 75 nftl->mbd.mtd = mtd;
67 nftl->mbd.devnum = -1; 76 nftl->mbd.devnum = -1;
@@ -123,7 +132,7 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
123{ 132{
124 struct NFTLrecord *nftl = (void *)dev; 133 struct NFTLrecord *nftl = (void *)dev;
125 134
126 pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum); 135 DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);
127 136
128 del_mtd_blktrans_dev(dev); 137 del_mtd_blktrans_dev(dev);
129 kfree(nftl->ReplUnitTable); 138 kfree(nftl->ReplUnitTable);
@@ -140,13 +149,13 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
140 struct mtd_oob_ops ops; 149 struct mtd_oob_ops ops;
141 int res; 150 int res;
142 151
143 ops.mode = MTD_OPS_PLACE_OOB; 152 ops.mode = MTD_OOB_PLACE;
144 ops.ooboffs = offs & mask; 153 ops.ooboffs = offs & mask;
145 ops.ooblen = len; 154 ops.ooblen = len;
146 ops.oobbuf = buf; 155 ops.oobbuf = buf;
147 ops.datbuf = NULL; 156 ops.datbuf = NULL;
148 157
149 res = mtd_read_oob(mtd, offs & ~mask, &ops); 158 res = mtd->read_oob(mtd, offs & ~mask, &ops);
150 *retlen = ops.oobretlen; 159 *retlen = ops.oobretlen;
151 return res; 160 return res;
152} 161}
@@ -161,13 +170,13 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
161 struct mtd_oob_ops ops; 170 struct mtd_oob_ops ops;
162 int res; 171 int res;
163 172
164 ops.mode = MTD_OPS_PLACE_OOB; 173 ops.mode = MTD_OOB_PLACE;
165 ops.ooboffs = offs & mask; 174 ops.ooboffs = offs & mask;
166 ops.ooblen = len; 175 ops.ooblen = len;
167 ops.oobbuf = buf; 176 ops.oobbuf = buf;
168 ops.datbuf = NULL; 177 ops.datbuf = NULL;
169 178
170 res = mtd_write_oob(mtd, offs & ~mask, &ops); 179 res = mtd->write_oob(mtd, offs & ~mask, &ops);
171 *retlen = ops.oobretlen; 180 *retlen = ops.oobretlen;
172 return res; 181 return res;
173} 182}
@@ -184,14 +193,14 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
184 struct mtd_oob_ops ops; 193 struct mtd_oob_ops ops;
185 int res; 194 int res;
186 195
187 ops.mode = MTD_OPS_PLACE_OOB; 196 ops.mode = MTD_OOB_PLACE;
188 ops.ooboffs = offs & mask; 197 ops.ooboffs = offs & mask;
189 ops.ooblen = mtd->oobsize; 198 ops.ooblen = mtd->oobsize;
190 ops.oobbuf = oob; 199 ops.oobbuf = oob;
191 ops.datbuf = buf; 200 ops.datbuf = buf;
192 ops.len = len; 201 ops.len = len;
193 202
194 res = mtd_write_oob(mtd, offs & ~mask, &ops); 203 res = mtd->write_oob(mtd, offs & ~mask, &ops);
195 *retlen = ops.retlen; 204 *retlen = ops.retlen;
196 return res; 205 return res;
197} 206}
@@ -211,7 +220,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
211 220
212 /* Normally, we force a fold to happen before we run out of free blocks completely */ 221 /* Normally, we force a fold to happen before we run out of free blocks completely */
213 if (!desperate && nftl->numfreeEUNs < 2) { 222 if (!desperate && nftl->numfreeEUNs < 2) {
214 pr_debug("NFTL_findfreeblock: there are too few free EUNs\n"); 223 DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
215 return BLOCK_NIL; 224 return BLOCK_NIL;
216 } 225 }
217 226
@@ -282,7 +291,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
282 if (block == 2) { 291 if (block == 2) {
283 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; 292 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
284 if (foldmark == FOLD_MARK_IN_PROGRESS) { 293 if (foldmark == FOLD_MARK_IN_PROGRESS) {
285 pr_debug("Write Inhibited on EUN %d\n", thisEUN); 294 DEBUG(MTD_DEBUG_LEVEL1,
295 "Write Inhibited on EUN %d\n", thisEUN);
286 inplace = 0; 296 inplace = 0;
287 } else { 297 } else {
288 /* There's no other reason not to do inplace, 298 /* There's no other reason not to do inplace,
@@ -347,7 +357,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
347 if (BlockLastState[block] != SECTOR_FREE && 357 if (BlockLastState[block] != SECTOR_FREE &&
348 BlockMap[block] != BLOCK_NIL && 358 BlockMap[block] != BLOCK_NIL &&
349 BlockMap[block] != targetEUN) { 359 BlockMap[block] != targetEUN) {
350 pr_debug("Setting inplace to 0. VUC %d, " 360 DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
351 "block %d was %x lastEUN, " 361 "block %d was %x lastEUN, "
352 "and is in EUN %d (%s) %d\n", 362 "and is in EUN %d (%s) %d\n",
353 thisVUC, block, BlockLastState[block], 363 thisVUC, block, BlockLastState[block],
@@ -363,14 +373,14 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
363 pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) && 373 pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
364 BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] != 374 BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
365 SECTOR_FREE) { 375 SECTOR_FREE) {
366 pr_debug("Pending write not free in EUN %d. " 376 DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. "
367 "Folding out of place.\n", targetEUN); 377 "Folding out of place.\n", targetEUN);
368 inplace = 0; 378 inplace = 0;
369 } 379 }
370 } 380 }
371 381
372 if (!inplace) { 382 if (!inplace) {
373 pr_debug("Cannot fold Virtual Unit Chain %d in place. " 383 DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
374 "Trying out-of-place\n", thisVUC); 384 "Trying out-of-place\n", thisVUC);
375 /* We need to find a targetEUN to fold into. */ 385 /* We need to find a targetEUN to fold into. */
376 targetEUN = NFTL_findfreeblock(nftl, 1); 386 targetEUN = NFTL_findfreeblock(nftl, 1);
@@ -400,7 +410,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
400 and the Erase Unit into which we are supposed to be copying. 410 and the Erase Unit into which we are supposed to be copying.
401 Go for it. 411 Go for it.
402 */ 412 */
403 pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN); 413 DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN);
404 for (block = 0; block < nftl->EraseSize / 512 ; block++) { 414 for (block = 0; block < nftl->EraseSize / 512 ; block++) {
405 unsigned char movebuf[512]; 415 unsigned char movebuf[512];
406 int ret; 416 int ret;
@@ -416,17 +426,12 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
416 if (BlockMap[block] == BLOCK_NIL) 426 if (BlockMap[block] == BLOCK_NIL)
417 continue; 427 continue;
418 428
419 ret = mtd_read(mtd, 429 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
420 (nftl->EraseSize * BlockMap[block]) + (block * 512), 430 512, &retlen, movebuf);
421 512, 431 if (ret < 0 && ret != -EUCLEAN) {
422 &retlen, 432 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
423 movebuf); 433 + (block * 512), 512, &retlen,
424 if (ret < 0 && !mtd_is_bitflip(ret)) { 434 movebuf);
425 ret = mtd_read(mtd,
426 (nftl->EraseSize * BlockMap[block]) + (block * 512),
427 512,
428 &retlen,
429 movebuf);
430 if (ret != -EIO) 435 if (ret != -EIO)
431 printk("Error went away on retry.\n"); 436 printk("Error went away on retry.\n");
432 } 437 }
@@ -452,7 +457,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
452 has duplicate chains, we need to free one of the chains because it's not necessary any more. 457 has duplicate chains, we need to free one of the chains because it's not necessary any more.
453 */ 458 */
454 thisEUN = nftl->EUNtable[thisVUC]; 459 thisEUN = nftl->EUNtable[thisVUC];
455 pr_debug("Want to erase\n"); 460 DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");
456 461
457 /* For each block in the old chain (except the targetEUN of course), 462 /* For each block in the old chain (except the targetEUN of course),
458 free it and make it available for future use */ 463 free it and make it available for future use */
@@ -565,7 +570,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
565 (writeEUN * nftl->EraseSize) + blockofs, 570 (writeEUN * nftl->EraseSize) + blockofs,
566 8, &retlen, (char *)&bci); 571 8, &retlen, (char *)&bci);
567 572
568 pr_debug("Status of block %d in EUN %d is %x\n", 573 DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
569 block , writeEUN, le16_to_cpu(bci.Status)); 574 block , writeEUN, le16_to_cpu(bci.Status));
570 575
571 status = bci.Status | bci.Status1; 576 status = bci.Status | bci.Status1;
@@ -618,7 +623,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
618 but they are reserved for when we're 623 but they are reserved for when we're
619 desperate. Well, now we're desperate. 624 desperate. Well, now we're desperate.
620 */ 625 */
621 pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); 626 DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
622 writeEUN = NFTL_findfreeblock(nftl, 1); 627 writeEUN = NFTL_findfreeblock(nftl, 1);
623 } 628 }
624 if (writeEUN == BLOCK_NIL) { 629 if (writeEUN == BLOCK_NIL) {
@@ -769,9 +774,9 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
769 } else { 774 } else {
770 loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs; 775 loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
771 size_t retlen; 776 size_t retlen;
772 int res = mtd_read(mtd, ptr, 512, &retlen, buffer); 777 int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
773 778
774 if (res < 0 && !mtd_is_bitflip(res)) 779 if (res < 0 && res != -EUCLEAN)
775 return -EIO; 780 return -EIO;
776 } 781 }
777 return 0; 782 return 0;
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 51b9d6af307..e3cd1ffad2f 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -32,7 +32,7 @@
32 32
33/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the 33/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
34 * various device information of the NFTL partition and Bad Unit Table. Update 34 * various device information of the NFTL partition and Bad Unit Table. Update
35 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[] 35 * the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[]
36 * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c 36 * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c
37 */ 37 */
38static int find_boot_record(struct NFTLrecord *nftl) 38static int find_boot_record(struct NFTLrecord *nftl)
@@ -63,8 +63,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
63 63
64 /* Check for ANAND header first. Then can whinge if it's found but later 64 /* Check for ANAND header first. Then can whinge if it's found but later
65 checks fail */ 65 checks fail */
66 ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE, 66 ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
67 &retlen, buf); 67 &retlen, buf);
68 /* We ignore ret in case the ECC of the MediaHeader is invalid 68 /* We ignore ret in case the ECC of the MediaHeader is invalid
69 (which is apparently acceptable) */ 69 (which is apparently acceptable) */
70 if (retlen != SECTORSIZE) { 70 if (retlen != SECTORSIZE) {
@@ -242,8 +242,7 @@ The new DiskOnChip driver already scanned the bad block table. Just query it.
242 if (buf[i & (SECTORSIZE - 1)] != 0xff) 242 if (buf[i & (SECTORSIZE - 1)] != 0xff)
243 nftl->ReplUnitTable[i] = BLOCK_RESERVED; 243 nftl->ReplUnitTable[i] = BLOCK_RESERVED;
244#endif 244#endif
245 if (mtd_block_isbad(nftl->mbd.mtd, 245 if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
246 i * nftl->EraseSize))
247 nftl->ReplUnitTable[i] = BLOCK_RESERVED; 246 nftl->ReplUnitTable[i] = BLOCK_RESERVED;
248 } 247 }
249 248
@@ -275,7 +274,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
275 int i; 274 int i;
276 275
277 for (i = 0; i < len; i += SECTORSIZE) { 276 for (i = 0; i < len; i += SECTORSIZE) {
278 if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf)) 277 if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
279 return -1; 278 return -1;
280 if (memcmpb(buf, 0xff, SECTORSIZE) != 0) 279 if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
281 return -1; 280 return -1;
@@ -298,7 +297,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
298 * 297 *
299 * Return: 0 when succeed, -1 on error. 298 * Return: 0 when succeed, -1 on error.
300 * 299 *
301 * ToDo: 1. Is it necessary to check_free_sector after erasing ?? 300 * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
302 */ 301 */
303int NFTL_formatblock(struct NFTLrecord *nftl, int block) 302int NFTL_formatblock(struct NFTLrecord *nftl, int block)
304{ 303{
@@ -327,7 +326,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
327 instr->mtd = nftl->mbd.mtd; 326 instr->mtd = nftl->mbd.mtd;
328 instr->addr = block * nftl->EraseSize; 327 instr->addr = block * nftl->EraseSize;
329 instr->len = nftl->EraseSize; 328 instr->len = nftl->EraseSize;
330 mtd_erase(mtd, instr); 329 mtd->erase(mtd, instr);
331 330
332 if (instr->state == MTD_ERASE_FAILED) { 331 if (instr->state == MTD_ERASE_FAILED) {
333 printk("Error while formatting block %d\n", block); 332 printk("Error while formatting block %d\n", block);
@@ -338,7 +337,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
338 nb_erases = le32_to_cpu(uci.WearInfo); 337 nb_erases = le32_to_cpu(uci.WearInfo);
339 nb_erases++; 338 nb_erases++;
340 339
341 /* wrap (almost impossible with current flash) or free block */ 340 /* wrap (almost impossible with current flashs) or free block */
342 if (nb_erases == 0) 341 if (nb_erases == 0)
343 nb_erases = 1; 342 nb_erases = 1;
344 343
@@ -356,7 +355,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
356fail: 355fail:
357 /* could not format, update the bad block table (caller is responsible 356 /* could not format, update the bad block table (caller is responsible
358 for setting the ReplUnitTable to BLOCK_RESERVED on failure) */ 357 for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
359 mtd_block_markbad(nftl->mbd.mtd, instr->addr); 358 nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
360 return -1; 359 return -1;
361} 360}
362 361
@@ -364,10 +363,10 @@ fail:
364 * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain 363 * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain
365 * was being folded when NFTL was interrupted. 364 * was being folded when NFTL was interrupted.
366 * 365 *
367 * The check_free_sectors in this function is necessary. There is a possible 366 * The check_free_sectors in this function is neceressary. There is a possible
368 * situation that after writing the Data area, the Block Control Information is 367 * situation that after writing the Data area, the Block Control Information is
369 * not updated according (due to power failure or something) which leaves the block 368 * not updated according (due to power failure or something) which leaves the block
370 * in an inconsistent state. So we have to check if a block is really FREE in this 369 * in an umconsistent state. So we have to check if a block is really FREE in this
371 * case. */ 370 * case. */
372static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block) 371static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
373{ 372{
@@ -429,7 +428,7 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
429 428
430 for (;;) { 429 for (;;) {
431 length++; 430 length++;
432 /* avoid infinite loops, although this is guaranteed not to 431 /* avoid infinite loops, although this is guaranted not to
433 happen because of the previous checks */ 432 happen because of the previous checks */
434 if (length >= nftl->nb_blocks) { 433 if (length >= nftl->nb_blocks) {
435 printk("nftl: length too long %d !\n", length); 434 printk("nftl: length too long %d !\n", length);
@@ -448,11 +447,11 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
448/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a 447/* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
449 * Virtual Unit Chain, i.e. all the units are disconnected. 448 * Virtual Unit Chain, i.e. all the units are disconnected.
450 * 449 *
451 * It is not strictly correct to begin from the first block of the chain because 450 * It is not stricly correct to begin from the first block of the chain because
452 * if we stop the code, we may see again a valid chain if there was a first_block 451 * if we stop the code, we may see again a valid chain if there was a first_block
453 * flag in a block inside it. But is it really a problem ? 452 * flag in a block inside it. But is it really a problem ?
454 * 453 *
455 * FixMe: Figure out what the last statement means. What if power failure when we are 454 * FixMe: Figure out what the last statesment means. What if power failure when we are
456 * in the for (;;) loop formatting blocks ?? 455 * in the for (;;) loop formatting blocks ??
457 */ 456 */
458static void format_chain(struct NFTLrecord *nftl, unsigned int first_block) 457static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
@@ -486,7 +485,7 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
486 * totally free (only 0xff). 485 * totally free (only 0xff).
487 * 486 *
488 * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the 487 * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the
489 * following criteria: 488 * following critia:
490 * 1. */ 489 * 1. */
491static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) 490static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
492{ 491{
@@ -503,7 +502,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
503 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1)); 502 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
504 if (erase_mark != ERASE_MARK) { 503 if (erase_mark != ERASE_MARK) {
505 /* if no erase mark, the block must be totally free. This is 504 /* if no erase mark, the block must be totally free. This is
506 possible in two cases : empty filesystem or interrupted erase (very unlikely) */ 505 possible in two cases : empty filsystem or interrupted erase (very unlikely) */
507 if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0) 506 if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
508 return -1; 507 return -1;
509 508
@@ -545,7 +544,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
545/* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS 544/* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS
546 * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2 545 * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2
547 * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted 546 * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted
548 * for some reason. A clean up/check of the VUC is necessary in this case. 547 * for some reason. A clean up/check of the VUC is neceressary in this case.
549 * 548 *
550 * WARNING: return 0 if read error 549 * WARNING: return 0 if read error
551 */ 550 */
@@ -658,7 +657,7 @@ int NFTL_mount(struct NFTLrecord *s)
658 printk("Block %d: incorrect logical block: %d expected: %d\n", 657 printk("Block %d: incorrect logical block: %d expected: %d\n",
659 block, logical_block, first_logical_block); 658 block, logical_block, first_logical_block);
660 /* the chain is incorrect : we must format it, 659 /* the chain is incorrect : we must format it,
661 but we need to read it completely */ 660 but we need to read it completly */
662 do_format_chain = 1; 661 do_format_chain = 1;
663 } 662 }
664 if (is_first_block) { 663 if (is_first_block) {
@@ -670,7 +669,7 @@ int NFTL_mount(struct NFTLrecord *s)
670 printk("Block %d: incorrectly marked as first block in chain\n", 669 printk("Block %d: incorrectly marked as first block in chain\n",
671 block); 670 block);
672 /* the chain is incorrect : we must format it, 671 /* the chain is incorrect : we must format it,
673 but we need to read it completely */ 672 but we need to read it completly */
674 do_format_chain = 1; 673 do_format_chain = 1;
675 } else { 674 } else {
676 printk("Block %d: folding in progress - ignoring first block flag\n", 675 printk("Block %d: folding in progress - ignoring first block flag\n",
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index dbd3aa574ea..a996718fa6b 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,23 +20,14 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22 22
23static int parse_ofpart_partitions(struct mtd_info *master, 23int __devinit of_mtd_parse_partitions(struct device *dev,
24 struct mtd_partition **pparts, 24 struct device_node *node,
25 struct mtd_part_parser_data *data) 25 struct mtd_partition **pparts)
26{ 26{
27 struct device_node *node;
28 const char *partname; 27 const char *partname;
29 struct device_node *pp; 28 struct device_node *pp;
30 int nr_parts, i; 29 int nr_parts, i;
31 30
32
33 if (!data)
34 return 0;
35
36 node = data->of_node;
37 if (!node)
38 return 0;
39
40 /* First count the subnodes */ 31 /* First count the subnodes */
41 pp = NULL; 32 pp = NULL;
42 nr_parts = 0; 33 nr_parts = 0;
@@ -71,17 +62,14 @@ static int parse_ofpart_partitions(struct mtd_info *master,
71 (*pparts)[i].name = (char *)partname; 62 (*pparts)[i].name = (char *)partname;
72 63
73 if (of_get_property(pp, "read-only", &len)) 64 if (of_get_property(pp, "read-only", &len))
74 (*pparts)[i].mask_flags |= MTD_WRITEABLE; 65 (*pparts)[i].mask_flags = MTD_WRITEABLE;
75
76 if (of_get_property(pp, "lock", &len))
77 (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
78 66
79 i++; 67 i++;
80 } 68 }
81 69
82 if (!i) { 70 if (!i) {
83 of_node_put(pp); 71 of_node_put(pp);
84 pr_err("No valid partition found on %s\n", node->full_name); 72 dev_err(dev, "No valid partition found on %s\n", node->full_name);
85 kfree(*pparts); 73 kfree(*pparts);
86 *pparts = NULL; 74 *pparts = NULL;
87 return -EINVAL; 75 return -EINVAL;
@@ -89,99 +77,6 @@ static int parse_ofpart_partitions(struct mtd_info *master,
89 77
90 return nr_parts; 78 return nr_parts;
91} 79}
92 80EXPORT_SYMBOL(of_mtd_parse_partitions);
93static struct mtd_part_parser ofpart_parser = {
94 .owner = THIS_MODULE,
95 .parse_fn = parse_ofpart_partitions,
96 .name = "ofpart",
97};
98
99static int parse_ofoldpart_partitions(struct mtd_info *master,
100 struct mtd_partition **pparts,
101 struct mtd_part_parser_data *data)
102{
103 struct device_node *dp;
104 int i, plen, nr_parts;
105 const struct {
106 __be32 offset, len;
107 } *part;
108 const char *names;
109
110 if (!data)
111 return 0;
112
113 dp = data->of_node;
114 if (!dp)
115 return 0;
116
117 part = of_get_property(dp, "partitions", &plen);
118 if (!part)
119 return 0; /* No partitions found */
120
121 pr_warning("Device tree uses obsolete partition map binding: %s\n",
122 dp->full_name);
123
124 nr_parts = plen / sizeof(part[0]);
125
126 *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
127 if (!*pparts)
128 return -ENOMEM;
129
130 names = of_get_property(dp, "partition-names", &plen);
131
132 for (i = 0; i < nr_parts; i++) {
133 (*pparts)[i].offset = be32_to_cpu(part->offset);
134 (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
135 /* bit 0 set signifies read only partition */
136 if (be32_to_cpu(part->len) & 1)
137 (*pparts)[i].mask_flags = MTD_WRITEABLE;
138
139 if (names && (plen > 0)) {
140 int len = strlen(names) + 1;
141
142 (*pparts)[i].name = (char *)names;
143 plen -= len;
144 names += len;
145 } else {
146 (*pparts)[i].name = "unnamed";
147 }
148
149 part++;
150 }
151
152 return nr_parts;
153}
154
155static struct mtd_part_parser ofoldpart_parser = {
156 .owner = THIS_MODULE,
157 .parse_fn = parse_ofoldpart_partitions,
158 .name = "ofoldpart",
159};
160
161static int __init ofpart_parser_init(void)
162{
163 int rc;
164 rc = register_mtd_parser(&ofpart_parser);
165 if (rc)
166 goto out;
167
168 rc = register_mtd_parser(&ofoldpart_parser);
169 if (!rc)
170 return 0;
171
172 deregister_mtd_parser(&ofoldpart_parser);
173out:
174 return rc;
175}
176
177module_init(ofpart_parser_init);
178 81
179MODULE_LICENSE("GPL"); 82MODULE_LICENSE("GPL");
180MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
181MODULE_AUTHOR("Vitaly Wool, David Gibson");
182/*
183 * When MTD core cannot find the requested parser, it tries to load the module
184 * with the same name. Since we provide the ofoldpart parser, we should have
185 * the corresponding alias.
186 */
187MODULE_ALIAS("ofoldpart");
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 91467bb0363..772ad296661 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,7 +1,6 @@
1menuconfig MTD_ONENAND 1menuconfig MTD_ONENAND
2 tristate "OneNAND Device Support" 2 tristate "OneNAND Device Support"
3 depends on MTD 3 depends on MTD
4 depends on HAS_IOMEM
5 help 4 help
6 This enables support for accessing all type of OneNAND flash 5 This enables support for accessing all type of OneNAND flash
7 devices. For further information see 6 devices. For further information see
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 9f11562f849..2d70d354d84 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,12 +30,15 @@
30 */ 30 */
31#define DRIVER_NAME "onenand-flash" 31#define DRIVER_NAME "onenand-flash"
32 32
33static const char *part_probes[] = { "cmdlinepart", NULL, };
34
33struct onenand_info { 35struct onenand_info {
34 struct mtd_info mtd; 36 struct mtd_info mtd;
37 struct mtd_partition *parts;
35 struct onenand_chip onenand; 38 struct onenand_chip onenand;
36}; 39};
37 40
38static int generic_onenand_probe(struct platform_device *pdev) 41static int __devinit generic_onenand_probe(struct platform_device *pdev)
39{ 42{
40 struct onenand_info *info; 43 struct onenand_info *info;
41 struct onenand_platform_data *pdata = pdev->dev.platform_data; 44 struct onenand_platform_data *pdata = pdev->dev.platform_data;
@@ -70,9 +73,13 @@ static int generic_onenand_probe(struct platform_device *pdev)
70 goto out_iounmap; 73 goto out_iounmap;
71 } 74 }
72 75
73 err = mtd_device_parse_register(&info->mtd, NULL, NULL, 76 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
74 pdata ? pdata->parts : NULL, 77 if (err > 0)
75 pdata ? pdata->nr_parts : 0); 78 mtd_device_register(&info->mtd, info->parts, err);
79 else if (err <= 0 && pdata && pdata->parts)
80 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
81 else
82 err = mtd_device_register(&info->mtd, NULL, 0);
76 83
77 platform_set_drvdata(pdev, info); 84 platform_set_drvdata(pdev, info);
78 85
@@ -88,7 +95,7 @@ out_free_info:
88 return err; 95 return err;
89} 96}
90 97
91static int generic_onenand_remove(struct platform_device *pdev) 98static int __devexit generic_onenand_remove(struct platform_device *pdev)
92{ 99{
93 struct onenand_info *info = platform_get_drvdata(pdev); 100 struct onenand_info *info = platform_get_drvdata(pdev);
94 struct resource *res = pdev->resource; 101 struct resource *res = pdev->resource;
@@ -97,6 +104,7 @@ static int generic_onenand_remove(struct platform_device *pdev)
97 platform_set_drvdata(pdev, NULL); 104 platform_set_drvdata(pdev, NULL);
98 105
99 if (info) { 106 if (info) {
107 mtd_device_unregister(&info->mtd);
100 onenand_release(&info->mtd); 108 onenand_release(&info->mtd);
101 release_mem_region(res->start, size); 109 release_mem_region(res->start, size);
102 iounmap(info->onenand.base); 110 iounmap(info->onenand.base);
@@ -112,12 +120,24 @@ static struct platform_driver generic_onenand_driver = {
112 .owner = THIS_MODULE, 120 .owner = THIS_MODULE,
113 }, 121 },
114 .probe = generic_onenand_probe, 122 .probe = generic_onenand_probe,
115 .remove = generic_onenand_remove, 123 .remove = __devexit_p(generic_onenand_remove),
116}; 124};
117 125
118module_platform_driver(generic_onenand_driver); 126MODULE_ALIAS("platform:" DRIVER_NAME);
127
128static int __init generic_onenand_init(void)
129{
130 return platform_driver_register(&generic_onenand_driver);
131}
132
133static void __exit generic_onenand_exit(void)
134{
135 platform_driver_unregister(&generic_onenand_driver);
136}
137
138module_init(generic_onenand_init);
139module_exit(generic_onenand_exit);
119 140
120MODULE_LICENSE("GPL"); 141MODULE_LICENSE("GPL");
121MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); 142MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
122MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards"); 143MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
123MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 065f3fe02a2..a916dec2921 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -38,22 +38,26 @@
38#include <linux/regulator/consumer.h> 38#include <linux/regulator/consumer.h>
39 39
40#include <asm/mach/flash.h> 40#include <asm/mach/flash.h>
41#include <linux/platform_data/mtd-onenand-omap2.h> 41#include <plat/gpmc.h>
42#include <asm/gpio.h> 42#include <plat/onenand.h>
43#include <mach/gpio.h>
43 44
44#include <linux/omap-dma.h> 45#include <plat/dma.h>
46
47#include <plat/board.h>
45 48
46#define DRIVER_NAME "omap2-onenand" 49#define DRIVER_NAME "omap2-onenand"
47 50
51#define ONENAND_IO_SIZE SZ_128K
48#define ONENAND_BUFRAM_SIZE (1024 * 5) 52#define ONENAND_BUFRAM_SIZE (1024 * 5)
49 53
50struct omap2_onenand { 54struct omap2_onenand {
51 struct platform_device *pdev; 55 struct platform_device *pdev;
52 int gpmc_cs; 56 int gpmc_cs;
53 unsigned long phys_base; 57 unsigned long phys_base;
54 unsigned int mem_size;
55 int gpio_irq; 58 int gpio_irq;
56 struct mtd_info mtd; 59 struct mtd_info mtd;
60 struct mtd_partition *parts;
57 struct onenand_chip onenand; 61 struct onenand_chip onenand;
58 struct completion irq_done; 62 struct completion irq_done;
59 struct completion dma_done; 63 struct completion dma_done;
@@ -61,9 +65,10 @@ struct omap2_onenand {
61 int freq; 65 int freq;
62 int (*setup)(void __iomem *base, int *freq_ptr); 66 int (*setup)(void __iomem *base, int *freq_ptr);
63 struct regulator *regulator; 67 struct regulator *regulator;
64 u8 flags;
65}; 68};
66 69
70static const char *part_probes[] = { "cmdlinepart", NULL, };
71
67static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) 72static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
68{ 73{
69 struct omap2_onenand *c = data; 74 struct omap2_onenand *c = data;
@@ -154,7 +159,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
154 if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) { 159 if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
155 syscfg |= ONENAND_SYS_CFG1_IOBE; 160 syscfg |= ONENAND_SYS_CFG1_IOBE;
156 write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); 161 write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
157 if (c->flags & ONENAND_IN_OMAP34XX) 162 if (cpu_is_omap34xx())
158 /* Add a delay to let GPIO settle */ 163 /* Add a delay to let GPIO settle */
159 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); 164 syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
160 } 165 }
@@ -445,19 +450,13 @@ out_copy:
445 450
446#else 451#else
447 452
448static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area, 453int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
449 unsigned char *buffer, int offset, 454 unsigned char *buffer, int offset,
450 size_t count) 455 size_t count);
451{
452 return -ENOSYS;
453}
454 456
455static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, 457int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
456 const unsigned char *buffer, 458 const unsigned char *buffer,
457 int offset, size_t count) 459 int offset, size_t count);
458{
459 return -ENOSYS;
460}
461 460
462#endif 461#endif
463 462
@@ -555,19 +554,13 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
555 554
556#else 555#else
557 556
558static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, 557int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
559 unsigned char *buffer, int offset, 558 unsigned char *buffer, int offset,
560 size_t count) 559 size_t count);
561{
562 return -ENOSYS;
563}
564 560
565static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, 561int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
566 const unsigned char *buffer, 562 const unsigned char *buffer,
567 int offset, size_t count) 563 int offset, size_t count);
568{
569 return -ENOSYS;
570}
571 564
572#endif 565#endif
573 566
@@ -630,13 +623,12 @@ static int omap2_onenand_disable(struct mtd_info *mtd)
630 return ret; 623 return ret;
631} 624}
632 625
633static int omap2_onenand_probe(struct platform_device *pdev) 626static int __devinit omap2_onenand_probe(struct platform_device *pdev)
634{ 627{
635 struct omap_onenand_platform_data *pdata; 628 struct omap_onenand_platform_data *pdata;
636 struct omap2_onenand *c; 629 struct omap2_onenand *c;
637 struct onenand_chip *this; 630 struct onenand_chip *this;
638 int r; 631 int r;
639 struct resource *res;
640 632
641 pdata = pdev->dev.platform_data; 633 pdata = pdev->dev.platform_data;
642 if (pdata == NULL) { 634 if (pdata == NULL) {
@@ -650,7 +642,6 @@ static int omap2_onenand_probe(struct platform_device *pdev)
650 642
651 init_completion(&c->irq_done); 643 init_completion(&c->irq_done);
652 init_completion(&c->dma_done); 644 init_completion(&c->dma_done);
653 c->flags = pdata->flags;
654 c->gpmc_cs = pdata->cs; 645 c->gpmc_cs = pdata->cs;
655 c->gpio_irq = pdata->gpio_irq; 646 c->gpio_irq = pdata->gpio_irq;
656 c->dma_channel = pdata->dma_channel; 647 c->dma_channel = pdata->dma_channel;
@@ -659,24 +650,20 @@ static int omap2_onenand_probe(struct platform_device *pdev)
659 c->gpio_irq = 0; 650 c->gpio_irq = 0;
660 } 651 }
661 652
662 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 653 r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
663 if (res == NULL) { 654 if (r < 0) {
664 r = -EINVAL; 655 dev_err(&pdev->dev, "Cannot request GPMC CS\n");
665 dev_err(&pdev->dev, "error getting memory resource\n");
666 goto err_kfree; 656 goto err_kfree;
667 } 657 }
668 658
669 c->phys_base = res->start; 659 if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
670 c->mem_size = resource_size(res);
671
672 if (request_mem_region(c->phys_base, c->mem_size,
673 pdev->dev.driver->name) == NULL) { 660 pdev->dev.driver->name) == NULL) {
674 dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n", 661 dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
675 c->phys_base, c->mem_size); 662 "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
676 r = -EBUSY; 663 r = -EBUSY;
677 goto err_kfree; 664 goto err_free_cs;
678 } 665 }
679 c->onenand.base = ioremap(c->phys_base, c->mem_size); 666 c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
680 if (c->onenand.base == NULL) { 667 if (c->onenand.base == NULL) {
681 r = -ENOMEM; 668 r = -ENOMEM;
682 goto err_release_mem_region; 669 goto err_release_mem_region;
@@ -741,7 +728,7 @@ static int omap2_onenand_probe(struct platform_device *pdev)
741 this = &c->onenand; 728 this = &c->onenand;
742 if (c->dma_channel >= 0) { 729 if (c->dma_channel >= 0) {
743 this->wait = omap2_onenand_wait; 730 this->wait = omap2_onenand_wait;
744 if (c->flags & ONENAND_IN_OMAP34XX) { 731 if (cpu_is_omap34xx()) {
745 this->read_bufferram = omap3_onenand_read_bufferram; 732 this->read_bufferram = omap3_onenand_read_bufferram;
746 this->write_bufferram = omap3_onenand_write_bufferram; 733 this->write_bufferram = omap3_onenand_write_bufferram;
747 } else { 734 } else {
@@ -754,7 +741,6 @@ static int omap2_onenand_probe(struct platform_device *pdev)
754 c->regulator = regulator_get(&pdev->dev, "vonenand"); 741 c->regulator = regulator_get(&pdev->dev, "vonenand");
755 if (IS_ERR(c->regulator)) { 742 if (IS_ERR(c->regulator)) {
756 dev_err(&pdev->dev, "Failed to get regulator\n"); 743 dev_err(&pdev->dev, "Failed to get regulator\n");
757 r = PTR_ERR(c->regulator);
758 goto err_release_dma; 744 goto err_release_dma;
759 } 745 }
760 c->onenand.enable = omap2_onenand_enable; 746 c->onenand.enable = omap2_onenand_enable;
@@ -767,9 +753,13 @@ static int omap2_onenand_probe(struct platform_device *pdev)
767 if ((r = onenand_scan(&c->mtd, 1)) < 0) 753 if ((r = onenand_scan(&c->mtd, 1)) < 0)
768 goto err_release_regulator; 754 goto err_release_regulator;
769 755
770 r = mtd_device_parse_register(&c->mtd, NULL, NULL, 756 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
771 pdata ? pdata->parts : NULL, 757 if (r > 0)
772 pdata ? pdata->nr_parts : 0); 758 r = mtd_device_register(&c->mtd, c->parts, r);
759 else if (pdata->parts != NULL)
760 r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
761 else
762 r = mtd_device_register(&c->mtd, NULL, 0);
773 if (r) 763 if (r)
774 goto err_release_onenand; 764 goto err_release_onenand;
775 765
@@ -792,14 +782,17 @@ err_release_gpio:
792err_iounmap: 782err_iounmap:
793 iounmap(c->onenand.base); 783 iounmap(c->onenand.base);
794err_release_mem_region: 784err_release_mem_region:
795 release_mem_region(c->phys_base, c->mem_size); 785 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
786err_free_cs:
787 gpmc_cs_free(c->gpmc_cs);
796err_kfree: 788err_kfree:
789 kfree(c->parts);
797 kfree(c); 790 kfree(c);
798 791
799 return r; 792 return r;
800} 793}
801 794
802static int omap2_onenand_remove(struct platform_device *pdev) 795static int __devexit omap2_onenand_remove(struct platform_device *pdev)
803{ 796{
804 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 797 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
805 798
@@ -814,7 +807,9 @@ static int omap2_onenand_remove(struct platform_device *pdev)
814 gpio_free(c->gpio_irq); 807 gpio_free(c->gpio_irq);
815 } 808 }
816 iounmap(c->onenand.base); 809 iounmap(c->onenand.base);
817 release_mem_region(c->phys_base, c->mem_size); 810 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
811 gpmc_cs_free(c->gpmc_cs);
812 kfree(c->parts);
818 kfree(c); 813 kfree(c);
819 814
820 return 0; 815 return 0;
@@ -822,7 +817,7 @@ static int omap2_onenand_remove(struct platform_device *pdev)
822 817
823static struct platform_driver omap2_onenand_driver = { 818static struct platform_driver omap2_onenand_driver = {
824 .probe = omap2_onenand_probe, 819 .probe = omap2_onenand_probe,
825 .remove = omap2_onenand_remove, 820 .remove = __devexit_p(omap2_onenand_remove),
826 .shutdown = omap2_onenand_shutdown, 821 .shutdown = omap2_onenand_shutdown,
827 .driver = { 822 .driver = {
828 .name = DRIVER_NAME, 823 .name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3f41f20062..ac9e959802a 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1015,7 +1015,7 @@ static void onenand_release_device(struct mtd_info *mtd)
1015} 1015}
1016 1016
1017/** 1017/**
1018 * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer 1018 * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer
1019 * @param mtd MTD device structure 1019 * @param mtd MTD device structure
1020 * @param buf destination address 1020 * @param buf destination address
1021 * @param column oob offset to read from 1021 * @param column oob offset to read from
@@ -1079,7 +1079,7 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
1079 return status; 1079 return status;
1080 1080
1081 /* check if we failed due to uncorrectable error */ 1081 /* check if we failed due to uncorrectable error */
1082 if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR) 1082 if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
1083 return status; 1083 return status;
1084 1084
1085 /* check if address lies in MLC region */ 1085 /* check if address lies in MLC region */
@@ -1122,10 +1122,10 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1122 int ret = 0; 1122 int ret = 0;
1123 int writesize = this->writesize; 1123 int writesize = this->writesize;
1124 1124
1125 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1125 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1126 (int)len); 1126 __func__, (unsigned int) from, (int) len);
1127 1127
1128 if (ops->mode == MTD_OPS_AUTO_OOB) 1128 if (ops->mode == MTD_OOB_AUTO)
1129 oobsize = this->ecclayout->oobavail; 1129 oobsize = this->ecclayout->oobavail;
1130 else 1130 else
1131 oobsize = mtd->oobsize; 1131 oobsize = mtd->oobsize;
@@ -1159,7 +1159,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1159 if (unlikely(ret)) 1159 if (unlikely(ret))
1160 ret = onenand_recover_lsb(mtd, from, ret); 1160 ret = onenand_recover_lsb(mtd, from, ret);
1161 onenand_update_bufferram(mtd, from, !ret); 1161 onenand_update_bufferram(mtd, from, !ret);
1162 if (mtd_is_eccerr(ret)) 1162 if (ret == -EBADMSG)
1163 ret = 0; 1163 ret = 0;
1164 if (ret) 1164 if (ret)
1165 break; 1165 break;
@@ -1170,7 +1170,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1170 thisooblen = oobsize - oobcolumn; 1170 thisooblen = oobsize - oobcolumn;
1171 thisooblen = min_t(int, thisooblen, ooblen - oobread); 1171 thisooblen = min_t(int, thisooblen, ooblen - oobread);
1172 1172
1173 if (ops->mode == MTD_OPS_AUTO_OOB) 1173 if (ops->mode == MTD_OOB_AUTO)
1174 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); 1174 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1175 else 1175 else
1176 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); 1176 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1201,8 +1201,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1201 if (mtd->ecc_stats.failed - stats.failed) 1201 if (mtd->ecc_stats.failed - stats.failed)
1202 return -EBADMSG; 1202 return -EBADMSG;
1203 1203
1204 /* return max bitflips per ecc step; ONENANDs correct 1 bit only */ 1204 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1205 return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1206} 1205}
1207 1206
1208/** 1207/**
@@ -1227,10 +1226,10 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1227 int ret = 0, boundary = 0; 1226 int ret = 0, boundary = 0;
1228 int writesize = this->writesize; 1227 int writesize = this->writesize;
1229 1228
1230 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1229 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1231 (int)len); 1230 __func__, (unsigned int) from, (int) len);
1232 1231
1233 if (ops->mode == MTD_OPS_AUTO_OOB) 1232 if (ops->mode == MTD_OOB_AUTO)
1234 oobsize = this->ecclayout->oobavail; 1233 oobsize = this->ecclayout->oobavail;
1235 else 1234 else
1236 oobsize = mtd->oobsize; 1235 oobsize = mtd->oobsize;
@@ -1256,7 +1255,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1256 this->command(mtd, ONENAND_CMD_READ, from, writesize); 1255 this->command(mtd, ONENAND_CMD_READ, from, writesize);
1257 ret = this->wait(mtd, FL_READING); 1256 ret = this->wait(mtd, FL_READING);
1258 onenand_update_bufferram(mtd, from, !ret); 1257 onenand_update_bufferram(mtd, from, !ret);
1259 if (mtd_is_eccerr(ret)) 1258 if (ret == -EBADMSG)
1260 ret = 0; 1259 ret = 0;
1261 } 1260 }
1262 } 1261 }
@@ -1292,7 +1291,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1292 thisooblen = oobsize - oobcolumn; 1291 thisooblen = oobsize - oobcolumn;
1293 thisooblen = min_t(int, thisooblen, ooblen - oobread); 1292 thisooblen = min_t(int, thisooblen, ooblen - oobread);
1294 1293
1295 if (ops->mode == MTD_OPS_AUTO_OOB) 1294 if (ops->mode == MTD_OOB_AUTO)
1296 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); 1295 onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
1297 else 1296 else
1298 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); 1297 this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1316,7 +1315,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1316 /* Now wait for load */ 1315 /* Now wait for load */
1317 ret = this->wait(mtd, FL_READING); 1316 ret = this->wait(mtd, FL_READING);
1318 onenand_update_bufferram(mtd, from, !ret); 1317 onenand_update_bufferram(mtd, from, !ret);
1319 if (mtd_is_eccerr(ret)) 1318 if (ret == -EBADMSG)
1320 ret = 0; 1319 ret = 0;
1321 } 1320 }
1322 1321
@@ -1334,8 +1333,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1334 if (mtd->ecc_stats.failed - stats.failed) 1333 if (mtd->ecc_stats.failed - stats.failed)
1335 return -EBADMSG; 1334 return -EBADMSG;
1336 1335
1337 /* return max bitflips per ecc step; ONENANDs correct 1 bit only */ 1336 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1338 return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1339} 1337}
1340 1338
1341/** 1339/**
@@ -1353,19 +1351,19 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1353 struct mtd_ecc_stats stats; 1351 struct mtd_ecc_stats stats;
1354 int read = 0, thislen, column, oobsize; 1352 int read = 0, thislen, column, oobsize;
1355 size_t len = ops->ooblen; 1353 size_t len = ops->ooblen;
1356 unsigned int mode = ops->mode; 1354 mtd_oob_mode_t mode = ops->mode;
1357 u_char *buf = ops->oobbuf; 1355 u_char *buf = ops->oobbuf;
1358 int ret = 0, readcmd; 1356 int ret = 0, readcmd;
1359 1357
1360 from += ops->ooboffs; 1358 from += ops->ooboffs;
1361 1359
1362 pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, 1360 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1363 (int)len); 1361 __func__, (unsigned int) from, (int) len);
1364 1362
1365 /* Initialize return length value */ 1363 /* Initialize return length value */
1366 ops->oobretlen = 0; 1364 ops->oobretlen = 0;
1367 1365
1368 if (mode == MTD_OPS_AUTO_OOB) 1366 if (mode == MTD_OOB_AUTO)
1369 oobsize = this->ecclayout->oobavail; 1367 oobsize = this->ecclayout->oobavail;
1370 else 1368 else
1371 oobsize = mtd->oobsize; 1369 oobsize = mtd->oobsize;
@@ -1405,13 +1403,13 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1405 if (unlikely(ret)) 1403 if (unlikely(ret))
1406 ret = onenand_recover_lsb(mtd, from, ret); 1404 ret = onenand_recover_lsb(mtd, from, ret);
1407 1405
1408 if (ret && !mtd_is_eccerr(ret)) { 1406 if (ret && ret != -EBADMSG) {
1409 printk(KERN_ERR "%s: read failed = 0x%x\n", 1407 printk(KERN_ERR "%s: read failed = 0x%x\n",
1410 __func__, ret); 1408 __func__, ret);
1411 break; 1409 break;
1412 } 1410 }
1413 1411
1414 if (mode == MTD_OPS_AUTO_OOB) 1412 if (mode == MTD_OOB_AUTO)
1415 onenand_transfer_auto_oob(mtd, buf, column, thislen); 1413 onenand_transfer_auto_oob(mtd, buf, column, thislen);
1416 else 1414 else
1417 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 1415 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
@@ -1489,10 +1487,10 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
1489 int ret; 1487 int ret;
1490 1488
1491 switch (ops->mode) { 1489 switch (ops->mode) {
1492 case MTD_OPS_PLACE_OOB: 1490 case MTD_OOB_PLACE:
1493 case MTD_OPS_AUTO_OOB: 1491 case MTD_OOB_AUTO:
1494 break; 1492 break;
1495 case MTD_OPS_RAW: 1493 case MTD_OOB_RAW:
1496 /* Not implemented yet */ 1494 /* Not implemented yet */
1497 default: 1495 default:
1498 return -EINVAL; 1496 return -EINVAL;
@@ -1578,8 +1576,8 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1578 size_t len = ops->ooblen; 1576 size_t len = ops->ooblen;
1579 u_char *buf = ops->oobbuf; 1577 u_char *buf = ops->oobbuf;
1580 1578
1581 pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from, 1579 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
1582 len); 1580 __func__, (unsigned int) from, len);
1583 1581
1584 /* Initialize return value */ 1582 /* Initialize return value */
1585 ops->oobretlen = 0; 1583 ops->oobretlen = 0;
@@ -1752,8 +1750,18 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1752 /* Wait for any existing operation to clear */ 1750 /* Wait for any existing operation to clear */
1753 onenand_panic_wait(mtd); 1751 onenand_panic_wait(mtd);
1754 1752
1755 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 1753 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1756 (int)len); 1754 __func__, (unsigned int) to, (int) len);
1755
1756 /* Initialize retlen, in case of early exit */
1757 *retlen = 0;
1758
1759 /* Do not allow writes past end of device */
1760 if (unlikely((to + len) > mtd->size)) {
1761 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1762 __func__);
1763 return -EINVAL;
1764 }
1757 1765
1758 /* Reject writes, which are not page aligned */ 1766 /* Reject writes, which are not page aligned */
1759 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1767 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
@@ -1813,7 +1821,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1813} 1821}
1814 1822
1815/** 1823/**
1816 * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer 1824 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
1817 * @param mtd MTD device structure 1825 * @param mtd MTD device structure
1818 * @param oob_buf oob buffer 1826 * @param oob_buf oob buffer
1819 * @param buf source address 1827 * @param buf source address
@@ -1875,13 +1883,20 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1875 u_char *oobbuf; 1883 u_char *oobbuf;
1876 int ret = 0, cmd; 1884 int ret = 0, cmd;
1877 1885
1878 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 1886 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1879 (int)len); 1887 __func__, (unsigned int) to, (int) len);
1880 1888
1881 /* Initialize retlen, in case of early exit */ 1889 /* Initialize retlen, in case of early exit */
1882 ops->retlen = 0; 1890 ops->retlen = 0;
1883 ops->oobretlen = 0; 1891 ops->oobretlen = 0;
1884 1892
1893 /* Do not allow writes past end of device */
1894 if (unlikely((to + len) > mtd->size)) {
1895 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1896 __func__);
1897 return -EINVAL;
1898 }
1899
1885 /* Reject writes, which are not page aligned */ 1900 /* Reject writes, which are not page aligned */
1886 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1901 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1887 printk(KERN_ERR "%s: Attempt to write not page aligned data\n", 1902 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -1893,7 +1908,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1893 if (!len) 1908 if (!len)
1894 return 0; 1909 return 0;
1895 1910
1896 if (ops->mode == MTD_OPS_AUTO_OOB) 1911 if (ops->mode == MTD_OOB_AUTO)
1897 oobsize = this->ecclayout->oobavail; 1912 oobsize = this->ecclayout->oobavail;
1898 else 1913 else
1899 oobsize = mtd->oobsize; 1914 oobsize = mtd->oobsize;
@@ -1930,7 +1945,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1930 /* We send data to spare ram with oobsize 1945 /* We send data to spare ram with oobsize
1931 * to prevent byte access */ 1946 * to prevent byte access */
1932 memset(oobbuf, 0xff, mtd->oobsize); 1947 memset(oobbuf, 0xff, mtd->oobsize);
1933 if (ops->mode == MTD_OPS_AUTO_OOB) 1948 if (ops->mode == MTD_OOB_AUTO)
1934 onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); 1949 onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
1935 else 1950 else
1936 memcpy(oobbuf + oobcolumn, oob, thisooblen); 1951 memcpy(oobbuf + oobcolumn, oob, thisooblen);
@@ -2040,7 +2055,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
2040 2055
2041 2056
2042/** 2057/**
2043 * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band 2058 * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band
2044 * @param mtd MTD device structure 2059 * @param mtd MTD device structure
2045 * @param to offset to write to 2060 * @param to offset to write to
2046 * @param len number of bytes to write 2061 * @param len number of bytes to write
@@ -2059,17 +2074,17 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2059 u_char *oobbuf; 2074 u_char *oobbuf;
2060 size_t len = ops->ooblen; 2075 size_t len = ops->ooblen;
2061 const u_char *buf = ops->oobbuf; 2076 const u_char *buf = ops->oobbuf;
2062 unsigned int mode = ops->mode; 2077 mtd_oob_mode_t mode = ops->mode;
2063 2078
2064 to += ops->ooboffs; 2079 to += ops->ooboffs;
2065 2080
2066 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 2081 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
2067 (int)len); 2082 __func__, (unsigned int) to, (int) len);
2068 2083
2069 /* Initialize retlen, in case of early exit */ 2084 /* Initialize retlen, in case of early exit */
2070 ops->oobretlen = 0; 2085 ops->oobretlen = 0;
2071 2086
2072 if (mode == MTD_OPS_AUTO_OOB) 2087 if (mode == MTD_OOB_AUTO)
2073 oobsize = this->ecclayout->oobavail; 2088 oobsize = this->ecclayout->oobavail;
2074 else 2089 else
2075 oobsize = mtd->oobsize; 2090 oobsize = mtd->oobsize;
@@ -2113,7 +2128,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2113 /* We send data to spare ram with oobsize 2128 /* We send data to spare ram with oobsize
2114 * to prevent byte access */ 2129 * to prevent byte access */
2115 memset(oobbuf, 0xff, mtd->oobsize); 2130 memset(oobbuf, 0xff, mtd->oobsize);
2116 if (mode == MTD_OPS_AUTO_OOB) 2131 if (mode == MTD_OOB_AUTO)
2117 onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen); 2132 onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
2118 else 2133 else
2119 memcpy(oobbuf + column, buf, thislen); 2134 memcpy(oobbuf + column, buf, thislen);
@@ -2202,10 +2217,10 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
2202 int ret; 2217 int ret;
2203 2218
2204 switch (ops->mode) { 2219 switch (ops->mode) {
2205 case MTD_OPS_PLACE_OOB: 2220 case MTD_OOB_PLACE:
2206 case MTD_OPS_AUTO_OOB: 2221 case MTD_OOB_AUTO:
2207 break; 2222 break;
2208 case MTD_OPS_RAW: 2223 case MTD_OOB_RAW:
2209 /* Not implemented yet */ 2224 /* Not implemented yet */
2210 default: 2225 default:
2211 return -EINVAL; 2226 return -EINVAL;
@@ -2266,7 +2281,7 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
2266} 2281}
2267 2282
2268/** 2283/**
2269 * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase 2284 * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
2270 * @param mtd MTD device structure 2285 * @param mtd MTD device structure
2271 * @param instr erase instruction 2286 * @param instr erase instruction
2272 * @param region erase region 2287 * @param region erase region
@@ -2382,7 +2397,7 @@ static int onenand_multiblock_erase(struct mtd_info *mtd,
2382 2397
2383 2398
2384/** 2399/**
2385 * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase 2400 * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
2386 * @param mtd MTD device structure 2401 * @param mtd MTD device structure
2387 * @param instr erase instruction 2402 * @param instr erase instruction
2388 * @param region erase region 2403 * @param region erase region
@@ -2474,9 +2489,14 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2474 struct mtd_erase_region_info *region = NULL; 2489 struct mtd_erase_region_info *region = NULL;
2475 loff_t region_offset = 0; 2490 loff_t region_offset = 0;
2476 2491
2477 pr_debug("%s: start=0x%012llx, len=%llu\n", __func__, 2492 DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
2478 (unsigned long long)instr->addr, 2493 (unsigned long long) instr->addr, (unsigned long long) instr->len);
2479 (unsigned long long)instr->len); 2494
2495 /* Do not allow erase past end of device */
2496 if (unlikely((len + addr) > mtd->size)) {
2497 printk(KERN_ERR "%s: Erase past end of device\n", __func__);
2498 return -EINVAL;
2499 }
2480 2500
2481 if (FLEXONENAND(this)) { 2501 if (FLEXONENAND(this)) {
2482 /* Find the eraseregion of this address */ 2502 /* Find the eraseregion of this address */
@@ -2504,6 +2524,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2504 return -EINVAL; 2524 return -EINVAL;
2505 } 2525 }
2506 2526
2527 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2528
2507 /* Grab the lock and see if the device is available */ 2529 /* Grab the lock and see if the device is available */
2508 onenand_get_device(mtd, FL_ERASING); 2530 onenand_get_device(mtd, FL_ERASING);
2509 2531
@@ -2536,7 +2558,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2536 */ 2558 */
2537static void onenand_sync(struct mtd_info *mtd) 2559static void onenand_sync(struct mtd_info *mtd)
2538{ 2560{
2539 pr_debug("%s: called\n", __func__); 2561 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
2540 2562
2541 /* Grab the lock and see if the device is available */ 2563 /* Grab the lock and see if the device is available */
2542 onenand_get_device(mtd, FL_SYNCING); 2564 onenand_get_device(mtd, FL_SYNCING);
@@ -2580,7 +2602,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
2580 struct bbm_info *bbm = this->bbm; 2602 struct bbm_info *bbm = this->bbm;
2581 u_char buf[2] = {0, 0}; 2603 u_char buf[2] = {0, 0};
2582 struct mtd_oob_ops ops = { 2604 struct mtd_oob_ops ops = {
2583 .mode = MTD_OPS_PLACE_OOB, 2605 .mode = MTD_OOB_PLACE,
2584 .ooblen = 2, 2606 .ooblen = 2,
2585 .oobbuf = buf, 2607 .oobbuf = buf,
2586 .ooboffs = 0, 2608 .ooboffs = 0,
@@ -2610,6 +2632,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
2610 */ 2632 */
2611static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) 2633static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2612{ 2634{
2635 struct onenand_chip *this = mtd->priv;
2613 int ret; 2636 int ret;
2614 2637
2615 ret = onenand_block_isbad(mtd, ofs); 2638 ret = onenand_block_isbad(mtd, ofs);
@@ -2621,7 +2644,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2621 } 2644 }
2622 2645
2623 onenand_get_device(mtd, FL_WRITING); 2646 onenand_get_device(mtd, FL_WRITING);
2624 ret = mtd_block_markbad(mtd, ofs); 2647 ret = this->block_markbad(mtd, ofs);
2625 onenand_release_device(mtd); 2648 onenand_release_device(mtd);
2626 return ret; 2649 return ret;
2627} 2650}
@@ -2899,7 +2922,7 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
2899} 2922}
2900 2923
2901/** 2924/**
2902 * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP 2925 * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
2903 * @param mtd MTD device structure 2926 * @param mtd MTD device structure
2904 * @param to offset to write to 2927 * @param to offset to write to
2905 * @param len number of bytes to write 2928 * @param len number of bytes to write
@@ -3147,7 +3170,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
3147 this->command(mtd, ONENAND_CMD_RESET, 0, 0); 3170 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3148 this->wait(mtd, FL_RESETING); 3171 this->wait(mtd, FL_RESETING);
3149 } else { 3172 } else {
3150 ops.mode = MTD_OPS_PLACE_OOB; 3173 ops.mode = MTD_OOB_PLACE;
3151 ops.ooblen = len; 3174 ops.ooblen = len;
3152 ops.oobbuf = buf; 3175 ops.oobbuf = buf;
3153 ops.ooboffs = 0; 3176 ops.ooboffs = 0;
@@ -3406,19 +3429,6 @@ static void onenand_check_features(struct mtd_info *mtd)
3406 else if (numbufs == 1) { 3429 else if (numbufs == 1) {
3407 this->options |= ONENAND_HAS_4KB_PAGE; 3430 this->options |= ONENAND_HAS_4KB_PAGE;
3408 this->options |= ONENAND_HAS_CACHE_PROGRAM; 3431 this->options |= ONENAND_HAS_CACHE_PROGRAM;
3409 /*
3410 * There are two different 4KiB pagesize chips
3411 * and no way to detect it by H/W config values.
3412 *
3413 * To detect the correct NOP for each chips,
3414 * It should check the version ID as workaround.
3415 *
3416 * Now it has as following
3417 * KFM4G16Q4M has NOP 4 with version ID 0x0131
3418 * KFM4G16Q5M has NOP 1 with versoin ID 0x013e
3419 */
3420 if ((this->version_id & 0xf) == 0xe)
3421 this->options |= ONENAND_HAS_NOP_1;
3422 } 3432 }
3423 3433
3424 case ONENAND_DEVICE_DENSITY_2Gb: 3434 case ONENAND_DEVICE_DENSITY_2Gb:
@@ -3653,7 +3663,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
3653 int i, ret; 3663 int i, ret;
3654 int block; 3664 int block;
3655 struct mtd_oob_ops ops = { 3665 struct mtd_oob_ops ops = {
3656 .mode = MTD_OPS_PLACE_OOB, 3666 .mode = MTD_OOB_PLACE,
3657 .ooboffs = 0, 3667 .ooboffs = 0,
3658 .ooblen = mtd->oobsize, 3668 .ooblen = mtd->oobsize,
3659 .datbuf = NULL, 3669 .datbuf = NULL,
@@ -3694,7 +3704,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
3694 * flexonenand_set_boundary - Writes the SLC boundary 3704 * flexonenand_set_boundary - Writes the SLC boundary
3695 * @param mtd - mtd info structure 3705 * @param mtd - mtd info structure
3696 */ 3706 */
3697static int flexonenand_set_boundary(struct mtd_info *mtd, int die, 3707int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3698 int boundary, int lock) 3708 int boundary, int lock)
3699{ 3709{
3700 struct onenand_chip *this = mtd->priv; 3710 struct onenand_chip *this = mtd->priv;
@@ -4044,8 +4054,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4044 this->ecclayout = &onenand_oob_128; 4054 this->ecclayout = &onenand_oob_128;
4045 mtd->subpage_sft = 2; 4055 mtd->subpage_sft = 2;
4046 } 4056 }
4047 if (ONENAND_IS_NOP_1(this))
4048 mtd->subpage_sft = 0;
4049 break; 4057 break;
4050 case 64: 4058 case 64:
4051 this->ecclayout = &onenand_oob_64; 4059 this->ecclayout = &onenand_oob_64;
@@ -4080,34 +4088,33 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4080 mtd->oobavail = this->ecclayout->oobavail; 4088 mtd->oobavail = this->ecclayout->oobavail;
4081 4089
4082 mtd->ecclayout = this->ecclayout; 4090 mtd->ecclayout = this->ecclayout;
4083 mtd->ecc_strength = 1;
4084 4091
4085 /* Fill in remaining MTD driver data */ 4092 /* Fill in remaining MTD driver data */
4086 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH; 4093 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
4087 mtd->flags = MTD_CAP_NANDFLASH; 4094 mtd->flags = MTD_CAP_NANDFLASH;
4088 mtd->_erase = onenand_erase; 4095 mtd->erase = onenand_erase;
4089 mtd->_point = NULL; 4096 mtd->point = NULL;
4090 mtd->_unpoint = NULL; 4097 mtd->unpoint = NULL;
4091 mtd->_read = onenand_read; 4098 mtd->read = onenand_read;
4092 mtd->_write = onenand_write; 4099 mtd->write = onenand_write;
4093 mtd->_read_oob = onenand_read_oob; 4100 mtd->read_oob = onenand_read_oob;
4094 mtd->_write_oob = onenand_write_oob; 4101 mtd->write_oob = onenand_write_oob;
4095 mtd->_panic_write = onenand_panic_write; 4102 mtd->panic_write = onenand_panic_write;
4096#ifdef CONFIG_MTD_ONENAND_OTP 4103#ifdef CONFIG_MTD_ONENAND_OTP
4097 mtd->_get_fact_prot_info = onenand_get_fact_prot_info; 4104 mtd->get_fact_prot_info = onenand_get_fact_prot_info;
4098 mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg; 4105 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
4099 mtd->_get_user_prot_info = onenand_get_user_prot_info; 4106 mtd->get_user_prot_info = onenand_get_user_prot_info;
4100 mtd->_read_user_prot_reg = onenand_read_user_prot_reg; 4107 mtd->read_user_prot_reg = onenand_read_user_prot_reg;
4101 mtd->_write_user_prot_reg = onenand_write_user_prot_reg; 4108 mtd->write_user_prot_reg = onenand_write_user_prot_reg;
4102 mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg; 4109 mtd->lock_user_prot_reg = onenand_lock_user_prot_reg;
4103#endif 4110#endif
4104 mtd->_sync = onenand_sync; 4111 mtd->sync = onenand_sync;
4105 mtd->_lock = onenand_lock; 4112 mtd->lock = onenand_lock;
4106 mtd->_unlock = onenand_unlock; 4113 mtd->unlock = onenand_unlock;
4107 mtd->_suspend = onenand_suspend; 4114 mtd->suspend = onenand_suspend;
4108 mtd->_resume = onenand_resume; 4115 mtd->resume = onenand_resume;
4109 mtd->_block_isbad = onenand_block_isbad; 4116 mtd->block_isbad = onenand_block_isbad;
4110 mtd->_block_markbad = onenand_block_markbad; 4117 mtd->block_markbad = onenand_block_markbad;
4111 mtd->owner = THIS_MODULE; 4118 mtd->owner = THIS_MODULE;
4112 mtd->writebufsize = mtd->writesize; 4119 mtd->writebufsize = mtd->writesize;
4113 4120
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 66fe3b7e785..fc2c16a0fd1 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -15,7 +15,6 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/mtd/onenand.h> 17#include <linux/mtd/onenand.h>
18#include <linux/export.h>
19 18
20/** 19/**
21 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 20 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
@@ -81,7 +80,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
81 startblock = 0; 80 startblock = 0;
82 from = 0; 81 from = 0;
83 82
84 ops.mode = MTD_OPS_PLACE_OOB; 83 ops.mode = MTD_OOB_PLACE;
85 ops.ooblen = readlen; 84 ops.ooblen = readlen;
86 ops.oobbuf = buf; 85 ops.oobbuf = buf;
87 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; 86 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -154,7 +153,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
154 block = (int) (onenand_block(this, offs) << 1); 153 block = (int) (onenand_block(this, offs) << 1);
155 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; 154 res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
156 155
157 pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", 156 DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
158 (unsigned int) offs, block >> 1, res); 157 (unsigned int) offs, block >> 1, res);
159 158
160 switch ((int) res) { 159 switch ((int) res) {
@@ -189,8 +188,10 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
189 len = this->chipsize >> (this->erase_shift + 2); 188 len = this->chipsize >> (this->erase_shift + 2);
190 /* Allocate memory (2bit per block) and clear the memory bad block table */ 189 /* Allocate memory (2bit per block) and clear the memory bad block table */
191 bbm->bbt = kzalloc(len, GFP_KERNEL); 190 bbm->bbt = kzalloc(len, GFP_KERNEL);
192 if (!bbm->bbt) 191 if (!bbm->bbt) {
192 printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
193 return -ENOMEM; 193 return -ENOMEM;
194 }
194 195
195 /* Set the bad block position */ 196 /* Set the bad block position */
196 bbm->badblockpos = ONENAND_BADBLOCK_POS; 197 bbm->badblockpos = ONENAND_BADBLOCK_POS;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 33f2a8fb8df..3306b5b3c73 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,6 +147,7 @@ struct s3c_onenand {
147 struct resource *dma_res; 147 struct resource *dma_res;
148 unsigned long phys_base; 148 unsigned long phys_base;
149 struct completion complete; 149 struct completion complete;
150 struct mtd_partition *parts;
150}; 151};
151 152
152#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) 153#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -156,6 +157,8 @@ struct s3c_onenand {
156 157
157static struct s3c_onenand *onenand; 158static struct s3c_onenand *onenand;
158 159
160static const char *part_probes[] = { "cmdlinepart", NULL, };
161
159static inline int s3c_read_reg(int offset) 162static inline int s3c_read_reg(int offset)
160{ 163{
161 return readl(onenand->base + offset); 164 return readl(onenand->base + offset);
@@ -923,7 +926,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
923 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 926 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
924 if (!r) { 927 if (!r) {
925 dev_err(&pdev->dev, "no buffer memory resource defined\n"); 928 dev_err(&pdev->dev, "no buffer memory resource defined\n");
926 err = -ENOENT; 929 return -ENOENT;
927 goto ahb_resource_failed; 930 goto ahb_resource_failed;
928 } 931 }
929 932
@@ -964,7 +967,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
964 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 967 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
965 if (!r) { 968 if (!r) {
966 dev_err(&pdev->dev, "no dma memory resource defined\n"); 969 dev_err(&pdev->dev, "no dma memory resource defined\n");
967 err = -ENOENT; 970 return -ENOENT;
968 goto dma_resource_failed; 971 goto dma_resource_failed;
969 } 972 }
970 973
@@ -1014,9 +1017,13 @@ static int s3c_onenand_probe(struct platform_device *pdev)
1014 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) 1017 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
1015 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); 1018 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
1016 1019
1017 err = mtd_device_parse_register(mtd, NULL, NULL, 1020 err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
1018 pdata ? pdata->parts : NULL, 1021 if (err > 0)
1019 pdata ? pdata->nr_parts : 0); 1022 mtd_device_register(mtd, onenand->parts, err);
1023 else if (err <= 0 && pdata && pdata->parts)
1024 mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
1025 else
1026 err = mtd_device_register(mtd, NULL, 0);
1020 1027
1021 platform_set_drvdata(pdev, mtd); 1028 platform_set_drvdata(pdev, mtd);
1022 1029
@@ -1053,7 +1060,7 @@ onenand_fail:
1053 return err; 1060 return err;
1054} 1061}
1055 1062
1056static int s3c_onenand_remove(struct platform_device *pdev) 1063static int __devexit s3c_onenand_remove(struct platform_device *pdev)
1057{ 1064{
1058 struct mtd_info *mtd = platform_get_drvdata(pdev); 1065 struct mtd_info *mtd = platform_get_drvdata(pdev);
1059 1066
@@ -1130,10 +1137,21 @@ static struct platform_driver s3c_onenand_driver = {
1130 }, 1137 },
1131 .id_table = s3c_onenand_driver_ids, 1138 .id_table = s3c_onenand_driver_ids,
1132 .probe = s3c_onenand_probe, 1139 .probe = s3c_onenand_probe,
1133 .remove = s3c_onenand_remove, 1140 .remove = __devexit_p(s3c_onenand_remove),
1134}; 1141};
1135 1142
1136module_platform_driver(s3c_onenand_driver); 1143static int __init s3c_onenand_init(void)
1144{
1145 return platform_driver_register(&s3c_onenand_driver);
1146}
1147
1148static void __exit s3c_onenand_exit(void)
1149{
1150 platform_driver_unregister(&s3c_onenand_driver);
1151}
1152
1153module_init(s3c_onenand_init);
1154module_exit(s3c_onenand_exit);
1137 1155
1138MODULE_LICENSE("GPL"); 1156MODULE_LICENSE("GPL");
1139MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); 1157MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 580035c803d..4938bd0b024 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -28,7 +28,6 @@
28 28
29#include <linux/mtd/mtd.h> 29#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h> 30#include <linux/mtd/partitions.h>
31#include <linux/module.h>
32 31
33struct fis_image_desc { 32struct fis_image_desc {
34 unsigned char name[16]; // Null terminated name 33 unsigned char name[16]; // Null terminated name
@@ -57,8 +56,8 @@ static inline int redboot_checksum(struct fis_image_desc *img)
57} 56}
58 57
59static int parse_redboot_partitions(struct mtd_info *master, 58static int parse_redboot_partitions(struct mtd_info *master,
60 struct mtd_partition **pparts, 59 struct mtd_partition **pparts,
61 struct mtd_part_parser_data *data) 60 unsigned long fis_origin)
62{ 61{
63 int nrparts = 0; 62 int nrparts = 0;
64 struct fis_image_desc *buf; 63 struct fis_image_desc *buf;
@@ -78,7 +77,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
78 77
79 if ( directory < 0 ) { 78 if ( directory < 0 ) {
80 offset = master->size + directory * master->erasesize; 79 offset = master->size + directory * master->erasesize;
81 while (mtd_block_isbad(master, offset)) { 80 while (master->block_isbad &&
81 master->block_isbad(master, offset)) {
82 if (!offset) { 82 if (!offset) {
83 nogood: 83 nogood:
84 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); 84 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -88,7 +88,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
88 } 88 }
89 } else { 89 } else {
90 offset = directory * master->erasesize; 90 offset = directory * master->erasesize;
91 while (mtd_block_isbad(master, offset)) { 91 while (master->block_isbad &&
92 master->block_isbad(master, offset)) {
92 offset += master->erasesize; 93 offset += master->erasesize;
93 if (offset == master->size) 94 if (offset == master->size)
94 goto nogood; 95 goto nogood;
@@ -102,8 +103,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
102 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 103 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
103 master->name, offset); 104 master->name, offset);
104 105
105 ret = mtd_read(master, offset, master->erasesize, &retlen, 106 ret = master->read(master, offset,
106 (void *)buf); 107 master->erasesize, &retlen, (void *)buf);
107 108
108 if (ret) 109 if (ret)
109 goto out; 110 goto out;
@@ -196,10 +197,11 @@ static int parse_redboot_partitions(struct mtd_info *master,
196 goto out; 197 goto out;
197 } 198 }
198 new_fl->img = &buf[i]; 199 new_fl->img = &buf[i];
199 if (data && data->origin) 200 if (fis_origin) {
200 buf[i].flash_base -= data->origin; 201 buf[i].flash_base -= fis_origin;
201 else 202 } else {
202 buf[i].flash_base &= master->size-1; 203 buf[i].flash_base &= master->size-1;
204 }
203 205
204 /* I'm sure the JFFS2 code has done me permanent damage. 206 /* I'm sure the JFFS2 code has done me permanent damage.
205 * I now think the following is _normal_ 207 * I now think the following is _normal_
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 233b946e5d6..cc4d1805b86 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -18,7 +18,6 @@
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/jiffies.h> 20#include <linux/jiffies.h>
21#include <linux/module.h>
22 21
23#include <asm/types.h> 22#include <asm/types.h>
24 23
@@ -200,9 +199,9 @@ static int scan_header(struct partition *part)
200 part->sector_map[i] = -1; 199 part->sector_map[i] = -1;
201 200
202 for (i=0, blocks_found=0; i<part->total_blocks; i++) { 201 for (i=0, blocks_found=0; i<part->total_blocks; i++) {
203 rc = mtd_read(part->mbd.mtd, i * part->block_size, 202 rc = part->mbd.mtd->read(part->mbd.mtd,
204 part->header_size, &retlen, 203 i * part->block_size, part->header_size,
205 (u_char *)part->header_cache); 204 &retlen, (u_char*)part->header_cache);
206 205
207 if (!rc && retlen != part->header_size) 206 if (!rc && retlen != part->header_size)
208 rc = -EIO; 207 rc = -EIO;
@@ -250,8 +249,8 @@ static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *b
250 249
251 addr = part->sector_map[sector]; 250 addr = part->sector_map[sector];
252 if (addr != -1) { 251 if (addr != -1) {
253 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, 252 rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
254 (u_char *)buf); 253 &retlen, (u_char*)buf);
255 if (!rc && retlen != SECTOR_SIZE) 254 if (!rc && retlen != SECTOR_SIZE)
256 rc = -EIO; 255 rc = -EIO;
257 256
@@ -304,8 +303,9 @@ static void erase_callback(struct erase_info *erase)
304 part->blocks[i].used_sectors = 0; 303 part->blocks[i].used_sectors = 0;
305 part->blocks[i].erases++; 304 part->blocks[i].erases++;
306 305
307 rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic), 306 rc = part->mbd.mtd->write(part->mbd.mtd,
308 &retlen, (u_char *)&magic); 307 part->blocks[i].offset, sizeof(magic), &retlen,
308 (u_char*)&magic);
309 309
310 if (!rc && retlen != sizeof(magic)) 310 if (!rc && retlen != sizeof(magic))
311 rc = -EIO; 311 rc = -EIO;
@@ -341,7 +341,7 @@ static int erase_block(struct partition *part, int block)
341 part->blocks[block].state = BLOCK_ERASING; 341 part->blocks[block].state = BLOCK_ERASING;
342 part->blocks[block].free_sectors = 0; 342 part->blocks[block].free_sectors = 0;
343 343
344 rc = mtd_erase(part->mbd.mtd, erase); 344 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
345 345
346 if (rc) { 346 if (rc) {
347 printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' " 347 printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
@@ -371,8 +371,9 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
371 if (!map) 371 if (!map)
372 goto err2; 372 goto err2;
373 373
374 rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset, 374 rc = part->mbd.mtd->read(part->mbd.mtd,
375 part->header_size, &retlen, (u_char *)map); 375 part->blocks[block_no].offset, part->header_size,
376 &retlen, (u_char*)map);
376 377
377 if (!rc && retlen != part->header_size) 378 if (!rc && retlen != part->header_size)
378 rc = -EIO; 379 rc = -EIO;
@@ -411,8 +412,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
411 } 412 }
412 continue; 413 continue;
413 } 414 }
414 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, 415 rc = part->mbd.mtd->read(part->mbd.mtd, addr,
415 sector_data); 416 SECTOR_SIZE, &retlen, sector_data);
416 417
417 if (!rc && retlen != SECTOR_SIZE) 418 if (!rc && retlen != SECTOR_SIZE)
418 rc = -EIO; 419 rc = -EIO;
@@ -448,7 +449,8 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
448 int rc; 449 int rc;
449 450
450 /* we have a race if sync doesn't exist */ 451 /* we have a race if sync doesn't exist */
451 mtd_sync(part->mbd.mtd); 452 if (part->mbd.mtd->sync)
453 part->mbd.mtd->sync(part->mbd.mtd);
452 454
453 score = 0x7fffffff; /* MAX_INT */ 455 score = 0x7fffffff; /* MAX_INT */
454 best_block = -1; 456 best_block = -1;
@@ -560,9 +562,8 @@ static int find_writable_block(struct partition *part, u_long *old_sector)
560 } 562 }
561 } 563 }
562 564
563 rc = mtd_read(part->mbd.mtd, part->blocks[block].offset, 565 rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
564 part->header_size, &retlen, 566 part->header_size, &retlen, (u_char*)part->header_cache);
565 (u_char *)part->header_cache);
566 567
567 if (!rc && retlen != part->header_size) 568 if (!rc && retlen != part->header_size)
568 rc = -EIO; 569 rc = -EIO;
@@ -593,8 +594,8 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
593 594
594 addr = part->blocks[block].offset + 595 addr = part->blocks[block].offset +
595 (HEADER_MAP_OFFSET + offset) * sizeof(u16); 596 (HEADER_MAP_OFFSET + offset) * sizeof(u16);
596 rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen, 597 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
597 (u_char *)&del); 598 sizeof(del), &retlen, (u_char*)&del);
598 599
599 if (!rc && retlen != sizeof(del)) 600 if (!rc && retlen != sizeof(del))
600 rc = -EIO; 601 rc = -EIO;
@@ -666,8 +667,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
666 667
667 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE + 668 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
668 block->offset; 669 block->offset;
669 rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, 670 rc = part->mbd.mtd->write(part->mbd.mtd,
670 (u_char *)buf); 671 addr, SECTOR_SIZE, &retlen, (u_char*)buf);
671 672
672 if (!rc && retlen != SECTOR_SIZE) 673 if (!rc && retlen != SECTOR_SIZE)
673 rc = -EIO; 674 rc = -EIO;
@@ -686,8 +687,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
686 part->header_cache[i + HEADER_MAP_OFFSET] = entry; 687 part->header_cache[i + HEADER_MAP_OFFSET] = entry;
687 688
688 addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16); 689 addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
689 rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen, 690 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
690 (u_char *)&entry); 691 sizeof(entry), &retlen, (u_char*)&entry);
691 692
692 if (!rc && retlen != sizeof(entry)) 693 if (!rc && retlen != sizeof(entry))
693 rc = -EIO; 694 rc = -EIO;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 8dd6ba52404..ed3d6cd2c6d 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -25,7 +25,7 @@
25struct workqueue_struct *cache_flush_workqueue; 25struct workqueue_struct *cache_flush_workqueue;
26 26
27static int cache_timeout = 1000; 27static int cache_timeout = 1000;
28module_param(cache_timeout, int, S_IRUGO); 28module_param(cache_timeout, bool, S_IRUGO);
29MODULE_PARM_DESC(cache_timeout, 29MODULE_PARM_DESC(cache_timeout,
30 "Timeout (in ms) for cache flush (1000 ms default"); 30 "Timeout (in ms) for cache flush (1000 ms default");
31 31
@@ -34,7 +34,7 @@ module_param(debug, int, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(debug, "Debug level (0-2)"); 34MODULE_PARM_DESC(debug, "Debug level (0-2)");
35 35
36 36
37/* ------------------- sysfs attributes ---------------------------------- */ 37/* ------------------- sysfs attributtes ---------------------------------- */
38struct sm_sysfs_attribute { 38struct sm_sysfs_attribute {
39 struct device_attribute dev_attr; 39 struct device_attribute dev_attr;
40 char *data; 40 char *data;
@@ -138,7 +138,7 @@ static int sm_get_lba(uint8_t *lba)
138 if ((lba[0] & 0xF8) != 0x10) 138 if ((lba[0] & 0xF8) != 0x10)
139 return -2; 139 return -2;
140 140
141 /* check parity - endianness doesn't matter */ 141 /* check parity - endianess doesn't matter */
142 if (hweight16(*(uint16_t *)lba) & 1) 142 if (hweight16(*(uint16_t *)lba) & 1)
143 return -2; 143 return -2;
144 144
@@ -147,7 +147,7 @@ static int sm_get_lba(uint8_t *lba)
147 147
148 148
149/* 149/*
150 * Read LBA associated with block 150 * Read LBA asscociated with block
151 * returns -1, if block is erased 151 * returns -1, if block is erased
152 * returns -2 if error happens 152 * returns -2 if error happens
153 */ 153 */
@@ -252,11 +252,11 @@ static int sm_read_sector(struct sm_ftl *ftl,
252 return 0; 252 return 0;
253 } 253 }
254 254
255 /* User might not need the oob, but we do for data verification */ 255 /* User might not need the oob, but we do for data vertification */
256 if (!oob) 256 if (!oob)
257 oob = &tmp_oob; 257 oob = &tmp_oob;
258 258
259 ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB; 259 ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
260 ops.ooboffs = 0; 260 ops.ooboffs = 0;
261 ops.ooblen = SM_OOB_SIZE; 261 ops.ooblen = SM_OOB_SIZE;
262 ops.oobbuf = (void *)oob; 262 ops.oobbuf = (void *)oob;
@@ -276,12 +276,12 @@ again:
276 return ret; 276 return ret;
277 } 277 }
278 278
279 /* Unfortunately, oob read will _always_ succeed, 279 /* Unfortunelly, oob read will _always_ succeed,
280 despite card removal..... */ 280 despite card removal..... */
281 ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 281 ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
282 282
283 /* Test for unknown errors */ 283 /* Test for unknown errors */
284 if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) { 284 if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
285 dbg("read of block %d at zone %d, failed due to error (%d)", 285 dbg("read of block %d at zone %d, failed due to error (%d)",
286 block, zone, ret); 286 block, zone, ret);
287 goto again; 287 goto again;
@@ -306,7 +306,7 @@ again:
306 } 306 }
307 307
308 /* Test ECC*/ 308 /* Test ECC*/
309 if (mtd_is_eccerr(ret) || 309 if (ret == -EBADMSG ||
310 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) { 310 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
311 311
312 dbg("read of block %d at zone %d, failed due to ECC error", 312 dbg("read of block %d at zone %d, failed due to ECC error",
@@ -336,16 +336,17 @@ static int sm_write_sector(struct sm_ftl *ftl,
336 if (ftl->unstable) 336 if (ftl->unstable)
337 return -EIO; 337 return -EIO;
338 338
339 ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB; 339 ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
340 ops.len = SM_SECTOR_SIZE; 340 ops.len = SM_SECTOR_SIZE;
341 ops.datbuf = buffer; 341 ops.datbuf = buffer;
342 ops.ooboffs = 0; 342 ops.ooboffs = 0;
343 ops.ooblen = SM_OOB_SIZE; 343 ops.ooblen = SM_OOB_SIZE;
344 ops.oobbuf = (void *)oob; 344 ops.oobbuf = (void *)oob;
345 345
346 ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 346 ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
347 347
348 /* Now we assume that hardware will catch write bitflip errors */ 348 /* Now we assume that hardware will catch write bitflip errors */
349 /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
349 350
350 if (ret) { 351 if (ret) {
351 dbg("write to block %d at zone %d, failed with error %d", 352 dbg("write to block %d at zone %d, failed with error %d",
@@ -446,14 +447,14 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
446 447
447 /* We aren't checking the return value, because we don't care */ 448 /* We aren't checking the return value, because we don't care */
448 /* This also fails on fake xD cards, but I guess these won't expose 449 /* This also fails on fake xD cards, but I guess these won't expose
449 any bad blocks till fail completely */ 450 any bad blocks till fail completly */
450 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) 451 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
451 sm_write_sector(ftl, zone, block, boffset, NULL, &oob); 452 sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
452} 453}
453 454
454/* 455/*
455 * Erase a block within a zone 456 * Erase a block within a zone
456 * If erase succeeds, it updates free block fifo, otherwise marks block as bad 457 * If erase succedes, it updates free block fifo, otherwise marks block as bad
457 */ 458 */
458static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, 459static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
459 int put_free) 460 int put_free)
@@ -478,7 +479,7 @@ static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
478 return -EIO; 479 return -EIO;
479 } 480 }
480 481
481 if (mtd_erase(mtd, &erase)) { 482 if (mtd->erase(mtd, &erase)) {
482 sm_printk("erase of block %d in zone %d failed", 483 sm_printk("erase of block %d in zone %d failed",
483 block, zone_num); 484 block, zone_num);
484 goto error; 485 goto error;
@@ -509,7 +510,7 @@ static void sm_erase_callback(struct erase_info *self)
509 complete(&ftl->erase_completion); 510 complete(&ftl->erase_completion);
510} 511}
511 512
512/* Thoroughly test that block is valid. */ 513/* Throughtly test that block is valid. */
513static int sm_check_block(struct sm_ftl *ftl, int zone, int block) 514static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
514{ 515{
515 int boffset; 516 int boffset;
@@ -525,7 +526,7 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
525 for (boffset = 0; boffset < ftl->block_size; 526 for (boffset = 0; boffset < ftl->block_size;
526 boffset += SM_SECTOR_SIZE) { 527 boffset += SM_SECTOR_SIZE) {
527 528
528 /* This shouldn't happen anyway */ 529 /* This shoudn't happen anyway */
529 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob)) 530 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
530 return -2; 531 return -2;
531 532
@@ -644,8 +645,8 @@ int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
644 if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE) 645 if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
645 return -ENODEV; 646 return -ENODEV;
646 647
647 /* We use OOB */ 648 /* We use these functions for IO */
648 if (!mtd_has_oob(mtd)) 649 if (!mtd->read_oob || !mtd->write_oob)
649 return -ENODEV; 650 return -ENODEV;
650 651
651 /* Find geometry information */ 652 /* Find geometry information */
@@ -1255,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1255 1256
1256static struct mtd_blktrans_ops sm_ftl_ops = { 1257static struct mtd_blktrans_ops sm_ftl_ops = {
1257 .name = "smblk", 1258 .name = "smblk",
1258 .major = 0, 1259 .major = -1,
1259 .part_bits = SM_FTL_PARTN_BITS, 1260 .part_bits = SM_FTL_PARTN_BITS,
1260 .blksize = SM_SECTOR_SIZE, 1261 .blksize = SM_SECTOR_SIZE,
1261 .getgeo = sm_getgeo, 1262 .getgeo = sm_getgeo,
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index ab2a52a039c..5cd18979333 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -122,9 +122,9 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
122 * is not SSFDC formatted 122 * is not SSFDC formatted
123 */ 123 */
124 for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) { 124 for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
125 if (mtd_block_isbad(mtd, offset)) { 125 if (!mtd->block_isbad(mtd, offset)) {
126 ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, 126 ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
127 sect_buf); 127 sect_buf);
128 128
129 /* CIS pattern match on the sector buffer */ 129 /* CIS pattern match on the sector buffer */
130 if (ret < 0 || retlen != SECTOR_SIZE) { 130 if (ret < 0 || retlen != SECTOR_SIZE) {
@@ -135,7 +135,8 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
135 /* Found */ 135 /* Found */
136 cis_sector = (int)(offset >> SECTOR_SHIFT); 136 cis_sector = (int)(offset >> SECTOR_SHIFT);
137 } else { 137 } else {
138 pr_debug("SSFDC_RO: CIS/IDI sector not found" 138 DEBUG(MTD_DEBUG_LEVEL1,
139 "SSFDC_RO: CIS/IDI sector not found"
139 " on %s (mtd%d)\n", mtd->name, 140 " on %s (mtd%d)\n", mtd->name,
140 mtd->index); 141 mtd->index);
141 } 142 }
@@ -156,7 +157,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
156 size_t retlen; 157 size_t retlen;
157 loff_t offset = (loff_t)sect_no << SECTOR_SHIFT; 158 loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
158 159
159 ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf); 160 ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
160 if (ret < 0 || retlen != SECTOR_SIZE) 161 if (ret < 0 || retlen != SECTOR_SIZE)
161 return -1; 162 return -1;
162 163
@@ -169,13 +170,13 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
169 struct mtd_oob_ops ops; 170 struct mtd_oob_ops ops;
170 int ret; 171 int ret;
171 172
172 ops.mode = MTD_OPS_RAW; 173 ops.mode = MTD_OOB_RAW;
173 ops.ooboffs = 0; 174 ops.ooboffs = 0;
174 ops.ooblen = OOB_SIZE; 175 ops.ooblen = OOB_SIZE;
175 ops.oobbuf = buf; 176 ops.oobbuf = buf;
176 ops.datbuf = NULL; 177 ops.datbuf = NULL;
177 178
178 ret = mtd_read_oob(mtd, offs, &ops); 179 ret = mtd->read_oob(mtd, offs, &ops);
179 if (ret < 0 || ops.oobretlen != OOB_SIZE) 180 if (ret < 0 || ops.oobretlen != OOB_SIZE)
180 return -1; 181 return -1;
181 182
@@ -220,7 +221,8 @@ static int get_logical_address(uint8_t *oob_buf)
220 block_address >>= 1; 221 block_address >>= 1;
221 222
222 if (get_parity(block_address, 10) != parity) { 223 if (get_parity(block_address, 10) != parity) {
223 pr_debug("SSFDC_RO: logical address field%d" 224 DEBUG(MTD_DEBUG_LEVEL0,
225 "SSFDC_RO: logical address field%d"
224 "parity error(0x%04X)\n", j+1, 226 "parity error(0x%04X)\n", j+1,
225 block_address); 227 block_address);
226 } else { 228 } else {
@@ -233,7 +235,7 @@ static int get_logical_address(uint8_t *oob_buf)
233 if (!ok) 235 if (!ok)
234 block_address = -2; 236 block_address = -2;
235 237
236 pr_debug("SSFDC_RO: get_logical_address() %d\n", 238 DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
237 block_address); 239 block_address);
238 240
239 return block_address; 241 return block_address;
@@ -247,7 +249,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
247 int ret, block_address, phys_block; 249 int ret, block_address, phys_block;
248 struct mtd_info *mtd = ssfdc->mbd.mtd; 250 struct mtd_info *mtd = ssfdc->mbd.mtd;
249 251
250 pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n", 252 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
251 ssfdc->map_len, 253 ssfdc->map_len,
252 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024); 254 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
253 255
@@ -255,12 +257,13 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
255 for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len; 257 for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
256 phys_block++) { 258 phys_block++) {
257 offset = (unsigned long)phys_block * ssfdc->erase_size; 259 offset = (unsigned long)phys_block * ssfdc->erase_size;
258 if (mtd_block_isbad(mtd, offset)) 260 if (mtd->block_isbad(mtd, offset))
259 continue; /* skip bad blocks */ 261 continue; /* skip bad blocks */
260 262
261 ret = read_raw_oob(mtd, offset, oob_buf); 263 ret = read_raw_oob(mtd, offset, oob_buf);
262 if (ret < 0) { 264 if (ret < 0) {
263 pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n", 265 DEBUG(MTD_DEBUG_LEVEL0,
266 "SSFDC_RO: mtd read_oob() failed at %lu\n",
264 offset); 267 offset);
265 return -1; 268 return -1;
266 } 269 }
@@ -276,7 +279,8 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
276 ssfdc->logic_block_map[block_address] = 279 ssfdc->logic_block_map[block_address] =
277 (unsigned short)phys_block; 280 (unsigned short)phys_block;
278 281
279 pr_debug("SSFDC_RO: build_block_map() phys_block=%d," 282 DEBUG(MTD_DEBUG_LEVEL2,
283 "SSFDC_RO: build_block_map() phys_block=%d,"
280 "logic_block_addr=%d, zone=%d\n", 284 "logic_block_addr=%d, zone=%d\n",
281 phys_block, block_address, zone_index); 285 phys_block, block_address, zone_index);
282 } 286 }
@@ -300,8 +304,11 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
300 return; 304 return;
301 305
302 ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL); 306 ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
303 if (!ssfdc) 307 if (!ssfdc) {
308 printk(KERN_WARNING
309 "SSFDC_RO: out of memory for data structures\n");
304 return; 310 return;
311 }
305 312
306 ssfdc->mbd.mtd = mtd; 313 ssfdc->mbd.mtd = mtd;
307 ssfdc->mbd.devnum = -1; 314 ssfdc->mbd.devnum = -1;
@@ -312,7 +319,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
312 ssfdc->erase_size = mtd->erasesize; 319 ssfdc->erase_size = mtd->erasesize;
313 ssfdc->map_len = (u32)mtd->size / mtd->erasesize; 320 ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
314 321
315 pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", 322 DEBUG(MTD_DEBUG_LEVEL1,
323 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
316 ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, 324 ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
317 DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE)); 325 DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
318 326
@@ -323,7 +331,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
323 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / 331 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
324 ((long)ssfdc->sectors * (long)ssfdc->heads)); 332 ((long)ssfdc->sectors * (long)ssfdc->heads));
325 333
326 pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", 334 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
327 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, 335 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
328 (long)ssfdc->cylinders * (long)ssfdc->heads * 336 (long)ssfdc->cylinders * (long)ssfdc->heads *
329 (long)ssfdc->sectors); 337 (long)ssfdc->sectors);
@@ -334,8 +342,11 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
334 /* Allocate logical block map */ 342 /* Allocate logical block map */
335 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) * 343 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
336 ssfdc->map_len, GFP_KERNEL); 344 ssfdc->map_len, GFP_KERNEL);
337 if (!ssfdc->logic_block_map) 345 if (!ssfdc->logic_block_map) {
346 printk(KERN_WARNING
347 "SSFDC_RO: out of memory for data structures\n");
338 goto out_err; 348 goto out_err;
349 }
339 memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) * 350 memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
340 ssfdc->map_len); 351 ssfdc->map_len);
341 352
@@ -360,7 +371,7 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
360{ 371{
361 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; 372 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
362 373
363 pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); 374 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
364 375
365 del_mtd_blktrans_dev(dev); 376 del_mtd_blktrans_dev(dev);
366 kfree(ssfdc->logic_block_map); 377 kfree(ssfdc->logic_block_map);
@@ -376,7 +387,8 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
376 offset = (int)(logic_sect_no % sectors_per_block); 387 offset = (int)(logic_sect_no % sectors_per_block);
377 block_address = (int)(logic_sect_no / sectors_per_block); 388 block_address = (int)(logic_sect_no / sectors_per_block);
378 389
379 pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d," 390 DEBUG(MTD_DEBUG_LEVEL3,
391 "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
380 " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, 392 " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
381 block_address); 393 block_address);
382 394
@@ -385,7 +397,8 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
385 397
386 block_address = ssfdc->logic_block_map[block_address]; 398 block_address = ssfdc->logic_block_map[block_address];
387 399
388 pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n", 400 DEBUG(MTD_DEBUG_LEVEL3,
401 "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
389 block_address); 402 block_address);
390 403
391 if (block_address < 0xffff) { 404 if (block_address < 0xffff) {
@@ -394,7 +407,8 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
394 sect_no = (unsigned long)block_address * sectors_per_block + 407 sect_no = (unsigned long)block_address * sectors_per_block +
395 offset; 408 offset;
396 409
397 pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", 410 DEBUG(MTD_DEBUG_LEVEL3,
411 "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
398 sect_no); 412 sect_no);
399 413
400 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0) 414 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
@@ -410,7 +424,7 @@ static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
410{ 424{
411 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; 425 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
412 426
413 pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", 427 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
414 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); 428 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
415 429
416 geo->heads = ssfdc->heads; 430 geo->heads = ssfdc->heads;
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index bd0065c0d35..b44dcab940d 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -6,4 +6,3 @@ obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o 6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o 7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
8obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o 8obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
9obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
deleted file mode 100644
index 207bf9a9972..00000000000
--- a/drivers/mtd/tests/mtd_nandbiterrs.c
+++ /dev/null
@@ -1,461 +0,0 @@
1/*
2 * Copyright © 2012 NetCommWireless
3 * Iwo Mergler <Iwo.Mergler@netcommwireless.com.au>
4 *
5 * Test for multi-bit error recovery on a NAND page This mostly tests the
6 * ECC controller / driver.
7 *
8 * There are two test modes:
9 *
10 * 0 - artificially inserting bit errors until the ECC fails
11 * This is the default method and fairly quick. It should
12 * be independent of the quality of the FLASH.
13 *
14 * 1 - re-writing the same pattern repeatedly until the ECC fails.
15 * This method relies on the physics of NAND FLASH to eventually
16 * generate '0' bits if '1' has been written sufficient times.
17 * Depending on the NAND, the first bit errors will appear after
18 * 1000 or more writes and then will usually snowball, reaching the
19 * limits of the ECC quickly.
20 *
21 * The test stops after 10000 cycles, should your FLASH be
22 * exceptionally good and not generate bit errors before that. Try
23 * a different page in that case.
24 *
25 * Please note that neither of these tests will significantly 'use up' any
26 * FLASH endurance. Only a maximum of two erase operations will be performed.
27 *
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License version 2 as published by
31 * the Free Software Foundation.
32 *
33 * This program is distributed in the hope that it will be useful, but WITHOUT
34 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
35 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
36 * more details.
37 *
38 * You should have received a copy of the GNU General Public License along with
39 * this program; see the file COPYING. If not, write to the Free Software
40 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45#include <linux/init.h>
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/mtd/mtd.h>
49#include <linux/err.h>
50#include <linux/mtd/nand.h>
51#include <linux/slab.h>
52
53static int dev;
54module_param(dev, int, S_IRUGO);
55MODULE_PARM_DESC(dev, "MTD device number to use");
56
57static unsigned page_offset;
58module_param(page_offset, uint, S_IRUGO);
59MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
60
61static unsigned seed;
62module_param(seed, uint, S_IRUGO);
63MODULE_PARM_DESC(seed, "Random seed");
64
65static int mode;
66module_param(mode, int, S_IRUGO);
67MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
68
69static unsigned max_overwrite = 10000;
70
71static loff_t offset; /* Offset of the page we're using. */
72static unsigned eraseblock; /* Eraseblock number for our page. */
73
74/* We assume that the ECC can correct up to a certain number
75 * of biterrors per subpage. */
76static unsigned subsize; /* Size of subpages */
77static unsigned subcount; /* Number of subpages per page */
78
79static struct mtd_info *mtd; /* MTD device */
80
81static uint8_t *wbuffer; /* One page write / compare buffer */
82static uint8_t *rbuffer; /* One page read buffer */
83
84/* 'random' bytes from known offsets */
85static uint8_t hash(unsigned offset)
86{
87 unsigned v = offset;
88 unsigned char c;
89 v ^= 0x7f7edfd3;
90 v = v ^ (v >> 3);
91 v = v ^ (v >> 5);
92 v = v ^ (v >> 13);
93 c = v & 0xFF;
94 /* Reverse bits of result. */
95 c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
96 c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
97 c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
98 return c;
99}
100
101static int erase_block(void)
102{
103 int err;
104 struct erase_info ei;
105 loff_t addr = eraseblock * mtd->erasesize;
106
107 pr_info("erase_block\n");
108
109 memset(&ei, 0, sizeof(struct erase_info));
110 ei.mtd = mtd;
111 ei.addr = addr;
112 ei.len = mtd->erasesize;
113
114 err = mtd_erase(mtd, &ei);
115 if (err || ei.state == MTD_ERASE_FAILED) {
116 pr_err("error %d while erasing\n", err);
117 if (!err)
118 err = -EIO;
119 return err;
120 }
121
122 return 0;
123}
124
125/* Writes wbuffer to page */
126static int write_page(int log)
127{
128 int err = 0;
129 size_t written;
130
131 if (log)
132 pr_info("write_page\n");
133
134 err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
135 if (err || written != mtd->writesize) {
136 pr_err("error: write failed at %#llx\n", (long long)offset);
137 if (!err)
138 err = -EIO;
139 }
140
141 return err;
142}
143
144/* Re-writes the data area while leaving the OOB alone. */
145static int rewrite_page(int log)
146{
147 int err = 0;
148 struct mtd_oob_ops ops;
149
150 if (log)
151 pr_info("rewrite page\n");
152
153 ops.mode = MTD_OPS_RAW; /* No ECC */
154 ops.len = mtd->writesize;
155 ops.retlen = 0;
156 ops.ooblen = 0;
157 ops.oobretlen = 0;
158 ops.ooboffs = 0;
159 ops.datbuf = wbuffer;
160 ops.oobbuf = NULL;
161
162 err = mtd_write_oob(mtd, offset, &ops);
163 if (err || ops.retlen != mtd->writesize) {
164 pr_err("error: write_oob failed (%d)\n", err);
165 if (!err)
166 err = -EIO;
167 }
168
169 return err;
170}
171
172/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
173 * or error (<0) */
174static int read_page(int log)
175{
176 int err = 0;
177 size_t read;
178 struct mtd_ecc_stats oldstats;
179
180 if (log)
181 pr_info("read_page\n");
182
183 /* Saving last mtd stats */
184 memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
185
186 err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
187 if (err == -EUCLEAN)
188 err = mtd->ecc_stats.corrected - oldstats.corrected;
189
190 if (err < 0 || read != mtd->writesize) {
191 pr_err("error: read failed at %#llx\n", (long long)offset);
192 if (err >= 0)
193 err = -EIO;
194 }
195
196 return err;
197}
198
199/* Verifies rbuffer against random sequence */
200static int verify_page(int log)
201{
202 unsigned i, errs = 0;
203
204 if (log)
205 pr_info("verify_page\n");
206
207 for (i = 0; i < mtd->writesize; i++) {
208 if (rbuffer[i] != hash(i+seed)) {
209 pr_err("Error: page offset %u, expected %02x, got %02x\n",
210 i, hash(i+seed), rbuffer[i]);
211 errs++;
212 }
213 }
214
215 if (errs)
216 return -EIO;
217 else
218 return 0;
219}
220
221#define CBIT(v, n) ((v) & (1 << (n)))
222#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
223
224/* Finds the first '1' bit in wbuffer starting at offset 'byte'
225 * and sets it to '0'. */
226static int insert_biterror(unsigned byte)
227{
228 int bit;
229
230 while (byte < mtd->writesize) {
231 for (bit = 7; bit >= 0; bit--) {
232 if (CBIT(wbuffer[byte], bit)) {
233 BCLR(wbuffer[byte], bit);
234 pr_info("Inserted biterror @ %u/%u\n", byte, bit);
235 return 0;
236 }
237 }
238 byte++;
239 }
240 pr_err("biterror: Failed to find a '1' bit\n");
241 return -EIO;
242}
243
244/* Writes 'random' data to page and then introduces deliberate bit
245 * errors into the page, while verifying each step. */
246static int incremental_errors_test(void)
247{
248 int err = 0;
249 unsigned i;
250 unsigned errs_per_subpage = 0;
251
252 pr_info("incremental biterrors test\n");
253
254 for (i = 0; i < mtd->writesize; i++)
255 wbuffer[i] = hash(i+seed);
256
257 err = write_page(1);
258 if (err)
259 goto exit;
260
261 while (1) {
262
263 err = rewrite_page(1);
264 if (err)
265 goto exit;
266
267 err = read_page(1);
268 if (err > 0)
269 pr_info("Read reported %d corrected bit errors\n", err);
270 if (err < 0) {
271 pr_err("After %d biterrors per subpage, read reported error %d\n",
272 errs_per_subpage, err);
273 err = 0;
274 goto exit;
275 }
276
277 err = verify_page(1);
278 if (err) {
279 pr_err("ECC failure, read data is incorrect despite read success\n");
280 goto exit;
281 }
282
283 pr_info("Successfully corrected %d bit errors per subpage\n",
284 errs_per_subpage);
285
286 for (i = 0; i < subcount; i++) {
287 err = insert_biterror(i * subsize);
288 if (err < 0)
289 goto exit;
290 }
291 errs_per_subpage++;
292 }
293
294exit:
295 return err;
296}
297
298
299/* Writes 'random' data to page and then re-writes that same data repeatedly.
300 This eventually develops bit errors (bits written as '1' will slowly become
301 '0'), which are corrected as far as the ECC is capable of. */
302static int overwrite_test(void)
303{
304 int err = 0;
305 unsigned i;
306 unsigned max_corrected = 0;
307 unsigned opno = 0;
308 /* We don't expect more than this many correctable bit errors per
309 * page. */
310 #define MAXBITS 512
311 static unsigned bitstats[MAXBITS]; /* bit error histogram. */
312
313 memset(bitstats, 0, sizeof(bitstats));
314
315 pr_info("overwrite biterrors test\n");
316
317 for (i = 0; i < mtd->writesize; i++)
318 wbuffer[i] = hash(i+seed);
319
320 err = write_page(1);
321 if (err)
322 goto exit;
323
324 while (opno < max_overwrite) {
325
326 err = rewrite_page(0);
327 if (err)
328 break;
329
330 err = read_page(0);
331 if (err >= 0) {
332 if (err >= MAXBITS) {
333 pr_info("Implausible number of bit errors corrected\n");
334 err = -EIO;
335 break;
336 }
337 bitstats[err]++;
338 if (err > max_corrected) {
339 max_corrected = err;
340 pr_info("Read reported %d corrected bit errors\n",
341 err);
342 }
343 } else { /* err < 0 */
344 pr_info("Read reported error %d\n", err);
345 err = 0;
346 break;
347 }
348
349 err = verify_page(0);
350 if (err) {
351 bitstats[max_corrected] = opno;
352 pr_info("ECC failure, read data is incorrect despite read success\n");
353 break;
354 }
355
356 opno++;
357 }
358
359 /* At this point bitstats[0] contains the number of ops with no bit
360 * errors, bitstats[1] the number of ops with 1 bit error, etc. */
361 pr_info("Bit error histogram (%d operations total):\n", opno);
362 for (i = 0; i < max_corrected; i++)
363 pr_info("Page reads with %3d corrected bit errors: %d\n",
364 i, bitstats[i]);
365
366exit:
367 return err;
368}
369
370static int __init mtd_nandbiterrs_init(void)
371{
372 int err = 0;
373
374 printk("\n");
375 printk(KERN_INFO "==================================================\n");
376 pr_info("MTD device: %d\n", dev);
377
378 mtd = get_mtd_device(NULL, dev);
379 if (IS_ERR(mtd)) {
380 err = PTR_ERR(mtd);
381 pr_err("error: cannot get MTD device\n");
382 goto exit_mtddev;
383 }
384
385 if (mtd->type != MTD_NANDFLASH) {
386 pr_info("this test requires NAND flash\n");
387 err = -ENODEV;
388 goto exit_nand;
389 }
390
391 pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
392 (unsigned long long)mtd->size, mtd->erasesize,
393 mtd->writesize, mtd->oobsize);
394
395 subsize = mtd->writesize >> mtd->subpage_sft;
396 subcount = mtd->writesize / subsize;
397
398 pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
399
400 offset = page_offset * mtd->writesize;
401 eraseblock = mtd_div_by_eb(offset, mtd);
402
403 pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
404 page_offset, offset, eraseblock);
405
406 wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
407 if (!wbuffer) {
408 err = -ENOMEM;
409 goto exit_wbuffer;
410 }
411
412 rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
413 if (!rbuffer) {
414 err = -ENOMEM;
415 goto exit_rbuffer;
416 }
417
418 err = erase_block();
419 if (err)
420 goto exit_error;
421
422 if (mode == 0)
423 err = incremental_errors_test();
424 else
425 err = overwrite_test();
426
427 if (err)
428 goto exit_error;
429
430 /* We leave the block un-erased in case of test failure. */
431 err = erase_block();
432 if (err)
433 goto exit_error;
434
435 err = -EIO;
436 pr_info("finished successfully.\n");
437 printk(KERN_INFO "==================================================\n");
438
439exit_error:
440 kfree(rbuffer);
441exit_rbuffer:
442 kfree(wbuffer);
443exit_wbuffer:
444 /* Nothing */
445exit_nand:
446 put_mtd_device(mtd);
447exit_mtddev:
448 return err;
449}
450
451static void __exit mtd_nandbiterrs_exit(void)
452{
453 return;
454}
455
456module_init(mtd_nandbiterrs_init);
457module_exit(mtd_nandbiterrs_exit);
458
459MODULE_DESCRIPTION("NAND bit error recovery test");
460MODULE_AUTHOR("Iwo Mergler");
461MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 1eee264509a..70d6d7d0d65 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -1,292 +1,63 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3#include <linux/kernel.h> 1#include <linux/kernel.h>
4#include <linux/module.h> 2#include <linux/module.h>
5#include <linux/list.h> 3#include <linux/list.h>
6#include <linux/random.h> 4#include <linux/random.h>
7#include <linux/string.h> 5#include <linux/string.h>
8#include <linux/bitops.h> 6#include <linux/bitops.h>
9#include <linux/slab.h> 7#include <linux/jiffies.h>
10#include <linux/mtd/nand_ecc.h> 8#include <linux/mtd/nand_ecc.h>
11 9
12/*
13 * Test the implementation for software ECC
14 *
15 * No actual MTD device is needed, So we don't need to warry about losing
16 * important data by human error.
17 *
18 * This covers possible patterns of corruption which can be reliably corrected
19 * or detected.
20 */
21
22#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE) 10#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
23 11
24struct nand_ecc_test { 12static void inject_single_bit_error(void *data, size_t size)
25 const char *name;
26 void (*prepare)(void *, void *, void *, void *, const size_t);
27 int (*verify)(void *, void *, void *, const size_t);
28};
29
30/*
31 * The reason for this __change_bit_le() instead of __change_bit() is to inject
32 * bit error properly within the region which is not a multiple of
33 * sizeof(unsigned long) on big-endian systems
34 */
35#ifdef __LITTLE_ENDIAN
36#define __change_bit_le(nr, addr) __change_bit(nr, addr)
37#elif defined(__BIG_ENDIAN)
38#define __change_bit_le(nr, addr) \
39 __change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
40#else
41#error "Unknown byte order"
42#endif
43
44static void single_bit_error_data(void *error_data, void *correct_data,
45 size_t size)
46{ 13{
47 unsigned int offset = random32() % (size * BITS_PER_BYTE); 14 unsigned long offset = random32() % (size * BITS_PER_BYTE);
48 15
49 memcpy(error_data, correct_data, size); 16 __change_bit(offset, data);
50 __change_bit_le(offset, error_data);
51} 17}
52 18
53static void double_bit_error_data(void *error_data, void *correct_data, 19static unsigned char data[512];
54 size_t size) 20static unsigned char error_data[512];
55{
56 unsigned int offset[2];
57
58 offset[0] = random32() % (size * BITS_PER_BYTE);
59 do {
60 offset[1] = random32() % (size * BITS_PER_BYTE);
61 } while (offset[0] == offset[1]);
62
63 memcpy(error_data, correct_data, size);
64 21
65 __change_bit_le(offset[0], error_data); 22static int nand_ecc_test(const size_t size)
66 __change_bit_le(offset[1], error_data);
67}
68
69static unsigned int random_ecc_bit(size_t size)
70{ 23{
71 unsigned int offset = random32() % (3 * BITS_PER_BYTE); 24 unsigned char code[3];
72 25 unsigned char error_code[3];
73 if (size == 256) { 26 char testname[30];
74 /*
75 * Don't inject a bit error into the insignificant bits (16th
76 * and 17th bit) in ECC code for 256 byte data block
77 */
78 while (offset == 16 || offset == 17)
79 offset = random32() % (3 * BITS_PER_BYTE);
80 }
81 27
82 return offset; 28 BUG_ON(sizeof(data) < size);
83}
84
85static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
86 size_t size)
87{
88 unsigned int offset = random_ecc_bit(size);
89 29
90 memcpy(error_ecc, correct_ecc, 3); 30 sprintf(testname, "nand-ecc-%zu", size);
91 __change_bit_le(offset, error_ecc);
92}
93 31
94static void double_bit_error_ecc(void *error_ecc, void *correct_ecc, 32 get_random_bytes(data, size);
95 size_t size)
96{
97 unsigned int offset[2];
98
99 offset[0] = random_ecc_bit(size);
100 do {
101 offset[1] = random_ecc_bit(size);
102 } while (offset[0] == offset[1]);
103
104 memcpy(error_ecc, correct_ecc, 3);
105 __change_bit_le(offset[0], error_ecc);
106 __change_bit_le(offset[1], error_ecc);
107}
108
109static void no_bit_error(void *error_data, void *error_ecc,
110 void *correct_data, void *correct_ecc, const size_t size)
111{
112 memcpy(error_data, correct_data, size);
113 memcpy(error_ecc, correct_ecc, 3);
114}
115
116static int no_bit_error_verify(void *error_data, void *error_ecc,
117 void *correct_data, const size_t size)
118{
119 unsigned char calc_ecc[3];
120 int ret;
121
122 __nand_calculate_ecc(error_data, size, calc_ecc);
123 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
124 if (ret == 0 && !memcmp(correct_data, error_data, size))
125 return 0;
126
127 return -EINVAL;
128}
129
130static void single_bit_error_in_data(void *error_data, void *error_ecc,
131 void *correct_data, void *correct_ecc, const size_t size)
132{
133 single_bit_error_data(error_data, correct_data, size);
134 memcpy(error_ecc, correct_ecc, 3);
135}
136 33
137static void single_bit_error_in_ecc(void *error_data, void *error_ecc, 34 memcpy(error_data, data, size);
138 void *correct_data, void *correct_ecc, const size_t size) 35 inject_single_bit_error(error_data, size);
139{
140 memcpy(error_data, correct_data, size);
141 single_bit_error_ecc(error_ecc, correct_ecc, size);
142}
143 36
144static int single_bit_error_correct(void *error_data, void *error_ecc, 37 __nand_calculate_ecc(data, size, code);
145 void *correct_data, const size_t size) 38 __nand_calculate_ecc(error_data, size, error_code);
146{ 39 __nand_correct_data(error_data, code, error_code, size);
147 unsigned char calc_ecc[3];
148 int ret;
149 40
150 __nand_calculate_ecc(error_data, size, calc_ecc); 41 if (!memcmp(data, error_data, size)) {
151 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size); 42 printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
152 if (ret == 1 && !memcmp(correct_data, error_data, size))
153 return 0; 43 return 0;
154
155 return -EINVAL;
156}
157
158static void double_bit_error_in_data(void *error_data, void *error_ecc,
159 void *correct_data, void *correct_ecc, const size_t size)
160{
161 double_bit_error_data(error_data, correct_data, size);
162 memcpy(error_ecc, correct_ecc, 3);
163}
164
165static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
166 void *correct_data, void *correct_ecc, const size_t size)
167{
168 single_bit_error_data(error_data, correct_data, size);
169 single_bit_error_ecc(error_ecc, correct_ecc, size);
170}
171
172static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
173 void *correct_data, void *correct_ecc, const size_t size)
174{
175 memcpy(error_data, correct_data, size);
176 double_bit_error_ecc(error_ecc, correct_ecc, size);
177}
178
179static int double_bit_error_detect(void *error_data, void *error_ecc,
180 void *correct_data, const size_t size)
181{
182 unsigned char calc_ecc[3];
183 int ret;
184
185 __nand_calculate_ecc(error_data, size, calc_ecc);
186 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
187
188 return (ret == -1) ? 0 : -EINVAL;
189}
190
191static const struct nand_ecc_test nand_ecc_test[] = {
192 {
193 .name = "no-bit-error",
194 .prepare = no_bit_error,
195 .verify = no_bit_error_verify,
196 },
197 {
198 .name = "single-bit-error-in-data-correct",
199 .prepare = single_bit_error_in_data,
200 .verify = single_bit_error_correct,
201 },
202 {
203 .name = "single-bit-error-in-ecc-correct",
204 .prepare = single_bit_error_in_ecc,
205 .verify = single_bit_error_correct,
206 },
207 {
208 .name = "double-bit-error-in-data-detect",
209 .prepare = double_bit_error_in_data,
210 .verify = double_bit_error_detect,
211 },
212 {
213 .name = "single-bit-error-in-data-and-ecc-detect",
214 .prepare = single_bit_error_in_data_and_ecc,
215 .verify = double_bit_error_detect,
216 },
217 {
218 .name = "double-bit-error-in-ecc-detect",
219 .prepare = double_bit_error_in_ecc,
220 .verify = double_bit_error_detect,
221 },
222};
223
224static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
225 void *correct_ecc, const size_t size)
226{
227 pr_info("hexdump of error data:\n");
228 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
229 error_data, size, false);
230 print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
231 DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
232
233 pr_info("hexdump of correct data:\n");
234 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
235 correct_data, size, false);
236 print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
237 DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
238}
239
240static int nand_ecc_test_run(const size_t size)
241{
242 int i;
243 int err = 0;
244 void *error_data;
245 void *error_ecc;
246 void *correct_data;
247 void *correct_ecc;
248
249 error_data = kmalloc(size, GFP_KERNEL);
250 error_ecc = kmalloc(3, GFP_KERNEL);
251 correct_data = kmalloc(size, GFP_KERNEL);
252 correct_ecc = kmalloc(3, GFP_KERNEL);
253
254 if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
255 err = -ENOMEM;
256 goto error;
257 } 44 }
258 45
259 get_random_bytes(correct_data, size); 46 printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
260 __nand_calculate_ecc(correct_data, size, correct_ecc);
261 47
262 for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) { 48 printk(KERN_DEBUG "hexdump of data:\n");
263 nand_ecc_test[i].prepare(error_data, error_ecc, 49 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
264 correct_data, correct_ecc, size); 50 data, size, false);
265 err = nand_ecc_test[i].verify(error_data, error_ecc, 51 printk(KERN_DEBUG "hexdump of error data:\n");
266 correct_data, size); 52 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
267 53 error_data, size, false);
268 if (err) {
269 pr_err("not ok - %s-%zd\n",
270 nand_ecc_test[i].name, size);
271 dump_data_ecc(error_data, error_ecc,
272 correct_data, correct_ecc, size);
273 break;
274 }
275 pr_info("ok - %s-%zd\n",
276 nand_ecc_test[i].name, size);
277 }
278error:
279 kfree(error_data);
280 kfree(error_ecc);
281 kfree(correct_data);
282 kfree(correct_ecc);
283 54
284 return err; 55 return -1;
285} 56}
286 57
287#else 58#else
288 59
289static int nand_ecc_test_run(const size_t size) 60static int nand_ecc_test(const size_t size)
290{ 61{
291 return 0; 62 return 0;
292} 63}
@@ -295,13 +66,12 @@ static int nand_ecc_test_run(const size_t size)
295 66
296static int __init ecc_test_init(void) 67static int __init ecc_test_init(void)
297{ 68{
298 int err; 69 srandom32(jiffies);
299 70
300 err = nand_ecc_test_run(256); 71 nand_ecc_test(256);
301 if (err) 72 nand_ecc_test(512);
302 return err;
303 73
304 return nand_ecc_test_run(512); 74 return 0;
305} 75}
306 76
307static void __exit ecc_test_exit(void) 77static void __exit ecc_test_exit(void)
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index e827fa8cd84..dec92ae6111 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -19,8 +19,6 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <asm/div64.h> 22#include <asm/div64.h>
25#include <linux/init.h> 23#include <linux/init.h>
26#include <linux/module.h> 24#include <linux/module.h>
@@ -30,7 +28,9 @@
30#include <linux/slab.h> 28#include <linux/slab.h>
31#include <linux/sched.h> 29#include <linux/sched.h>
32 30
33static int dev = -EINVAL; 31#define PRINT_PREF KERN_INFO "mtd_oobtest: "
32
33static int dev;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
36 36
@@ -78,14 +78,15 @@ static int erase_eraseblock(int ebnum)
78 ei.addr = addr; 78 ei.addr = addr;
79 ei.len = mtd->erasesize; 79 ei.len = mtd->erasesize;
80 80
81 err = mtd_erase(mtd, &ei); 81 err = mtd->erase(mtd, &ei);
82 if (err) { 82 if (err) {
83 pr_err("error %d while erasing EB %d\n", err, ebnum); 83 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
84 return err; 84 return err;
85 } 85 }
86 86
87 if (ei.state == MTD_ERASE_FAILED) { 87 if (ei.state == MTD_ERASE_FAILED) {
88 pr_err("some erase error occurred at EB %d\n", ebnum); 88 printk(PRINT_PREF "some erase error occurred at EB %d\n",
89 ebnum);
89 return -EIO; 90 return -EIO;
90 } 91 }
91 92
@@ -97,7 +98,7 @@ static int erase_whole_device(void)
97 int err; 98 int err;
98 unsigned int i; 99 unsigned int i;
99 100
100 pr_info("erasing whole device\n"); 101 printk(PRINT_PREF "erasing whole device\n");
101 for (i = 0; i < ebcnt; ++i) { 102 for (i = 0; i < ebcnt; ++i) {
102 if (bbt[i]) 103 if (bbt[i])
103 continue; 104 continue;
@@ -106,7 +107,7 @@ static int erase_whole_device(void)
106 return err; 107 return err;
107 cond_resched(); 108 cond_resched();
108 } 109 }
109 pr_info("erased %u eraseblocks\n", i); 110 printk(PRINT_PREF "erased %u eraseblocks\n", i);
110 return 0; 111 return 0;
111} 112}
112 113
@@ -130,7 +131,7 @@ static int write_eraseblock(int ebnum)
130 131
131 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { 132 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
132 set_random_data(writebuf, use_len); 133 set_random_data(writebuf, use_len);
133 ops.mode = MTD_OPS_AUTO_OOB; 134 ops.mode = MTD_OOB_AUTO;
134 ops.len = 0; 135 ops.len = 0;
135 ops.retlen = 0; 136 ops.retlen = 0;
136 ops.ooblen = use_len; 137 ops.ooblen = use_len;
@@ -138,11 +139,11 @@ static int write_eraseblock(int ebnum)
138 ops.ooboffs = use_offset; 139 ops.ooboffs = use_offset;
139 ops.datbuf = NULL; 140 ops.datbuf = NULL;
140 ops.oobbuf = writebuf; 141 ops.oobbuf = writebuf;
141 err = mtd_write_oob(mtd, addr, &ops); 142 err = mtd->write_oob(mtd, addr, &ops);
142 if (err || ops.oobretlen != use_len) { 143 if (err || ops.oobretlen != use_len) {
143 pr_err("error: writeoob failed at %#llx\n", 144 printk(PRINT_PREF "error: writeoob failed at %#llx\n",
144 (long long)addr); 145 (long long)addr);
145 pr_err("error: use_len %d, use_offset %d\n", 146 printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
146 use_len, use_offset); 147 use_len, use_offset);
147 errcnt += 1; 148 errcnt += 1;
148 return err ? err : -1; 149 return err ? err : -1;
@@ -159,7 +160,7 @@ static int write_whole_device(void)
159 int err; 160 int err;
160 unsigned int i; 161 unsigned int i;
161 162
162 pr_info("writing OOBs of whole device\n"); 163 printk(PRINT_PREF "writing OOBs of whole device\n");
163 for (i = 0; i < ebcnt; ++i) { 164 for (i = 0; i < ebcnt; ++i) {
164 if (bbt[i]) 165 if (bbt[i])
165 continue; 166 continue;
@@ -167,10 +168,10 @@ static int write_whole_device(void)
167 if (err) 168 if (err)
168 return err; 169 return err;
169 if (i % 256 == 0) 170 if (i % 256 == 0)
170 pr_info("written up to eraseblock %u\n", i); 171 printk(PRINT_PREF "written up to eraseblock %u\n", i);
171 cond_resched(); 172 cond_resched();
172 } 173 }
173 pr_info("written %u eraseblocks\n", i); 174 printk(PRINT_PREF "written %u eraseblocks\n", i);
174 return 0; 175 return 0;
175} 176}
176 177
@@ -183,7 +184,7 @@ static int verify_eraseblock(int ebnum)
183 184
184 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { 185 for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
185 set_random_data(writebuf, use_len); 186 set_random_data(writebuf, use_len);
186 ops.mode = MTD_OPS_AUTO_OOB; 187 ops.mode = MTD_OOB_AUTO;
187 ops.len = 0; 188 ops.len = 0;
188 ops.retlen = 0; 189 ops.retlen = 0;
189 ops.ooblen = use_len; 190 ops.ooblen = use_len;
@@ -191,26 +192,26 @@ static int verify_eraseblock(int ebnum)
191 ops.ooboffs = use_offset; 192 ops.ooboffs = use_offset;
192 ops.datbuf = NULL; 193 ops.datbuf = NULL;
193 ops.oobbuf = readbuf; 194 ops.oobbuf = readbuf;
194 err = mtd_read_oob(mtd, addr, &ops); 195 err = mtd->read_oob(mtd, addr, &ops);
195 if (err || ops.oobretlen != use_len) { 196 if (err || ops.oobretlen != use_len) {
196 pr_err("error: readoob failed at %#llx\n", 197 printk(PRINT_PREF "error: readoob failed at %#llx\n",
197 (long long)addr); 198 (long long)addr);
198 errcnt += 1; 199 errcnt += 1;
199 return err ? err : -1; 200 return err ? err : -1;
200 } 201 }
201 if (memcmp(readbuf, writebuf, use_len)) { 202 if (memcmp(readbuf, writebuf, use_len)) {
202 pr_err("error: verify failed at %#llx\n", 203 printk(PRINT_PREF "error: verify failed at %#llx\n",
203 (long long)addr); 204 (long long)addr);
204 errcnt += 1; 205 errcnt += 1;
205 if (errcnt > 1000) { 206 if (errcnt > 1000) {
206 pr_err("error: too many errors\n"); 207 printk(PRINT_PREF "error: too many errors\n");
207 return -1; 208 return -1;
208 } 209 }
209 } 210 }
210 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { 211 if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
211 int k; 212 int k;
212 213
213 ops.mode = MTD_OPS_AUTO_OOB; 214 ops.mode = MTD_OOB_AUTO;
214 ops.len = 0; 215 ops.len = 0;
215 ops.retlen = 0; 216 ops.retlen = 0;
216 ops.ooblen = mtd->ecclayout->oobavail; 217 ops.ooblen = mtd->ecclayout->oobavail;
@@ -218,30 +219,31 @@ static int verify_eraseblock(int ebnum)
218 ops.ooboffs = 0; 219 ops.ooboffs = 0;
219 ops.datbuf = NULL; 220 ops.datbuf = NULL;
220 ops.oobbuf = readbuf; 221 ops.oobbuf = readbuf;
221 err = mtd_read_oob(mtd, addr, &ops); 222 err = mtd->read_oob(mtd, addr, &ops);
222 if (err || ops.oobretlen != mtd->ecclayout->oobavail) { 223 if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
223 pr_err("error: readoob failed at %#llx\n", 224 printk(PRINT_PREF "error: readoob failed at "
224 (long long)addr); 225 "%#llx\n", (long long)addr);
225 errcnt += 1; 226 errcnt += 1;
226 return err ? err : -1; 227 return err ? err : -1;
227 } 228 }
228 if (memcmp(readbuf + use_offset, writebuf, use_len)) { 229 if (memcmp(readbuf + use_offset, writebuf, use_len)) {
229 pr_err("error: verify failed at %#llx\n", 230 printk(PRINT_PREF "error: verify failed at "
230 (long long)addr); 231 "%#llx\n", (long long)addr);
231 errcnt += 1; 232 errcnt += 1;
232 if (errcnt > 1000) { 233 if (errcnt > 1000) {
233 pr_err("error: too many errors\n"); 234 printk(PRINT_PREF "error: too many "
235 "errors\n");
234 return -1; 236 return -1;
235 } 237 }
236 } 238 }
237 for (k = 0; k < use_offset; ++k) 239 for (k = 0; k < use_offset; ++k)
238 if (readbuf[k] != 0xff) { 240 if (readbuf[k] != 0xff) {
239 pr_err("error: verify 0xff " 241 printk(PRINT_PREF "error: verify 0xff "
240 "failed at %#llx\n", 242 "failed at %#llx\n",
241 (long long)addr); 243 (long long)addr);
242 errcnt += 1; 244 errcnt += 1;
243 if (errcnt > 1000) { 245 if (errcnt > 1000) {
244 pr_err("error: too " 246 printk(PRINT_PREF "error: too "
245 "many errors\n"); 247 "many errors\n");
246 return -1; 248 return -1;
247 } 249 }
@@ -249,12 +251,12 @@ static int verify_eraseblock(int ebnum)
249 for (k = use_offset + use_len; 251 for (k = use_offset + use_len;
250 k < mtd->ecclayout->oobavail; ++k) 252 k < mtd->ecclayout->oobavail; ++k)
251 if (readbuf[k] != 0xff) { 253 if (readbuf[k] != 0xff) {
252 pr_err("error: verify 0xff " 254 printk(PRINT_PREF "error: verify 0xff "
253 "failed at %#llx\n", 255 "failed at %#llx\n",
254 (long long)addr); 256 (long long)addr);
255 errcnt += 1; 257 errcnt += 1;
256 if (errcnt > 1000) { 258 if (errcnt > 1000) {
257 pr_err("error: too " 259 printk(PRINT_PREF "error: too "
258 "many errors\n"); 260 "many errors\n");
259 return -1; 261 return -1;
260 } 262 }
@@ -274,7 +276,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
274 size_t len = mtd->ecclayout->oobavail * pgcnt; 276 size_t len = mtd->ecclayout->oobavail * pgcnt;
275 277
276 set_random_data(writebuf, len); 278 set_random_data(writebuf, len);
277 ops.mode = MTD_OPS_AUTO_OOB; 279 ops.mode = MTD_OOB_AUTO;
278 ops.len = 0; 280 ops.len = 0;
279 ops.retlen = 0; 281 ops.retlen = 0;
280 ops.ooblen = len; 282 ops.ooblen = len;
@@ -282,19 +284,19 @@ static int verify_eraseblock_in_one_go(int ebnum)
282 ops.ooboffs = 0; 284 ops.ooboffs = 0;
283 ops.datbuf = NULL; 285 ops.datbuf = NULL;
284 ops.oobbuf = readbuf; 286 ops.oobbuf = readbuf;
285 err = mtd_read_oob(mtd, addr, &ops); 287 err = mtd->read_oob(mtd, addr, &ops);
286 if (err || ops.oobretlen != len) { 288 if (err || ops.oobretlen != len) {
287 pr_err("error: readoob failed at %#llx\n", 289 printk(PRINT_PREF "error: readoob failed at %#llx\n",
288 (long long)addr); 290 (long long)addr);
289 errcnt += 1; 291 errcnt += 1;
290 return err ? err : -1; 292 return err ? err : -1;
291 } 293 }
292 if (memcmp(readbuf, writebuf, len)) { 294 if (memcmp(readbuf, writebuf, len)) {
293 pr_err("error: verify failed at %#llx\n", 295 printk(PRINT_PREF "error: verify failed at %#llx\n",
294 (long long)addr); 296 (long long)addr);
295 errcnt += 1; 297 errcnt += 1;
296 if (errcnt > 1000) { 298 if (errcnt > 1000) {
297 pr_err("error: too many errors\n"); 299 printk(PRINT_PREF "error: too many errors\n");
298 return -1; 300 return -1;
299 } 301 }
300 } 302 }
@@ -307,7 +309,7 @@ static int verify_all_eraseblocks(void)
307 int err; 309 int err;
308 unsigned int i; 310 unsigned int i;
309 311
310 pr_info("verifying all eraseblocks\n"); 312 printk(PRINT_PREF "verifying all eraseblocks\n");
311 for (i = 0; i < ebcnt; ++i) { 313 for (i = 0; i < ebcnt; ++i) {
312 if (bbt[i]) 314 if (bbt[i])
313 continue; 315 continue;
@@ -315,10 +317,10 @@ static int verify_all_eraseblocks(void)
315 if (err) 317 if (err)
316 return err; 318 return err;
317 if (i % 256 == 0) 319 if (i % 256 == 0)
318 pr_info("verified up to eraseblock %u\n", i); 320 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
319 cond_resched(); 321 cond_resched();
320 } 322 }
321 pr_info("verified %u eraseblocks\n", i); 323 printk(PRINT_PREF "verified %u eraseblocks\n", i);
322 return 0; 324 return 0;
323} 325}
324 326
@@ -327,9 +329,9 @@ static int is_block_bad(int ebnum)
327 int ret; 329 int ret;
328 loff_t addr = ebnum * mtd->erasesize; 330 loff_t addr = ebnum * mtd->erasesize;
329 331
330 ret = mtd_block_isbad(mtd, addr); 332 ret = mtd->block_isbad(mtd, addr);
331 if (ret) 333 if (ret)
332 pr_info("block %d is bad\n", ebnum); 334 printk(PRINT_PREF "block %d is bad\n", ebnum);
333 return ret; 335 return ret;
334} 336}
335 337
@@ -339,18 +341,18 @@ static int scan_for_bad_eraseblocks(void)
339 341
340 bbt = kmalloc(ebcnt, GFP_KERNEL); 342 bbt = kmalloc(ebcnt, GFP_KERNEL);
341 if (!bbt) { 343 if (!bbt) {
342 pr_err("error: cannot allocate memory\n"); 344 printk(PRINT_PREF "error: cannot allocate memory\n");
343 return -ENOMEM; 345 return -ENOMEM;
344 } 346 }
345 347
346 pr_info("scanning for bad eraseblocks\n"); 348 printk(PRINT_PREF "scanning for bad eraseblocks\n");
347 for (i = 0; i < ebcnt; ++i) { 349 for (i = 0; i < ebcnt; ++i) {
348 bbt[i] = is_block_bad(i) ? 1 : 0; 350 bbt[i] = is_block_bad(i) ? 1 : 0;
349 if (bbt[i]) 351 if (bbt[i])
350 bad += 1; 352 bad += 1;
351 cond_resched(); 353 cond_resched();
352 } 354 }
353 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 355 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
354 return 0; 356 return 0;
355} 357}
356 358
@@ -364,24 +366,17 @@ static int __init mtd_oobtest_init(void)
364 366
365 printk(KERN_INFO "\n"); 367 printk(KERN_INFO "\n");
366 printk(KERN_INFO "=================================================\n"); 368 printk(KERN_INFO "=================================================\n");
367 369 printk(PRINT_PREF "MTD device: %d\n", dev);
368 if (dev < 0) {
369 pr_info("Please specify a valid mtd-device via module parameter\n");
370 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
371 return -EINVAL;
372 }
373
374 pr_info("MTD device: %d\n", dev);
375 370
376 mtd = get_mtd_device(NULL, dev); 371 mtd = get_mtd_device(NULL, dev);
377 if (IS_ERR(mtd)) { 372 if (IS_ERR(mtd)) {
378 err = PTR_ERR(mtd); 373 err = PTR_ERR(mtd);
379 pr_err("error: cannot get MTD device\n"); 374 printk(PRINT_PREF "error: cannot get MTD device\n");
380 return err; 375 return err;
381 } 376 }
382 377
383 if (mtd->type != MTD_NANDFLASH) { 378 if (mtd->type != MTD_NANDFLASH) {
384 pr_info("this test requires NAND flash\n"); 379 printk(PRINT_PREF "this test requires NAND flash\n");
385 goto out; 380 goto out;
386 } 381 }
387 382
@@ -390,7 +385,7 @@ static int __init mtd_oobtest_init(void)
390 ebcnt = tmp; 385 ebcnt = tmp;
391 pgcnt = mtd->erasesize / mtd->writesize; 386 pgcnt = mtd->erasesize / mtd->writesize;
392 387
393 pr_info("MTD device size %llu, eraseblock size %u, " 388 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
394 "page size %u, count of eraseblocks %u, pages per " 389 "page size %u, count of eraseblocks %u, pages per "
395 "eraseblock %u, OOB size %u\n", 390 "eraseblock %u, OOB size %u\n",
396 (unsigned long long)mtd->size, mtd->erasesize, 391 (unsigned long long)mtd->size, mtd->erasesize,
@@ -399,12 +394,12 @@ static int __init mtd_oobtest_init(void)
399 err = -ENOMEM; 394 err = -ENOMEM;
400 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); 395 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
401 if (!readbuf) { 396 if (!readbuf) {
402 pr_err("error: cannot allocate memory\n"); 397 printk(PRINT_PREF "error: cannot allocate memory\n");
403 goto out; 398 goto out;
404 } 399 }
405 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); 400 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
406 if (!writebuf) { 401 if (!writebuf) {
407 pr_err("error: cannot allocate memory\n"); 402 printk(PRINT_PREF "error: cannot allocate memory\n");
408 goto out; 403 goto out;
409 } 404 }
410 405
@@ -418,7 +413,7 @@ static int __init mtd_oobtest_init(void)
418 vary_offset = 0; 413 vary_offset = 0;
419 414
420 /* First test: write all OOB, read it back and verify */ 415 /* First test: write all OOB, read it back and verify */
421 pr_info("test 1 of 5\n"); 416 printk(PRINT_PREF "test 1 of 5\n");
422 417
423 err = erase_whole_device(); 418 err = erase_whole_device();
424 if (err) 419 if (err)
@@ -438,7 +433,7 @@ static int __init mtd_oobtest_init(void)
438 * Second test: write all OOB, a block at a time, read it back and 433 * Second test: write all OOB, a block at a time, read it back and
439 * verify. 434 * verify.
440 */ 435 */
441 pr_info("test 2 of 5\n"); 436 printk(PRINT_PREF "test 2 of 5\n");
442 437
443 err = erase_whole_device(); 438 err = erase_whole_device();
444 if (err) 439 if (err)
@@ -451,7 +446,7 @@ static int __init mtd_oobtest_init(void)
451 446
452 /* Check all eraseblocks */ 447 /* Check all eraseblocks */
453 simple_srand(3); 448 simple_srand(3);
454 pr_info("verifying all eraseblocks\n"); 449 printk(PRINT_PREF "verifying all eraseblocks\n");
455 for (i = 0; i < ebcnt; ++i) { 450 for (i = 0; i < ebcnt; ++i) {
456 if (bbt[i]) 451 if (bbt[i])
457 continue; 452 continue;
@@ -459,16 +454,16 @@ static int __init mtd_oobtest_init(void)
459 if (err) 454 if (err)
460 goto out; 455 goto out;
461 if (i % 256 == 0) 456 if (i % 256 == 0)
462 pr_info("verified up to eraseblock %u\n", i); 457 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
463 cond_resched(); 458 cond_resched();
464 } 459 }
465 pr_info("verified %u eraseblocks\n", i); 460 printk(PRINT_PREF "verified %u eraseblocks\n", i);
466 461
467 /* 462 /*
468 * Third test: write OOB at varying offsets and lengths, read it back 463 * Third test: write OOB at varying offsets and lengths, read it back
469 * and verify. 464 * and verify.
470 */ 465 */
471 pr_info("test 3 of 5\n"); 466 printk(PRINT_PREF "test 3 of 5\n");
472 467
473 err = erase_whole_device(); 468 err = erase_whole_device();
474 if (err) 469 if (err)
@@ -501,7 +496,7 @@ static int __init mtd_oobtest_init(void)
501 vary_offset = 0; 496 vary_offset = 0;
502 497
503 /* Fourth test: try to write off end of device */ 498 /* Fourth test: try to write off end of device */
504 pr_info("test 4 of 5\n"); 499 printk(PRINT_PREF "test 4 of 5\n");
505 500
506 err = erase_whole_device(); 501 err = erase_whole_device();
507 if (err) 502 if (err)
@@ -512,7 +507,7 @@ static int __init mtd_oobtest_init(void)
512 addr0 += mtd->erasesize; 507 addr0 += mtd->erasesize;
513 508
514 /* Attempt to write off end of OOB */ 509 /* Attempt to write off end of OOB */
515 ops.mode = MTD_OPS_AUTO_OOB; 510 ops.mode = MTD_OOB_AUTO;
516 ops.len = 0; 511 ops.len = 0;
517 ops.retlen = 0; 512 ops.retlen = 0;
518 ops.ooblen = 1; 513 ops.ooblen = 1;
@@ -520,19 +515,19 @@ static int __init mtd_oobtest_init(void)
520 ops.ooboffs = mtd->ecclayout->oobavail; 515 ops.ooboffs = mtd->ecclayout->oobavail;
521 ops.datbuf = NULL; 516 ops.datbuf = NULL;
522 ops.oobbuf = writebuf; 517 ops.oobbuf = writebuf;
523 pr_info("attempting to start write past end of OOB\n"); 518 printk(PRINT_PREF "attempting to start write past end of OOB\n");
524 pr_info("an error is expected...\n"); 519 printk(PRINT_PREF "an error is expected...\n");
525 err = mtd_write_oob(mtd, addr0, &ops); 520 err = mtd->write_oob(mtd, addr0, &ops);
526 if (err) { 521 if (err) {
527 pr_info("error occurred as expected\n"); 522 printk(PRINT_PREF "error occurred as expected\n");
528 err = 0; 523 err = 0;
529 } else { 524 } else {
530 pr_err("error: can write past end of OOB\n"); 525 printk(PRINT_PREF "error: can write past end of OOB\n");
531 errcnt += 1; 526 errcnt += 1;
532 } 527 }
533 528
534 /* Attempt to read off end of OOB */ 529 /* Attempt to read off end of OOB */
535 ops.mode = MTD_OPS_AUTO_OOB; 530 ops.mode = MTD_OOB_AUTO;
536 ops.len = 0; 531 ops.len = 0;
537 ops.retlen = 0; 532 ops.retlen = 0;
538 ops.ooblen = 1; 533 ops.ooblen = 1;
@@ -540,23 +535,23 @@ static int __init mtd_oobtest_init(void)
540 ops.ooboffs = mtd->ecclayout->oobavail; 535 ops.ooboffs = mtd->ecclayout->oobavail;
541 ops.datbuf = NULL; 536 ops.datbuf = NULL;
542 ops.oobbuf = readbuf; 537 ops.oobbuf = readbuf;
543 pr_info("attempting to start read past end of OOB\n"); 538 printk(PRINT_PREF "attempting to start read past end of OOB\n");
544 pr_info("an error is expected...\n"); 539 printk(PRINT_PREF "an error is expected...\n");
545 err = mtd_read_oob(mtd, addr0, &ops); 540 err = mtd->read_oob(mtd, addr0, &ops);
546 if (err) { 541 if (err) {
547 pr_info("error occurred as expected\n"); 542 printk(PRINT_PREF "error occurred as expected\n");
548 err = 0; 543 err = 0;
549 } else { 544 } else {
550 pr_err("error: can read past end of OOB\n"); 545 printk(PRINT_PREF "error: can read past end of OOB\n");
551 errcnt += 1; 546 errcnt += 1;
552 } 547 }
553 548
554 if (bbt[ebcnt - 1]) 549 if (bbt[ebcnt - 1])
555 pr_info("skipping end of device tests because last " 550 printk(PRINT_PREF "skipping end of device tests because last "
556 "block is bad\n"); 551 "block is bad\n");
557 else { 552 else {
558 /* Attempt to write off end of device */ 553 /* Attempt to write off end of device */
559 ops.mode = MTD_OPS_AUTO_OOB; 554 ops.mode = MTD_OOB_AUTO;
560 ops.len = 0; 555 ops.len = 0;
561 ops.retlen = 0; 556 ops.retlen = 0;
562 ops.ooblen = mtd->ecclayout->oobavail + 1; 557 ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -564,19 +559,19 @@ static int __init mtd_oobtest_init(void)
564 ops.ooboffs = 0; 559 ops.ooboffs = 0;
565 ops.datbuf = NULL; 560 ops.datbuf = NULL;
566 ops.oobbuf = writebuf; 561 ops.oobbuf = writebuf;
567 pr_info("attempting to write past end of device\n"); 562 printk(PRINT_PREF "attempting to write past end of device\n");
568 pr_info("an error is expected...\n"); 563 printk(PRINT_PREF "an error is expected...\n");
569 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 564 err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
570 if (err) { 565 if (err) {
571 pr_info("error occurred as expected\n"); 566 printk(PRINT_PREF "error occurred as expected\n");
572 err = 0; 567 err = 0;
573 } else { 568 } else {
574 pr_err("error: wrote past end of device\n"); 569 printk(PRINT_PREF "error: wrote past end of device\n");
575 errcnt += 1; 570 errcnt += 1;
576 } 571 }
577 572
578 /* Attempt to read off end of device */ 573 /* Attempt to read off end of device */
579 ops.mode = MTD_OPS_AUTO_OOB; 574 ops.mode = MTD_OOB_AUTO;
580 ops.len = 0; 575 ops.len = 0;
581 ops.retlen = 0; 576 ops.retlen = 0;
582 ops.ooblen = mtd->ecclayout->oobavail + 1; 577 ops.ooblen = mtd->ecclayout->oobavail + 1;
@@ -584,14 +579,14 @@ static int __init mtd_oobtest_init(void)
584 ops.ooboffs = 0; 579 ops.ooboffs = 0;
585 ops.datbuf = NULL; 580 ops.datbuf = NULL;
586 ops.oobbuf = readbuf; 581 ops.oobbuf = readbuf;
587 pr_info("attempting to read past end of device\n"); 582 printk(PRINT_PREF "attempting to read past end of device\n");
588 pr_info("an error is expected...\n"); 583 printk(PRINT_PREF "an error is expected...\n");
589 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 584 err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
590 if (err) { 585 if (err) {
591 pr_info("error occurred as expected\n"); 586 printk(PRINT_PREF "error occurred as expected\n");
592 err = 0; 587 err = 0;
593 } else { 588 } else {
594 pr_err("error: read past end of device\n"); 589 printk(PRINT_PREF "error: read past end of device\n");
595 errcnt += 1; 590 errcnt += 1;
596 } 591 }
597 592
@@ -600,7 +595,7 @@ static int __init mtd_oobtest_init(void)
600 goto out; 595 goto out;
601 596
602 /* Attempt to write off end of device */ 597 /* Attempt to write off end of device */
603 ops.mode = MTD_OPS_AUTO_OOB; 598 ops.mode = MTD_OOB_AUTO;
604 ops.len = 0; 599 ops.len = 0;
605 ops.retlen = 0; 600 ops.retlen = 0;
606 ops.ooblen = mtd->ecclayout->oobavail; 601 ops.ooblen = mtd->ecclayout->oobavail;
@@ -608,19 +603,19 @@ static int __init mtd_oobtest_init(void)
608 ops.ooboffs = 1; 603 ops.ooboffs = 1;
609 ops.datbuf = NULL; 604 ops.datbuf = NULL;
610 ops.oobbuf = writebuf; 605 ops.oobbuf = writebuf;
611 pr_info("attempting to write past end of device\n"); 606 printk(PRINT_PREF "attempting to write past end of device\n");
612 pr_info("an error is expected...\n"); 607 printk(PRINT_PREF "an error is expected...\n");
613 err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); 608 err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
614 if (err) { 609 if (err) {
615 pr_info("error occurred as expected\n"); 610 printk(PRINT_PREF "error occurred as expected\n");
616 err = 0; 611 err = 0;
617 } else { 612 } else {
618 pr_err("error: wrote past end of device\n"); 613 printk(PRINT_PREF "error: wrote past end of device\n");
619 errcnt += 1; 614 errcnt += 1;
620 } 615 }
621 616
622 /* Attempt to read off end of device */ 617 /* Attempt to read off end of device */
623 ops.mode = MTD_OPS_AUTO_OOB; 618 ops.mode = MTD_OOB_AUTO;
624 ops.len = 0; 619 ops.len = 0;
625 ops.retlen = 0; 620 ops.retlen = 0;
626 ops.ooblen = mtd->ecclayout->oobavail; 621 ops.ooblen = mtd->ecclayout->oobavail;
@@ -628,20 +623,20 @@ static int __init mtd_oobtest_init(void)
628 ops.ooboffs = 1; 623 ops.ooboffs = 1;
629 ops.datbuf = NULL; 624 ops.datbuf = NULL;
630 ops.oobbuf = readbuf; 625 ops.oobbuf = readbuf;
631 pr_info("attempting to read past end of device\n"); 626 printk(PRINT_PREF "attempting to read past end of device\n");
632 pr_info("an error is expected...\n"); 627 printk(PRINT_PREF "an error is expected...\n");
633 err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); 628 err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
634 if (err) { 629 if (err) {
635 pr_info("error occurred as expected\n"); 630 printk(PRINT_PREF "error occurred as expected\n");
636 err = 0; 631 err = 0;
637 } else { 632 } else {
638 pr_err("error: read past end of device\n"); 633 printk(PRINT_PREF "error: read past end of device\n");
639 errcnt += 1; 634 errcnt += 1;
640 } 635 }
641 } 636 }
642 637
643 /* Fifth test: write / read across block boundaries */ 638 /* Fifth test: write / read across block boundaries */
644 pr_info("test 5 of 5\n"); 639 printk(PRINT_PREF "test 5 of 5\n");
645 640
646 /* Erase all eraseblocks */ 641 /* Erase all eraseblocks */
647 err = erase_whole_device(); 642 err = erase_whole_device();
@@ -650,7 +645,7 @@ static int __init mtd_oobtest_init(void)
650 645
651 /* Write all eraseblocks */ 646 /* Write all eraseblocks */
652 simple_srand(11); 647 simple_srand(11);
653 pr_info("writing OOBs of whole device\n"); 648 printk(PRINT_PREF "writing OOBs of whole device\n");
654 for (i = 0; i < ebcnt - 1; ++i) { 649 for (i = 0; i < ebcnt - 1; ++i) {
655 int cnt = 2; 650 int cnt = 2;
656 int pg; 651 int pg;
@@ -660,7 +655,7 @@ static int __init mtd_oobtest_init(void)
660 addr = (i + 1) * mtd->erasesize - mtd->writesize; 655 addr = (i + 1) * mtd->erasesize - mtd->writesize;
661 for (pg = 0; pg < cnt; ++pg) { 656 for (pg = 0; pg < cnt; ++pg) {
662 set_random_data(writebuf, sz); 657 set_random_data(writebuf, sz);
663 ops.mode = MTD_OPS_AUTO_OOB; 658 ops.mode = MTD_OOB_AUTO;
664 ops.len = 0; 659 ops.len = 0;
665 ops.retlen = 0; 660 ops.retlen = 0;
666 ops.ooblen = sz; 661 ops.ooblen = sz;
@@ -668,26 +663,27 @@ static int __init mtd_oobtest_init(void)
668 ops.ooboffs = 0; 663 ops.ooboffs = 0;
669 ops.datbuf = NULL; 664 ops.datbuf = NULL;
670 ops.oobbuf = writebuf; 665 ops.oobbuf = writebuf;
671 err = mtd_write_oob(mtd, addr, &ops); 666 err = mtd->write_oob(mtd, addr, &ops);
672 if (err) 667 if (err)
673 goto out; 668 goto out;
674 if (i % 256 == 0) 669 if (i % 256 == 0)
675 pr_info("written up to eraseblock %u\n", i); 670 printk(PRINT_PREF "written up to eraseblock "
671 "%u\n", i);
676 cond_resched(); 672 cond_resched();
677 addr += mtd->writesize; 673 addr += mtd->writesize;
678 } 674 }
679 } 675 }
680 pr_info("written %u eraseblocks\n", i); 676 printk(PRINT_PREF "written %u eraseblocks\n", i);
681 677
682 /* Check all eraseblocks */ 678 /* Check all eraseblocks */
683 simple_srand(11); 679 simple_srand(11);
684 pr_info("verifying all eraseblocks\n"); 680 printk(PRINT_PREF "verifying all eraseblocks\n");
685 for (i = 0; i < ebcnt - 1; ++i) { 681 for (i = 0; i < ebcnt - 1; ++i) {
686 if (bbt[i] || bbt[i + 1]) 682 if (bbt[i] || bbt[i + 1])
687 continue; 683 continue;
688 set_random_data(writebuf, mtd->ecclayout->oobavail * 2); 684 set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
689 addr = (i + 1) * mtd->erasesize - mtd->writesize; 685 addr = (i + 1) * mtd->erasesize - mtd->writesize;
690 ops.mode = MTD_OPS_AUTO_OOB; 686 ops.mode = MTD_OOB_AUTO;
691 ops.len = 0; 687 ops.len = 0;
692 ops.retlen = 0; 688 ops.retlen = 0;
693 ops.ooblen = mtd->ecclayout->oobavail * 2; 689 ops.ooblen = mtd->ecclayout->oobavail * 2;
@@ -695,32 +691,32 @@ static int __init mtd_oobtest_init(void)
695 ops.ooboffs = 0; 691 ops.ooboffs = 0;
696 ops.datbuf = NULL; 692 ops.datbuf = NULL;
697 ops.oobbuf = readbuf; 693 ops.oobbuf = readbuf;
698 err = mtd_read_oob(mtd, addr, &ops); 694 err = mtd->read_oob(mtd, addr, &ops);
699 if (err) 695 if (err)
700 goto out; 696 goto out;
701 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) { 697 if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
702 pr_err("error: verify failed at %#llx\n", 698 printk(PRINT_PREF "error: verify failed at %#llx\n",
703 (long long)addr); 699 (long long)addr);
704 errcnt += 1; 700 errcnt += 1;
705 if (errcnt > 1000) { 701 if (errcnt > 1000) {
706 pr_err("error: too many errors\n"); 702 printk(PRINT_PREF "error: too many errors\n");
707 goto out; 703 goto out;
708 } 704 }
709 } 705 }
710 if (i % 256 == 0) 706 if (i % 256 == 0)
711 pr_info("verified up to eraseblock %u\n", i); 707 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
712 cond_resched(); 708 cond_resched();
713 } 709 }
714 pr_info("verified %u eraseblocks\n", i); 710 printk(PRINT_PREF "verified %u eraseblocks\n", i);
715 711
716 pr_info("finished with %d errors\n", errcnt); 712 printk(PRINT_PREF "finished with %d errors\n", errcnt);
717out: 713out:
718 kfree(bbt); 714 kfree(bbt);
719 kfree(writebuf); 715 kfree(writebuf);
720 kfree(readbuf); 716 kfree(readbuf);
721 put_mtd_device(mtd); 717 put_mtd_device(mtd);
722 if (err) 718 if (err)
723 pr_info("error %d occurred\n", err); 719 printk(PRINT_PREF "error %d occurred\n", err);
724 printk(KERN_INFO "=================================================\n"); 720 printk(KERN_INFO "=================================================\n");
725 return err; 721 return err;
726} 722}
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index f93a76f8811..00b937e38c1 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -19,8 +19,6 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <asm/div64.h> 22#include <asm/div64.h>
25#include <linux/init.h> 23#include <linux/init.h>
26#include <linux/module.h> 24#include <linux/module.h>
@@ -30,7 +28,9 @@
30#include <linux/slab.h> 28#include <linux/slab.h>
31#include <linux/sched.h> 29#include <linux/sched.h>
32 30
33static int dev = -EINVAL; 31#define PRINT_PREF KERN_INFO "mtd_pagetest: "
32
33static int dev;
34module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
36 36
@@ -77,14 +77,14 @@ static int erase_eraseblock(int ebnum)
77 ei.addr = addr; 77 ei.addr = addr;
78 ei.len = mtd->erasesize; 78 ei.len = mtd->erasesize;
79 79
80 err = mtd_erase(mtd, &ei); 80 err = mtd->erase(mtd, &ei);
81 if (err) { 81 if (err) {
82 pr_err("error %d while erasing EB %d\n", err, ebnum); 82 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
83 return err; 83 return err;
84 } 84 }
85 85
86 if (ei.state == MTD_ERASE_FAILED) { 86 if (ei.state == MTD_ERASE_FAILED) {
87 pr_err("some erase error occurred at EB %d\n", 87 printk(PRINT_PREF "some erase error occurred at EB %d\n",
88 ebnum); 88 ebnum);
89 return -EIO; 89 return -EIO;
90 } 90 }
@@ -95,14 +95,14 @@ static int erase_eraseblock(int ebnum)
95static int write_eraseblock(int ebnum) 95static int write_eraseblock(int ebnum)
96{ 96{
97 int err = 0; 97 int err = 0;
98 size_t written; 98 size_t written = 0;
99 loff_t addr = ebnum * mtd->erasesize; 99 loff_t addr = ebnum * mtd->erasesize;
100 100
101 set_random_data(writebuf, mtd->erasesize); 101 set_random_data(writebuf, mtd->erasesize);
102 cond_resched(); 102 cond_resched();
103 err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf); 103 err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
104 if (err || written != mtd->erasesize) 104 if (err || written != mtd->erasesize)
105 pr_err("error: write failed at %#llx\n", 105 printk(PRINT_PREF "error: write failed at %#llx\n",
106 (long long)addr); 106 (long long)addr);
107 107
108 return err; 108 return err;
@@ -111,7 +111,7 @@ static int write_eraseblock(int ebnum)
111static int verify_eraseblock(int ebnum) 111static int verify_eraseblock(int ebnum)
112{ 112{
113 uint32_t j; 113 uint32_t j;
114 size_t read; 114 size_t read = 0;
115 int err = 0, i; 115 int err = 0, i;
116 loff_t addr0, addrn; 116 loff_t addr0, addrn;
117 loff_t addr = ebnum * mtd->erasesize; 117 loff_t addr = ebnum * mtd->erasesize;
@@ -127,33 +127,34 @@ static int verify_eraseblock(int ebnum)
127 set_random_data(writebuf, mtd->erasesize); 127 set_random_data(writebuf, mtd->erasesize);
128 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { 128 for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
129 /* Do a read to set the internal dataRAMs to different data */ 129 /* Do a read to set the internal dataRAMs to different data */
130 err = mtd_read(mtd, addr0, bufsize, &read, twopages); 130 err = mtd->read(mtd, addr0, bufsize, &read, twopages);
131 if (mtd_is_bitflip(err)) 131 if (err == -EUCLEAN)
132 err = 0; 132 err = 0;
133 if (err || read != bufsize) { 133 if (err || read != bufsize) {
134 pr_err("error: read failed at %#llx\n", 134 printk(PRINT_PREF "error: read failed at %#llx\n",
135 (long long)addr0); 135 (long long)addr0);
136 return err; 136 return err;
137 } 137 }
138 err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages); 138 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
139 if (mtd_is_bitflip(err)) 139 if (err == -EUCLEAN)
140 err = 0; 140 err = 0;
141 if (err || read != bufsize) { 141 if (err || read != bufsize) {
142 pr_err("error: read failed at %#llx\n", 142 printk(PRINT_PREF "error: read failed at %#llx\n",
143 (long long)(addrn - bufsize)); 143 (long long)(addrn - bufsize));
144 return err; 144 return err;
145 } 145 }
146 memset(twopages, 0, bufsize); 146 memset(twopages, 0, bufsize);
147 err = mtd_read(mtd, addr, bufsize, &read, twopages); 147 read = 0;
148 if (mtd_is_bitflip(err)) 148 err = mtd->read(mtd, addr, bufsize, &read, twopages);
149 if (err == -EUCLEAN)
149 err = 0; 150 err = 0;
150 if (err || read != bufsize) { 151 if (err || read != bufsize) {
151 pr_err("error: read failed at %#llx\n", 152 printk(PRINT_PREF "error: read failed at %#llx\n",
152 (long long)addr); 153 (long long)addr);
153 break; 154 break;
154 } 155 }
155 if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) { 156 if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
156 pr_err("error: verify failed at %#llx\n", 157 printk(PRINT_PREF "error: verify failed at %#llx\n",
157 (long long)addr); 158 (long long)addr);
158 errcnt += 1; 159 errcnt += 1;
159 } 160 }
@@ -162,35 +163,36 @@ static int verify_eraseblock(int ebnum)
162 if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) { 163 if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
163 unsigned long oldnext = next; 164 unsigned long oldnext = next;
164 /* Do a read to set the internal dataRAMs to different data */ 165 /* Do a read to set the internal dataRAMs to different data */
165 err = mtd_read(mtd, addr0, bufsize, &read, twopages); 166 err = mtd->read(mtd, addr0, bufsize, &read, twopages);
166 if (mtd_is_bitflip(err)) 167 if (err == -EUCLEAN)
167 err = 0; 168 err = 0;
168 if (err || read != bufsize) { 169 if (err || read != bufsize) {
169 pr_err("error: read failed at %#llx\n", 170 printk(PRINT_PREF "error: read failed at %#llx\n",
170 (long long)addr0); 171 (long long)addr0);
171 return err; 172 return err;
172 } 173 }
173 err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages); 174 err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
174 if (mtd_is_bitflip(err)) 175 if (err == -EUCLEAN)
175 err = 0; 176 err = 0;
176 if (err || read != bufsize) { 177 if (err || read != bufsize) {
177 pr_err("error: read failed at %#llx\n", 178 printk(PRINT_PREF "error: read failed at %#llx\n",
178 (long long)(addrn - bufsize)); 179 (long long)(addrn - bufsize));
179 return err; 180 return err;
180 } 181 }
181 memset(twopages, 0, bufsize); 182 memset(twopages, 0, bufsize);
182 err = mtd_read(mtd, addr, bufsize, &read, twopages); 183 read = 0;
183 if (mtd_is_bitflip(err)) 184 err = mtd->read(mtd, addr, bufsize, &read, twopages);
185 if (err == -EUCLEAN)
184 err = 0; 186 err = 0;
185 if (err || read != bufsize) { 187 if (err || read != bufsize) {
186 pr_err("error: read failed at %#llx\n", 188 printk(PRINT_PREF "error: read failed at %#llx\n",
187 (long long)addr); 189 (long long)addr);
188 return err; 190 return err;
189 } 191 }
190 memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize); 192 memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
191 set_random_data(boundary + pgsize, pgsize); 193 set_random_data(boundary + pgsize, pgsize);
192 if (memcmp(twopages, boundary, bufsize)) { 194 if (memcmp(twopages, boundary, bufsize)) {
193 pr_err("error: verify failed at %#llx\n", 195 printk(PRINT_PREF "error: verify failed at %#llx\n",
194 (long long)addr); 196 (long long)addr);
195 errcnt += 1; 197 errcnt += 1;
196 } 198 }
@@ -201,15 +203,15 @@ static int verify_eraseblock(int ebnum)
201 203
202static int crosstest(void) 204static int crosstest(void)
203{ 205{
204 size_t read; 206 size_t read = 0;
205 int err = 0, i; 207 int err = 0, i;
206 loff_t addr, addr0, addrn; 208 loff_t addr, addr0, addrn;
207 unsigned char *pp1, *pp2, *pp3, *pp4; 209 unsigned char *pp1, *pp2, *pp3, *pp4;
208 210
209 pr_info("crosstest\n"); 211 printk(PRINT_PREF "crosstest\n");
210 pp1 = kmalloc(pgsize * 4, GFP_KERNEL); 212 pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
211 if (!pp1) { 213 if (!pp1) {
212 pr_err("error: cannot allocate memory\n"); 214 printk(PRINT_PREF "error: cannot allocate memory\n");
213 return -ENOMEM; 215 return -ENOMEM;
214 } 216 }
215 pp2 = pp1 + pgsize; 217 pp2 = pp1 + pgsize;
@@ -226,88 +228,93 @@ static int crosstest(void)
226 addrn -= mtd->erasesize; 228 addrn -= mtd->erasesize;
227 229
228 /* Read 2nd-to-last page to pp1 */ 230 /* Read 2nd-to-last page to pp1 */
231 read = 0;
229 addr = addrn - pgsize - pgsize; 232 addr = addrn - pgsize - pgsize;
230 err = mtd_read(mtd, addr, pgsize, &read, pp1); 233 err = mtd->read(mtd, addr, pgsize, &read, pp1);
231 if (mtd_is_bitflip(err)) 234 if (err == -EUCLEAN)
232 err = 0; 235 err = 0;
233 if (err || read != pgsize) { 236 if (err || read != pgsize) {
234 pr_err("error: read failed at %#llx\n", 237 printk(PRINT_PREF "error: read failed at %#llx\n",
235 (long long)addr); 238 (long long)addr);
236 kfree(pp1); 239 kfree(pp1);
237 return err; 240 return err;
238 } 241 }
239 242
240 /* Read 3rd-to-last page to pp1 */ 243 /* Read 3rd-to-last page to pp1 */
244 read = 0;
241 addr = addrn - pgsize - pgsize - pgsize; 245 addr = addrn - pgsize - pgsize - pgsize;
242 err = mtd_read(mtd, addr, pgsize, &read, pp1); 246 err = mtd->read(mtd, addr, pgsize, &read, pp1);
243 if (mtd_is_bitflip(err)) 247 if (err == -EUCLEAN)
244 err = 0; 248 err = 0;
245 if (err || read != pgsize) { 249 if (err || read != pgsize) {
246 pr_err("error: read failed at %#llx\n", 250 printk(PRINT_PREF "error: read failed at %#llx\n",
247 (long long)addr); 251 (long long)addr);
248 kfree(pp1); 252 kfree(pp1);
249 return err; 253 return err;
250 } 254 }
251 255
252 /* Read first page to pp2 */ 256 /* Read first page to pp2 */
257 read = 0;
253 addr = addr0; 258 addr = addr0;
254 pr_info("reading page at %#llx\n", (long long)addr); 259 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
255 err = mtd_read(mtd, addr, pgsize, &read, pp2); 260 err = mtd->read(mtd, addr, pgsize, &read, pp2);
256 if (mtd_is_bitflip(err)) 261 if (err == -EUCLEAN)
257 err = 0; 262 err = 0;
258 if (err || read != pgsize) { 263 if (err || read != pgsize) {
259 pr_err("error: read failed at %#llx\n", 264 printk(PRINT_PREF "error: read failed at %#llx\n",
260 (long long)addr); 265 (long long)addr);
261 kfree(pp1); 266 kfree(pp1);
262 return err; 267 return err;
263 } 268 }
264 269
265 /* Read last page to pp3 */ 270 /* Read last page to pp3 */
271 read = 0;
266 addr = addrn - pgsize; 272 addr = addrn - pgsize;
267 pr_info("reading page at %#llx\n", (long long)addr); 273 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
268 err = mtd_read(mtd, addr, pgsize, &read, pp3); 274 err = mtd->read(mtd, addr, pgsize, &read, pp3);
269 if (mtd_is_bitflip(err)) 275 if (err == -EUCLEAN)
270 err = 0; 276 err = 0;
271 if (err || read != pgsize) { 277 if (err || read != pgsize) {
272 pr_err("error: read failed at %#llx\n", 278 printk(PRINT_PREF "error: read failed at %#llx\n",
273 (long long)addr); 279 (long long)addr);
274 kfree(pp1); 280 kfree(pp1);
275 return err; 281 return err;
276 } 282 }
277 283
278 /* Read first page again to pp4 */ 284 /* Read first page again to pp4 */
285 read = 0;
279 addr = addr0; 286 addr = addr0;
280 pr_info("reading page at %#llx\n", (long long)addr); 287 printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
281 err = mtd_read(mtd, addr, pgsize, &read, pp4); 288 err = mtd->read(mtd, addr, pgsize, &read, pp4);
282 if (mtd_is_bitflip(err)) 289 if (err == -EUCLEAN)
283 err = 0; 290 err = 0;
284 if (err || read != pgsize) { 291 if (err || read != pgsize) {
285 pr_err("error: read failed at %#llx\n", 292 printk(PRINT_PREF "error: read failed at %#llx\n",
286 (long long)addr); 293 (long long)addr);
287 kfree(pp1); 294 kfree(pp1);
288 return err; 295 return err;
289 } 296 }
290 297
291 /* pp2 and pp4 should be the same */ 298 /* pp2 and pp4 should be the same */
292 pr_info("verifying pages read at %#llx match\n", 299 printk(PRINT_PREF "verifying pages read at %#llx match\n",
293 (long long)addr0); 300 (long long)addr0);
294 if (memcmp(pp2, pp4, pgsize)) { 301 if (memcmp(pp2, pp4, pgsize)) {
295 pr_err("verify failed!\n"); 302 printk(PRINT_PREF "verify failed!\n");
296 errcnt += 1; 303 errcnt += 1;
297 } else if (!err) 304 } else if (!err)
298 pr_info("crosstest ok\n"); 305 printk(PRINT_PREF "crosstest ok\n");
299 kfree(pp1); 306 kfree(pp1);
300 return err; 307 return err;
301} 308}
302 309
303static int erasecrosstest(void) 310static int erasecrosstest(void)
304{ 311{
305 size_t read, written; 312 size_t read = 0, written = 0;
306 int err = 0, i, ebnum, ebnum2; 313 int err = 0, i, ebnum, ebnum2;
307 loff_t addr0; 314 loff_t addr0;
308 char *readbuf = twopages; 315 char *readbuf = twopages;
309 316
310 pr_info("erasecrosstest\n"); 317 printk(PRINT_PREF "erasecrosstest\n");
311 318
312 ebnum = 0; 319 ebnum = 0;
313 addr0 = 0; 320 addr0 = 0;
@@ -320,89 +327,89 @@ static int erasecrosstest(void)
320 while (ebnum2 && bbt[ebnum2]) 327 while (ebnum2 && bbt[ebnum2])
321 ebnum2 -= 1; 328 ebnum2 -= 1;
322 329
323 pr_info("erasing block %d\n", ebnum); 330 printk(PRINT_PREF "erasing block %d\n", ebnum);
324 err = erase_eraseblock(ebnum); 331 err = erase_eraseblock(ebnum);
325 if (err) 332 if (err)
326 return err; 333 return err;
327 334
328 pr_info("writing 1st page of block %d\n", ebnum); 335 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
329 set_random_data(writebuf, pgsize); 336 set_random_data(writebuf, pgsize);
330 strcpy(writebuf, "There is no data like this!"); 337 strcpy(writebuf, "There is no data like this!");
331 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 338 err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
332 if (err || written != pgsize) { 339 if (err || written != pgsize) {
333 pr_info("error: write failed at %#llx\n", 340 printk(PRINT_PREF "error: write failed at %#llx\n",
334 (long long)addr0); 341 (long long)addr0);
335 return err ? err : -1; 342 return err ? err : -1;
336 } 343 }
337 344
338 pr_info("reading 1st page of block %d\n", ebnum); 345 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
339 memset(readbuf, 0, pgsize); 346 memset(readbuf, 0, pgsize);
340 err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 347 err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
341 if (mtd_is_bitflip(err)) 348 if (err == -EUCLEAN)
342 err = 0; 349 err = 0;
343 if (err || read != pgsize) { 350 if (err || read != pgsize) {
344 pr_err("error: read failed at %#llx\n", 351 printk(PRINT_PREF "error: read failed at %#llx\n",
345 (long long)addr0); 352 (long long)addr0);
346 return err ? err : -1; 353 return err ? err : -1;
347 } 354 }
348 355
349 pr_info("verifying 1st page of block %d\n", ebnum); 356 printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
350 if (memcmp(writebuf, readbuf, pgsize)) { 357 if (memcmp(writebuf, readbuf, pgsize)) {
351 pr_err("verify failed!\n"); 358 printk(PRINT_PREF "verify failed!\n");
352 errcnt += 1; 359 errcnt += 1;
353 return -1; 360 return -1;
354 } 361 }
355 362
356 pr_info("erasing block %d\n", ebnum); 363 printk(PRINT_PREF "erasing block %d\n", ebnum);
357 err = erase_eraseblock(ebnum); 364 err = erase_eraseblock(ebnum);
358 if (err) 365 if (err)
359 return err; 366 return err;
360 367
361 pr_info("writing 1st page of block %d\n", ebnum); 368 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
362 set_random_data(writebuf, pgsize); 369 set_random_data(writebuf, pgsize);
363 strcpy(writebuf, "There is no data like this!"); 370 strcpy(writebuf, "There is no data like this!");
364 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 371 err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
365 if (err || written != pgsize) { 372 if (err || written != pgsize) {
366 pr_err("error: write failed at %#llx\n", 373 printk(PRINT_PREF "error: write failed at %#llx\n",
367 (long long)addr0); 374 (long long)addr0);
368 return err ? err : -1; 375 return err ? err : -1;
369 } 376 }
370 377
371 pr_info("erasing block %d\n", ebnum2); 378 printk(PRINT_PREF "erasing block %d\n", ebnum2);
372 err = erase_eraseblock(ebnum2); 379 err = erase_eraseblock(ebnum2);
373 if (err) 380 if (err)
374 return err; 381 return err;
375 382
376 pr_info("reading 1st page of block %d\n", ebnum); 383 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
377 memset(readbuf, 0, pgsize); 384 memset(readbuf, 0, pgsize);
378 err = mtd_read(mtd, addr0, pgsize, &read, readbuf); 385 err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
379 if (mtd_is_bitflip(err)) 386 if (err == -EUCLEAN)
380 err = 0; 387 err = 0;
381 if (err || read != pgsize) { 388 if (err || read != pgsize) {
382 pr_err("error: read failed at %#llx\n", 389 printk(PRINT_PREF "error: read failed at %#llx\n",
383 (long long)addr0); 390 (long long)addr0);
384 return err ? err : -1; 391 return err ? err : -1;
385 } 392 }
386 393
387 pr_info("verifying 1st page of block %d\n", ebnum); 394 printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
388 if (memcmp(writebuf, readbuf, pgsize)) { 395 if (memcmp(writebuf, readbuf, pgsize)) {
389 pr_err("verify failed!\n"); 396 printk(PRINT_PREF "verify failed!\n");
390 errcnt += 1; 397 errcnt += 1;
391 return -1; 398 return -1;
392 } 399 }
393 400
394 if (!err) 401 if (!err)
395 pr_info("erasecrosstest ok\n"); 402 printk(PRINT_PREF "erasecrosstest ok\n");
396 return err; 403 return err;
397} 404}
398 405
399static int erasetest(void) 406static int erasetest(void)
400{ 407{
401 size_t read, written; 408 size_t read = 0, written = 0;
402 int err = 0, i, ebnum, ok = 1; 409 int err = 0, i, ebnum, ok = 1;
403 loff_t addr0; 410 loff_t addr0;
404 411
405 pr_info("erasetest\n"); 412 printk(PRINT_PREF "erasetest\n");
406 413
407 ebnum = 0; 414 ebnum = 0;
408 addr0 = 0; 415 addr0 = 0;
@@ -411,40 +418,40 @@ static int erasetest(void)
411 ebnum += 1; 418 ebnum += 1;
412 } 419 }
413 420
414 pr_info("erasing block %d\n", ebnum); 421 printk(PRINT_PREF "erasing block %d\n", ebnum);
415 err = erase_eraseblock(ebnum); 422 err = erase_eraseblock(ebnum);
416 if (err) 423 if (err)
417 return err; 424 return err;
418 425
419 pr_info("writing 1st page of block %d\n", ebnum); 426 printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
420 set_random_data(writebuf, pgsize); 427 set_random_data(writebuf, pgsize);
421 err = mtd_write(mtd, addr0, pgsize, &written, writebuf); 428 err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
422 if (err || written != pgsize) { 429 if (err || written != pgsize) {
423 pr_err("error: write failed at %#llx\n", 430 printk(PRINT_PREF "error: write failed at %#llx\n",
424 (long long)addr0); 431 (long long)addr0);
425 return err ? err : -1; 432 return err ? err : -1;
426 } 433 }
427 434
428 pr_info("erasing block %d\n", ebnum); 435 printk(PRINT_PREF "erasing block %d\n", ebnum);
429 err = erase_eraseblock(ebnum); 436 err = erase_eraseblock(ebnum);
430 if (err) 437 if (err)
431 return err; 438 return err;
432 439
433 pr_info("reading 1st page of block %d\n", ebnum); 440 printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
434 err = mtd_read(mtd, addr0, pgsize, &read, twopages); 441 err = mtd->read(mtd, addr0, pgsize, &read, twopages);
435 if (mtd_is_bitflip(err)) 442 if (err == -EUCLEAN)
436 err = 0; 443 err = 0;
437 if (err || read != pgsize) { 444 if (err || read != pgsize) {
438 pr_err("error: read failed at %#llx\n", 445 printk(PRINT_PREF "error: read failed at %#llx\n",
439 (long long)addr0); 446 (long long)addr0);
440 return err ? err : -1; 447 return err ? err : -1;
441 } 448 }
442 449
443 pr_info("verifying 1st page of block %d is all 0xff\n", 450 printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
444 ebnum); 451 ebnum);
445 for (i = 0; i < pgsize; ++i) 452 for (i = 0; i < pgsize; ++i)
446 if (twopages[i] != 0xff) { 453 if (twopages[i] != 0xff) {
447 pr_err("verifying all 0xff failed at %d\n", 454 printk(PRINT_PREF "verifying all 0xff failed at %d\n",
448 i); 455 i);
449 errcnt += 1; 456 errcnt += 1;
450 ok = 0; 457 ok = 0;
@@ -452,7 +459,7 @@ static int erasetest(void)
452 } 459 }
453 460
454 if (ok && !err) 461 if (ok && !err)
455 pr_info("erasetest ok\n"); 462 printk(PRINT_PREF "erasetest ok\n");
456 463
457 return err; 464 return err;
458} 465}
@@ -462,9 +469,9 @@ static int is_block_bad(int ebnum)
462 loff_t addr = ebnum * mtd->erasesize; 469 loff_t addr = ebnum * mtd->erasesize;
463 int ret; 470 int ret;
464 471
465 ret = mtd_block_isbad(mtd, addr); 472 ret = mtd->block_isbad(mtd, addr);
466 if (ret) 473 if (ret)
467 pr_info("block %d is bad\n", ebnum); 474 printk(PRINT_PREF "block %d is bad\n", ebnum);
468 return ret; 475 return ret;
469} 476}
470 477
@@ -474,18 +481,18 @@ static int scan_for_bad_eraseblocks(void)
474 481
475 bbt = kzalloc(ebcnt, GFP_KERNEL); 482 bbt = kzalloc(ebcnt, GFP_KERNEL);
476 if (!bbt) { 483 if (!bbt) {
477 pr_err("error: cannot allocate memory\n"); 484 printk(PRINT_PREF "error: cannot allocate memory\n");
478 return -ENOMEM; 485 return -ENOMEM;
479 } 486 }
480 487
481 pr_info("scanning for bad eraseblocks\n"); 488 printk(PRINT_PREF "scanning for bad eraseblocks\n");
482 for (i = 0; i < ebcnt; ++i) { 489 for (i = 0; i < ebcnt; ++i) {
483 bbt[i] = is_block_bad(i) ? 1 : 0; 490 bbt[i] = is_block_bad(i) ? 1 : 0;
484 if (bbt[i]) 491 if (bbt[i])
485 bad += 1; 492 bad += 1;
486 cond_resched(); 493 cond_resched();
487 } 494 }
488 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 495 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
489 return 0; 496 return 0;
490} 497}
491 498
@@ -497,24 +504,17 @@ static int __init mtd_pagetest_init(void)
497 504
498 printk(KERN_INFO "\n"); 505 printk(KERN_INFO "\n");
499 printk(KERN_INFO "=================================================\n"); 506 printk(KERN_INFO "=================================================\n");
500 507 printk(PRINT_PREF "MTD device: %d\n", dev);
501 if (dev < 0) {
502 pr_info("Please specify a valid mtd-device via module parameter\n");
503 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
504 return -EINVAL;
505 }
506
507 pr_info("MTD device: %d\n", dev);
508 508
509 mtd = get_mtd_device(NULL, dev); 509 mtd = get_mtd_device(NULL, dev);
510 if (IS_ERR(mtd)) { 510 if (IS_ERR(mtd)) {
511 err = PTR_ERR(mtd); 511 err = PTR_ERR(mtd);
512 pr_err("error: cannot get MTD device\n"); 512 printk(PRINT_PREF "error: cannot get MTD device\n");
513 return err; 513 return err;
514 } 514 }
515 515
516 if (mtd->type != MTD_NANDFLASH) { 516 if (mtd->type != MTD_NANDFLASH) {
517 pr_info("this test requires NAND flash\n"); 517 printk(PRINT_PREF "this test requires NAND flash\n");
518 goto out; 518 goto out;
519 } 519 }
520 520
@@ -524,7 +524,7 @@ static int __init mtd_pagetest_init(void)
524 pgcnt = mtd->erasesize / mtd->writesize; 524 pgcnt = mtd->erasesize / mtd->writesize;
525 pgsize = mtd->writesize; 525 pgsize = mtd->writesize;
526 526
527 pr_info("MTD device size %llu, eraseblock size %u, " 527 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
528 "page size %u, count of eraseblocks %u, pages per " 528 "page size %u, count of eraseblocks %u, pages per "
529 "eraseblock %u, OOB size %u\n", 529 "eraseblock %u, OOB size %u\n",
530 (unsigned long long)mtd->size, mtd->erasesize, 530 (unsigned long long)mtd->size, mtd->erasesize,
@@ -534,17 +534,17 @@ static int __init mtd_pagetest_init(void)
534 bufsize = pgsize * 2; 534 bufsize = pgsize * 2;
535 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL); 535 writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
536 if (!writebuf) { 536 if (!writebuf) {
537 pr_err("error: cannot allocate memory\n"); 537 printk(PRINT_PREF "error: cannot allocate memory\n");
538 goto out; 538 goto out;
539 } 539 }
540 twopages = kmalloc(bufsize, GFP_KERNEL); 540 twopages = kmalloc(bufsize, GFP_KERNEL);
541 if (!twopages) { 541 if (!twopages) {
542 pr_err("error: cannot allocate memory\n"); 542 printk(PRINT_PREF "error: cannot allocate memory\n");
543 goto out; 543 goto out;
544 } 544 }
545 boundary = kmalloc(bufsize, GFP_KERNEL); 545 boundary = kmalloc(bufsize, GFP_KERNEL);
546 if (!boundary) { 546 if (!boundary) {
547 pr_err("error: cannot allocate memory\n"); 547 printk(PRINT_PREF "error: cannot allocate memory\n");
548 goto out; 548 goto out;
549 } 549 }
550 550
@@ -553,7 +553,7 @@ static int __init mtd_pagetest_init(void)
553 goto out; 553 goto out;
554 554
555 /* Erase all eraseblocks */ 555 /* Erase all eraseblocks */
556 pr_info("erasing whole device\n"); 556 printk(PRINT_PREF "erasing whole device\n");
557 for (i = 0; i < ebcnt; ++i) { 557 for (i = 0; i < ebcnt; ++i) {
558 if (bbt[i]) 558 if (bbt[i])
559 continue; 559 continue;
@@ -562,11 +562,11 @@ static int __init mtd_pagetest_init(void)
562 goto out; 562 goto out;
563 cond_resched(); 563 cond_resched();
564 } 564 }
565 pr_info("erased %u eraseblocks\n", i); 565 printk(PRINT_PREF "erased %u eraseblocks\n", i);
566 566
567 /* Write all eraseblocks */ 567 /* Write all eraseblocks */
568 simple_srand(1); 568 simple_srand(1);
569 pr_info("writing whole device\n"); 569 printk(PRINT_PREF "writing whole device\n");
570 for (i = 0; i < ebcnt; ++i) { 570 for (i = 0; i < ebcnt; ++i) {
571 if (bbt[i]) 571 if (bbt[i])
572 continue; 572 continue;
@@ -574,14 +574,14 @@ static int __init mtd_pagetest_init(void)
574 if (err) 574 if (err)
575 goto out; 575 goto out;
576 if (i % 256 == 0) 576 if (i % 256 == 0)
577 pr_info("written up to eraseblock %u\n", i); 577 printk(PRINT_PREF "written up to eraseblock %u\n", i);
578 cond_resched(); 578 cond_resched();
579 } 579 }
580 pr_info("written %u eraseblocks\n", i); 580 printk(PRINT_PREF "written %u eraseblocks\n", i);
581 581
582 /* Check all eraseblocks */ 582 /* Check all eraseblocks */
583 simple_srand(1); 583 simple_srand(1);
584 pr_info("verifying all eraseblocks\n"); 584 printk(PRINT_PREF "verifying all eraseblocks\n");
585 for (i = 0; i < ebcnt; ++i) { 585 for (i = 0; i < ebcnt; ++i) {
586 if (bbt[i]) 586 if (bbt[i])
587 continue; 587 continue;
@@ -589,10 +589,10 @@ static int __init mtd_pagetest_init(void)
589 if (err) 589 if (err)
590 goto out; 590 goto out;
591 if (i % 256 == 0) 591 if (i % 256 == 0)
592 pr_info("verified up to eraseblock %u\n", i); 592 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
593 cond_resched(); 593 cond_resched();
594 } 594 }
595 pr_info("verified %u eraseblocks\n", i); 595 printk(PRINT_PREF "verified %u eraseblocks\n", i);
596 596
597 err = crosstest(); 597 err = crosstest();
598 if (err) 598 if (err)
@@ -606,7 +606,7 @@ static int __init mtd_pagetest_init(void)
606 if (err) 606 if (err)
607 goto out; 607 goto out;
608 608
609 pr_info("finished with %d errors\n", errcnt); 609 printk(PRINT_PREF "finished with %d errors\n", errcnt);
610out: 610out:
611 611
612 kfree(bbt); 612 kfree(bbt);
@@ -615,7 +615,7 @@ out:
615 kfree(writebuf); 615 kfree(writebuf);
616 put_mtd_device(mtd); 616 put_mtd_device(mtd);
617 if (err) 617 if (err)
618 pr_info("error %d occurred\n", err); 618 printk(PRINT_PREF "error %d occurred\n", err);
619 printk(KERN_INFO "=================================================\n"); 619 printk(KERN_INFO "=================================================\n");
620 return err; 620 return err;
621} 621}
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 266de04b6d2..afe71aa15c4 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -19,8 +19,6 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/init.h> 22#include <linux/init.h>
25#include <linux/module.h> 23#include <linux/module.h>
26#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
@@ -29,7 +27,9 @@
29#include <linux/slab.h> 27#include <linux/slab.h>
30#include <linux/sched.h> 28#include <linux/sched.h>
31 29
32static int dev = -EINVAL; 30#define PRINT_PREF KERN_INFO "mtd_readtest: "
31
32static int dev;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
35 35
@@ -44,19 +44,19 @@ static int pgcnt;
44 44
45static int read_eraseblock_by_page(int ebnum) 45static int read_eraseblock_by_page(int ebnum)
46{ 46{
47 size_t read; 47 size_t read = 0;
48 int i, ret, err = 0; 48 int i, ret, err = 0;
49 loff_t addr = ebnum * mtd->erasesize; 49 loff_t addr = ebnum * mtd->erasesize;
50 void *buf = iobuf; 50 void *buf = iobuf;
51 void *oobbuf = iobuf1; 51 void *oobbuf = iobuf1;
52 52
53 for (i = 0; i < pgcnt; i++) { 53 for (i = 0; i < pgcnt; i++) {
54 memset(buf, 0 , pgsize); 54 memset(buf, 0 , pgcnt);
55 ret = mtd_read(mtd, addr, pgsize, &read, buf); 55 ret = mtd->read(mtd, addr, pgsize, &read, buf);
56 if (ret == -EUCLEAN) 56 if (ret == -EUCLEAN)
57 ret = 0; 57 ret = 0;
58 if (ret || read != pgsize) { 58 if (ret || read != pgsize) {
59 pr_err("error: read failed at %#llx\n", 59 printk(PRINT_PREF "error: read failed at %#llx\n",
60 (long long)addr); 60 (long long)addr);
61 if (!err) 61 if (!err)
62 err = ret; 62 err = ret;
@@ -66,7 +66,7 @@ static int read_eraseblock_by_page(int ebnum)
66 if (mtd->oobsize) { 66 if (mtd->oobsize) {
67 struct mtd_oob_ops ops; 67 struct mtd_oob_ops ops;
68 68
69 ops.mode = MTD_OPS_PLACE_OOB; 69 ops.mode = MTD_OOB_PLACE;
70 ops.len = 0; 70 ops.len = 0;
71 ops.retlen = 0; 71 ops.retlen = 0;
72 ops.ooblen = mtd->oobsize; 72 ops.ooblen = mtd->oobsize;
@@ -74,10 +74,9 @@ static int read_eraseblock_by_page(int ebnum)
74 ops.ooboffs = 0; 74 ops.ooboffs = 0;
75 ops.datbuf = NULL; 75 ops.datbuf = NULL;
76 ops.oobbuf = oobbuf; 76 ops.oobbuf = oobbuf;
77 ret = mtd_read_oob(mtd, addr, &ops); 77 ret = mtd->read_oob(mtd, addr, &ops);
78 if ((ret && !mtd_is_bitflip(ret)) || 78 if (ret || ops.oobretlen != mtd->oobsize) {
79 ops.oobretlen != mtd->oobsize) { 79 printk(PRINT_PREF "error: read oob failed at "
80 pr_err("error: read oob failed at "
81 "%#llx\n", (long long)addr); 80 "%#llx\n", (long long)addr);
82 if (!err) 81 if (!err)
83 err = ret; 82 err = ret;
@@ -99,7 +98,7 @@ static void dump_eraseblock(int ebnum)
99 char line[128]; 98 char line[128];
100 int pg, oob; 99 int pg, oob;
101 100
102 pr_info("dumping eraseblock %d\n", ebnum); 101 printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
103 n = mtd->erasesize; 102 n = mtd->erasesize;
104 for (i = 0; i < n;) { 103 for (i = 0; i < n;) {
105 char *p = line; 104 char *p = line;
@@ -112,7 +111,7 @@ static void dump_eraseblock(int ebnum)
112 } 111 }
113 if (!mtd->oobsize) 112 if (!mtd->oobsize)
114 return; 113 return;
115 pr_info("dumping oob from eraseblock %d\n", ebnum); 114 printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
116 n = mtd->oobsize; 115 n = mtd->oobsize;
117 for (pg = 0, i = 0; pg < pgcnt; pg++) 116 for (pg = 0, i = 0; pg < pgcnt; pg++)
118 for (oob = 0; oob < n;) { 117 for (oob = 0; oob < n;) {
@@ -132,9 +131,9 @@ static int is_block_bad(int ebnum)
132 loff_t addr = ebnum * mtd->erasesize; 131 loff_t addr = ebnum * mtd->erasesize;
133 int ret; 132 int ret;
134 133
135 ret = mtd_block_isbad(mtd, addr); 134 ret = mtd->block_isbad(mtd, addr);
136 if (ret) 135 if (ret)
137 pr_info("block %d is bad\n", ebnum); 136 printk(PRINT_PREF "block %d is bad\n", ebnum);
138 return ret; 137 return ret;
139} 138}
140 139
@@ -144,21 +143,22 @@ static int scan_for_bad_eraseblocks(void)
144 143
145 bbt = kzalloc(ebcnt, GFP_KERNEL); 144 bbt = kzalloc(ebcnt, GFP_KERNEL);
146 if (!bbt) { 145 if (!bbt) {
147 pr_err("error: cannot allocate memory\n"); 146 printk(PRINT_PREF "error: cannot allocate memory\n");
148 return -ENOMEM; 147 return -ENOMEM;
149 } 148 }
150 149
151 if (!mtd_can_have_bb(mtd)) 150 /* NOR flash does not implement block_isbad */
151 if (mtd->block_isbad == NULL)
152 return 0; 152 return 0;
153 153
154 pr_info("scanning for bad eraseblocks\n"); 154 printk(PRINT_PREF "scanning for bad eraseblocks\n");
155 for (i = 0; i < ebcnt; ++i) { 155 for (i = 0; i < ebcnt; ++i) {
156 bbt[i] = is_block_bad(i) ? 1 : 0; 156 bbt[i] = is_block_bad(i) ? 1 : 0;
157 if (bbt[i]) 157 if (bbt[i])
158 bad += 1; 158 bad += 1;
159 cond_resched(); 159 cond_resched();
160 } 160 }
161 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 161 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
162 return 0; 162 return 0;
163} 163}
164 164
@@ -169,23 +169,17 @@ static int __init mtd_readtest_init(void)
169 169
170 printk(KERN_INFO "\n"); 170 printk(KERN_INFO "\n");
171 printk(KERN_INFO "=================================================\n"); 171 printk(KERN_INFO "=================================================\n");
172 172 printk(PRINT_PREF "MTD device: %d\n", dev);
173 if (dev < 0) {
174 pr_info("Please specify a valid mtd-device via module parameter\n");
175 return -EINVAL;
176 }
177
178 pr_info("MTD device: %d\n", dev);
179 173
180 mtd = get_mtd_device(NULL, dev); 174 mtd = get_mtd_device(NULL, dev);
181 if (IS_ERR(mtd)) { 175 if (IS_ERR(mtd)) {
182 err = PTR_ERR(mtd); 176 err = PTR_ERR(mtd);
183 pr_err("error: Cannot get MTD device\n"); 177 printk(PRINT_PREF "error: Cannot get MTD device\n");
184 return err; 178 return err;
185 } 179 }
186 180
187 if (mtd->writesize == 1) { 181 if (mtd->writesize == 1) {
188 pr_info("not NAND flash, assume page size is 512 " 182 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
189 "bytes.\n"); 183 "bytes.\n");
190 pgsize = 512; 184 pgsize = 512;
191 } else 185 } else
@@ -196,7 +190,7 @@ static int __init mtd_readtest_init(void)
196 ebcnt = tmp; 190 ebcnt = tmp;
197 pgcnt = mtd->erasesize / pgsize; 191 pgcnt = mtd->erasesize / pgsize;
198 192
199 pr_info("MTD device size %llu, eraseblock size %u, " 193 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
200 "page size %u, count of eraseblocks %u, pages per " 194 "page size %u, count of eraseblocks %u, pages per "
201 "eraseblock %u, OOB size %u\n", 195 "eraseblock %u, OOB size %u\n",
202 (unsigned long long)mtd->size, mtd->erasesize, 196 (unsigned long long)mtd->size, mtd->erasesize,
@@ -205,12 +199,12 @@ static int __init mtd_readtest_init(void)
205 err = -ENOMEM; 199 err = -ENOMEM;
206 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); 200 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
207 if (!iobuf) { 201 if (!iobuf) {
208 pr_err("error: cannot allocate memory\n"); 202 printk(PRINT_PREF "error: cannot allocate memory\n");
209 goto out; 203 goto out;
210 } 204 }
211 iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL); 205 iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
212 if (!iobuf1) { 206 if (!iobuf1) {
213 pr_err("error: cannot allocate memory\n"); 207 printk(PRINT_PREF "error: cannot allocate memory\n");
214 goto out; 208 goto out;
215 } 209 }
216 210
@@ -219,7 +213,7 @@ static int __init mtd_readtest_init(void)
219 goto out; 213 goto out;
220 214
221 /* Read all eraseblocks 1 page at a time */ 215 /* Read all eraseblocks 1 page at a time */
222 pr_info("testing page read\n"); 216 printk(PRINT_PREF "testing page read\n");
223 for (i = 0; i < ebcnt; ++i) { 217 for (i = 0; i < ebcnt; ++i) {
224 int ret; 218 int ret;
225 219
@@ -235,9 +229,9 @@ static int __init mtd_readtest_init(void)
235 } 229 }
236 230
237 if (err) 231 if (err)
238 pr_info("finished with errors\n"); 232 printk(PRINT_PREF "finished with errors\n");
239 else 233 else
240 pr_info("finished\n"); 234 printk(PRINT_PREF "finished\n");
241 235
242out: 236out:
243 237
@@ -246,7 +240,7 @@ out:
246 kfree(bbt); 240 kfree(bbt);
247 put_mtd_device(mtd); 241 put_mtd_device(mtd);
248 if (err) 242 if (err)
249 pr_info("error %d occurred\n", err); 243 printk(PRINT_PREF "error %d occurred\n", err);
250 printk(KERN_INFO "=================================================\n"); 244 printk(KERN_INFO "=================================================\n");
251 return err; 245 return err;
252} 246}
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 596cbea8df4..627d4e2466a 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -19,8 +19,6 @@
19 * Author: Adrian Hunter <adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/init.h> 22#include <linux/init.h>
25#include <linux/module.h> 23#include <linux/module.h>
26#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
@@ -28,9 +26,10 @@
28#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
29#include <linux/slab.h> 27#include <linux/slab.h>
30#include <linux/sched.h> 28#include <linux/sched.h>
31#include <linux/random.h>
32 29
33static int dev = -EINVAL; 30#define PRINT_PREF KERN_INFO "mtd_speedtest: "
31
32static int dev;
34module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
35MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
36 35
@@ -48,13 +47,25 @@ static int ebcnt;
48static int pgcnt; 47static int pgcnt;
49static int goodebcnt; 48static int goodebcnt;
50static struct timeval start, finish; 49static struct timeval start, finish;
50static unsigned long next = 1;
51
52static inline unsigned int simple_rand(void)
53{
54 next = next * 1103515245 + 12345;
55 return (unsigned int)((next / 65536) % 32768);
56}
57
58static inline void simple_srand(unsigned long seed)
59{
60 next = seed;
61}
51 62
52static void set_random_data(unsigned char *buf, size_t len) 63static void set_random_data(unsigned char *buf, size_t len)
53{ 64{
54 size_t i; 65 size_t i;
55 66
56 for (i = 0; i < len; ++i) 67 for (i = 0; i < len; ++i)
57 buf[i] = random32(); 68 buf[i] = simple_rand();
58} 69}
59 70
60static int erase_eraseblock(int ebnum) 71static int erase_eraseblock(int ebnum)
@@ -68,14 +79,14 @@ static int erase_eraseblock(int ebnum)
68 ei.addr = addr; 79 ei.addr = addr;
69 ei.len = mtd->erasesize; 80 ei.len = mtd->erasesize;
70 81
71 err = mtd_erase(mtd, &ei); 82 err = mtd->erase(mtd, &ei);
72 if (err) { 83 if (err) {
73 pr_err("error %d while erasing EB %d\n", err, ebnum); 84 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
74 return err; 85 return err;
75 } 86 }
76 87
77 if (ei.state == MTD_ERASE_FAILED) { 88 if (ei.state == MTD_ERASE_FAILED) {
78 pr_err("some erase error occurred at EB %d\n", 89 printk(PRINT_PREF "some erase error occurred at EB %d\n",
79 ebnum); 90 ebnum);
80 return -EIO; 91 return -EIO;
81 } 92 }
@@ -94,15 +105,15 @@ static int multiblock_erase(int ebnum, int blocks)
94 ei.addr = addr; 105 ei.addr = addr;
95 ei.len = mtd->erasesize * blocks; 106 ei.len = mtd->erasesize * blocks;
96 107
97 err = mtd_erase(mtd, &ei); 108 err = mtd->erase(mtd, &ei);
98 if (err) { 109 if (err) {
99 pr_err("error %d while erasing EB %d, blocks %d\n", 110 printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
100 err, ebnum, blocks); 111 err, ebnum, blocks);
101 return err; 112 return err;
102 } 113 }
103 114
104 if (ei.state == MTD_ERASE_FAILED) { 115 if (ei.state == MTD_ERASE_FAILED) {
105 pr_err("some erase error occurred at EB %d," 116 printk(PRINT_PREF "some erase error occurred at EB %d,"
106 "blocks %d\n", ebnum, blocks); 117 "blocks %d\n", ebnum, blocks);
107 return -EIO; 118 return -EIO;
108 } 119 }
@@ -128,13 +139,13 @@ static int erase_whole_device(void)
128 139
129static int write_eraseblock(int ebnum) 140static int write_eraseblock(int ebnum)
130{ 141{
131 size_t written; 142 size_t written = 0;
132 int err = 0; 143 int err = 0;
133 loff_t addr = ebnum * mtd->erasesize; 144 loff_t addr = ebnum * mtd->erasesize;
134 145
135 err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf); 146 err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
136 if (err || written != mtd->erasesize) { 147 if (err || written != mtd->erasesize) {
137 pr_err("error: write failed at %#llx\n", addr); 148 printk(PRINT_PREF "error: write failed at %#llx\n", addr);
138 if (!err) 149 if (!err)
139 err = -EINVAL; 150 err = -EINVAL;
140 } 151 }
@@ -144,15 +155,15 @@ static int write_eraseblock(int ebnum)
144 155
145static int write_eraseblock_by_page(int ebnum) 156static int write_eraseblock_by_page(int ebnum)
146{ 157{
147 size_t written; 158 size_t written = 0;
148 int i, err = 0; 159 int i, err = 0;
149 loff_t addr = ebnum * mtd->erasesize; 160 loff_t addr = ebnum * mtd->erasesize;
150 void *buf = iobuf; 161 void *buf = iobuf;
151 162
152 for (i = 0; i < pgcnt; i++) { 163 for (i = 0; i < pgcnt; i++) {
153 err = mtd_write(mtd, addr, pgsize, &written, buf); 164 err = mtd->write(mtd, addr, pgsize, &written, buf);
154 if (err || written != pgsize) { 165 if (err || written != pgsize) {
155 pr_err("error: write failed at %#llx\n", 166 printk(PRINT_PREF "error: write failed at %#llx\n",
156 addr); 167 addr);
157 if (!err) 168 if (!err)
158 err = -EINVAL; 169 err = -EINVAL;
@@ -167,15 +178,15 @@ static int write_eraseblock_by_page(int ebnum)
167 178
168static int write_eraseblock_by_2pages(int ebnum) 179static int write_eraseblock_by_2pages(int ebnum)
169{ 180{
170 size_t written, sz = pgsize * 2; 181 size_t written = 0, sz = pgsize * 2;
171 int i, n = pgcnt / 2, err = 0; 182 int i, n = pgcnt / 2, err = 0;
172 loff_t addr = ebnum * mtd->erasesize; 183 loff_t addr = ebnum * mtd->erasesize;
173 void *buf = iobuf; 184 void *buf = iobuf;
174 185
175 for (i = 0; i < n; i++) { 186 for (i = 0; i < n; i++) {
176 err = mtd_write(mtd, addr, sz, &written, buf); 187 err = mtd->write(mtd, addr, sz, &written, buf);
177 if (err || written != sz) { 188 if (err || written != sz) {
178 pr_err("error: write failed at %#llx\n", 189 printk(PRINT_PREF "error: write failed at %#llx\n",
179 addr); 190 addr);
180 if (!err) 191 if (!err)
181 err = -EINVAL; 192 err = -EINVAL;
@@ -185,9 +196,9 @@ static int write_eraseblock_by_2pages(int ebnum)
185 buf += sz; 196 buf += sz;
186 } 197 }
187 if (pgcnt % 2) { 198 if (pgcnt % 2) {
188 err = mtd_write(mtd, addr, pgsize, &written, buf); 199 err = mtd->write(mtd, addr, pgsize, &written, buf);
189 if (err || written != pgsize) { 200 if (err || written != pgsize) {
190 pr_err("error: write failed at %#llx\n", 201 printk(PRINT_PREF "error: write failed at %#llx\n",
191 addr); 202 addr);
192 if (!err) 203 if (!err)
193 err = -EINVAL; 204 err = -EINVAL;
@@ -199,16 +210,16 @@ static int write_eraseblock_by_2pages(int ebnum)
199 210
200static int read_eraseblock(int ebnum) 211static int read_eraseblock(int ebnum)
201{ 212{
202 size_t read; 213 size_t read = 0;
203 int err = 0; 214 int err = 0;
204 loff_t addr = ebnum * mtd->erasesize; 215 loff_t addr = ebnum * mtd->erasesize;
205 216
206 err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf); 217 err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
207 /* Ignore corrected ECC errors */ 218 /* Ignore corrected ECC errors */
208 if (mtd_is_bitflip(err)) 219 if (err == -EUCLEAN)
209 err = 0; 220 err = 0;
210 if (err || read != mtd->erasesize) { 221 if (err || read != mtd->erasesize) {
211 pr_err("error: read failed at %#llx\n", addr); 222 printk(PRINT_PREF "error: read failed at %#llx\n", addr);
212 if (!err) 223 if (!err)
213 err = -EINVAL; 224 err = -EINVAL;
214 } 225 }
@@ -218,18 +229,18 @@ static int read_eraseblock(int ebnum)
218 229
219static int read_eraseblock_by_page(int ebnum) 230static int read_eraseblock_by_page(int ebnum)
220{ 231{
221 size_t read; 232 size_t read = 0;
222 int i, err = 0; 233 int i, err = 0;
223 loff_t addr = ebnum * mtd->erasesize; 234 loff_t addr = ebnum * mtd->erasesize;
224 void *buf = iobuf; 235 void *buf = iobuf;
225 236
226 for (i = 0; i < pgcnt; i++) { 237 for (i = 0; i < pgcnt; i++) {
227 err = mtd_read(mtd, addr, pgsize, &read, buf); 238 err = mtd->read(mtd, addr, pgsize, &read, buf);
228 /* Ignore corrected ECC errors */ 239 /* Ignore corrected ECC errors */
229 if (mtd_is_bitflip(err)) 240 if (err == -EUCLEAN)
230 err = 0; 241 err = 0;
231 if (err || read != pgsize) { 242 if (err || read != pgsize) {
232 pr_err("error: read failed at %#llx\n", 243 printk(PRINT_PREF "error: read failed at %#llx\n",
233 addr); 244 addr);
234 if (!err) 245 if (!err)
235 err = -EINVAL; 246 err = -EINVAL;
@@ -244,18 +255,18 @@ static int read_eraseblock_by_page(int ebnum)
244 255
245static int read_eraseblock_by_2pages(int ebnum) 256static int read_eraseblock_by_2pages(int ebnum)
246{ 257{
247 size_t read, sz = pgsize * 2; 258 size_t read = 0, sz = pgsize * 2;
248 int i, n = pgcnt / 2, err = 0; 259 int i, n = pgcnt / 2, err = 0;
249 loff_t addr = ebnum * mtd->erasesize; 260 loff_t addr = ebnum * mtd->erasesize;
250 void *buf = iobuf; 261 void *buf = iobuf;
251 262
252 for (i = 0; i < n; i++) { 263 for (i = 0; i < n; i++) {
253 err = mtd_read(mtd, addr, sz, &read, buf); 264 err = mtd->read(mtd, addr, sz, &read, buf);
254 /* Ignore corrected ECC errors */ 265 /* Ignore corrected ECC errors */
255 if (mtd_is_bitflip(err)) 266 if (err == -EUCLEAN)
256 err = 0; 267 err = 0;
257 if (err || read != sz) { 268 if (err || read != sz) {
258 pr_err("error: read failed at %#llx\n", 269 printk(PRINT_PREF "error: read failed at %#llx\n",
259 addr); 270 addr);
260 if (!err) 271 if (!err)
261 err = -EINVAL; 272 err = -EINVAL;
@@ -265,12 +276,12 @@ static int read_eraseblock_by_2pages(int ebnum)
265 buf += sz; 276 buf += sz;
266 } 277 }
267 if (pgcnt % 2) { 278 if (pgcnt % 2) {
268 err = mtd_read(mtd, addr, pgsize, &read, buf); 279 err = mtd->read(mtd, addr, pgsize, &read, buf);
269 /* Ignore corrected ECC errors */ 280 /* Ignore corrected ECC errors */
270 if (mtd_is_bitflip(err)) 281 if (err == -EUCLEAN)
271 err = 0; 282 err = 0;
272 if (err || read != pgsize) { 283 if (err || read != pgsize) {
273 pr_err("error: read failed at %#llx\n", 284 printk(PRINT_PREF "error: read failed at %#llx\n",
274 addr); 285 addr);
275 if (!err) 286 if (!err)
276 err = -EINVAL; 287 err = -EINVAL;
@@ -285,9 +296,9 @@ static int is_block_bad(int ebnum)
285 loff_t addr = ebnum * mtd->erasesize; 296 loff_t addr = ebnum * mtd->erasesize;
286 int ret; 297 int ret;
287 298
288 ret = mtd_block_isbad(mtd, addr); 299 ret = mtd->block_isbad(mtd, addr);
289 if (ret) 300 if (ret)
290 pr_info("block %d is bad\n", ebnum); 301 printk(PRINT_PREF "block %d is bad\n", ebnum);
291 return ret; 302 return ret;
292} 303}
293 304
@@ -321,21 +332,22 @@ static int scan_for_bad_eraseblocks(void)
321 332
322 bbt = kzalloc(ebcnt, GFP_KERNEL); 333 bbt = kzalloc(ebcnt, GFP_KERNEL);
323 if (!bbt) { 334 if (!bbt) {
324 pr_err("error: cannot allocate memory\n"); 335 printk(PRINT_PREF "error: cannot allocate memory\n");
325 return -ENOMEM; 336 return -ENOMEM;
326 } 337 }
327 338
328 if (!mtd_can_have_bb(mtd)) 339 /* NOR flash does not implement block_isbad */
340 if (mtd->block_isbad == NULL)
329 goto out; 341 goto out;
330 342
331 pr_info("scanning for bad eraseblocks\n"); 343 printk(PRINT_PREF "scanning for bad eraseblocks\n");
332 for (i = 0; i < ebcnt; ++i) { 344 for (i = 0; i < ebcnt; ++i) {
333 bbt[i] = is_block_bad(i) ? 1 : 0; 345 bbt[i] = is_block_bad(i) ? 1 : 0;
334 if (bbt[i]) 346 if (bbt[i])
335 bad += 1; 347 bad += 1;
336 cond_resched(); 348 cond_resched();
337 } 349 }
338 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 350 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
339out: 351out:
340 goodebcnt = ebcnt - bad; 352 goodebcnt = ebcnt - bad;
341 return 0; 353 return 0;
@@ -349,27 +361,20 @@ static int __init mtd_speedtest_init(void)
349 361
350 printk(KERN_INFO "\n"); 362 printk(KERN_INFO "\n");
351 printk(KERN_INFO "=================================================\n"); 363 printk(KERN_INFO "=================================================\n");
352
353 if (dev < 0) {
354 pr_info("Please specify a valid mtd-device via module parameter\n");
355 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
356 return -EINVAL;
357 }
358
359 if (count) 364 if (count)
360 pr_info("MTD device: %d count: %d\n", dev, count); 365 printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
361 else 366 else
362 pr_info("MTD device: %d\n", dev); 367 printk(PRINT_PREF "MTD device: %d\n", dev);
363 368
364 mtd = get_mtd_device(NULL, dev); 369 mtd = get_mtd_device(NULL, dev);
365 if (IS_ERR(mtd)) { 370 if (IS_ERR(mtd)) {
366 err = PTR_ERR(mtd); 371 err = PTR_ERR(mtd);
367 pr_err("error: cannot get MTD device\n"); 372 printk(PRINT_PREF "error: cannot get MTD device\n");
368 return err; 373 return err;
369 } 374 }
370 375
371 if (mtd->writesize == 1) { 376 if (mtd->writesize == 1) {
372 pr_info("not NAND flash, assume page size is 512 " 377 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
373 "bytes.\n"); 378 "bytes.\n");
374 pgsize = 512; 379 pgsize = 512;
375 } else 380 } else
@@ -380,7 +385,7 @@ static int __init mtd_speedtest_init(void)
380 ebcnt = tmp; 385 ebcnt = tmp;
381 pgcnt = mtd->erasesize / pgsize; 386 pgcnt = mtd->erasesize / pgsize;
382 387
383 pr_info("MTD device size %llu, eraseblock size %u, " 388 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
384 "page size %u, count of eraseblocks %u, pages per " 389 "page size %u, count of eraseblocks %u, pages per "
385 "eraseblock %u, OOB size %u\n", 390 "eraseblock %u, OOB size %u\n",
386 (unsigned long long)mtd->size, mtd->erasesize, 391 (unsigned long long)mtd->size, mtd->erasesize,
@@ -392,10 +397,11 @@ static int __init mtd_speedtest_init(void)
392 err = -ENOMEM; 397 err = -ENOMEM;
393 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); 398 iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
394 if (!iobuf) { 399 if (!iobuf) {
395 pr_err("error: cannot allocate memory\n"); 400 printk(PRINT_PREF "error: cannot allocate memory\n");
396 goto out; 401 goto out;
397 } 402 }
398 403
404 simple_srand(1);
399 set_random_data(iobuf, mtd->erasesize); 405 set_random_data(iobuf, mtd->erasesize);
400 406
401 err = scan_for_bad_eraseblocks(); 407 err = scan_for_bad_eraseblocks();
@@ -407,7 +413,7 @@ static int __init mtd_speedtest_init(void)
407 goto out; 413 goto out;
408 414
409 /* Write all eraseblocks, 1 eraseblock at a time */ 415 /* Write all eraseblocks, 1 eraseblock at a time */
410 pr_info("testing eraseblock write speed\n"); 416 printk(PRINT_PREF "testing eraseblock write speed\n");
411 start_timing(); 417 start_timing();
412 for (i = 0; i < ebcnt; ++i) { 418 for (i = 0; i < ebcnt; ++i) {
413 if (bbt[i]) 419 if (bbt[i])
@@ -419,10 +425,10 @@ static int __init mtd_speedtest_init(void)
419 } 425 }
420 stop_timing(); 426 stop_timing();
421 speed = calc_speed(); 427 speed = calc_speed();
422 pr_info("eraseblock write speed is %ld KiB/s\n", speed); 428 printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
423 429
424 /* Read all eraseblocks, 1 eraseblock at a time */ 430 /* Read all eraseblocks, 1 eraseblock at a time */
425 pr_info("testing eraseblock read speed\n"); 431 printk(PRINT_PREF "testing eraseblock read speed\n");
426 start_timing(); 432 start_timing();
427 for (i = 0; i < ebcnt; ++i) { 433 for (i = 0; i < ebcnt; ++i) {
428 if (bbt[i]) 434 if (bbt[i])
@@ -434,14 +440,14 @@ static int __init mtd_speedtest_init(void)
434 } 440 }
435 stop_timing(); 441 stop_timing();
436 speed = calc_speed(); 442 speed = calc_speed();
437 pr_info("eraseblock read speed is %ld KiB/s\n", speed); 443 printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
438 444
439 err = erase_whole_device(); 445 err = erase_whole_device();
440 if (err) 446 if (err)
441 goto out; 447 goto out;
442 448
443 /* Write all eraseblocks, 1 page at a time */ 449 /* Write all eraseblocks, 1 page at a time */
444 pr_info("testing page write speed\n"); 450 printk(PRINT_PREF "testing page write speed\n");
445 start_timing(); 451 start_timing();
446 for (i = 0; i < ebcnt; ++i) { 452 for (i = 0; i < ebcnt; ++i) {
447 if (bbt[i]) 453 if (bbt[i])
@@ -453,10 +459,10 @@ static int __init mtd_speedtest_init(void)
453 } 459 }
454 stop_timing(); 460 stop_timing();
455 speed = calc_speed(); 461 speed = calc_speed();
456 pr_info("page write speed is %ld KiB/s\n", speed); 462 printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
457 463
458 /* Read all eraseblocks, 1 page at a time */ 464 /* Read all eraseblocks, 1 page at a time */
459 pr_info("testing page read speed\n"); 465 printk(PRINT_PREF "testing page read speed\n");
460 start_timing(); 466 start_timing();
461 for (i = 0; i < ebcnt; ++i) { 467 for (i = 0; i < ebcnt; ++i) {
462 if (bbt[i]) 468 if (bbt[i])
@@ -468,14 +474,14 @@ static int __init mtd_speedtest_init(void)
468 } 474 }
469 stop_timing(); 475 stop_timing();
470 speed = calc_speed(); 476 speed = calc_speed();
471 pr_info("page read speed is %ld KiB/s\n", speed); 477 printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
472 478
473 err = erase_whole_device(); 479 err = erase_whole_device();
474 if (err) 480 if (err)
475 goto out; 481 goto out;
476 482
477 /* Write all eraseblocks, 2 pages at a time */ 483 /* Write all eraseblocks, 2 pages at a time */
478 pr_info("testing 2 page write speed\n"); 484 printk(PRINT_PREF "testing 2 page write speed\n");
479 start_timing(); 485 start_timing();
480 for (i = 0; i < ebcnt; ++i) { 486 for (i = 0; i < ebcnt; ++i) {
481 if (bbt[i]) 487 if (bbt[i])
@@ -487,10 +493,10 @@ static int __init mtd_speedtest_init(void)
487 } 493 }
488 stop_timing(); 494 stop_timing();
489 speed = calc_speed(); 495 speed = calc_speed();
490 pr_info("2 page write speed is %ld KiB/s\n", speed); 496 printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
491 497
492 /* Read all eraseblocks, 2 pages at a time */ 498 /* Read all eraseblocks, 2 pages at a time */
493 pr_info("testing 2 page read speed\n"); 499 printk(PRINT_PREF "testing 2 page read speed\n");
494 start_timing(); 500 start_timing();
495 for (i = 0; i < ebcnt; ++i) { 501 for (i = 0; i < ebcnt; ++i) {
496 if (bbt[i]) 502 if (bbt[i])
@@ -502,10 +508,10 @@ static int __init mtd_speedtest_init(void)
502 } 508 }
503 stop_timing(); 509 stop_timing();
504 speed = calc_speed(); 510 speed = calc_speed();
505 pr_info("2 page read speed is %ld KiB/s\n", speed); 511 printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
506 512
507 /* Erase all eraseblocks */ 513 /* Erase all eraseblocks */
508 pr_info("Testing erase speed\n"); 514 printk(PRINT_PREF "Testing erase speed\n");
509 start_timing(); 515 start_timing();
510 for (i = 0; i < ebcnt; ++i) { 516 for (i = 0; i < ebcnt; ++i) {
511 if (bbt[i]) 517 if (bbt[i])
@@ -517,12 +523,12 @@ static int __init mtd_speedtest_init(void)
517 } 523 }
518 stop_timing(); 524 stop_timing();
519 speed = calc_speed(); 525 speed = calc_speed();
520 pr_info("erase speed is %ld KiB/s\n", speed); 526 printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
521 527
522 /* Multi-block erase all eraseblocks */ 528 /* Multi-block erase all eraseblocks */
523 for (k = 1; k < 7; k++) { 529 for (k = 1; k < 7; k++) {
524 blocks = 1 << k; 530 blocks = 1 << k;
525 pr_info("Testing %dx multi-block erase speed\n", 531 printk(PRINT_PREF "Testing %dx multi-block erase speed\n",
526 blocks); 532 blocks);
527 start_timing(); 533 start_timing();
528 for (i = 0; i < ebcnt; ) { 534 for (i = 0; i < ebcnt; ) {
@@ -541,16 +547,16 @@ static int __init mtd_speedtest_init(void)
541 } 547 }
542 stop_timing(); 548 stop_timing();
543 speed = calc_speed(); 549 speed = calc_speed();
544 pr_info("%dx multi-block erase speed is %ld KiB/s\n", 550 printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n",
545 blocks, speed); 551 blocks, speed);
546 } 552 }
547 pr_info("finished\n"); 553 printk(PRINT_PREF "finished\n");
548out: 554out:
549 kfree(iobuf); 555 kfree(iobuf);
550 kfree(bbt); 556 kfree(bbt);
551 put_mtd_device(mtd); 557 put_mtd_device(mtd);
552 if (err) 558 if (err)
553 pr_info("error %d occurred\n", err); 559 printk(PRINT_PREF "error %d occurred\n", err);
554 printk(KERN_INFO "=================================================\n"); 560 printk(KERN_INFO "=================================================\n");
555 return err; 561 return err;
556} 562}
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3729f679ae5..129bad2e408 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -19,8 +19,6 @@
19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> 19 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/init.h> 22#include <linux/init.h>
25#include <linux/module.h> 23#include <linux/module.h>
26#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
@@ -29,9 +27,10 @@
29#include <linux/slab.h> 27#include <linux/slab.h>
30#include <linux/sched.h> 28#include <linux/sched.h>
31#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
32#include <linux/random.h>
33 30
34static int dev = -EINVAL; 31#define PRINT_PREF KERN_INFO "mtd_stresstest: "
32
33static int dev;
35module_param(dev, int, S_IRUGO); 34module_param(dev, int, S_IRUGO);
36MODULE_PARM_DESC(dev, "MTD device number to use"); 35MODULE_PARM_DESC(dev, "MTD device number to use");
37 36
@@ -49,13 +48,28 @@ static int pgsize;
49static int bufsize; 48static int bufsize;
50static int ebcnt; 49static int ebcnt;
51static int pgcnt; 50static int pgcnt;
51static unsigned long next = 1;
52
53static inline unsigned int simple_rand(void)
54{
55 next = next * 1103515245 + 12345;
56 return (unsigned int)((next / 65536) % 32768);
57}
58
59static inline void simple_srand(unsigned long seed)
60{
61 next = seed;
62}
52 63
53static int rand_eb(void) 64static int rand_eb(void)
54{ 65{
55 unsigned int eb; 66 int eb;
56 67
57again: 68again:
58 eb = random32(); 69 if (ebcnt < 32768)
70 eb = simple_rand();
71 else
72 eb = (simple_rand() << 15) | simple_rand();
59 /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */ 73 /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
60 eb %= (ebcnt - 1); 74 eb %= (ebcnt - 1);
61 if (bbt[eb]) 75 if (bbt[eb])
@@ -65,18 +79,24 @@ again:
65 79
66static int rand_offs(void) 80static int rand_offs(void)
67{ 81{
68 unsigned int offs; 82 int offs;
69 83
70 offs = random32(); 84 if (bufsize < 32768)
85 offs = simple_rand();
86 else
87 offs = (simple_rand() << 15) | simple_rand();
71 offs %= bufsize; 88 offs %= bufsize;
72 return offs; 89 return offs;
73} 90}
74 91
75static int rand_len(int offs) 92static int rand_len(int offs)
76{ 93{
77 unsigned int len; 94 int len;
78 95
79 len = random32(); 96 if (bufsize < 32768)
97 len = simple_rand();
98 else
99 len = (simple_rand() << 15) | simple_rand();
80 len %= (bufsize - offs); 100 len %= (bufsize - offs);
81 return len; 101 return len;
82} 102}
@@ -92,14 +112,14 @@ static int erase_eraseblock(int ebnum)
92 ei.addr = addr; 112 ei.addr = addr;
93 ei.len = mtd->erasesize; 113 ei.len = mtd->erasesize;
94 114
95 err = mtd_erase(mtd, &ei); 115 err = mtd->erase(mtd, &ei);
96 if (unlikely(err)) { 116 if (unlikely(err)) {
97 pr_err("error %d while erasing EB %d\n", err, ebnum); 117 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
98 return err; 118 return err;
99 } 119 }
100 120
101 if (unlikely(ei.state == MTD_ERASE_FAILED)) { 121 if (unlikely(ei.state == MTD_ERASE_FAILED)) {
102 pr_err("some erase error occurred at EB %d\n", 122 printk(PRINT_PREF "some erase error occurred at EB %d\n",
103 ebnum); 123 ebnum);
104 return -EIO; 124 return -EIO;
105 } 125 }
@@ -112,15 +132,15 @@ static int is_block_bad(int ebnum)
112 loff_t addr = ebnum * mtd->erasesize; 132 loff_t addr = ebnum * mtd->erasesize;
113 int ret; 133 int ret;
114 134
115 ret = mtd_block_isbad(mtd, addr); 135 ret = mtd->block_isbad(mtd, addr);
116 if (ret) 136 if (ret)
117 pr_info("block %d is bad\n", ebnum); 137 printk(PRINT_PREF "block %d is bad\n", ebnum);
118 return ret; 138 return ret;
119} 139}
120 140
121static int do_read(void) 141static int do_read(void)
122{ 142{
123 size_t read; 143 size_t read = 0;
124 int eb = rand_eb(); 144 int eb = rand_eb();
125 int offs = rand_offs(); 145 int offs = rand_offs();
126 int len = rand_len(offs), err; 146 int len = rand_len(offs), err;
@@ -133,11 +153,11 @@ static int do_read(void)
133 len = mtd->erasesize - offs; 153 len = mtd->erasesize - offs;
134 } 154 }
135 addr = eb * mtd->erasesize + offs; 155 addr = eb * mtd->erasesize + offs;
136 err = mtd_read(mtd, addr, len, &read, readbuf); 156 err = mtd->read(mtd, addr, len, &read, readbuf);
137 if (mtd_is_bitflip(err)) 157 if (err == -EUCLEAN)
138 err = 0; 158 err = 0;
139 if (unlikely(err || read != len)) { 159 if (unlikely(err || read != len)) {
140 pr_err("error: read failed at 0x%llx\n", 160 printk(PRINT_PREF "error: read failed at 0x%llx\n",
141 (long long)addr); 161 (long long)addr);
142 if (!err) 162 if (!err)
143 err = -EINVAL; 163 err = -EINVAL;
@@ -149,7 +169,7 @@ static int do_read(void)
149static int do_write(void) 169static int do_write(void)
150{ 170{
151 int eb = rand_eb(), offs, err, len; 171 int eb = rand_eb(), offs, err, len;
152 size_t written; 172 size_t written = 0;
153 loff_t addr; 173 loff_t addr;
154 174
155 offs = offsets[eb]; 175 offs = offsets[eb];
@@ -172,9 +192,9 @@ static int do_write(void)
172 } 192 }
173 } 193 }
174 addr = eb * mtd->erasesize + offs; 194 addr = eb * mtd->erasesize + offs;
175 err = mtd_write(mtd, addr, len, &written, writebuf); 195 err = mtd->write(mtd, addr, len, &written, writebuf);
176 if (unlikely(err || written != len)) { 196 if (unlikely(err || written != len)) {
177 pr_err("error: write failed at 0x%llx\n", 197 printk(PRINT_PREF "error: write failed at 0x%llx\n",
178 (long long)addr); 198 (long long)addr);
179 if (!err) 199 if (!err)
180 err = -EINVAL; 200 err = -EINVAL;
@@ -191,7 +211,7 @@ static int do_write(void)
191 211
192static int do_operation(void) 212static int do_operation(void)
193{ 213{
194 if (random32() & 1) 214 if (simple_rand() & 1)
195 return do_read(); 215 return do_read();
196 else 216 else
197 return do_write(); 217 return do_write();
@@ -203,21 +223,22 @@ static int scan_for_bad_eraseblocks(void)
203 223
204 bbt = kzalloc(ebcnt, GFP_KERNEL); 224 bbt = kzalloc(ebcnt, GFP_KERNEL);
205 if (!bbt) { 225 if (!bbt) {
206 pr_err("error: cannot allocate memory\n"); 226 printk(PRINT_PREF "error: cannot allocate memory\n");
207 return -ENOMEM; 227 return -ENOMEM;
208 } 228 }
209 229
210 if (!mtd_can_have_bb(mtd)) 230 /* NOR flash does not implement block_isbad */
231 if (mtd->block_isbad == NULL)
211 return 0; 232 return 0;
212 233
213 pr_info("scanning for bad eraseblocks\n"); 234 printk(PRINT_PREF "scanning for bad eraseblocks\n");
214 for (i = 0; i < ebcnt; ++i) { 235 for (i = 0; i < ebcnt; ++i) {
215 bbt[i] = is_block_bad(i) ? 1 : 0; 236 bbt[i] = is_block_bad(i) ? 1 : 0;
216 if (bbt[i]) 237 if (bbt[i])
217 bad += 1; 238 bad += 1;
218 cond_resched(); 239 cond_resched();
219 } 240 }
220 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 241 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
221 return 0; 242 return 0;
222} 243}
223 244
@@ -229,24 +250,17 @@ static int __init mtd_stresstest_init(void)
229 250
230 printk(KERN_INFO "\n"); 251 printk(KERN_INFO "\n");
231 printk(KERN_INFO "=================================================\n"); 252 printk(KERN_INFO "=================================================\n");
232 253 printk(PRINT_PREF "MTD device: %d\n", dev);
233 if (dev < 0) {
234 pr_info("Please specify a valid mtd-device via module parameter\n");
235 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
236 return -EINVAL;
237 }
238
239 pr_info("MTD device: %d\n", dev);
240 254
241 mtd = get_mtd_device(NULL, dev); 255 mtd = get_mtd_device(NULL, dev);
242 if (IS_ERR(mtd)) { 256 if (IS_ERR(mtd)) {
243 err = PTR_ERR(mtd); 257 err = PTR_ERR(mtd);
244 pr_err("error: cannot get MTD device\n"); 258 printk(PRINT_PREF "error: cannot get MTD device\n");
245 return err; 259 return err;
246 } 260 }
247 261
248 if (mtd->writesize == 1) { 262 if (mtd->writesize == 1) {
249 pr_info("not NAND flash, assume page size is 512 " 263 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
250 "bytes.\n"); 264 "bytes.\n");
251 pgsize = 512; 265 pgsize = 512;
252 } else 266 } else
@@ -257,14 +271,14 @@ static int __init mtd_stresstest_init(void)
257 ebcnt = tmp; 271 ebcnt = tmp;
258 pgcnt = mtd->erasesize / pgsize; 272 pgcnt = mtd->erasesize / pgsize;
259 273
260 pr_info("MTD device size %llu, eraseblock size %u, " 274 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
261 "page size %u, count of eraseblocks %u, pages per " 275 "page size %u, count of eraseblocks %u, pages per "
262 "eraseblock %u, OOB size %u\n", 276 "eraseblock %u, OOB size %u\n",
263 (unsigned long long)mtd->size, mtd->erasesize, 277 (unsigned long long)mtd->size, mtd->erasesize,
264 pgsize, ebcnt, pgcnt, mtd->oobsize); 278 pgsize, ebcnt, pgcnt, mtd->oobsize);
265 279
266 if (ebcnt < 2) { 280 if (ebcnt < 2) {
267 pr_err("error: need at least 2 eraseblocks\n"); 281 printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
268 err = -ENOSPC; 282 err = -ENOSPC;
269 goto out_put_mtd; 283 goto out_put_mtd;
270 } 284 }
@@ -277,29 +291,30 @@ static int __init mtd_stresstest_init(void)
277 writebuf = vmalloc(bufsize); 291 writebuf = vmalloc(bufsize);
278 offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL); 292 offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
279 if (!readbuf || !writebuf || !offsets) { 293 if (!readbuf || !writebuf || !offsets) {
280 pr_err("error: cannot allocate memory\n"); 294 printk(PRINT_PREF "error: cannot allocate memory\n");
281 goto out; 295 goto out;
282 } 296 }
283 for (i = 0; i < ebcnt; i++) 297 for (i = 0; i < ebcnt; i++)
284 offsets[i] = mtd->erasesize; 298 offsets[i] = mtd->erasesize;
299 simple_srand(current->pid);
285 for (i = 0; i < bufsize; i++) 300 for (i = 0; i < bufsize; i++)
286 writebuf[i] = random32(); 301 writebuf[i] = simple_rand();
287 302
288 err = scan_for_bad_eraseblocks(); 303 err = scan_for_bad_eraseblocks();
289 if (err) 304 if (err)
290 goto out; 305 goto out;
291 306
292 /* Do operations */ 307 /* Do operations */
293 pr_info("doing operations\n"); 308 printk(PRINT_PREF "doing operations\n");
294 for (op = 0; op < count; op++) { 309 for (op = 0; op < count; op++) {
295 if ((op & 1023) == 0) 310 if ((op & 1023) == 0)
296 pr_info("%d operations done\n", op); 311 printk(PRINT_PREF "%d operations done\n", op);
297 err = do_operation(); 312 err = do_operation();
298 if (err) 313 if (err)
299 goto out; 314 goto out;
300 cond_resched(); 315 cond_resched();
301 } 316 }
302 pr_info("finished, %d operations done\n", op); 317 printk(PRINT_PREF "finished, %d operations done\n", op);
303 318
304out: 319out:
305 kfree(offsets); 320 kfree(offsets);
@@ -309,7 +324,7 @@ out:
309out_put_mtd: 324out_put_mtd:
310 put_mtd_device(mtd); 325 put_mtd_device(mtd);
311 if (err) 326 if (err)
312 pr_info("error %d occurred\n", err); 327 printk(PRINT_PREF "error %d occurred\n", err);
313 printk(KERN_INFO "=================================================\n"); 328 printk(KERN_INFO "=================================================\n");
314 return err; 329 return err;
315} 330}
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index c880c2229c5..334eae53a3d 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -19,8 +19,6 @@
19 * 19 *
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/init.h> 22#include <linux/init.h>
25#include <linux/module.h> 23#include <linux/module.h>
26#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
@@ -29,7 +27,9 @@
29#include <linux/slab.h> 27#include <linux/slab.h>
30#include <linux/sched.h> 28#include <linux/sched.h>
31 29
32static int dev = -EINVAL; 30#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
31
32static int dev;
33module_param(dev, int, S_IRUGO); 33module_param(dev, int, S_IRUGO);
34MODULE_PARM_DESC(dev, "MTD device number to use"); 34MODULE_PARM_DESC(dev, "MTD device number to use");
35 35
@@ -80,14 +80,14 @@ static int erase_eraseblock(int ebnum)
80 ei.addr = addr; 80 ei.addr = addr;
81 ei.len = mtd->erasesize; 81 ei.len = mtd->erasesize;
82 82
83 err = mtd_erase(mtd, &ei); 83 err = mtd->erase(mtd, &ei);
84 if (err) { 84 if (err) {
85 pr_err("error %d while erasing EB %d\n", err, ebnum); 85 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
86 return err; 86 return err;
87 } 87 }
88 88
89 if (ei.state == MTD_ERASE_FAILED) { 89 if (ei.state == MTD_ERASE_FAILED) {
90 pr_err("some erase error occurred at EB %d\n", 90 printk(PRINT_PREF "some erase error occurred at EB %d\n",
91 ebnum); 91 ebnum);
92 return -EIO; 92 return -EIO;
93 } 93 }
@@ -100,7 +100,7 @@ static int erase_whole_device(void)
100 int err; 100 int err;
101 unsigned int i; 101 unsigned int i;
102 102
103 pr_info("erasing whole device\n"); 103 printk(PRINT_PREF "erasing whole device\n");
104 for (i = 0; i < ebcnt; ++i) { 104 for (i = 0; i < ebcnt; ++i) {
105 if (bbt[i]) 105 if (bbt[i])
106 continue; 106 continue;
@@ -109,24 +109,24 @@ static int erase_whole_device(void)
109 return err; 109 return err;
110 cond_resched(); 110 cond_resched();
111 } 111 }
112 pr_info("erased %u eraseblocks\n", i); 112 printk(PRINT_PREF "erased %u eraseblocks\n", i);
113 return 0; 113 return 0;
114} 114}
115 115
116static int write_eraseblock(int ebnum) 116static int write_eraseblock(int ebnum)
117{ 117{
118 size_t written; 118 size_t written = 0;
119 int err = 0; 119 int err = 0;
120 loff_t addr = ebnum * mtd->erasesize; 120 loff_t addr = ebnum * mtd->erasesize;
121 121
122 set_random_data(writebuf, subpgsize); 122 set_random_data(writebuf, subpgsize);
123 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 123 err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
124 if (unlikely(err || written != subpgsize)) { 124 if (unlikely(err || written != subpgsize)) {
125 pr_err("error: write failed at %#llx\n", 125 printk(PRINT_PREF "error: write failed at %#llx\n",
126 (long long)addr); 126 (long long)addr);
127 if (written != subpgsize) { 127 if (written != subpgsize) {
128 pr_err(" write size: %#x\n", subpgsize); 128 printk(PRINT_PREF " write size: %#x\n", subpgsize);
129 pr_err(" written: %#zx\n", written); 129 printk(PRINT_PREF " written: %#zx\n", written);
130 } 130 }
131 return err ? err : -1; 131 return err ? err : -1;
132 } 132 }
@@ -134,13 +134,13 @@ static int write_eraseblock(int ebnum)
134 addr += subpgsize; 134 addr += subpgsize;
135 135
136 set_random_data(writebuf, subpgsize); 136 set_random_data(writebuf, subpgsize);
137 err = mtd_write(mtd, addr, subpgsize, &written, writebuf); 137 err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
138 if (unlikely(err || written != subpgsize)) { 138 if (unlikely(err || written != subpgsize)) {
139 pr_err("error: write failed at %#llx\n", 139 printk(PRINT_PREF "error: write failed at %#llx\n",
140 (long long)addr); 140 (long long)addr);
141 if (written != subpgsize) { 141 if (written != subpgsize) {
142 pr_err(" write size: %#x\n", subpgsize); 142 printk(PRINT_PREF " write size: %#x\n", subpgsize);
143 pr_err(" written: %#zx\n", written); 143 printk(PRINT_PREF " written: %#zx\n", written);
144 } 144 }
145 return err ? err : -1; 145 return err ? err : -1;
146 } 146 }
@@ -150,7 +150,7 @@ static int write_eraseblock(int ebnum)
150 150
151static int write_eraseblock2(int ebnum) 151static int write_eraseblock2(int ebnum)
152{ 152{
153 size_t written; 153 size_t written = 0;
154 int err = 0, k; 154 int err = 0, k;
155 loff_t addr = ebnum * mtd->erasesize; 155 loff_t addr = ebnum * mtd->erasesize;
156 156
@@ -158,14 +158,14 @@ static int write_eraseblock2(int ebnum)
158 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize) 158 if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
159 break; 159 break;
160 set_random_data(writebuf, subpgsize * k); 160 set_random_data(writebuf, subpgsize * k);
161 err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf); 161 err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
162 if (unlikely(err || written != subpgsize * k)) { 162 if (unlikely(err || written != subpgsize * k)) {
163 pr_err("error: write failed at %#llx\n", 163 printk(PRINT_PREF "error: write failed at %#llx\n",
164 (long long)addr); 164 (long long)addr);
165 if (written != subpgsize) { 165 if (written != subpgsize) {
166 pr_err(" write size: %#x\n", 166 printk(PRINT_PREF " write size: %#x\n",
167 subpgsize * k); 167 subpgsize * k);
168 pr_err(" written: %#08zx\n", 168 printk(PRINT_PREF " written: %#08zx\n",
169 written); 169 written);
170 } 170 }
171 return err ? err : -1; 171 return err ? err : -1;
@@ -189,32 +189,33 @@ static void print_subpage(unsigned char *p)
189 189
190static int verify_eraseblock(int ebnum) 190static int verify_eraseblock(int ebnum)
191{ 191{
192 size_t read; 192 size_t read = 0;
193 int err = 0; 193 int err = 0;
194 loff_t addr = ebnum * mtd->erasesize; 194 loff_t addr = ebnum * mtd->erasesize;
195 195
196 set_random_data(writebuf, subpgsize); 196 set_random_data(writebuf, subpgsize);
197 clear_data(readbuf, subpgsize); 197 clear_data(readbuf, subpgsize);
198 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 198 read = 0;
199 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
199 if (unlikely(err || read != subpgsize)) { 200 if (unlikely(err || read != subpgsize)) {
200 if (mtd_is_bitflip(err) && read == subpgsize) { 201 if (err == -EUCLEAN && read == subpgsize) {
201 pr_info("ECC correction at %#llx\n", 202 printk(PRINT_PREF "ECC correction at %#llx\n",
202 (long long)addr); 203 (long long)addr);
203 err = 0; 204 err = 0;
204 } else { 205 } else {
205 pr_err("error: read failed at %#llx\n", 206 printk(PRINT_PREF "error: read failed at %#llx\n",
206 (long long)addr); 207 (long long)addr);
207 return err ? err : -1; 208 return err ? err : -1;
208 } 209 }
209 } 210 }
210 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 211 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
211 pr_err("error: verify failed at %#llx\n", 212 printk(PRINT_PREF "error: verify failed at %#llx\n",
212 (long long)addr); 213 (long long)addr);
213 pr_info("------------- written----------------\n"); 214 printk(PRINT_PREF "------------- written----------------\n");
214 print_subpage(writebuf); 215 print_subpage(writebuf);
215 pr_info("------------- read ------------------\n"); 216 printk(PRINT_PREF "------------- read ------------------\n");
216 print_subpage(readbuf); 217 print_subpage(readbuf);
217 pr_info("-------------------------------------\n"); 218 printk(PRINT_PREF "-------------------------------------\n");
218 errcnt += 1; 219 errcnt += 1;
219 } 220 }
220 221
@@ -222,26 +223,27 @@ static int verify_eraseblock(int ebnum)
222 223
223 set_random_data(writebuf, subpgsize); 224 set_random_data(writebuf, subpgsize);
224 clear_data(readbuf, subpgsize); 225 clear_data(readbuf, subpgsize);
225 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 226 read = 0;
227 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
226 if (unlikely(err || read != subpgsize)) { 228 if (unlikely(err || read != subpgsize)) {
227 if (mtd_is_bitflip(err) && read == subpgsize) { 229 if (err == -EUCLEAN && read == subpgsize) {
228 pr_info("ECC correction at %#llx\n", 230 printk(PRINT_PREF "ECC correction at %#llx\n",
229 (long long)addr); 231 (long long)addr);
230 err = 0; 232 err = 0;
231 } else { 233 } else {
232 pr_err("error: read failed at %#llx\n", 234 printk(PRINT_PREF "error: read failed at %#llx\n",
233 (long long)addr); 235 (long long)addr);
234 return err ? err : -1; 236 return err ? err : -1;
235 } 237 }
236 } 238 }
237 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 239 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
238 pr_info("error: verify failed at %#llx\n", 240 printk(PRINT_PREF "error: verify failed at %#llx\n",
239 (long long)addr); 241 (long long)addr);
240 pr_info("------------- written----------------\n"); 242 printk(PRINT_PREF "------------- written----------------\n");
241 print_subpage(writebuf); 243 print_subpage(writebuf);
242 pr_info("------------- read ------------------\n"); 244 printk(PRINT_PREF "------------- read ------------------\n");
243 print_subpage(readbuf); 245 print_subpage(readbuf);
244 pr_info("-------------------------------------\n"); 246 printk(PRINT_PREF "-------------------------------------\n");
245 errcnt += 1; 247 errcnt += 1;
246 } 248 }
247 249
@@ -250,7 +252,7 @@ static int verify_eraseblock(int ebnum)
250 252
251static int verify_eraseblock2(int ebnum) 253static int verify_eraseblock2(int ebnum)
252{ 254{
253 size_t read; 255 size_t read = 0;
254 int err = 0, k; 256 int err = 0, k;
255 loff_t addr = ebnum * mtd->erasesize; 257 loff_t addr = ebnum * mtd->erasesize;
256 258
@@ -259,20 +261,21 @@ static int verify_eraseblock2(int ebnum)
259 break; 261 break;
260 set_random_data(writebuf, subpgsize * k); 262 set_random_data(writebuf, subpgsize * k);
261 clear_data(readbuf, subpgsize * k); 263 clear_data(readbuf, subpgsize * k);
262 err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf); 264 read = 0;
265 err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
263 if (unlikely(err || read != subpgsize * k)) { 266 if (unlikely(err || read != subpgsize * k)) {
264 if (mtd_is_bitflip(err) && read == subpgsize * k) { 267 if (err == -EUCLEAN && read == subpgsize * k) {
265 pr_info("ECC correction at %#llx\n", 268 printk(PRINT_PREF "ECC correction at %#llx\n",
266 (long long)addr); 269 (long long)addr);
267 err = 0; 270 err = 0;
268 } else { 271 } else {
269 pr_err("error: read failed at " 272 printk(PRINT_PREF "error: read failed at "
270 "%#llx\n", (long long)addr); 273 "%#llx\n", (long long)addr);
271 return err ? err : -1; 274 return err ? err : -1;
272 } 275 }
273 } 276 }
274 if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) { 277 if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
275 pr_err("error: verify failed at %#llx\n", 278 printk(PRINT_PREF "error: verify failed at %#llx\n",
276 (long long)addr); 279 (long long)addr);
277 errcnt += 1; 280 errcnt += 1;
278 } 281 }
@@ -285,27 +288,28 @@ static int verify_eraseblock2(int ebnum)
285static int verify_eraseblock_ff(int ebnum) 288static int verify_eraseblock_ff(int ebnum)
286{ 289{
287 uint32_t j; 290 uint32_t j;
288 size_t read; 291 size_t read = 0;
289 int err = 0; 292 int err = 0;
290 loff_t addr = ebnum * mtd->erasesize; 293 loff_t addr = ebnum * mtd->erasesize;
291 294
292 memset(writebuf, 0xff, subpgsize); 295 memset(writebuf, 0xff, subpgsize);
293 for (j = 0; j < mtd->erasesize / subpgsize; ++j) { 296 for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
294 clear_data(readbuf, subpgsize); 297 clear_data(readbuf, subpgsize);
295 err = mtd_read(mtd, addr, subpgsize, &read, readbuf); 298 read = 0;
299 err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
296 if (unlikely(err || read != subpgsize)) { 300 if (unlikely(err || read != subpgsize)) {
297 if (mtd_is_bitflip(err) && read == subpgsize) { 301 if (err == -EUCLEAN && read == subpgsize) {
298 pr_info("ECC correction at %#llx\n", 302 printk(PRINT_PREF "ECC correction at %#llx\n",
299 (long long)addr); 303 (long long)addr);
300 err = 0; 304 err = 0;
301 } else { 305 } else {
302 pr_err("error: read failed at " 306 printk(PRINT_PREF "error: read failed at "
303 "%#llx\n", (long long)addr); 307 "%#llx\n", (long long)addr);
304 return err ? err : -1; 308 return err ? err : -1;
305 } 309 }
306 } 310 }
307 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) { 311 if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
308 pr_err("error: verify 0xff failed at " 312 printk(PRINT_PREF "error: verify 0xff failed at "
309 "%#llx\n", (long long)addr); 313 "%#llx\n", (long long)addr);
310 errcnt += 1; 314 errcnt += 1;
311 } 315 }
@@ -320,7 +324,7 @@ static int verify_all_eraseblocks_ff(void)
320 int err; 324 int err;
321 unsigned int i; 325 unsigned int i;
322 326
323 pr_info("verifying all eraseblocks for 0xff\n"); 327 printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
324 for (i = 0; i < ebcnt; ++i) { 328 for (i = 0; i < ebcnt; ++i) {
325 if (bbt[i]) 329 if (bbt[i])
326 continue; 330 continue;
@@ -328,10 +332,10 @@ static int verify_all_eraseblocks_ff(void)
328 if (err) 332 if (err)
329 return err; 333 return err;
330 if (i % 256 == 0) 334 if (i % 256 == 0)
331 pr_info("verified up to eraseblock %u\n", i); 335 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
332 cond_resched(); 336 cond_resched();
333 } 337 }
334 pr_info("verified %u eraseblocks\n", i); 338 printk(PRINT_PREF "verified %u eraseblocks\n", i);
335 return 0; 339 return 0;
336} 340}
337 341
@@ -340,9 +344,9 @@ static int is_block_bad(int ebnum)
340 loff_t addr = ebnum * mtd->erasesize; 344 loff_t addr = ebnum * mtd->erasesize;
341 int ret; 345 int ret;
342 346
343 ret = mtd_block_isbad(mtd, addr); 347 ret = mtd->block_isbad(mtd, addr);
344 if (ret) 348 if (ret)
345 pr_info("block %d is bad\n", ebnum); 349 printk(PRINT_PREF "block %d is bad\n", ebnum);
346 return ret; 350 return ret;
347} 351}
348 352
@@ -352,18 +356,18 @@ static int scan_for_bad_eraseblocks(void)
352 356
353 bbt = kzalloc(ebcnt, GFP_KERNEL); 357 bbt = kzalloc(ebcnt, GFP_KERNEL);
354 if (!bbt) { 358 if (!bbt) {
355 pr_err("error: cannot allocate memory\n"); 359 printk(PRINT_PREF "error: cannot allocate memory\n");
356 return -ENOMEM; 360 return -ENOMEM;
357 } 361 }
358 362
359 pr_info("scanning for bad eraseblocks\n"); 363 printk(PRINT_PREF "scanning for bad eraseblocks\n");
360 for (i = 0; i < ebcnt; ++i) { 364 for (i = 0; i < ebcnt; ++i) {
361 bbt[i] = is_block_bad(i) ? 1 : 0; 365 bbt[i] = is_block_bad(i) ? 1 : 0;
362 if (bbt[i]) 366 if (bbt[i])
363 bad += 1; 367 bad += 1;
364 cond_resched(); 368 cond_resched();
365 } 369 }
366 pr_info("scanned %d eraseblocks, %d are bad\n", i, bad); 370 printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
367 return 0; 371 return 0;
368} 372}
369 373
@@ -375,24 +379,17 @@ static int __init mtd_subpagetest_init(void)
375 379
376 printk(KERN_INFO "\n"); 380 printk(KERN_INFO "\n");
377 printk(KERN_INFO "=================================================\n"); 381 printk(KERN_INFO "=================================================\n");
378 382 printk(PRINT_PREF "MTD device: %d\n", dev);
379 if (dev < 0) {
380 pr_info("Please specify a valid mtd-device via module parameter\n");
381 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
382 return -EINVAL;
383 }
384
385 pr_info("MTD device: %d\n", dev);
386 383
387 mtd = get_mtd_device(NULL, dev); 384 mtd = get_mtd_device(NULL, dev);
388 if (IS_ERR(mtd)) { 385 if (IS_ERR(mtd)) {
389 err = PTR_ERR(mtd); 386 err = PTR_ERR(mtd);
390 pr_err("error: cannot get MTD device\n"); 387 printk(PRINT_PREF "error: cannot get MTD device\n");
391 return err; 388 return err;
392 } 389 }
393 390
394 if (mtd->type != MTD_NANDFLASH) { 391 if (mtd->type != MTD_NANDFLASH) {
395 pr_info("this test requires NAND flash\n"); 392 printk(PRINT_PREF "this test requires NAND flash\n");
396 goto out; 393 goto out;
397 } 394 }
398 395
@@ -402,7 +399,7 @@ static int __init mtd_subpagetest_init(void)
402 ebcnt = tmp; 399 ebcnt = tmp;
403 pgcnt = mtd->erasesize / mtd->writesize; 400 pgcnt = mtd->erasesize / mtd->writesize;
404 401
405 pr_info("MTD device size %llu, eraseblock size %u, " 402 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
406 "page size %u, subpage size %u, count of eraseblocks %u, " 403 "page size %u, subpage size %u, count of eraseblocks %u, "
407 "pages per eraseblock %u, OOB size %u\n", 404 "pages per eraseblock %u, OOB size %u\n",
408 (unsigned long long)mtd->size, mtd->erasesize, 405 (unsigned long long)mtd->size, mtd->erasesize,
@@ -412,12 +409,12 @@ static int __init mtd_subpagetest_init(void)
412 bufsize = subpgsize * 32; 409 bufsize = subpgsize * 32;
413 writebuf = kmalloc(bufsize, GFP_KERNEL); 410 writebuf = kmalloc(bufsize, GFP_KERNEL);
414 if (!writebuf) { 411 if (!writebuf) {
415 pr_info("error: cannot allocate memory\n"); 412 printk(PRINT_PREF "error: cannot allocate memory\n");
416 goto out; 413 goto out;
417 } 414 }
418 readbuf = kmalloc(bufsize, GFP_KERNEL); 415 readbuf = kmalloc(bufsize, GFP_KERNEL);
419 if (!readbuf) { 416 if (!readbuf) {
420 pr_info("error: cannot allocate memory\n"); 417 printk(PRINT_PREF "error: cannot allocate memory\n");
421 goto out; 418 goto out;
422 } 419 }
423 420
@@ -429,7 +426,7 @@ static int __init mtd_subpagetest_init(void)
429 if (err) 426 if (err)
430 goto out; 427 goto out;
431 428
432 pr_info("writing whole device\n"); 429 printk(PRINT_PREF "writing whole device\n");
433 simple_srand(1); 430 simple_srand(1);
434 for (i = 0; i < ebcnt; ++i) { 431 for (i = 0; i < ebcnt; ++i) {
435 if (bbt[i]) 432 if (bbt[i])
@@ -438,13 +435,13 @@ static int __init mtd_subpagetest_init(void)
438 if (unlikely(err)) 435 if (unlikely(err))
439 goto out; 436 goto out;
440 if (i % 256 == 0) 437 if (i % 256 == 0)
441 pr_info("written up to eraseblock %u\n", i); 438 printk(PRINT_PREF "written up to eraseblock %u\n", i);
442 cond_resched(); 439 cond_resched();
443 } 440 }
444 pr_info("written %u eraseblocks\n", i); 441 printk(PRINT_PREF "written %u eraseblocks\n", i);
445 442
446 simple_srand(1); 443 simple_srand(1);
447 pr_info("verifying all eraseblocks\n"); 444 printk(PRINT_PREF "verifying all eraseblocks\n");
448 for (i = 0; i < ebcnt; ++i) { 445 for (i = 0; i < ebcnt; ++i) {
449 if (bbt[i]) 446 if (bbt[i])
450 continue; 447 continue;
@@ -452,10 +449,10 @@ static int __init mtd_subpagetest_init(void)
452 if (unlikely(err)) 449 if (unlikely(err))
453 goto out; 450 goto out;
454 if (i % 256 == 0) 451 if (i % 256 == 0)
455 pr_info("verified up to eraseblock %u\n", i); 452 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
456 cond_resched(); 453 cond_resched();
457 } 454 }
458 pr_info("verified %u eraseblocks\n", i); 455 printk(PRINT_PREF "verified %u eraseblocks\n", i);
459 456
460 err = erase_whole_device(); 457 err = erase_whole_device();
461 if (err) 458 if (err)
@@ -467,7 +464,7 @@ static int __init mtd_subpagetest_init(void)
467 464
468 /* Write all eraseblocks */ 465 /* Write all eraseblocks */
469 simple_srand(3); 466 simple_srand(3);
470 pr_info("writing whole device\n"); 467 printk(PRINT_PREF "writing whole device\n");
471 for (i = 0; i < ebcnt; ++i) { 468 for (i = 0; i < ebcnt; ++i) {
472 if (bbt[i]) 469 if (bbt[i])
473 continue; 470 continue;
@@ -475,14 +472,14 @@ static int __init mtd_subpagetest_init(void)
475 if (unlikely(err)) 472 if (unlikely(err))
476 goto out; 473 goto out;
477 if (i % 256 == 0) 474 if (i % 256 == 0)
478 pr_info("written up to eraseblock %u\n", i); 475 printk(PRINT_PREF "written up to eraseblock %u\n", i);
479 cond_resched(); 476 cond_resched();
480 } 477 }
481 pr_info("written %u eraseblocks\n", i); 478 printk(PRINT_PREF "written %u eraseblocks\n", i);
482 479
483 /* Check all eraseblocks */ 480 /* Check all eraseblocks */
484 simple_srand(3); 481 simple_srand(3);
485 pr_info("verifying all eraseblocks\n"); 482 printk(PRINT_PREF "verifying all eraseblocks\n");
486 for (i = 0; i < ebcnt; ++i) { 483 for (i = 0; i < ebcnt; ++i) {
487 if (bbt[i]) 484 if (bbt[i])
488 continue; 485 continue;
@@ -490,10 +487,10 @@ static int __init mtd_subpagetest_init(void)
490 if (unlikely(err)) 487 if (unlikely(err))
491 goto out; 488 goto out;
492 if (i % 256 == 0) 489 if (i % 256 == 0)
493 pr_info("verified up to eraseblock %u\n", i); 490 printk(PRINT_PREF "verified up to eraseblock %u\n", i);
494 cond_resched(); 491 cond_resched();
495 } 492 }
496 pr_info("verified %u eraseblocks\n", i); 493 printk(PRINT_PREF "verified %u eraseblocks\n", i);
497 494
498 err = erase_whole_device(); 495 err = erase_whole_device();
499 if (err) 496 if (err)
@@ -503,7 +500,7 @@ static int __init mtd_subpagetest_init(void)
503 if (err) 500 if (err)
504 goto out; 501 goto out;
505 502
506 pr_info("finished with %d errors\n", errcnt); 503 printk(PRINT_PREF "finished with %d errors\n", errcnt);
507 504
508out: 505out:
509 kfree(bbt); 506 kfree(bbt);
@@ -511,7 +508,7 @@ out:
511 kfree(writebuf); 508 kfree(writebuf);
512 put_mtd_device(mtd); 509 put_mtd_device(mtd);
513 if (err) 510 if (err)
514 pr_info("error %d occurred\n", err); 511 printk(PRINT_PREF "error %d occurred\n", err);
515 printk(KERN_INFO "=================================================\n"); 512 printk(KERN_INFO "=================================================\n");
516 return err; 513 return err;
517} 514}
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index c4cde1e9edd..5c6c3d24890 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -23,8 +23,6 @@
23 * damage caused by this program. 23 * damage caused by this program.
24 */ 24 */
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#include <linux/init.h> 26#include <linux/init.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
@@ -33,6 +31,7 @@
33#include <linux/slab.h> 31#include <linux/slab.h>
34#include <linux/sched.h> 32#include <linux/sched.h>
35 33
34#define PRINT_PREF KERN_INFO "mtd_torturetest: "
36#define RETRIES 3 35#define RETRIES 3
37 36
38static int eb = 8; 37static int eb = 8;
@@ -47,7 +46,7 @@ static int pgcnt;
47module_param(pgcnt, int, S_IRUGO); 46module_param(pgcnt, int, S_IRUGO);
48MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); 47MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
49 48
50static int dev = -EINVAL; 49static int dev;
51module_param(dev, int, S_IRUGO); 50module_param(dev, int, S_IRUGO);
52MODULE_PARM_DESC(dev, "MTD device number to use"); 51MODULE_PARM_DESC(dev, "MTD device number to use");
53 52
@@ -106,14 +105,14 @@ static inline int erase_eraseblock(int ebnum)
106 ei.addr = addr; 105 ei.addr = addr;
107 ei.len = mtd->erasesize; 106 ei.len = mtd->erasesize;
108 107
109 err = mtd_erase(mtd, &ei); 108 err = mtd->erase(mtd, &ei);
110 if (err) { 109 if (err) {
111 pr_err("error %d while erasing EB %d\n", err, ebnum); 110 printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
112 return err; 111 return err;
113 } 112 }
114 113
115 if (ei.state == MTD_ERASE_FAILED) { 114 if (ei.state == MTD_ERASE_FAILED) {
116 pr_err("some erase error occurred at EB %d\n", 115 printk(PRINT_PREF "some erase error occurred at EB %d\n",
117 ebnum); 116 ebnum);
118 return -EIO; 117 return -EIO;
119 } 118 }
@@ -128,7 +127,7 @@ static inline int erase_eraseblock(int ebnum)
128static inline int check_eraseblock(int ebnum, unsigned char *buf) 127static inline int check_eraseblock(int ebnum, unsigned char *buf)
129{ 128{
130 int err, retries = 0; 129 int err, retries = 0;
131 size_t read; 130 size_t read = 0;
132 loff_t addr = ebnum * mtd->erasesize; 131 loff_t addr = ebnum * mtd->erasesize;
133 size_t len = mtd->erasesize; 132 size_t len = mtd->erasesize;
134 133
@@ -138,42 +137,42 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
138 } 137 }
139 138
140retry: 139retry:
141 err = mtd_read(mtd, addr, len, &read, check_buf); 140 err = mtd->read(mtd, addr, len, &read, check_buf);
142 if (mtd_is_bitflip(err)) 141 if (err == -EUCLEAN)
143 pr_err("single bit flip occurred at EB %d " 142 printk(PRINT_PREF "single bit flip occurred at EB %d "
144 "MTD reported that it was fixed.\n", ebnum); 143 "MTD reported that it was fixed.\n", ebnum);
145 else if (err) { 144 else if (err) {
146 pr_err("error %d while reading EB %d, " 145 printk(PRINT_PREF "error %d while reading EB %d, "
147 "read %zd\n", err, ebnum, read); 146 "read %zd\n", err, ebnum, read);
148 return err; 147 return err;
149 } 148 }
150 149
151 if (read != len) { 150 if (read != len) {
152 pr_err("failed to read %zd bytes from EB %d, " 151 printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
153 "read only %zd, but no error reported\n", 152 "read only %zd, but no error reported\n",
154 len, ebnum, read); 153 len, ebnum, read);
155 return -EIO; 154 return -EIO;
156 } 155 }
157 156
158 if (memcmp(buf, check_buf, len)) { 157 if (memcmp(buf, check_buf, len)) {
159 pr_err("read wrong data from EB %d\n", ebnum); 158 printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
160 report_corrupt(check_buf, buf); 159 report_corrupt(check_buf, buf);
161 160
162 if (retries++ < RETRIES) { 161 if (retries++ < RETRIES) {
163 /* Try read again */ 162 /* Try read again */
164 yield(); 163 yield();
165 pr_info("re-try reading data from EB %d\n", 164 printk(PRINT_PREF "re-try reading data from EB %d\n",
166 ebnum); 165 ebnum);
167 goto retry; 166 goto retry;
168 } else { 167 } else {
169 pr_info("retried %d times, still errors, " 168 printk(PRINT_PREF "retried %d times, still errors, "
170 "give-up\n", RETRIES); 169 "give-up\n", RETRIES);
171 return -EINVAL; 170 return -EINVAL;
172 } 171 }
173 } 172 }
174 173
175 if (retries != 0) 174 if (retries != 0)
176 pr_info("only attempt number %d was OK (!!!)\n", 175 printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
177 retries); 176 retries);
178 177
179 return 0; 178 return 0;
@@ -182,7 +181,7 @@ retry:
182static inline int write_pattern(int ebnum, void *buf) 181static inline int write_pattern(int ebnum, void *buf)
183{ 182{
184 int err; 183 int err;
185 size_t written; 184 size_t written = 0;
186 loff_t addr = ebnum * mtd->erasesize; 185 loff_t addr = ebnum * mtd->erasesize;
187 size_t len = mtd->erasesize; 186 size_t len = mtd->erasesize;
188 187
@@ -190,14 +189,14 @@ static inline int write_pattern(int ebnum, void *buf)
190 addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize; 189 addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
191 len = pgcnt * pgsize; 190 len = pgcnt * pgsize;
192 } 191 }
193 err = mtd_write(mtd, addr, len, &written, buf); 192 err = mtd->write(mtd, addr, len, &written, buf);
194 if (err) { 193 if (err) {
195 pr_err("error %d while writing EB %d, written %zd" 194 printk(PRINT_PREF "error %d while writing EB %d, written %zd"
196 " bytes\n", err, ebnum, written); 195 " bytes\n", err, ebnum, written);
197 return err; 196 return err;
198 } 197 }
199 if (written != len) { 198 if (written != len) {
200 pr_info("written only %zd bytes of %zd, but no error" 199 printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
201 " reported\n", written, len); 200 " reported\n", written, len);
202 return -EIO; 201 return -EIO;
203 } 202 }
@@ -212,64 +211,57 @@ static int __init tort_init(void)
212 211
213 printk(KERN_INFO "\n"); 212 printk(KERN_INFO "\n");
214 printk(KERN_INFO "=================================================\n"); 213 printk(KERN_INFO "=================================================\n");
215 pr_info("Warning: this program is trying to wear out your " 214 printk(PRINT_PREF "Warning: this program is trying to wear out your "
216 "flash, stop it if this is not wanted.\n"); 215 "flash, stop it if this is not wanted.\n");
217 216 printk(PRINT_PREF "MTD device: %d\n", dev);
218 if (dev < 0) { 217 printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
219 pr_info("Please specify a valid mtd-device via module parameter\n");
220 pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
221 return -EINVAL;
222 }
223
224 pr_info("MTD device: %d\n", dev);
225 pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
226 ebcnt, eb, eb + ebcnt - 1, dev); 218 ebcnt, eb, eb + ebcnt - 1, dev);
227 if (pgcnt) 219 if (pgcnt)
228 pr_info("torturing just %d pages per eraseblock\n", 220 printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
229 pgcnt); 221 pgcnt);
230 pr_info("write verify %s\n", check ? "enabled" : "disabled"); 222 printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
231 223
232 mtd = get_mtd_device(NULL, dev); 224 mtd = get_mtd_device(NULL, dev);
233 if (IS_ERR(mtd)) { 225 if (IS_ERR(mtd)) {
234 err = PTR_ERR(mtd); 226 err = PTR_ERR(mtd);
235 pr_err("error: cannot get MTD device\n"); 227 printk(PRINT_PREF "error: cannot get MTD device\n");
236 return err; 228 return err;
237 } 229 }
238 230
239 if (mtd->writesize == 1) { 231 if (mtd->writesize == 1) {
240 pr_info("not NAND flash, assume page size is 512 " 232 printk(PRINT_PREF "not NAND flash, assume page size is 512 "
241 "bytes.\n"); 233 "bytes.\n");
242 pgsize = 512; 234 pgsize = 512;
243 } else 235 } else
244 pgsize = mtd->writesize; 236 pgsize = mtd->writesize;
245 237
246 if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) { 238 if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
247 pr_err("error: invalid pgcnt value %d\n", pgcnt); 239 printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
248 goto out_mtd; 240 goto out_mtd;
249 } 241 }
250 242
251 err = -ENOMEM; 243 err = -ENOMEM;
252 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL); 244 patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
253 if (!patt_5A5) { 245 if (!patt_5A5) {
254 pr_err("error: cannot allocate memory\n"); 246 printk(PRINT_PREF "error: cannot allocate memory\n");
255 goto out_mtd; 247 goto out_mtd;
256 } 248 }
257 249
258 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL); 250 patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
259 if (!patt_A5A) { 251 if (!patt_A5A) {
260 pr_err("error: cannot allocate memory\n"); 252 printk(PRINT_PREF "error: cannot allocate memory\n");
261 goto out_patt_5A5; 253 goto out_patt_5A5;
262 } 254 }
263 255
264 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL); 256 patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
265 if (!patt_FF) { 257 if (!patt_FF) {
266 pr_err("error: cannot allocate memory\n"); 258 printk(PRINT_PREF "error: cannot allocate memory\n");
267 goto out_patt_A5A; 259 goto out_patt_A5A;
268 } 260 }
269 261
270 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL); 262 check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
271 if (!check_buf) { 263 if (!check_buf) {
272 pr_err("error: cannot allocate memory\n"); 264 printk(PRINT_PREF "error: cannot allocate memory\n");
273 goto out_patt_FF; 265 goto out_patt_FF;
274 } 266 }
275 267
@@ -291,18 +283,19 @@ static int __init tort_init(void)
291 * Check if there is a bad eraseblock among those we are going to test. 283 * Check if there is a bad eraseblock among those we are going to test.
292 */ 284 */
293 memset(&bad_ebs[0], 0, sizeof(int) * ebcnt); 285 memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
294 if (mtd_can_have_bb(mtd)) { 286 if (mtd->block_isbad) {
295 for (i = eb; i < eb + ebcnt; i++) { 287 for (i = eb; i < eb + ebcnt; i++) {
296 err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize); 288 err = mtd->block_isbad(mtd,
289 (loff_t)i * mtd->erasesize);
297 290
298 if (err < 0) { 291 if (err < 0) {
299 pr_info("block_isbad() returned %d " 292 printk(PRINT_PREF "block_isbad() returned %d "
300 "for EB %d\n", err, i); 293 "for EB %d\n", err, i);
301 goto out; 294 goto out;
302 } 295 }
303 296
304 if (err) { 297 if (err) {
305 pr_err("EB %d is bad. Skip it.\n", i); 298 printk("EB %d is bad. Skip it.\n", i);
306 bad_ebs[i - eb] = 1; 299 bad_ebs[i - eb] = 1;
307 } 300 }
308 } 301 }
@@ -330,7 +323,7 @@ static int __init tort_init(void)
330 continue; 323 continue;
331 err = check_eraseblock(i, patt_FF); 324 err = check_eraseblock(i, patt_FF);
332 if (err) { 325 if (err) {
333 pr_info("verify failed" 326 printk(PRINT_PREF "verify failed"
334 " for 0xFF... pattern\n"); 327 " for 0xFF... pattern\n");
335 goto out; 328 goto out;
336 } 329 }
@@ -363,7 +356,7 @@ static int __init tort_init(void)
363 patt = patt_A5A; 356 patt = patt_A5A;
364 err = check_eraseblock(i, patt); 357 err = check_eraseblock(i, patt);
365 if (err) { 358 if (err) {
366 pr_info("verify failed for %s" 359 printk(PRINT_PREF "verify failed for %s"
367 " pattern\n", 360 " pattern\n",
368 ((eb + erase_cycles) & 1) ? 361 ((eb + erase_cycles) & 1) ?
369 "0x55AA55..." : "0xAA55AA..."); 362 "0x55AA55..." : "0xAA55AA...");
@@ -381,7 +374,7 @@ static int __init tort_init(void)
381 stop_timing(); 374 stop_timing();
382 ms = (finish.tv_sec - start.tv_sec) * 1000 + 375 ms = (finish.tv_sec - start.tv_sec) * 1000 +
383 (finish.tv_usec - start.tv_usec) / 1000; 376 (finish.tv_usec - start.tv_usec) / 1000;
384 pr_info("%08u erase cycles done, took %lu " 377 printk(PRINT_PREF "%08u erase cycles done, took %lu "
385 "milliseconds (%lu seconds)\n", 378 "milliseconds (%lu seconds)\n",
386 erase_cycles, ms, ms / 1000); 379 erase_cycles, ms, ms / 1000);
387 start_timing(); 380 start_timing();
@@ -392,7 +385,7 @@ static int __init tort_init(void)
392 } 385 }
393out: 386out:
394 387
395 pr_info("finished after %u erase cycles\n", 388 printk(PRINT_PREF "finished after %u erase cycles\n",
396 erase_cycles); 389 erase_cycles);
397 kfree(check_buf); 390 kfree(check_buf);
398out_patt_FF: 391out_patt_FF:
@@ -404,7 +397,7 @@ out_patt_5A5:
404out_mtd: 397out_mtd:
405 put_mtd_device(mtd); 398 put_mtd_device(mtd);
406 if (err) 399 if (err)
407 pr_info("error %d occurred during torturing\n", err); 400 printk(PRINT_PREF "error %d occurred during torturing\n", err);
408 printk(KERN_INFO "=================================================\n"); 401 printk(KERN_INFO "=================================================\n");
409 return err; 402 return err;
410} 403}
@@ -442,9 +435,9 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
442 &bits) >= 0) 435 &bits) >= 0)
443 pages++; 436 pages++;
444 437
445 pr_info("verify fails on %d pages, %d bytes/%d bits\n", 438 printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
446 pages, bytes, bits); 439 pages, bytes, bits);
447 pr_info("The following is a list of all differences between" 440 printk(PRINT_PREF "The following is a list of all differences between"
448 " what was read from flash and what was expected\n"); 441 " what was read from flash and what was expected\n");
449 442
450 for (i = 0; i < check_len; i += pgsize) { 443 for (i = 0; i < check_len; i += pgsize) {
@@ -458,7 +451,7 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
458 printk("-------------------------------------------------------" 451 printk("-------------------------------------------------------"
459 "----------------------------------\n"); 452 "----------------------------------\n");
460 453
461 pr_info("Page %zd has %d bytes/%d bits failing verify," 454 printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
462 " starting at offset 0x%x\n", 455 " starting at offset 0x%x\n",
463 (mtd->erasesize - check_len + i) / pgsize, 456 (mtd->erasesize - check_len + i) / pgsize,
464 bytes, bits, first); 457 bytes, bits, first);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 36663af56d8..4dcc752a0c0 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,55 +27,20 @@ config MTD_UBI_WL_THRESHOLD
27 life-cycle less than 10000, the threshold should be lessened (e.g., 27 life-cycle less than 10000, the threshold should be lessened (e.g.,
28 to 128 or 256, although it does not have to be power of 2). 28 to 128 or 256, although it does not have to be power of 2).
29 29
30config MTD_UBI_BEB_LIMIT 30config MTD_UBI_BEB_RESERVE
31 int "Maximum expected bad eraseblock count per 1024 eraseblocks" 31 int "Percentage of reserved eraseblocks for bad eraseblocks handling"
32 default 20 32 default 1
33 range 0 768 33 range 0 25
34 help 34 help
35 This option specifies the maximum bad physical eraseblocks UBI 35 If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
36 expects on the MTD device (per 1024 eraseblocks). If the underlying 36 reserves some amount of physical eraseblocks to handle new bad
37 flash does not admit of bad eraseblocks (e.g. NOR flash), this value 37 eraseblocks. For example, if a flash physical eraseblock becomes bad,
38 is ignored. 38 UBI uses these reserved physical eraseblocks to relocate the bad one.
39 39 This option specifies how many physical eraseblocks will be reserved
40 NAND datasheets often specify the minimum and maximum NVM (Number of 40 for bad eraseblock handling (percents of total number of good flash
41 Valid Blocks) for the flashes' endurance lifetime. The maximum 41 eraseblocks). If the underlying flash does not admit of bad
42 expected bad eraseblocks per 1024 eraseblocks then can be calculated 42 eraseblocks (e.g. NOR flash), this value is ignored and nothing is
43 as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs 43 reserved. Leave the default value if unsure.
44 (MaxNVB is basically the total count of eraseblocks on the chip).
45
46 To put it differently, if this value is 20, UBI will try to reserve
47 about 1.9% of physical eraseblocks for bad blocks handling. And that
48 will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
49 partition UBI attaches. This means that if you have, say, a NAND
50 flash chip admits maximum 40 bad eraseblocks, and it is split on two
51 MTD partitions of the same size, UBI will reserve 40 eraseblocks when
52 attaching a partition.
53
54 This option can be overridden by the "mtd=" UBI module parameter or
55 by the "attach" ioctl.
56
57 Leave the default value if unsure.
58
59config MTD_UBI_FASTMAP
60 bool "UBI Fastmap (Experimental feature)"
61 default n
62 help
63 Important: this feature is experimental so far and the on-flash
64 format for fastmap may change in the next kernel versions
65
66 Fastmap is a mechanism which allows attaching an UBI device
67 in nearly constant time. Instead of scanning the whole MTD device it
68 only has to locate a checkpoint (called fastmap) on the device.
69 The on-flash fastmap contains all information needed to attach
70 the device. Using fastmap makes only sense on large devices where
71 attaching by scanning takes long. UBI will not automatically install
72 a fastmap on old images, but you can set the UBI module parameter
73 fm_autoconvert to 1 if you want so. Please note that fastmap-enabled
74 images are still usable with UBI implementations without
75 fastmap support. On typical flash devices the whole fastmap fits
76 into one PEB. UBI will reserve PEBs to hold two fastmaps.
77
78 If in doubt, say "N".
79 44
80config MTD_UBI_GLUEBI 45config MTD_UBI_GLUEBI
81 tristate "MTD devices emulation driver (gluebi)" 46 tristate "MTD devices emulation driver (gluebi)"
@@ -87,4 +52,12 @@ config MTD_UBI_GLUEBI
87 work on top of UBI. Do not enable this unless you use legacy 52 work on top of UBI. Do not enable this unless you use legacy
88 software. 53 software.
89 54
55config MTD_UBI_DEBUG
56 bool "UBI debugging"
57 depends on SYSFS
58 select DEBUG_FS
59 select KALLSYMS
60 help
61 This option enables UBI debugging.
62
90endif # MTD_UBI 63endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index b46b0c97858..c9302a5452b 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_MTD_UBI) += ubi.o 1obj-$(CONFIG_MTD_UBI) += ubi.o
2 2
3ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o 3ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
4ubi-y += misc.o debug.o 4ubi-y += misc.o
5ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
6 5
6ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
7obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o 7obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
deleted file mode 100644
index c071d410488..00000000000
--- a/drivers/mtd/ubi/attach.c
+++ /dev/null
@@ -1,1754 +0,0 @@
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * UBI attaching sub-system.
23 *
24 * This sub-system is responsible for attaching MTD devices and it also
25 * implements flash media scanning.
26 *
27 * The attaching information is represented by a &struct ubi_attach_info'
28 * object. Information about volumes is represented by &struct ubi_ainf_volume
29 * objects which are kept in volume RB-tree with root at the @volumes field.
30 * The RB-tree is indexed by the volume ID.
31 *
32 * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
33 * objects are kept in per-volume RB-trees with the root at the corresponding
34 * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
35 * per-volume objects and each of these objects is the root of RB-tree of
36 * per-LEB objects.
37 *
38 * Corrupted physical eraseblocks are put to the @corr list, free physical
39 * eraseblocks are put to the @free list and the physical eraseblock to be
40 * erased are put to the @erase list.
41 *
42 * About corruptions
43 * ~~~~~~~~~~~~~~~~~
44 *
45 * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
46 * whether the headers are corrupted or not. Sometimes UBI also protects the
47 * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
48 * when it moves the contents of a PEB for wear-leveling purposes.
49 *
50 * UBI tries to distinguish between 2 types of corruptions.
51 *
52 * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
53 * tries to handle them gracefully, without printing too many warnings and
54 * error messages. The idea is that we do not lose important data in these
55 * cases - we may lose only the data which were being written to the media just
56 * before the power cut happened, and the upper layers (e.g., UBIFS) are
57 * supposed to handle such data losses (e.g., by using the FS journal).
58 *
59 * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
60 * the reason is a power cut, UBI puts this PEB to the @erase list, and all
61 * PEBs in the @erase list are scheduled for erasure later.
62 *
63 * 2. Unexpected corruptions which are not caused by power cuts. During
64 * attaching, such PEBs are put to the @corr list and UBI preserves them.
65 * Obviously, this lessens the amount of available PEBs, and if at some point
66 * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
67 * about such PEBs every time the MTD device is attached.
68 *
69 * However, it is difficult to reliably distinguish between these types of
70 * corruptions and UBI's strategy is as follows (in case of attaching by
71 * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
72 * the data area does not contain all 0xFFs, and there were no bit-flips or
73 * integrity errors (e.g., ECC errors in case of NAND) while reading the data
74 * area. Otherwise UBI assumes corruption type 1. So the decision criteria
75 * are as follows.
76 * o If the data area contains only 0xFFs, there are no data, and it is safe
77 * to just erase this PEB - this is corruption type 1.
78 * o If the data area has bit-flips or data integrity errors (ECC errors on
79 * NAND), it is probably a PEB which was being erased when power cut
80 * happened, so this is corruption type 1. However, this is just a guess,
81 * which might be wrong.
82 * o Otherwise this is corruption type 2.
83 */
84
85#include <linux/err.h>
86#include <linux/slab.h>
87#include <linux/crc32.h>
88#include <linux/math64.h>
89#include <linux/random.h>
90#include "ubi.h"
91
92static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
93
94/* Temporary variables used during scanning */
95static struct ubi_ec_hdr *ech;
96static struct ubi_vid_hdr *vidh;
97
98/**
99 * add_to_list - add physical eraseblock to a list.
100 * @ai: attaching information
101 * @pnum: physical eraseblock number to add
102 * @vol_id: the last used volume id for the PEB
103 * @lnum: the last used LEB number for the PEB
104 * @ec: erase counter of the physical eraseblock
105 * @to_head: if not zero, add to the head of the list
106 * @list: the list to add to
107 *
108 * This function allocates a 'struct ubi_ainf_peb' object for physical
109 * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
110 * It stores the @lnum and @vol_id alongside, which can both be
111 * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
112 * If @to_head is not zero, PEB will be added to the head of the list, which
113 * basically means it will be processed first later. E.g., we add corrupted
114 * PEBs (corrupted due to power cuts) to the head of the erase list to make
115 * sure we erase them first and get rid of corruptions ASAP. This function
116 * returns zero in case of success and a negative error code in case of
117 * failure.
118 */
119static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
120 int lnum, int ec, int to_head, struct list_head *list)
121{
122 struct ubi_ainf_peb *aeb;
123
124 if (list == &ai->free) {
125 dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
126 } else if (list == &ai->erase) {
127 dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
128 } else if (list == &ai->alien) {
129 dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
130 ai->alien_peb_count += 1;
131 } else
132 BUG();
133
134 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
135 if (!aeb)
136 return -ENOMEM;
137
138 aeb->pnum = pnum;
139 aeb->vol_id = vol_id;
140 aeb->lnum = lnum;
141 aeb->ec = ec;
142 if (to_head)
143 list_add(&aeb->u.list, list);
144 else
145 list_add_tail(&aeb->u.list, list);
146 return 0;
147}
148
149/**
150 * add_corrupted - add a corrupted physical eraseblock.
151 * @ai: attaching information
152 * @pnum: physical eraseblock number to add
153 * @ec: erase counter of the physical eraseblock
154 *
155 * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
156 * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
157 * was presumably not caused by a power cut. Returns zero in case of success
158 * and a negative error code in case of failure.
159 */
160static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
161{
162 struct ubi_ainf_peb *aeb;
163
164 dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
165
166 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
167 if (!aeb)
168 return -ENOMEM;
169
170 ai->corr_peb_count += 1;
171 aeb->pnum = pnum;
172 aeb->ec = ec;
173 list_add(&aeb->u.list, &ai->corr);
174 return 0;
175}
176
177/**
178 * validate_vid_hdr - check volume identifier header.
179 * @vid_hdr: the volume identifier header to check
180 * @av: information about the volume this logical eraseblock belongs to
181 * @pnum: physical eraseblock number the VID header came from
182 *
183 * This function checks that data stored in @vid_hdr is consistent. Returns
184 * non-zero if an inconsistency was found and zero if not.
185 *
186 * Note, UBI does sanity check of everything it reads from the flash media.
187 * Most of the checks are done in the I/O sub-system. Here we check that the
188 * information in the VID header is consistent to the information in other VID
189 * headers of the same volume.
190 */
191static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
192 const struct ubi_ainf_volume *av, int pnum)
193{
194 int vol_type = vid_hdr->vol_type;
195 int vol_id = be32_to_cpu(vid_hdr->vol_id);
196 int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
197 int data_pad = be32_to_cpu(vid_hdr->data_pad);
198
199 if (av->leb_count != 0) {
200 int av_vol_type;
201
202 /*
203 * This is not the first logical eraseblock belonging to this
204 * volume. Ensure that the data in its VID header is consistent
205 * to the data in previous logical eraseblock headers.
206 */
207
208 if (vol_id != av->vol_id) {
209 ubi_err("inconsistent vol_id");
210 goto bad;
211 }
212
213 if (av->vol_type == UBI_STATIC_VOLUME)
214 av_vol_type = UBI_VID_STATIC;
215 else
216 av_vol_type = UBI_VID_DYNAMIC;
217
218 if (vol_type != av_vol_type) {
219 ubi_err("inconsistent vol_type");
220 goto bad;
221 }
222
223 if (used_ebs != av->used_ebs) {
224 ubi_err("inconsistent used_ebs");
225 goto bad;
226 }
227
228 if (data_pad != av->data_pad) {
229 ubi_err("inconsistent data_pad");
230 goto bad;
231 }
232 }
233
234 return 0;
235
236bad:
237 ubi_err("inconsistent VID header at PEB %d", pnum);
238 ubi_dump_vid_hdr(vid_hdr);
239 ubi_dump_av(av);
240 return -EINVAL;
241}
242
243/**
244 * add_volume - add volume to the attaching information.
245 * @ai: attaching information
246 * @vol_id: ID of the volume to add
247 * @pnum: physical eraseblock number
248 * @vid_hdr: volume identifier header
249 *
250 * If the volume corresponding to the @vid_hdr logical eraseblock is already
251 * present in the attaching information, this function does nothing. Otherwise
252 * it adds corresponding volume to the attaching information. Returns a pointer
253 * to the allocated "av" object in case of success and a negative error code in
254 * case of failure.
255 */
256static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
257 int vol_id, int pnum,
258 const struct ubi_vid_hdr *vid_hdr)
259{
260 struct ubi_ainf_volume *av;
261 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
262
263 ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
264
265 /* Walk the volume RB-tree to look if this volume is already present */
266 while (*p) {
267 parent = *p;
268 av = rb_entry(parent, struct ubi_ainf_volume, rb);
269
270 if (vol_id == av->vol_id)
271 return av;
272
273 if (vol_id > av->vol_id)
274 p = &(*p)->rb_left;
275 else
276 p = &(*p)->rb_right;
277 }
278
279 /* The volume is absent - add it */
280 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
281 if (!av)
282 return ERR_PTR(-ENOMEM);
283
284 av->highest_lnum = av->leb_count = 0;
285 av->vol_id = vol_id;
286 av->root = RB_ROOT;
287 av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
288 av->data_pad = be32_to_cpu(vid_hdr->data_pad);
289 av->compat = vid_hdr->compat;
290 av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
291 : UBI_STATIC_VOLUME;
292 if (vol_id > ai->highest_vol_id)
293 ai->highest_vol_id = vol_id;
294
295 rb_link_node(&av->rb, parent, p);
296 rb_insert_color(&av->rb, &ai->volumes);
297 ai->vols_found += 1;
298 dbg_bld("added volume %d", vol_id);
299 return av;
300}
301
302/**
303 * ubi_compare_lebs - find out which logical eraseblock is newer.
304 * @ubi: UBI device description object
305 * @aeb: first logical eraseblock to compare
306 * @pnum: physical eraseblock number of the second logical eraseblock to
307 * compare
308 * @vid_hdr: volume identifier header of the second logical eraseblock
309 *
310 * This function compares 2 copies of a LEB and informs which one is newer. In
311 * case of success this function returns a positive value, in case of failure, a
312 * negative error code is returned. The success return codes use the following
313 * bits:
314 * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
315 * second PEB (described by @pnum and @vid_hdr);
316 * o bit 0 is set: the second PEB is newer;
317 * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
318 * o bit 1 is set: bit-flips were detected in the newer LEB;
319 * o bit 2 is cleared: the older LEB is not corrupted;
320 * o bit 2 is set: the older LEB is corrupted.
321 */
322int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
323 int pnum, const struct ubi_vid_hdr *vid_hdr)
324{
325 int len, err, second_is_newer, bitflips = 0, corrupted = 0;
326 uint32_t data_crc, crc;
327 struct ubi_vid_hdr *vh = NULL;
328 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
329
330 if (sqnum2 == aeb->sqnum) {
331 /*
332 * This must be a really ancient UBI image which has been
333 * created before sequence numbers support has been added. At
334 * that times we used 32-bit LEB versions stored in logical
335 * eraseblocks. That was before UBI got into mainline. We do not
336 * support these images anymore. Well, those images still work,
337 * but only if no unclean reboots happened.
338 */
339 ubi_err("unsupported on-flash UBI format");
340 return -EINVAL;
341 }
342
343 /* Obviously the LEB with lower sequence counter is older */
344 second_is_newer = (sqnum2 > aeb->sqnum);
345
346 /*
347 * Now we know which copy is newer. If the copy flag of the PEB with
348 * newer version is not set, then we just return, otherwise we have to
349 * check data CRC. For the second PEB we already have the VID header,
350 * for the first one - we'll need to re-read it from flash.
351 *
352 * Note: this may be optimized so that we wouldn't read twice.
353 */
354
355 if (second_is_newer) {
356 if (!vid_hdr->copy_flag) {
357 /* It is not a copy, so it is newer */
358 dbg_bld("second PEB %d is newer, copy_flag is unset",
359 pnum);
360 return 1;
361 }
362 } else {
363 if (!aeb->copy_flag) {
364 /* It is not a copy, so it is newer */
365 dbg_bld("first PEB %d is newer, copy_flag is unset",
366 pnum);
367 return bitflips << 1;
368 }
369
370 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
371 if (!vh)
372 return -ENOMEM;
373
374 pnum = aeb->pnum;
375 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
376 if (err) {
377 if (err == UBI_IO_BITFLIPS)
378 bitflips = 1;
379 else {
380 ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
381 pnum, err);
382 if (err > 0)
383 err = -EIO;
384
385 goto out_free_vidh;
386 }
387 }
388
389 vid_hdr = vh;
390 }
391
392 /* Read the data of the copy and check the CRC */
393
394 len = be32_to_cpu(vid_hdr->data_size);
395
396 mutex_lock(&ubi->buf_mutex);
397 err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
398 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
399 goto out_unlock;
400
401 data_crc = be32_to_cpu(vid_hdr->data_crc);
402 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
403 if (crc != data_crc) {
404 dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
405 pnum, crc, data_crc);
406 corrupted = 1;
407 bitflips = 0;
408 second_is_newer = !second_is_newer;
409 } else {
410 dbg_bld("PEB %d CRC is OK", pnum);
411 bitflips = !!err;
412 }
413 mutex_unlock(&ubi->buf_mutex);
414
415 ubi_free_vid_hdr(ubi, vh);
416
417 if (second_is_newer)
418 dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
419 else
420 dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
421
422 return second_is_newer | (bitflips << 1) | (corrupted << 2);
423
424out_unlock:
425 mutex_unlock(&ubi->buf_mutex);
426out_free_vidh:
427 ubi_free_vid_hdr(ubi, vh);
428 return err;
429}
430
431/**
432 * ubi_add_to_av - add used physical eraseblock to the attaching information.
433 * @ubi: UBI device description object
434 * @ai: attaching information
435 * @pnum: the physical eraseblock number
436 * @ec: erase counter
437 * @vid_hdr: the volume identifier header
438 * @bitflips: if bit-flips were detected when this physical eraseblock was read
439 *
440 * This function adds information about a used physical eraseblock to the
441 * 'used' tree of the corresponding volume. The function is rather complex
442 * because it has to handle cases when this is not the first physical
443 * eraseblock belonging to the same logical eraseblock, and the newer one has
444 * to be picked, while the older one has to be dropped. This function returns
445 * zero in case of success and a negative error code in case of failure.
446 */
447int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
448 int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
449{
450 int err, vol_id, lnum;
451 unsigned long long sqnum;
452 struct ubi_ainf_volume *av;
453 struct ubi_ainf_peb *aeb;
454 struct rb_node **p, *parent = NULL;
455
456 vol_id = be32_to_cpu(vid_hdr->vol_id);
457 lnum = be32_to_cpu(vid_hdr->lnum);
458 sqnum = be64_to_cpu(vid_hdr->sqnum);
459
460 dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
461 pnum, vol_id, lnum, ec, sqnum, bitflips);
462
463 av = add_volume(ai, vol_id, pnum, vid_hdr);
464 if (IS_ERR(av))
465 return PTR_ERR(av);
466
467 if (ai->max_sqnum < sqnum)
468 ai->max_sqnum = sqnum;
469
470 /*
471 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
472 * if this is the first instance of this logical eraseblock or not.
473 */
474 p = &av->root.rb_node;
475 while (*p) {
476 int cmp_res;
477
478 parent = *p;
479 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
480 if (lnum != aeb->lnum) {
481 if (lnum < aeb->lnum)
482 p = &(*p)->rb_left;
483 else
484 p = &(*p)->rb_right;
485 continue;
486 }
487
488 /*
489 * There is already a physical eraseblock describing the same
490 * logical eraseblock present.
491 */
492
493 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
494 aeb->pnum, aeb->sqnum, aeb->ec);
495
496 /*
497 * Make sure that the logical eraseblocks have different
498 * sequence numbers. Otherwise the image is bad.
499 *
500 * However, if the sequence number is zero, we assume it must
501 * be an ancient UBI image from the era when UBI did not have
502 * sequence numbers. We still can attach these images, unless
503 * there is a need to distinguish between old and new
504 * eraseblocks, in which case we'll refuse the image in
505 * 'ubi_compare_lebs()'. In other words, we attach old clean
506 * images, but refuse attaching old images with duplicated
507 * logical eraseblocks because there was an unclean reboot.
508 */
509 if (aeb->sqnum == sqnum && sqnum != 0) {
510 ubi_err("two LEBs with same sequence number %llu",
511 sqnum);
512 ubi_dump_aeb(aeb, 0);
513 ubi_dump_vid_hdr(vid_hdr);
514 return -EINVAL;
515 }
516
517 /*
518 * Now we have to drop the older one and preserve the newer
519 * one.
520 */
521 cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
522 if (cmp_res < 0)
523 return cmp_res;
524
525 if (cmp_res & 1) {
526 /*
527 * This logical eraseblock is newer than the one
528 * found earlier.
529 */
530 err = validate_vid_hdr(vid_hdr, av, pnum);
531 if (err)
532 return err;
533
534 err = add_to_list(ai, aeb->pnum, aeb->vol_id,
535 aeb->lnum, aeb->ec, cmp_res & 4,
536 &ai->erase);
537 if (err)
538 return err;
539
540 aeb->ec = ec;
541 aeb->pnum = pnum;
542 aeb->vol_id = vol_id;
543 aeb->lnum = lnum;
544 aeb->scrub = ((cmp_res & 2) || bitflips);
545 aeb->copy_flag = vid_hdr->copy_flag;
546 aeb->sqnum = sqnum;
547
548 if (av->highest_lnum == lnum)
549 av->last_data_size =
550 be32_to_cpu(vid_hdr->data_size);
551
552 return 0;
553 } else {
554 /*
555 * This logical eraseblock is older than the one found
556 * previously.
557 */
558 return add_to_list(ai, pnum, vol_id, lnum, ec,
559 cmp_res & 4, &ai->erase);
560 }
561 }
562
563 /*
564 * We've met this logical eraseblock for the first time, add it to the
565 * attaching information.
566 */
567
568 err = validate_vid_hdr(vid_hdr, av, pnum);
569 if (err)
570 return err;
571
572 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
573 if (!aeb)
574 return -ENOMEM;
575
576 aeb->ec = ec;
577 aeb->pnum = pnum;
578 aeb->vol_id = vol_id;
579 aeb->lnum = lnum;
580 aeb->scrub = bitflips;
581 aeb->copy_flag = vid_hdr->copy_flag;
582 aeb->sqnum = sqnum;
583
584 if (av->highest_lnum <= lnum) {
585 av->highest_lnum = lnum;
586 av->last_data_size = be32_to_cpu(vid_hdr->data_size);
587 }
588
589 av->leb_count += 1;
590 rb_link_node(&aeb->u.rb, parent, p);
591 rb_insert_color(&aeb->u.rb, &av->root);
592 return 0;
593}
594
595/**
596 * ubi_find_av - find volume in the attaching information.
597 * @ai: attaching information
598 * @vol_id: the requested volume ID
599 *
600 * This function returns a pointer to the volume description or %NULL if there
601 * are no data about this volume in the attaching information.
602 */
603struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
604 int vol_id)
605{
606 struct ubi_ainf_volume *av;
607 struct rb_node *p = ai->volumes.rb_node;
608
609 while (p) {
610 av = rb_entry(p, struct ubi_ainf_volume, rb);
611
612 if (vol_id == av->vol_id)
613 return av;
614
615 if (vol_id > av->vol_id)
616 p = p->rb_left;
617 else
618 p = p->rb_right;
619 }
620
621 return NULL;
622}
623
624/**
625 * ubi_remove_av - delete attaching information about a volume.
626 * @ai: attaching information
627 * @av: the volume attaching information to delete
628 */
629void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
630{
631 struct rb_node *rb;
632 struct ubi_ainf_peb *aeb;
633
634 dbg_bld("remove attaching information about volume %d", av->vol_id);
635
636 while ((rb = rb_first(&av->root))) {
637 aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
638 rb_erase(&aeb->u.rb, &av->root);
639 list_add_tail(&aeb->u.list, &ai->erase);
640 }
641
642 rb_erase(&av->rb, &ai->volumes);
643 kfree(av);
644 ai->vols_found -= 1;
645}
646
647/**
648 * early_erase_peb - erase a physical eraseblock.
649 * @ubi: UBI device description object
650 * @ai: attaching information
651 * @pnum: physical eraseblock number to erase;
652 * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
653 *
654 * This function erases physical eraseblock 'pnum', and writes the erase
655 * counter header to it. This function should only be used on UBI device
656 * initialization stages, when the EBA sub-system had not been yet initialized.
657 * This function returns zero in case of success and a negative error code in
658 * case of failure.
659 */
660static int early_erase_peb(struct ubi_device *ubi,
661 const struct ubi_attach_info *ai, int pnum, int ec)
662{
663 int err;
664 struct ubi_ec_hdr *ec_hdr;
665
666 if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
667 /*
668 * Erase counter overflow. Upgrade UBI and use 64-bit
669 * erase counters internally.
670 */
671 ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
672 return -EINVAL;
673 }
674
675 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
676 if (!ec_hdr)
677 return -ENOMEM;
678
679 ec_hdr->ec = cpu_to_be64(ec);
680
681 err = ubi_io_sync_erase(ubi, pnum, 0);
682 if (err < 0)
683 goto out_free;
684
685 err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
686
687out_free:
688 kfree(ec_hdr);
689 return err;
690}
691
692/**
693 * ubi_early_get_peb - get a free physical eraseblock.
694 * @ubi: UBI device description object
695 * @ai: attaching information
696 *
697 * This function returns a free physical eraseblock. It is supposed to be
698 * called on the UBI initialization stages when the wear-leveling sub-system is
699 * not initialized yet. This function picks a physical eraseblocks from one of
700 * the lists, writes the EC header if it is needed, and removes it from the
701 * list.
702 *
703 * This function returns a pointer to the "aeb" of the found free PEB in case
704 * of success and an error code in case of failure.
705 */
706struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
707 struct ubi_attach_info *ai)
708{
709 int err = 0;
710 struct ubi_ainf_peb *aeb, *tmp_aeb;
711
712 if (!list_empty(&ai->free)) {
713 aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
714 list_del(&aeb->u.list);
715 dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
716 return aeb;
717 }
718
719 /*
720 * We try to erase the first physical eraseblock from the erase list
721 * and pick it if we succeed, or try to erase the next one if not. And
722 * so forth. We don't want to take care about bad eraseblocks here -
723 * they'll be handled later.
724 */
725 list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
726 if (aeb->ec == UBI_UNKNOWN)
727 aeb->ec = ai->mean_ec;
728
729 err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
730 if (err)
731 continue;
732
733 aeb->ec += 1;
734 list_del(&aeb->u.list);
735 dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
736 return aeb;
737 }
738
739 ubi_err("no free eraseblocks");
740 return ERR_PTR(-ENOSPC);
741}
742
743/**
744 * check_corruption - check the data area of PEB.
745 * @ubi: UBI device description object
746 * @vid_hdr: the (corrupted) VID header of this PEB
747 * @pnum: the physical eraseblock number to check
748 *
749 * This is a helper function which is used to distinguish between VID header
750 * corruptions caused by power cuts and other reasons. If the PEB contains only
751 * 0xFF bytes in the data area, the VID header is most probably corrupted
752 * because of a power cut (%0 is returned in this case). Otherwise, it was
753 * probably corrupted for some other reasons (%1 is returned in this case). A
754 * negative error code is returned if a read error occurred.
755 *
756 * If the corruption reason was a power cut, UBI can safely erase this PEB.
757 * Otherwise, it should preserve it to avoid possibly destroying important
758 * information.
759 */
760static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
761 int pnum)
762{
763 int err;
764
765 mutex_lock(&ubi->buf_mutex);
766 memset(ubi->peb_buf, 0x00, ubi->leb_size);
767
768 err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
769 ubi->leb_size);
770 if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
771 /*
772 * Bit-flips or integrity errors while reading the data area.
773 * It is difficult to say for sure what type of corruption is
774 * this, but presumably a power cut happened while this PEB was
775 * erased, so it became unstable and corrupted, and should be
776 * erased.
777 */
778 err = 0;
779 goto out_unlock;
780 }
781
782 if (err)
783 goto out_unlock;
784
785 if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
786 goto out_unlock;
787
788 ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
789 pnum);
790 ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
791 ubi_dump_vid_hdr(vid_hdr);
792 pr_err("hexdump of PEB %d offset %d, length %d",
793 pnum, ubi->leb_start, ubi->leb_size);
794 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
795 ubi->peb_buf, ubi->leb_size, 1);
796 err = 1;
797
798out_unlock:
799 mutex_unlock(&ubi->buf_mutex);
800 return err;
801}
802
803/**
804 * scan_peb - scan and process UBI headers of a PEB.
805 * @ubi: UBI device description object
806 * @ai: attaching information
807 * @pnum: the physical eraseblock number
808 * @vid: The volume ID of the found volume will be stored in this pointer
809 * @sqnum: The sqnum of the found volume will be stored in this pointer
810 *
811 * This function reads UBI headers of PEB @pnum, checks them, and adds
812 * information about this PEB to the corresponding list or RB-tree in the
813 * "attaching info" structure. Returns zero if the physical eraseblock was
814 * successfully handled and a negative error code in case of failure.
815 */
816static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
817 int pnum, int *vid, unsigned long long *sqnum)
818{
819 long long uninitialized_var(ec);
820 int err, bitflips = 0, vol_id = -1, ec_err = 0;
821
822 dbg_bld("scan PEB %d", pnum);
823
824 /* Skip bad physical eraseblocks */
825 err = ubi_io_is_bad(ubi, pnum);
826 if (err < 0)
827 return err;
828 else if (err) {
829 ai->bad_peb_count += 1;
830 return 0;
831 }
832
833 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
834 if (err < 0)
835 return err;
836 switch (err) {
837 case 0:
838 break;
839 case UBI_IO_BITFLIPS:
840 bitflips = 1;
841 break;
842 case UBI_IO_FF:
843 ai->empty_peb_count += 1;
844 return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
845 UBI_UNKNOWN, 0, &ai->erase);
846 case UBI_IO_FF_BITFLIPS:
847 ai->empty_peb_count += 1;
848 return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
849 UBI_UNKNOWN, 1, &ai->erase);
850 case UBI_IO_BAD_HDR_EBADMSG:
851 case UBI_IO_BAD_HDR:
852 /*
853 * We have to also look at the VID header, possibly it is not
854 * corrupted. Set %bitflips flag in order to make this PEB be
855 * moved and EC be re-created.
856 */
857 ec_err = err;
858 ec = UBI_UNKNOWN;
859 bitflips = 1;
860 break;
861 default:
862 ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
863 return -EINVAL;
864 }
865
866 if (!ec_err) {
867 int image_seq;
868
869 /* Make sure UBI version is OK */
870 if (ech->version != UBI_VERSION) {
871 ubi_err("this UBI version is %d, image version is %d",
872 UBI_VERSION, (int)ech->version);
873 return -EINVAL;
874 }
875
876 ec = be64_to_cpu(ech->ec);
877 if (ec > UBI_MAX_ERASECOUNTER) {
878 /*
879 * Erase counter overflow. The EC headers have 64 bits
880 * reserved, but we anyway make use of only 31 bit
881 * values, as this seems to be enough for any existing
882 * flash. Upgrade UBI and use 64-bit erase counters
883 * internally.
884 */
885 ubi_err("erase counter overflow, max is %d",
886 UBI_MAX_ERASECOUNTER);
887 ubi_dump_ec_hdr(ech);
888 return -EINVAL;
889 }
890
891 /*
892 * Make sure that all PEBs have the same image sequence number.
893 * This allows us to detect situations when users flash UBI
894 * images incorrectly, so that the flash has the new UBI image
895 * and leftovers from the old one. This feature was added
896 * relatively recently, and the sequence number was always
897 * zero, because old UBI implementations always set it to zero.
898 * For this reasons, we do not panic if some PEBs have zero
899 * sequence number, while other PEBs have non-zero sequence
900 * number.
901 */
902 image_seq = be32_to_cpu(ech->image_seq);
903 if (!ubi->image_seq && image_seq)
904 ubi->image_seq = image_seq;
905 if (ubi->image_seq && image_seq &&
906 ubi->image_seq != image_seq) {
907 ubi_err("bad image sequence number %d in PEB %d, expected %d",
908 image_seq, pnum, ubi->image_seq);
909 ubi_dump_ec_hdr(ech);
910 return -EINVAL;
911 }
912 }
913
914 /* OK, we've done with the EC header, let's look at the VID header */
915
916 err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
917 if (err < 0)
918 return err;
919 switch (err) {
920 case 0:
921 break;
922 case UBI_IO_BITFLIPS:
923 bitflips = 1;
924 break;
925 case UBI_IO_BAD_HDR_EBADMSG:
926 if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
927 /*
928 * Both EC and VID headers are corrupted and were read
929 * with data integrity error, probably this is a bad
930 * PEB, bit it is not marked as bad yet. This may also
931 * be a result of power cut during erasure.
932 */
933 ai->maybe_bad_peb_count += 1;
934 case UBI_IO_BAD_HDR:
935 if (ec_err)
936 /*
937 * Both headers are corrupted. There is a possibility
938 * that this a valid UBI PEB which has corresponding
939 * LEB, but the headers are corrupted. However, it is
940 * impossible to distinguish it from a PEB which just
941 * contains garbage because of a power cut during erase
942 * operation. So we just schedule this PEB for erasure.
943 *
944 * Besides, in case of NOR flash, we deliberately
945 * corrupt both headers because NOR flash erasure is
946 * slow and can start from the end.
947 */
948 err = 0;
949 else
950 /*
951 * The EC was OK, but the VID header is corrupted. We
952 * have to check what is in the data area.
953 */
954 err = check_corruption(ubi, vidh, pnum);
955
956 if (err < 0)
957 return err;
958 else if (!err)
959 /* This corruption is caused by a power cut */
960 err = add_to_list(ai, pnum, UBI_UNKNOWN,
961 UBI_UNKNOWN, ec, 1, &ai->erase);
962 else
963 /* This is an unexpected corruption */
964 err = add_corrupted(ai, pnum, ec);
965 if (err)
966 return err;
967 goto adjust_mean_ec;
968 case UBI_IO_FF_BITFLIPS:
969 err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
970 ec, 1, &ai->erase);
971 if (err)
972 return err;
973 goto adjust_mean_ec;
974 case UBI_IO_FF:
975 if (ec_err || bitflips)
976 err = add_to_list(ai, pnum, UBI_UNKNOWN,
977 UBI_UNKNOWN, ec, 1, &ai->erase);
978 else
979 err = add_to_list(ai, pnum, UBI_UNKNOWN,
980 UBI_UNKNOWN, ec, 0, &ai->free);
981 if (err)
982 return err;
983 goto adjust_mean_ec;
984 default:
985 ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
986 err);
987 return -EINVAL;
988 }
989
990 vol_id = be32_to_cpu(vidh->vol_id);
991 if (vid)
992 *vid = vol_id;
993 if (sqnum)
994 *sqnum = be64_to_cpu(vidh->sqnum);
995 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
996 int lnum = be32_to_cpu(vidh->lnum);
997
998 /* Unsupported internal volume */
999 switch (vidh->compat) {
1000 case UBI_COMPAT_DELETE:
1001 if (vol_id != UBI_FM_SB_VOLUME_ID
1002 && vol_id != UBI_FM_DATA_VOLUME_ID) {
1003 ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
1004 vol_id, lnum);
1005 }
1006 err = add_to_list(ai, pnum, vol_id, lnum,
1007 ec, 1, &ai->erase);
1008 if (err)
1009 return err;
1010 return 0;
1011
1012 case UBI_COMPAT_RO:
1013 ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
1014 vol_id, lnum);
1015 ubi->ro_mode = 1;
1016 break;
1017
1018 case UBI_COMPAT_PRESERVE:
1019 ubi_msg("\"preserve\" compatible internal volume %d:%d found",
1020 vol_id, lnum);
1021 err = add_to_list(ai, pnum, vol_id, lnum,
1022 ec, 0, &ai->alien);
1023 if (err)
1024 return err;
1025 return 0;
1026
1027 case UBI_COMPAT_REJECT:
1028 ubi_err("incompatible internal volume %d:%d found",
1029 vol_id, lnum);
1030 return -EINVAL;
1031 }
1032 }
1033
1034 if (ec_err)
1035 ubi_warn("valid VID header but corrupted EC header at PEB %d",
1036 pnum);
1037 err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
1038 if (err)
1039 return err;
1040
1041adjust_mean_ec:
1042 if (!ec_err) {
1043 ai->ec_sum += ec;
1044 ai->ec_count += 1;
1045 if (ec > ai->max_ec)
1046 ai->max_ec = ec;
1047 if (ec < ai->min_ec)
1048 ai->min_ec = ec;
1049 }
1050
1051 return 0;
1052}
1053
1054/**
1055 * late_analysis - analyze the overall situation with PEB.
1056 * @ubi: UBI device description object
1057 * @ai: attaching information
1058 *
1059 * This is a helper function which takes a look what PEBs we have after we
1060 * gather information about all of them ("ai" is compete). It decides whether
1061 * the flash is empty and should be formatted of whether there are too many
1062 * corrupted PEBs and we should not attach this MTD device. Returns zero if we
1063 * should proceed with attaching the MTD device, and %-EINVAL if we should not.
1064 */
1065static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1066{
1067 struct ubi_ainf_peb *aeb;
1068 int max_corr, peb_count;
1069
1070 peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
1071 max_corr = peb_count / 20 ?: 8;
1072
1073 /*
1074 * Few corrupted PEBs is not a problem and may be just a result of
1075 * unclean reboots. However, many of them may indicate some problems
1076 * with the flash HW or driver.
1077 */
1078 if (ai->corr_peb_count) {
1079 ubi_err("%d PEBs are corrupted and preserved",
1080 ai->corr_peb_count);
1081 pr_err("Corrupted PEBs are:");
1082 list_for_each_entry(aeb, &ai->corr, u.list)
1083 pr_cont(" %d", aeb->pnum);
1084 pr_cont("\n");
1085
1086 /*
1087 * If too many PEBs are corrupted, we refuse attaching,
1088 * otherwise, only print a warning.
1089 */
1090 if (ai->corr_peb_count >= max_corr) {
1091 ubi_err("too many corrupted PEBs, refusing");
1092 return -EINVAL;
1093 }
1094 }
1095
1096 if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
1097 /*
1098 * All PEBs are empty, or almost all - a couple PEBs look like
1099 * they may be bad PEBs which were not marked as bad yet.
1100 *
1101 * This piece of code basically tries to distinguish between
1102 * the following situations:
1103 *
1104 * 1. Flash is empty, but there are few bad PEBs, which are not
1105 * marked as bad so far, and which were read with error. We
1106 * want to go ahead and format this flash. While formatting,
1107 * the faulty PEBs will probably be marked as bad.
1108 *
1109 * 2. Flash contains non-UBI data and we do not want to format
1110 * it and destroy possibly important information.
1111 */
1112 if (ai->maybe_bad_peb_count <= 2) {
1113 ai->is_empty = 1;
1114 ubi_msg("empty MTD device detected");
1115 get_random_bytes(&ubi->image_seq,
1116 sizeof(ubi->image_seq));
1117 } else {
1118 ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
1119 return -EINVAL;
1120 }
1121
1122 }
1123
1124 return 0;
1125}
1126
/**
 * destroy_av - free volume attaching information.
 * @ai: attaching information (owns the aeb slab cache)
 * @av: volume attaching information to destroy
 *
 * This function destroys the volume attaching information: every
 * &struct ubi_ainf_peb object in @av's RB-tree is returned to the slab
 * cache, then @av itself is freed.
 */
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *aeb;
	struct rb_node *this = av->root.rb_node;

	/*
	 * Iterative post-order teardown: walk down to a leaf, free it,
	 * detach it from its parent by hand, and continue from the parent.
	 * rb_erase() is deliberately not used - the whole tree is going
	 * away, so rebalancing would be wasted work.
	 */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
			this = rb_parent(this);
			if (this) {
				/* Unlink the node we are about to free */
				if (this->rb_left == &aeb->u.rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}

			kmem_cache_free(ai->aeb_slab_cache, aeb);
		}
	}
	kfree(av);
}
1159
1160/**
1161 * destroy_ai - destroy attaching information.
1162 * @ai: attaching information
1163 */
1164static void destroy_ai(struct ubi_attach_info *ai)
1165{
1166 struct ubi_ainf_peb *aeb, *aeb_tmp;
1167 struct ubi_ainf_volume *av;
1168 struct rb_node *rb;
1169
1170 list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
1171 list_del(&aeb->u.list);
1172 kmem_cache_free(ai->aeb_slab_cache, aeb);
1173 }
1174 list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
1175 list_del(&aeb->u.list);
1176 kmem_cache_free(ai->aeb_slab_cache, aeb);
1177 }
1178 list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
1179 list_del(&aeb->u.list);
1180 kmem_cache_free(ai->aeb_slab_cache, aeb);
1181 }
1182 list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
1183 list_del(&aeb->u.list);
1184 kmem_cache_free(ai->aeb_slab_cache, aeb);
1185 }
1186
1187 /* Destroy the volume RB-tree */
1188 rb = ai->volumes.rb_node;
1189 while (rb) {
1190 if (rb->rb_left)
1191 rb = rb->rb_left;
1192 else if (rb->rb_right)
1193 rb = rb->rb_right;
1194 else {
1195 av = rb_entry(rb, struct ubi_ainf_volume, rb);
1196
1197 rb = rb_parent(rb);
1198 if (rb) {
1199 if (rb->rb_left == &av->rb)
1200 rb->rb_left = NULL;
1201 else
1202 rb->rb_right = NULL;
1203 }
1204
1205 destroy_av(ai, av);
1206 }
1207 }
1208
1209 if (ai->aeb_slab_cache)
1210 kmem_cache_destroy(ai->aeb_slab_cache);
1211
1212 kfree(ai);
1213}
1214
1215/**
1216 * scan_all - scan entire MTD device.
1217 * @ubi: UBI device description object
1218 * @ai: attach info object
1219 * @start: start scanning at this PEB
1220 *
1221 * This function does full scanning of an MTD device and returns complete
1222 * information about it in form of a "struct ubi_attach_info" object. In case
1223 * of failure, an error code is returned.
1224 */
1225static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
1226 int start)
1227{
1228 int err, pnum;
1229 struct rb_node *rb1, *rb2;
1230 struct ubi_ainf_volume *av;
1231 struct ubi_ainf_peb *aeb;
1232
1233 err = -ENOMEM;
1234
1235 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1236 if (!ech)
1237 return err;
1238
1239 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
1240 if (!vidh)
1241 goto out_ech;
1242
1243 for (pnum = start; pnum < ubi->peb_count; pnum++) {
1244 cond_resched();
1245
1246 dbg_gen("process PEB %d", pnum);
1247 err = scan_peb(ubi, ai, pnum, NULL, NULL);
1248 if (err < 0)
1249 goto out_vidh;
1250 }
1251
1252 ubi_msg("scanning is finished");
1253
1254 /* Calculate mean erase counter */
1255 if (ai->ec_count)
1256 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
1257
1258 err = late_analysis(ubi, ai);
1259 if (err)
1260 goto out_vidh;
1261
1262 /*
1263 * In case of unknown erase counter we use the mean erase counter
1264 * value.
1265 */
1266 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1267 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
1268 if (aeb->ec == UBI_UNKNOWN)
1269 aeb->ec = ai->mean_ec;
1270 }
1271
1272 list_for_each_entry(aeb, &ai->free, u.list) {
1273 if (aeb->ec == UBI_UNKNOWN)
1274 aeb->ec = ai->mean_ec;
1275 }
1276
1277 list_for_each_entry(aeb, &ai->corr, u.list)
1278 if (aeb->ec == UBI_UNKNOWN)
1279 aeb->ec = ai->mean_ec;
1280
1281 list_for_each_entry(aeb, &ai->erase, u.list)
1282 if (aeb->ec == UBI_UNKNOWN)
1283 aeb->ec = ai->mean_ec;
1284
1285 err = self_check_ai(ubi, ai);
1286 if (err)
1287 goto out_vidh;
1288
1289 ubi_free_vid_hdr(ubi, vidh);
1290 kfree(ech);
1291
1292 return 0;
1293
1294out_vidh:
1295 ubi_free_vid_hdr(ubi, vidh);
1296out_ech:
1297 kfree(ech);
1298 return err;
1299}
1300
1301#ifdef CONFIG_MTD_UBI_FASTMAP
1302
/**
 * scan_fast - try to find a fastmap and attach from it.
 * @ubi: UBI device description object
 * @ai: attach info object
 *
 * Returns 0 on success, negative return values indicate an internal
 * error.
 * %UBI_NO_FASTMAP denotes that no fastmap was found.
 * %UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
 */
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, pnum, fm_anchor = -1;
	unsigned long long max_sqnum = 0;

	err = -ENOMEM;

	/* ech and vidh are the file-scope header buffers used by scan_peb() */
	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		goto out;

	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vidh)
		goto out_ech;

	/*
	 * Scan only the first UBI_FM_MAX_START PEBs and remember the PEB
	 * belonging to the fastmap super block volume which carries the
	 * highest sequence number - that is the anchor candidate.
	 */
	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		int vol_id = -1;
		/* -1 wraps to ULLONG_MAX: "no valid sequence number seen" */
		unsigned long long sqnum = -1;
		cond_resched();

		dbg_gen("process PEB %d", pnum);
		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
		if (err < 0)
			goto out_vidh;

		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
			max_sqnum = sqnum;
			fm_anchor = pnum;
		}
	}

	ubi_free_vid_hdr(ubi, vidh);
	kfree(ech);

	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	return ubi_scan_fastmap(ubi, ai, fm_anchor);

out_vidh:
	ubi_free_vid_hdr(ubi, vidh);
out_ech:
	kfree(ech);
out:
	return err;
}
1359
1360#endif
1361
1362static struct ubi_attach_info *alloc_ai(const char *slab_name)
1363{
1364 struct ubi_attach_info *ai;
1365
1366 ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
1367 if (!ai)
1368 return ai;
1369
1370 INIT_LIST_HEAD(&ai->corr);
1371 INIT_LIST_HEAD(&ai->free);
1372 INIT_LIST_HEAD(&ai->erase);
1373 INIT_LIST_HEAD(&ai->alien);
1374 ai->volumes = RB_ROOT;
1375 ai->aeb_slab_cache = kmem_cache_create(slab_name,
1376 sizeof(struct ubi_ainf_peb),
1377 0, 0, NULL);
1378 if (!ai->aeb_slab_cache) {
1379 kfree(ai);
1380 ai = NULL;
1381 }
1382
1383 return ai;
1384}
1385
1386/**
1387 * ubi_attach - attach an MTD device.
1388 * @ubi: UBI device descriptor
1389 * @force_scan: if set to non-zero attach by scanning
1390 *
1391 * This function returns zero in case of success and a negative error code in
1392 * case of failure.
1393 */
1394int ubi_attach(struct ubi_device *ubi, int force_scan)
1395{
1396 int err;
1397 struct ubi_attach_info *ai;
1398
1399 ai = alloc_ai("ubi_aeb_slab_cache");
1400 if (!ai)
1401 return -ENOMEM;
1402
1403#ifdef CONFIG_MTD_UBI_FASTMAP
1404 /* On small flash devices we disable fastmap in any case. */
1405 if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
1406 ubi->fm_disabled = 1;
1407 force_scan = 1;
1408 }
1409
1410 if (force_scan)
1411 err = scan_all(ubi, ai, 0);
1412 else {
1413 err = scan_fast(ubi, ai);
1414 if (err > 0) {
1415 if (err != UBI_NO_FASTMAP) {
1416 destroy_ai(ai);
1417 ai = alloc_ai("ubi_aeb_slab_cache2");
1418 if (!ai)
1419 return -ENOMEM;
1420 }
1421
1422 err = scan_all(ubi, ai, UBI_FM_MAX_START);
1423 }
1424 }
1425#else
1426 err = scan_all(ubi, ai, 0);
1427#endif
1428 if (err)
1429 goto out_ai;
1430
1431 ubi->bad_peb_count = ai->bad_peb_count;
1432 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
1433 ubi->corr_peb_count = ai->corr_peb_count;
1434 ubi->max_ec = ai->max_ec;
1435 ubi->mean_ec = ai->mean_ec;
1436 dbg_gen("max. sequence number: %llu", ai->max_sqnum);
1437
1438 err = ubi_read_volume_table(ubi, ai);
1439 if (err)
1440 goto out_ai;
1441
1442 err = ubi_wl_init(ubi, ai);
1443 if (err)
1444 goto out_vtbl;
1445
1446 err = ubi_eba_init(ubi, ai);
1447 if (err)
1448 goto out_wl;
1449
1450#ifdef CONFIG_MTD_UBI_FASTMAP
1451 if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
1452 struct ubi_attach_info *scan_ai;
1453
1454 scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
1455 if (!scan_ai)
1456 goto out_wl;
1457
1458 err = scan_all(ubi, scan_ai, 0);
1459 if (err) {
1460 destroy_ai(scan_ai);
1461 goto out_wl;
1462 }
1463
1464 err = self_check_eba(ubi, ai, scan_ai);
1465 destroy_ai(scan_ai);
1466
1467 if (err)
1468 goto out_wl;
1469 }
1470#endif
1471
1472 destroy_ai(ai);
1473 return 0;
1474
1475out_wl:
1476 ubi_wl_close(ubi);
1477out_vtbl:
1478 ubi_free_internal_volumes(ubi);
1479 vfree(ubi->vtbl);
1480out_ai:
1481 destroy_ai(ai);
1482 return err;
1483}
1484
/**
 * self_check_ai - check the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero if the attaching information is all right, and a
 * negative error code if not or if an error occurred.
 */
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int pnum, err, vols_found = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *last_aeb;
	uint8_t *buf;

	/* This expensive check only runs with generic debug checks enabled */
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	/*
	 * At first, check that attaching information is OK.
	 */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		int leb_count = 0;

		cond_resched();

		vols_found += 1;

		/* A volume exists, so the device cannot be "empty" */
		if (ai->is_empty) {
			ubi_err("bad is_empty flag");
			goto bad_av;
		}

		if (av->vol_id < 0 || av->highest_lnum < 0 ||
		    av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
		    av->data_pad < 0 || av->last_data_size < 0) {
			ubi_err("negative values");
			goto bad_av;
		}

		/* IDs between the user max and internal range are invalid */
		if (av->vol_id >= UBI_MAX_VOLUMES &&
		    av->vol_id < UBI_INTERNAL_VOL_START) {
			ubi_err("bad vol_id");
			goto bad_av;
		}

		if (av->vol_id > ai->highest_vol_id) {
			ubi_err("highest_vol_id is %d, but vol_id %d is there",
				ai->highest_vol_id, av->vol_id);
			goto out;
		}

		if (av->vol_type != UBI_DYNAMIC_VOLUME &&
		    av->vol_type != UBI_STATIC_VOLUME) {
			ubi_err("bad vol_type");
			goto bad_av;
		}

		if (av->data_pad > ubi->leb_size / 2) {
			ubi_err("bad data_pad");
			goto bad_av;
		}

		/* Validate every PEB attached to this volume */
		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			last_aeb = aeb;
			leb_count += 1;

			if (aeb->pnum < 0 || aeb->ec < 0) {
				ubi_err("negative values");
				goto bad_aeb;
			}

			if (aeb->ec < ai->min_ec) {
				ubi_err("bad ai->min_ec (%d), %d found",
					ai->min_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->ec > ai->max_ec) {
				ubi_err("bad ai->max_ec (%d), %d found",
					ai->max_ec, aeb->ec);
				goto bad_aeb;
			}

			if (aeb->pnum >= ubi->peb_count) {
				ubi_err("too high PEB number %d, total PEBs %d",
					aeb->pnum, ubi->peb_count);
				goto bad_aeb;
			}

			if (av->vol_type == UBI_STATIC_VOLUME) {
				if (aeb->lnum >= av->used_ebs) {
					ubi_err("bad lnum or used_ebs");
					goto bad_aeb;
				}
			} else {
				/* Dynamic volumes must not set used_ebs */
				if (av->used_ebs != 0) {
					ubi_err("non-zero used_ebs");
					goto bad_aeb;
				}
			}

			if (aeb->lnum > av->highest_lnum) {
				ubi_err("incorrect highest_lnum or lnum");
				goto bad_aeb;
			}
		}

		if (av->leb_count != leb_count) {
			ubi_err("bad leb_count, %d objects in the tree",
				leb_count);
			goto bad_av;
		}

		if (!last_aeb)
			continue;

		/*
		 * The RB-tree is ordered, so the last visited entry must be
		 * the one with the highest LEB number.
		 */
		aeb = last_aeb;

		if (aeb->lnum != av->highest_lnum) {
			ubi_err("bad highest_lnum");
			goto bad_aeb;
		}
	}

	if (vols_found != ai->vols_found) {
		ubi_err("bad ai->vols_found %d, should be %d",
			ai->vols_found, vols_found);
		goto out;
	}

	/* Check that attaching information is correct */
	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		last_aeb = NULL;
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			int vol_type;

			cond_resched();

			last_aeb = aeb;

			/*
			 * Re-read the VID header from flash and compare it
			 * field-by-field with the in-memory attach info.
			 */
			err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err("VID header is not OK (%d)", err);
				if (err > 0)
					err = -EIO;
				return err;
			}

			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
			if (av->vol_type != vol_type) {
				ubi_err("bad vol_type");
				goto bad_vid_hdr;
			}

			if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
				ubi_err("bad sqnum %llu", aeb->sqnum);
				goto bad_vid_hdr;
			}

			if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
				ubi_err("bad vol_id %d", av->vol_id);
				goto bad_vid_hdr;
			}

			if (av->compat != vidh->compat) {
				ubi_err("bad compat %d", vidh->compat);
				goto bad_vid_hdr;
			}

			if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
				ubi_err("bad lnum %d", aeb->lnum);
				goto bad_vid_hdr;
			}

			if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
				ubi_err("bad used_ebs %d", av->used_ebs);
				goto bad_vid_hdr;
			}

			if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
				ubi_err("bad data_pad %d", av->data_pad);
				goto bad_vid_hdr;
			}
		}

		if (!last_aeb)
			continue;

		/* vidh still holds the header of the highest-LEB PEB here */
		if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
			ubi_err("bad highest_lnum %d", av->highest_lnum);
			goto bad_vid_hdr;
		}

		if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
			ubi_err("bad last_data_size %d", av->last_data_size);
			goto bad_vid_hdr;
		}
	}

	/*
	 * Make sure that all the physical eraseblocks are in one of the lists
	 * or trees.
	 */
	buf = kzalloc(ubi->peb_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Mark bad PEBs as accounted for - they appear on no list */
	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_io_is_bad(ubi, pnum);
		if (err < 0) {
			kfree(buf);
			return err;
		} else if (err)
			buf[pnum] = 1;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->free, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->corr, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->erase, u.list)
		buf[aeb->pnum] = 1;

	list_for_each_entry(aeb, &ai->alien, u.list)
		buf[aeb->pnum] = 1;

	/* Report every PEB that no tree or list claimed */
	err = 0;
	for (pnum = 0; pnum < ubi->peb_count; pnum++)
		if (!buf[pnum]) {
			ubi_err("PEB %d is not referred", pnum);
			err = 1;
		}

	kfree(buf);
	if (err)
		goto out;
	return 0;

bad_aeb:
	ubi_err("bad attaching information about LEB %d", aeb->lnum);
	ubi_dump_aeb(aeb, 0);
	ubi_dump_av(av);
	goto out;

bad_av:
	ubi_err("bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	goto out;

bad_vid_hdr:
	ubi_err("bad attaching information about volume %d", av->vol_id);
	ubi_dump_av(av);
	ubi_dump_vid_hdr(vidh);

	/* fall through to out: dump a stack trace for all failure paths */
out:
	dump_stack();
	return -EINVAL;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a56133585e9..6c3fb5ab20f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,10 @@
27 * module load parameters or the kernel boot parameters. If MTD devices were 27 * module load parameters or the kernel boot parameters. If MTD devices were
28 * specified, UBI does not attach any MTD device, but it is possible to do 28 * specified, UBI does not attach any MTD device, but it is possible to do
29 * later using the "UBI control device". 29 * later using the "UBI control device".
30 *
31 * At the moment we only attach UBI devices by scanning, which will become a
32 * bottleneck when flashes reach certain large size. Then one may improve UBI
33 * and add other methods, although it does not seem to be easy to do.
30 */ 34 */
31 35
32#include <linux/err.h> 36#include <linux/err.h>
@@ -36,7 +40,6 @@
36#include <linux/namei.h> 40#include <linux/namei.h>
37#include <linux/stat.h> 41#include <linux/stat.h>
38#include <linux/miscdevice.h> 42#include <linux/miscdevice.h>
39#include <linux/mtd/partitions.h>
40#include <linux/log2.h> 43#include <linux/log2.h>
41#include <linux/kthread.h> 44#include <linux/kthread.h>
42#include <linux/kernel.h> 45#include <linux/kernel.h>
@@ -46,12 +49,6 @@
46/* Maximum length of the 'mtd=' parameter */ 49/* Maximum length of the 'mtd=' parameter */
47#define MTD_PARAM_LEN_MAX 64 50#define MTD_PARAM_LEN_MAX 64
48 51
49/* Maximum number of comma-separated items in the 'mtd=' parameter */
50#define MTD_PARAM_MAX_COUNT 3
51
52/* Maximum value for the number of bad PEBs per 1024 PEBs */
53#define MAX_MTD_UBI_BEB_LIMIT 768
54
55#ifdef CONFIG_MTD_UBI_MODULE 52#ifdef CONFIG_MTD_UBI_MODULE
56#define ubi_is_module() 1 53#define ubi_is_module() 1
57#else 54#else
@@ -63,12 +60,10 @@
63 * @name: MTD character device node path, MTD device name, or MTD device number 60 * @name: MTD character device node path, MTD device name, or MTD device number
64 * string 61 * string
65 * @vid_hdr_offs: VID header offset 62 * @vid_hdr_offs: VID header offset
66 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
67 */ 63 */
68struct mtd_dev_param { 64struct mtd_dev_param {
69 char name[MTD_PARAM_LEN_MAX]; 65 char name[MTD_PARAM_LEN_MAX];
70 int vid_hdr_offs; 66 int vid_hdr_offs;
71 int max_beb_per1024;
72}; 67};
73 68
74/* Numbers of elements set in the @mtd_dev_param array */ 69/* Numbers of elements set in the @mtd_dev_param array */
@@ -76,10 +71,7 @@ static int __initdata mtd_devs;
76 71
77/* MTD devices specification parameters */ 72/* MTD devices specification parameters */
78static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES]; 73static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
79#ifdef CONFIG_MTD_UBI_FASTMAP 74
80/* UBI module parameter to enable fastmap automatically on non-fastmap images */
81static bool fm_autoconvert;
82#endif
83/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 75/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
84struct class *ubi_class; 76struct class *ubi_class;
85 77
@@ -156,19 +148,6 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
156 148
157 ubi_do_get_device_info(ubi, &nt.di); 149 ubi_do_get_device_info(ubi, &nt.di);
158 ubi_do_get_volume_info(ubi, vol, &nt.vi); 150 ubi_do_get_volume_info(ubi, vol, &nt.vi);
159
160#ifdef CONFIG_MTD_UBI_FASTMAP
161 switch (ntype) {
162 case UBI_VOLUME_ADDED:
163 case UBI_VOLUME_REMOVED:
164 case UBI_VOLUME_RESIZED:
165 case UBI_VOLUME_RENAMED:
166 if (ubi_update_fastmap(ubi)) {
167 ubi_err("Unable to update fastmap!");
168 ubi_ro_mode(ubi);
169 }
170 }
171#endif
172 return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); 151 return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
173} 152}
174 153
@@ -575,10 +554,10 @@ static void uif_close(struct ubi_device *ubi)
575} 554}
576 555
577/** 556/**
578 * ubi_free_internal_volumes - free internal volumes. 557 * free_internal_volumes - free internal volumes.
579 * @ubi: UBI device description object 558 * @ubi: UBI device description object
580 */ 559 */
581void ubi_free_internal_volumes(struct ubi_device *ubi) 560static void free_internal_volumes(struct ubi_device *ubi)
582{ 561{
583 int i; 562 int i;
584 563
@@ -589,38 +568,62 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
589 } 568 }
590} 569}
591 570
592static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024) 571/**
572 * attach_by_scanning - attach an MTD device using scanning method.
573 * @ubi: UBI device descriptor
574 *
575 * This function returns zero in case of success and a negative error code in
576 * case of failure.
577 *
578 * Note, currently this is the only method to attach UBI devices. Hopefully in
579 * the future we'll have more scalable attaching methods and avoid full media
580 * scanning. But even in this case scanning will be needed as a fall-back
581 * attaching method if there are some on-flash table corruptions.
582 */
583static int attach_by_scanning(struct ubi_device *ubi)
593{ 584{
594 int limit, device_pebs; 585 int err;
595 uint64_t device_size; 586 struct ubi_scan_info *si;
596 587
597 if (!max_beb_per1024) 588 si = ubi_scan(ubi);
598 return 0; 589 if (IS_ERR(si))
590 return PTR_ERR(si);
599 591
600 /* 592 ubi->bad_peb_count = si->bad_peb_count;
601 * Here we are using size of the entire flash chip and 593 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
602 * not just the MTD partition size because the maximum 594 ubi->corr_peb_count = si->corr_peb_count;
603 * number of bad eraseblocks is a percentage of the 595 ubi->max_ec = si->max_ec;
604 * whole device and bad eraseblocks are not fairly 596 ubi->mean_ec = si->mean_ec;
605 * distributed over the flash chip. So the worst case 597 ubi_msg("max. sequence number: %llu", si->max_sqnum);
606 * is that all the bad eraseblocks of the chip are in 598
607 * the MTD partition we are attaching (ubi->mtd). 599 err = ubi_read_volume_table(ubi, si);
608 */ 600 if (err)
609 device_size = mtd_get_device_size(ubi->mtd); 601 goto out_si;
610 device_pebs = mtd_div_by_eb(device_size, ubi->mtd); 602
611 limit = mult_frac(device_pebs, max_beb_per1024, 1024); 603 err = ubi_wl_init_scan(ubi, si);
604 if (err)
605 goto out_vtbl;
612 606
613 /* Round it up */ 607 err = ubi_eba_init_scan(ubi, si);
614 if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs) 608 if (err)
615 limit += 1; 609 goto out_wl;
616 610
617 return limit; 611 ubi_scan_destroy_si(si);
612 return 0;
613
614out_wl:
615 ubi_wl_close(ubi);
616out_vtbl:
617 free_internal_volumes(ubi);
618 vfree(ubi->vtbl);
619out_si:
620 ubi_scan_destroy_si(si);
621 return err;
618} 622}
619 623
620/** 624/**
621 * io_init - initialize I/O sub-system for a given UBI device. 625 * io_init - initialize I/O sub-system for a given UBI device.
622 * @ubi: UBI device description object 626 * @ubi: UBI device description object
623 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
624 * 627 *
625 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are 628 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
626 * assumed: 629 * assumed:
@@ -633,11 +636,8 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
633 * This function returns zero in case of success and a negative error code in 636 * This function returns zero in case of success and a negative error code in
634 * case of failure. 637 * case of failure.
635 */ 638 */
636static int io_init(struct ubi_device *ubi, int max_beb_per1024) 639static int io_init(struct ubi_device *ubi)
637{ 640{
638 dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
639 dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
640
641 if (ubi->mtd->numeraseregions != 0) { 641 if (ubi->mtd->numeraseregions != 0) {
642 /* 642 /*
643 * Some flashes have several erase regions. Different regions 643 * Some flashes have several erase regions. Different regions
@@ -664,10 +664,8 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
664 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); 664 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
665 ubi->flash_size = ubi->mtd->size; 665 ubi->flash_size = ubi->mtd->size;
666 666
667 if (mtd_can_have_bb(ubi->mtd)) { 667 if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
668 ubi->bad_allowed = 1; 668 ubi->bad_allowed = 1;
669 ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
670 }
671 669
672 if (ubi->mtd->type == MTD_NORFLASH) { 670 if (ubi->mtd->type == MTD_NORFLASH) {
673 ubi_assert(ubi->mtd->writesize == 1); 671 ubi_assert(ubi->mtd->writesize == 1);
@@ -709,11 +707,11 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
709 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); 707 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
710 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); 708 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
711 709
712 dbg_gen("min_io_size %d", ubi->min_io_size); 710 dbg_msg("min_io_size %d", ubi->min_io_size);
713 dbg_gen("max_write_size %d", ubi->max_write_size); 711 dbg_msg("max_write_size %d", ubi->max_write_size);
714 dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); 712 dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
715 dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize); 713 dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
716 dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize); 714 dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
717 715
718 if (ubi->vid_hdr_offset == 0) 716 if (ubi->vid_hdr_offset == 0)
719 /* Default offset */ 717 /* Default offset */
@@ -730,10 +728,10 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
730 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; 728 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
731 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 729 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
732 730
733 dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset); 731 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
734 dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); 732 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
735 dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift); 733 dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
736 dbg_gen("leb_start %d", ubi->leb_start); 734 dbg_msg("leb_start %d", ubi->leb_start);
737 735
738 /* The shift must be aligned to 32-bit boundary */ 736 /* The shift must be aligned to 32-bit boundary */
739 if (ubi->vid_hdr_shift % 4) { 737 if (ubi->vid_hdr_shift % 4) {
@@ -759,7 +757,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
759 ubi->max_erroneous = ubi->peb_count / 10; 757 ubi->max_erroneous = ubi->peb_count / 10;
760 if (ubi->max_erroneous < 16) 758 if (ubi->max_erroneous < 16)
761 ubi->max_erroneous = 16; 759 ubi->max_erroneous = 16;
762 dbg_gen("max_erroneous %d", ubi->max_erroneous); 760 dbg_msg("max_erroneous %d", ubi->max_erroneous);
763 761
764 /* 762 /*
765 * It may happen that EC and VID headers are situated in one minimal 763 * It may happen that EC and VID headers are situated in one minimal
@@ -767,24 +765,36 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
767 * read-only mode. 765 * read-only mode.
768 */ 766 */
769 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { 767 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
770 ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); 768 ubi_warn("EC and VID headers are in the same minimal I/O unit, "
769 "switch to read-only mode");
771 ubi->ro_mode = 1; 770 ubi->ro_mode = 1;
772 } 771 }
773 772
774 ubi->leb_size = ubi->peb_size - ubi->leb_start; 773 ubi->leb_size = ubi->peb_size - ubi->leb_start;
775 774
776 if (!(ubi->mtd->flags & MTD_WRITEABLE)) { 775 if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
777 ubi_msg("MTD device %d is write-protected, attach in read-only mode", 776 ubi_msg("MTD device %d is write-protected, attach in "
778 ubi->mtd->index); 777 "read-only mode", ubi->mtd->index);
779 ubi->ro_mode = 1; 778 ubi->ro_mode = 1;
780 } 779 }
781 780
781 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
782 ubi->peb_size, ubi->peb_size >> 10);
783 ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
784 ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
785 if (ubi->hdrs_min_io_size != ubi->min_io_size)
786 ubi_msg("sub-page size: %d",
787 ubi->hdrs_min_io_size);
788 ubi_msg("VID header offset: %d (aligned %d)",
789 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
790 ubi_msg("data offset: %d", ubi->leb_start);
791
782 /* 792 /*
783 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But 793 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
784 * unfortunately, MTD does not provide this information. We should loop 794 * unfortunately, MTD does not provide this information. We should loop
785 * over all physical eraseblocks and invoke mtd->block_is_bad() for 795 * over all physical eraseblocks and invoke mtd->block_is_bad() for
786 * each physical eraseblock. So, we leave @ubi->bad_peb_count 796 * each physical eraseblock. So, we skip ubi->bad_peb_count
787 * uninitialized so far. 797 * uninitialized and initialize it after scanning.
788 */ 798 */
789 799
790 return 0; 800 return 0;
@@ -795,7 +805,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
795 * @ubi: UBI device description object 805 * @ubi: UBI device description object
796 * @vol_id: ID of the volume to re-size 806 * @vol_id: ID of the volume to re-size
797 * 807 *
798 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in 808 * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
799 * the volume table to the largest possible size. See comments in ubi-header.h 809 * the volume table to the largest possible size. See comments in ubi-header.h
800 * for more description of the flag. Returns zero in case of success and a 810 * for more description of the flag. Returns zero in case of success and a
801 * negative error code in case of failure. 811 * negative error code in case of failure.
@@ -806,11 +816,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
806 struct ubi_volume *vol = ubi->volumes[vol_id]; 816 struct ubi_volume *vol = ubi->volumes[vol_id];
807 int err, old_reserved_pebs = vol->reserved_pebs; 817 int err, old_reserved_pebs = vol->reserved_pebs;
808 818
809 if (ubi->ro_mode) {
810 ubi_warn("skip auto-resize because of R/O mode");
811 return 0;
812 }
813
814 /* 819 /*
815 * Clear the auto-resize flag in the volume in-memory copy of the 820 * Clear the auto-resize flag in the volume in-memory copy of the
816 * volume table, and 'ubi_resize_volume()' will propagate this change 821 * volume table, and 'ubi_resize_volume()' will propagate this change
@@ -825,7 +830,8 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
825 * No available PEBs to re-size the volume, clear the flag on 830 * No available PEBs to re-size the volume, clear the flag on
826 * flash and exit. 831 * flash and exit.
827 */ 832 */
828 vtbl_rec = ubi->vtbl[vol_id]; 833 memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
834 sizeof(struct ubi_vtbl_record));
829 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 835 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
830 if (err) 836 if (err)
831 ubi_err("cannot clean auto-resize flag for volume %d", 837 ubi_err("cannot clean auto-resize flag for volume %d",
@@ -851,7 +857,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
851 * @mtd: MTD device description object 857 * @mtd: MTD device description object
852 * @ubi_num: number to assign to the new UBI device 858 * @ubi_num: number to assign to the new UBI device
853 * @vid_hdr_offset: VID header offset 859 * @vid_hdr_offset: VID header offset
854 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
855 * 860 *
856 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number 861 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
857 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 862 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -862,18 +867,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
862 * Note, the invocations of this function has to be serialized by the 867 * Note, the invocations of this function has to be serialized by the
863 * @ubi_devices_mutex. 868 * @ubi_devices_mutex.
864 */ 869 */
865int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, 870int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
866 int vid_hdr_offset, int max_beb_per1024)
867{ 871{
868 struct ubi_device *ubi; 872 struct ubi_device *ubi;
869 int i, err, ref = 0; 873 int i, err, ref = 0;
870 874
871 if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
872 return -EINVAL;
873
874 if (!max_beb_per1024)
875 max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
876
877 /* 875 /*
878 * Check if we already have the same MTD device attached. 876 * Check if we already have the same MTD device attached.
879 * 877 *
@@ -883,7 +881,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
883 for (i = 0; i < UBI_MAX_DEVICES; i++) { 881 for (i = 0; i < UBI_MAX_DEVICES; i++) {
884 ubi = ubi_devices[i]; 882 ubi = ubi_devices[i];
885 if (ubi && mtd->index == ubi->mtd->index) { 883 if (ubi && mtd->index == ubi->mtd->index) {
886 ubi_err("mtd%d is already attached to ubi%d", 884 dbg_err("mtd%d is already attached to ubi%d",
887 mtd->index, i); 885 mtd->index, i);
888 return -EEXIST; 886 return -EEXIST;
889 } 887 }
@@ -898,8 +896,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
898 * no sense to attach emulated MTD devices, so we prohibit this. 896 * no sense to attach emulated MTD devices, so we prohibit this.
899 */ 897 */
900 if (mtd->type == MTD_UBIVOLUME) { 898 if (mtd->type == MTD_UBIVOLUME) {
901 ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI", 899 ubi_err("refuse attaching mtd%d - it is already emulated on "
902 mtd->index); 900 "top of UBI", mtd->index);
903 return -EINVAL; 901 return -EINVAL;
904 } 902 }
905 903
@@ -909,7 +907,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
909 if (!ubi_devices[ubi_num]) 907 if (!ubi_devices[ubi_num])
910 break; 908 break;
911 if (ubi_num == UBI_MAX_DEVICES) { 909 if (ubi_num == UBI_MAX_DEVICES) {
912 ubi_err("only %d UBI devices may be created", 910 dbg_err("only %d UBI devices may be created",
913 UBI_MAX_DEVICES); 911 UBI_MAX_DEVICES);
914 return -ENFILE; 912 return -ENFILE;
915 } 913 }
@@ -919,7 +917,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
919 917
920 /* Make sure ubi_num is not busy */ 918 /* Make sure ubi_num is not busy */
921 if (ubi_devices[ubi_num]) { 919 if (ubi_devices[ubi_num]) {
922 ubi_err("ubi%d already exists", ubi_num); 920 dbg_err("ubi%d already exists", ubi_num);
923 return -EEXIST; 921 return -EEXIST;
924 } 922 }
925 } 923 }
@@ -933,62 +931,36 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
933 ubi->vid_hdr_offset = vid_hdr_offset; 931 ubi->vid_hdr_offset = vid_hdr_offset;
934 ubi->autoresize_vol_id = -1; 932 ubi->autoresize_vol_id = -1;
935 933
936#ifdef CONFIG_MTD_UBI_FASTMAP
937 ubi->fm_pool.used = ubi->fm_pool.size = 0;
938 ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
939
940 /*
941 * fm_pool.max_size is 5% of the total number of PEBs but it's also
942 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
943 */
944 ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
945 ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
946 if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
947 ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
948
949 ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
950 ubi->fm_disabled = !fm_autoconvert;
951
952 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
953 <= UBI_FM_MAX_START) {
954 ubi_err("More than %i PEBs are needed for fastmap, sorry.",
955 UBI_FM_MAX_START);
956 ubi->fm_disabled = 1;
957 }
958
959 ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
960 ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
961#else
962 ubi->fm_disabled = 1;
963#endif
964 mutex_init(&ubi->buf_mutex); 934 mutex_init(&ubi->buf_mutex);
965 mutex_init(&ubi->ckvol_mutex); 935 mutex_init(&ubi->ckvol_mutex);
966 mutex_init(&ubi->device_mutex); 936 mutex_init(&ubi->device_mutex);
967 spin_lock_init(&ubi->volumes_lock); 937 spin_lock_init(&ubi->volumes_lock);
968 mutex_init(&ubi->fm_mutex);
969 init_rwsem(&ubi->fm_sem);
970 938
971 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); 939 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
940 dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
941 dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
972 942
973 err = io_init(ubi, max_beb_per1024); 943 err = io_init(ubi);
974 if (err) 944 if (err)
975 goto out_free; 945 goto out_free;
976 946
977 err = -ENOMEM; 947 err = -ENOMEM;
978 ubi->peb_buf = vmalloc(ubi->peb_size); 948 ubi->peb_buf1 = vmalloc(ubi->peb_size);
979 if (!ubi->peb_buf) 949 if (!ubi->peb_buf1)
980 goto out_free; 950 goto out_free;
981 951
982#ifdef CONFIG_MTD_UBI_FASTMAP 952 ubi->peb_buf2 = vmalloc(ubi->peb_size);
983 ubi->fm_size = ubi_calc_fm_size(ubi); 953 if (!ubi->peb_buf2)
984 ubi->fm_buf = vzalloc(ubi->fm_size);
985 if (!ubi->fm_buf)
986 goto out_free; 954 goto out_free;
987#endif 955
988 err = ubi_attach(ubi, 0); 956 err = ubi_debugging_init_dev(ubi);
989 if (err) { 957 if (err)
990 ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
991 goto out_free; 958 goto out_free;
959
960 err = attach_by_scanning(ubi);
961 if (err) {
962 dbg_err("failed to attach by scanning, error %d", err);
963 goto out_debugging;
992 } 964 }
993 965
994 if (ubi->autoresize_vol_id != -1) { 966 if (ubi->autoresize_vol_id != -1) {
@@ -1013,24 +985,23 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
1013 goto out_debugfs; 985 goto out_debugfs;
1014 } 986 }
1015 987
1016 ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d", 988 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
1017 mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num); 989 ubi_msg("MTD device name: \"%s\"", mtd->name);
1018 ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes", 990 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
1019 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size); 991 ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
1020 ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d", 992 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
1021 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size); 993 ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
1022 ubi_msg("VID header offset: %d (aligned %d), data offset: %d", 994 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
1023 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start); 995 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
1024 ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d", 996 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
1025 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count); 997 ubi_msg("number of user volumes: %d",
1026 ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d", 998 ubi->vol_count - UBI_INT_VOL_COUNT);
1027 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT, 999 ubi_msg("available PEBs: %d", ubi->avail_pebs);
1028 ubi->vtbl_slots); 1000 ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
1029 ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u", 1001 ubi_msg("number of PEBs reserved for bad PEB handling: %d",
1030 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD, 1002 ubi->beb_rsvd_pebs);
1031 ubi->image_seq); 1003 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
1032 ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d", 1004 ubi_msg("image sequence number: %d", ubi->image_seq);
1033 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
1034 1005
1035 /* 1006 /*
1036 * The below lock makes sure we do not race with 'ubi_thread()' which 1007 * The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1053,11 +1024,13 @@ out_uif:
1053 uif_close(ubi); 1024 uif_close(ubi);
1054out_detach: 1025out_detach:
1055 ubi_wl_close(ubi); 1026 ubi_wl_close(ubi);
1056 ubi_free_internal_volumes(ubi); 1027 free_internal_volumes(ubi);
1057 vfree(ubi->vtbl); 1028 vfree(ubi->vtbl);
1029out_debugging:
1030 ubi_debugging_exit_dev(ubi);
1058out_free: 1031out_free:
1059 vfree(ubi->peb_buf); 1032 vfree(ubi->peb_buf1);
1060 vfree(ubi->fm_buf); 1033 vfree(ubi->peb_buf2);
1061 if (ref) 1034 if (ref)
1062 put_device(&ubi->dev); 1035 put_device(&ubi->dev);
1063 else 1036 else
@@ -1106,12 +1079,8 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1106 1079
1107 ubi_assert(ubi_num == ubi->ubi_num); 1080 ubi_assert(ubi_num == ubi->ubi_num);
1108 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); 1081 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
1109 ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 1082 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
1110#ifdef CONFIG_MTD_UBI_FASTMAP 1083
1111 /* If we don't write a new fastmap at detach time we lose all
1112 * EC updates that have been made since the last written fastmap. */
1113 ubi_update_fastmap(ubi);
1114#endif
1115 /* 1084 /*
1116 * Before freeing anything, we have to stop the background thread to 1085 * Before freeing anything, we have to stop the background thread to
1117 * prevent it from doing anything on this device while we are freeing. 1086 * prevent it from doing anything on this device while we are freeing.
@@ -1127,13 +1096,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1127 1096
1128 ubi_debugfs_exit_dev(ubi); 1097 ubi_debugfs_exit_dev(ubi);
1129 uif_close(ubi); 1098 uif_close(ubi);
1130
1131 ubi_wl_close(ubi); 1099 ubi_wl_close(ubi);
1132 ubi_free_internal_volumes(ubi); 1100 free_internal_volumes(ubi);
1133 vfree(ubi->vtbl); 1101 vfree(ubi->vtbl);
1134 put_mtd_device(ubi->mtd); 1102 put_mtd_device(ubi->mtd);
1135 vfree(ubi->peb_buf); 1103 ubi_debugging_exit_dev(ubi);
1136 vfree(ubi->fm_buf); 1104 vfree(ubi->peb_buf1);
1105 vfree(ubi->peb_buf2);
1137 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 1106 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
1138 put_device(&ubi->dev); 1107 put_device(&ubi->dev);
1139 return 0; 1108 return 0;
@@ -1266,7 +1235,7 @@ static int __init ubi_init(void)
1266 1235
1267 mutex_lock(&ubi_devices_mutex); 1236 mutex_lock(&ubi_devices_mutex);
1268 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 1237 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
1269 p->vid_hdr_offs, p->max_beb_per1024); 1238 p->vid_hdr_offs);
1270 mutex_unlock(&ubi_devices_mutex); 1239 mutex_unlock(&ubi_devices_mutex);
1271 if (err < 0) { 1240 if (err < 0) {
1272 ubi_err("cannot attach mtd%d", mtd->index); 1241 ubi_err("cannot attach mtd%d", mtd->index);
@@ -1312,7 +1281,7 @@ out:
1312 ubi_err("UBI error: cannot initialize UBI, error %d", err); 1281 ubi_err("UBI error: cannot initialize UBI, error %d", err);
1313 return err; 1282 return err;
1314} 1283}
1315late_initcall(ubi_init); 1284module_init(ubi_init);
1316 1285
1317static void __exit ubi_exit(void) 1286static void __exit ubi_exit(void)
1318{ 1287{
@@ -1346,7 +1315,8 @@ static int __init bytes_str_to_int(const char *str)
1346 1315
1347 result = simple_strtoul(str, &endp, 0); 1316 result = simple_strtoul(str, &endp, 0);
1348 if (str == endp || result >= INT_MAX) { 1317 if (str == endp || result >= INT_MAX) {
1349 ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str); 1318 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1319 str);
1350 return -EINVAL; 1320 return -EINVAL;
1351 } 1321 }
1352 1322
@@ -1362,7 +1332,8 @@ static int __init bytes_str_to_int(const char *str)
1362 case '\0': 1332 case '\0':
1363 break; 1333 break;
1364 default: 1334 default:
1365 ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str); 1335 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1336 str);
1366 return -EINVAL; 1337 return -EINVAL;
1367 } 1338 }
1368 1339
@@ -1383,26 +1354,27 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1383 struct mtd_dev_param *p; 1354 struct mtd_dev_param *p;
1384 char buf[MTD_PARAM_LEN_MAX]; 1355 char buf[MTD_PARAM_LEN_MAX];
1385 char *pbuf = &buf[0]; 1356 char *pbuf = &buf[0];
1386 char *tokens[MTD_PARAM_MAX_COUNT]; 1357 char *tokens[2] = {NULL, NULL};
1387 1358
1388 if (!val) 1359 if (!val)
1389 return -EINVAL; 1360 return -EINVAL;
1390 1361
1391 if (mtd_devs == UBI_MAX_DEVICES) { 1362 if (mtd_devs == UBI_MAX_DEVICES) {
1392 ubi_err("UBI error: too many parameters, max. is %d\n", 1363 printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
1393 UBI_MAX_DEVICES); 1364 UBI_MAX_DEVICES);
1394 return -EINVAL; 1365 return -EINVAL;
1395 } 1366 }
1396 1367
1397 len = strnlen(val, MTD_PARAM_LEN_MAX); 1368 len = strnlen(val, MTD_PARAM_LEN_MAX);
1398 if (len == MTD_PARAM_LEN_MAX) { 1369 if (len == MTD_PARAM_LEN_MAX) {
1399 ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n", 1370 printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
1400 val, MTD_PARAM_LEN_MAX); 1371 "max. is %d\n", val, MTD_PARAM_LEN_MAX);
1401 return -EINVAL; 1372 return -EINVAL;
1402 } 1373 }
1403 1374
1404 if (len == 0) { 1375 if (len == 0) {
1405 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n"); 1376 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
1377 "ignored\n");
1406 return 0; 1378 return 0;
1407 } 1379 }
1408 1380
@@ -1412,11 +1384,12 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1412 if (buf[len - 1] == '\n') 1384 if (buf[len - 1] == '\n')
1413 buf[len - 1] = '\0'; 1385 buf[len - 1] = '\0';
1414 1386
1415 for (i = 0; i < MTD_PARAM_MAX_COUNT; i++) 1387 for (i = 0; i < 2; i++)
1416 tokens[i] = strsep(&pbuf, ","); 1388 tokens[i] = strsep(&pbuf, ",");
1417 1389
1418 if (pbuf) { 1390 if (pbuf) {
1419 ubi_err("UBI error: too many arguments at \"%s\"\n", val); 1391 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
1392 val);
1420 return -EINVAL; 1393 return -EINVAL;
1421 } 1394 }
1422 1395
@@ -1429,36 +1402,24 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1429 if (p->vid_hdr_offs < 0) 1402 if (p->vid_hdr_offs < 0)
1430 return p->vid_hdr_offs; 1403 return p->vid_hdr_offs;
1431 1404
1432 if (tokens[2]) {
1433 int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);
1434
1435 if (err) {
1436 ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s",
1437 tokens[2]);
1438 return -EINVAL;
1439 }
1440 }
1441
1442 mtd_devs += 1; 1405 mtd_devs += 1;
1443 return 0; 1406 return 0;
1444} 1407}
1445 1408
1446module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1409module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
1447MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n" 1410MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
1411 "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
1448 "Multiple \"mtd\" parameters may be specified.\n" 1412 "Multiple \"mtd\" parameters may be specified.\n"
1449 "MTD devices may be specified by their number, name, or path to the MTD character device node.\n" 1413 "MTD devices may be specified by their number, name, or "
1450 "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n" 1414 "path to the MTD character device node.\n"
1451 "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value (" 1415 "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
1452 __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n" 1416 "header position to be used by UBI.\n"
1453 "\n" 1417 "Example 1: mtd=/dev/mtd0 - attach MTD device "
1454 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n" 1418 "/dev/mtd0.\n"
1455 "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n" 1419 "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
1456 "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n" 1420 "with name \"content\" using VID header offset 1984, and "
1457 "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device)."); 1421 "MTD device number 4 with default VID header offset.");
1458#ifdef CONFIG_MTD_UBI_FASTMAP 1422
1459module_param(fm_autoconvert, bool, 0644);
1460MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
1461#endif
1462MODULE_VERSION(__stringify(UBI_VERSION)); 1423MODULE_VERSION(__stringify(UBI_VERSION));
1463MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1424MODULE_DESCRIPTION("UBI - Unsorted Block Images");
1464MODULE_AUTHOR("Artem Bityutskiy"); 1425MODULE_AUTHOR("Artem Bityutskiy");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index dfcc65b33e9..3320a50ba4f 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -63,7 +63,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
63 users = vol->readers + vol->writers + vol->exclusive; 63 users = vol->readers + vol->writers + vol->exclusive;
64 ubi_assert(users > 0); 64 ubi_assert(users > 0);
65 if (users > 1) { 65 if (users > 1) {
66 ubi_err("%d users for volume %d", users, vol->vol_id); 66 dbg_err("%d users for volume %d", users, vol->vol_id);
67 err = -EBUSY; 67 err = -EBUSY;
68 } else { 68 } else {
69 vol->readers = vol->writers = 0; 69 vol->readers = vol->writers = 0;
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
140 vol->updating = 0; 140 vol->updating = 0;
141 vfree(vol->upd_buf); 141 vfree(vol->upd_buf);
142 } else if (vol->changing_leb) { 142 } else if (vol->changing_leb) {
143 dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel", 143 dbg_gen("only %lld of %lld bytes received for atomic LEB change"
144 vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num, 144 " for volume %d:%d, cancel", vol->upd_received,
145 vol->vol_id); 145 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
146 vol->changing_leb = 0; 146 vol->changing_leb = 0;
147 vfree(vol->upd_buf); 147 vfree(vol->upd_buf);
148 } 148 }
@@ -159,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
159 159
160 if (vol->updating) { 160 if (vol->updating) {
161 /* Update is in progress, seeking is prohibited */ 161 /* Update is in progress, seeking is prohibited */
162 ubi_err("updating"); 162 dbg_err("updating");
163 return -EBUSY; 163 return -EBUSY;
164 } 164 }
165 165
@@ -178,7 +178,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
178 } 178 }
179 179
180 if (new_offset < 0 || new_offset > vol->used_bytes) { 180 if (new_offset < 0 || new_offset > vol->used_bytes) {
181 ubi_err("bad seek %lld", new_offset); 181 dbg_err("bad seek %lld", new_offset);
182 return -EINVAL; 182 return -EINVAL;
183 } 183 }
184 184
@@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
189 return new_offset; 189 return new_offset;
190} 190}
191 191
192static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, 192static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
193 int datasync)
194{ 193{
195 struct ubi_volume_desc *desc = file->private_data; 194 struct ubi_volume_desc *desc = file->private_data;
196 struct ubi_device *ubi = desc->vol->ubi; 195 struct ubi_device *ubi = desc->vol->ubi;
@@ -217,11 +216,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
217 count, *offp, vol->vol_id); 216 count, *offp, vol->vol_id);
218 217
219 if (vol->updating) { 218 if (vol->updating) {
220 ubi_err("updating"); 219 dbg_err("updating");
221 return -EBUSY; 220 return -EBUSY;
222 } 221 }
223 if (vol->upd_marker) { 222 if (vol->upd_marker) {
224 ubi_err("damaged volume, update marker is set"); 223 dbg_err("damaged volume, update marker is set");
225 return -EBADF; 224 return -EBADF;
226 } 225 }
227 if (*offp == vol->used_bytes || count == 0) 226 if (*offp == vol->used_bytes || count == 0)
@@ -301,7 +300,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
301 300
302 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); 301 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
303 if (off & (ubi->min_io_size - 1)) { 302 if (off & (ubi->min_io_size - 1)) {
304 ubi_err("unaligned position"); 303 dbg_err("unaligned position");
305 return -EINVAL; 304 return -EINVAL;
306 } 305 }
307 306
@@ -310,7 +309,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
310 309
311 /* We can write only in fractions of the minimum I/O unit */ 310 /* We can write only in fractions of the minimum I/O unit */
312 if (count & (ubi->min_io_size - 1)) { 311 if (count & (ubi->min_io_size - 1)) {
313 ubi_err("unaligned write length"); 312 dbg_err("unaligned write length");
314 return -EINVAL; 313 return -EINVAL;
315 } 314 }
316 315
@@ -335,7 +334,8 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
335 break; 334 break;
336 } 335 }
337 336
338 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len); 337 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
338 UBI_UNKNOWN);
339 if (err) 339 if (err)
340 break; 340 break;
341 341
@@ -477,6 +477,9 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
477 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || 477 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
478 req.bytes < 0 || req.lnum >= vol->usable_leb_size) 478 req.bytes < 0 || req.lnum >= vol->usable_leb_size)
479 break; 479 break;
480 if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
481 req.dtype != UBI_UNKNOWN)
482 break;
480 483
481 err = get_exclusive(desc); 484 err = get_exclusive(desc);
482 if (err < 0) 485 if (err < 0)
@@ -515,7 +518,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
515 if (err) 518 if (err)
516 break; 519 break;
517 520
518 err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); 521 err = ubi_wl_flush(ubi);
519 break; 522 break;
520 } 523 }
521 524
@@ -529,7 +532,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
529 err = -EFAULT; 532 err = -EFAULT;
530 break; 533 break;
531 } 534 }
532 err = ubi_leb_map(desc, req.lnum); 535 err = ubi_leb_map(desc, req.lnum, req.dtype);
533 break; 536 break;
534 } 537 }
535 538
@@ -629,9 +632,6 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
629 if (req->alignment != 1 && n) 632 if (req->alignment != 1 && n)
630 goto bad; 633 goto bad;
631 634
632 if (!req->name[0] || !req->name_len)
633 goto bad;
634
635 if (req->name_len > UBI_VOL_NAME_MAX) { 635 if (req->name_len > UBI_VOL_NAME_MAX) {
636 err = -ENAMETOOLONG; 636 err = -ENAMETOOLONG;
637 goto bad; 637 goto bad;
@@ -644,8 +644,8 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
644 return 0; 644 return 0;
645 645
646bad: 646bad:
647 ubi_err("bad volume creation request"); 647 dbg_err("bad volume creation request");
648 ubi_dump_mkvol_req(req); 648 ubi_dbg_dump_mkvol_req(req);
649 return err; 649 return err;
650} 650}
651 651
@@ -710,12 +710,12 @@ static int rename_volumes(struct ubi_device *ubi,
710 for (i = 0; i < req->count - 1; i++) { 710 for (i = 0; i < req->count - 1; i++) {
711 for (n = i + 1; n < req->count; n++) { 711 for (n = i + 1; n < req->count; n++) {
712 if (req->ents[i].vol_id == req->ents[n].vol_id) { 712 if (req->ents[i].vol_id == req->ents[n].vol_id) {
713 ubi_err("duplicated volume id %d", 713 dbg_err("duplicated volume id %d",
714 req->ents[i].vol_id); 714 req->ents[i].vol_id);
715 return -EINVAL; 715 return -EINVAL;
716 } 716 }
717 if (!strcmp(req->ents[i].name, req->ents[n].name)) { 717 if (!strcmp(req->ents[i].name, req->ents[n].name)) {
718 ubi_err("duplicated volume name \"%s\"", 718 dbg_err("duplicated volume name \"%s\"",
719 req->ents[i].name); 719 req->ents[i].name);
720 return -EINVAL; 720 return -EINVAL;
721 } 721 }
@@ -738,7 +738,7 @@ static int rename_volumes(struct ubi_device *ubi,
738 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); 738 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
739 if (IS_ERR(re->desc)) { 739 if (IS_ERR(re->desc)) {
740 err = PTR_ERR(re->desc); 740 err = PTR_ERR(re->desc);
741 ubi_err("cannot open volume %d, error %d", vol_id, err); 741 dbg_err("cannot open volume %d, error %d", vol_id, err);
742 kfree(re); 742 kfree(re);
743 goto out_free; 743 goto out_free;
744 } 744 }
@@ -754,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
754 re->new_name_len = name_len; 754 re->new_name_len = name_len;
755 memcpy(re->new_name, name, name_len); 755 memcpy(re->new_name, name, name_len);
756 list_add_tail(&re->list, &rename_list); 756 list_add_tail(&re->list, &rename_list);
757 dbg_gen("will rename volume %d from \"%s\" to \"%s\"", 757 dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
758 vol_id, re->desc->vol->name, name); 758 vol_id, re->desc->vol->name, name);
759 } 759 }
760 760
@@ -797,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi,
797 continue; 797 continue;
798 798
799 /* The volume exists but busy, or an error occurred */ 799 /* The volume exists but busy, or an error occurred */
800 ubi_err("cannot open volume \"%s\", error %d", 800 dbg_err("cannot open volume \"%s\", error %d",
801 re->new_name, err); 801 re->new_name, err);
802 goto out_free; 802 goto out_free;
803 } 803 }
@@ -812,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
812 re1->remove = 1; 812 re1->remove = 1;
813 re1->desc = desc; 813 re1->desc = desc;
814 list_add(&re1->list, &rename_list); 814 list_add(&re1->list, &rename_list);
815 dbg_gen("will remove volume %d, name \"%s\"", 815 dbg_msg("will remove volume %d, name \"%s\"",
816 re1->desc->vol->vol_id, re1->desc->vol->name); 816 re1->desc->vol->vol_id, re1->desc->vol->name);
817 } 817 }
818 818
@@ -943,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
943 { 943 {
944 struct ubi_rnvol_req *req; 944 struct ubi_rnvol_req *req;
945 945
946 dbg_gen("re-name volumes"); 946 dbg_msg("re-name volumes");
947 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); 947 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
948 if (!req) { 948 if (!req) {
949 err = -ENOMEM; 949 err = -ENOMEM;
@@ -1011,8 +1011,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
1011 * 'ubi_attach_mtd_dev()'. 1011 * 'ubi_attach_mtd_dev()'.
1012 */ 1012 */
1013 mutex_lock(&ubi_devices_mutex); 1013 mutex_lock(&ubi_devices_mutex);
1014 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset, 1014 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
1015 req.max_beb_per1024);
1016 mutex_unlock(&ubi_devices_mutex); 1015 mutex_unlock(&ubi_devices_mutex);
1017 if (err < 0) 1016 if (err < 0)
1018 put_mtd_device(mtd); 1017 put_mtd_device(mtd);
@@ -1028,7 +1027,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
1028 { 1027 {
1029 int ubi_num; 1028 int ubi_num;
1030 1029
1031 dbg_gen("detach MTD device"); 1030 dbg_gen("dettach MTD device");
1032 err = get_user(ubi_num, (__user int32_t *)argp); 1031 err = get_user(ubi_num, (__user int32_t *)argp);
1033 if (err) { 1032 if (err) {
1034 err = -EFAULT; 1033 err = -EFAULT;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 63cb1d7236c..ab80c0debac 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -18,203 +18,243 @@
18 * Author: Artem Bityutskiy (Битюцкий Артём) 18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */ 19 */
20 20
21/*
22 * Here we keep all the UBI debugging stuff which should normally be disabled
23 * and compiled-out, but it is extremely helpful when hunting bugs or doing big
24 * changes.
25 */
26
27#ifdef CONFIG_MTD_UBI_DEBUG
28
21#include "ubi.h" 29#include "ubi.h"
22#include <linux/debugfs.h> 30#include <linux/debugfs.h>
23#include <linux/uaccess.h> 31#include <linux/uaccess.h>
24#include <linux/module.h> 32#include <linux/module.h>
25 33
26
27/**
28 * ubi_dump_flash - dump a region of flash.
29 * @ubi: UBI device description object
30 * @pnum: the physical eraseblock number to dump
31 * @offset: the starting offset within the physical eraseblock to dump
32 * @len: the length of the region to dump
33 */
34void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
35{
36 int err;
37 size_t read;
38 void *buf;
39 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
40
41 buf = vmalloc(len);
42 if (!buf)
43 return;
44 err = mtd_read(ubi->mtd, addr, len, &read, buf);
45 if (err && err != -EUCLEAN) {
46 ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
47 err, len, pnum, offset, read);
48 goto out;
49 }
50
51 ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
52 len, pnum, offset);
53 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
54out:
55 vfree(buf);
56 return;
57}
58
59/** 34/**
60 * ubi_dump_ec_hdr - dump an erase counter header. 35 * ubi_dbg_dump_ec_hdr - dump an erase counter header.
61 * @ec_hdr: the erase counter header to dump 36 * @ec_hdr: the erase counter header to dump
62 */ 37 */
63void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) 38void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
64{ 39{
65 pr_err("Erase counter header dump:\n"); 40 printk(KERN_DEBUG "Erase counter header dump:\n");
66 pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic)); 41 printk(KERN_DEBUG "\tmagic %#08x\n",
67 pr_err("\tversion %d\n", (int)ec_hdr->version); 42 be32_to_cpu(ec_hdr->magic));
68 pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec)); 43 printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version);
69 pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset)); 44 printk(KERN_DEBUG "\tec %llu\n",
70 pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset)); 45 (long long)be64_to_cpu(ec_hdr->ec));
71 pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq)); 46 printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
72 pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc)); 47 be32_to_cpu(ec_hdr->vid_hdr_offset));
73 pr_err("erase counter header hexdump:\n"); 48 printk(KERN_DEBUG "\tdata_offset %d\n",
49 be32_to_cpu(ec_hdr->data_offset));
50 printk(KERN_DEBUG "\timage_seq %d\n",
51 be32_to_cpu(ec_hdr->image_seq));
52 printk(KERN_DEBUG "\thdr_crc %#08x\n",
53 be32_to_cpu(ec_hdr->hdr_crc));
54 printk(KERN_DEBUG "erase counter header hexdump:\n");
74 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 55 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
75 ec_hdr, UBI_EC_HDR_SIZE, 1); 56 ec_hdr, UBI_EC_HDR_SIZE, 1);
76} 57}
77 58
78/** 59/**
79 * ubi_dump_vid_hdr - dump a volume identifier header. 60 * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
80 * @vid_hdr: the volume identifier header to dump 61 * @vid_hdr: the volume identifier header to dump
81 */ 62 */
82void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) 63void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
83{ 64{
84 pr_err("Volume identifier header dump:\n"); 65 printk(KERN_DEBUG "Volume identifier header dump:\n");
85 pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic)); 66 printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
86 pr_err("\tversion %d\n", (int)vid_hdr->version); 67 printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
87 pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type); 68 printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
88 pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag); 69 printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
89 pr_err("\tcompat %d\n", (int)vid_hdr->compat); 70 printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
90 pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id)); 71 printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
91 pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum)); 72 printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
92 pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size)); 73 printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
93 pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs)); 74 printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
94 pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad)); 75 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
95 pr_err("\tsqnum %llu\n", 76 printk(KERN_DEBUG "\tsqnum %llu\n",
96 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); 77 (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
97 pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc)); 78 printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
98 pr_err("Volume identifier header hexdump:\n"); 79 printk(KERN_DEBUG "Volume identifier header hexdump:\n");
99 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 80 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
100 vid_hdr, UBI_VID_HDR_SIZE, 1); 81 vid_hdr, UBI_VID_HDR_SIZE, 1);
101} 82}
102 83
103/** 84/**
104 * ubi_dump_vol_info - dump volume information. 85 * ubi_dbg_dump_vol_info- dump volume information.
105 * @vol: UBI volume description object 86 * @vol: UBI volume description object
106 */ 87 */
107void ubi_dump_vol_info(const struct ubi_volume *vol) 88void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
108{ 89{
109 pr_err("Volume information dump:\n"); 90 printk(KERN_DEBUG "Volume information dump:\n");
110 pr_err("\tvol_id %d\n", vol->vol_id); 91 printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
111 pr_err("\treserved_pebs %d\n", vol->reserved_pebs); 92 printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs);
112 pr_err("\talignment %d\n", vol->alignment); 93 printk(KERN_DEBUG "\talignment %d\n", vol->alignment);
113 pr_err("\tdata_pad %d\n", vol->data_pad); 94 printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad);
114 pr_err("\tvol_type %d\n", vol->vol_type); 95 printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type);
115 pr_err("\tname_len %d\n", vol->name_len); 96 printk(KERN_DEBUG "\tname_len %d\n", vol->name_len);
116 pr_err("\tusable_leb_size %d\n", vol->usable_leb_size); 97 printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
117 pr_err("\tused_ebs %d\n", vol->used_ebs); 98 printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs);
118 pr_err("\tused_bytes %lld\n", vol->used_bytes); 99 printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes);
119 pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes); 100 printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes);
120 pr_err("\tcorrupted %d\n", vol->corrupted); 101 printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted);
121 pr_err("\tupd_marker %d\n", vol->upd_marker); 102 printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker);
122 103
123 if (vol->name_len <= UBI_VOL_NAME_MAX && 104 if (vol->name_len <= UBI_VOL_NAME_MAX &&
124 strnlen(vol->name, vol->name_len + 1) == vol->name_len) { 105 strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
125 pr_err("\tname %s\n", vol->name); 106 printk(KERN_DEBUG "\tname %s\n", vol->name);
126 } else { 107 } else {
127 pr_err("\t1st 5 characters of name: %c%c%c%c%c\n", 108 printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
128 vol->name[0], vol->name[1], vol->name[2], 109 vol->name[0], vol->name[1], vol->name[2],
129 vol->name[3], vol->name[4]); 110 vol->name[3], vol->name[4]);
130 } 111 }
131} 112}
132 113
133/** 114/**
134 * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object. 115 * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
135 * @r: the object to dump 116 * @r: the object to dump
136 * @idx: volume table index 117 * @idx: volume table index
137 */ 118 */
138void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) 119void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
139{ 120{
140 int name_len = be16_to_cpu(r->name_len); 121 int name_len = be16_to_cpu(r->name_len);
141 122
142 pr_err("Volume table record %d dump:\n", idx); 123 printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
143 pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs)); 124 printk(KERN_DEBUG "\treserved_pebs %d\n",
144 pr_err("\talignment %d\n", be32_to_cpu(r->alignment)); 125 be32_to_cpu(r->reserved_pebs));
145 pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad)); 126 printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment));
146 pr_err("\tvol_type %d\n", (int)r->vol_type); 127 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad));
147 pr_err("\tupd_marker %d\n", (int)r->upd_marker); 128 printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type);
148 pr_err("\tname_len %d\n", name_len); 129 printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker);
130 printk(KERN_DEBUG "\tname_len %d\n", name_len);
149 131
150 if (r->name[0] == '\0') { 132 if (r->name[0] == '\0') {
151 pr_err("\tname NULL\n"); 133 printk(KERN_DEBUG "\tname NULL\n");
152 return; 134 return;
153 } 135 }
154 136
155 if (name_len <= UBI_VOL_NAME_MAX && 137 if (name_len <= UBI_VOL_NAME_MAX &&
156 strnlen(&r->name[0], name_len + 1) == name_len) { 138 strnlen(&r->name[0], name_len + 1) == name_len) {
157 pr_err("\tname %s\n", &r->name[0]); 139 printk(KERN_DEBUG "\tname %s\n", &r->name[0]);
158 } else { 140 } else {
159 pr_err("\t1st 5 characters of name: %c%c%c%c%c\n", 141 printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
160 r->name[0], r->name[1], r->name[2], r->name[3], 142 r->name[0], r->name[1], r->name[2], r->name[3],
161 r->name[4]); 143 r->name[4]);
162 } 144 }
163 pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc)); 145 printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc));
164} 146}
165 147
166/** 148/**
167 * ubi_dump_av - dump a &struct ubi_ainf_volume object. 149 * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
168 * @av: the object to dump 150 * @sv: the object to dump
169 */ 151 */
170void ubi_dump_av(const struct ubi_ainf_volume *av) 152void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
171{ 153{
172 pr_err("Volume attaching information dump:\n"); 154 printk(KERN_DEBUG "Volume scanning information dump:\n");
173 pr_err("\tvol_id %d\n", av->vol_id); 155 printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id);
174 pr_err("\thighest_lnum %d\n", av->highest_lnum); 156 printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum);
175 pr_err("\tleb_count %d\n", av->leb_count); 157 printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count);
176 pr_err("\tcompat %d\n", av->compat); 158 printk(KERN_DEBUG "\tcompat %d\n", sv->compat);
177 pr_err("\tvol_type %d\n", av->vol_type); 159 printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type);
178 pr_err("\tused_ebs %d\n", av->used_ebs); 160 printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs);
179 pr_err("\tlast_data_size %d\n", av->last_data_size); 161 printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
180 pr_err("\tdata_pad %d\n", av->data_pad); 162 printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad);
181} 163}
182 164
183/** 165/**
184 * ubi_dump_aeb - dump a &struct ubi_ainf_peb object. 166 * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
185 * @aeb: the object to dump 167 * @seb: the object to dump
186 * @type: object type: 0 - not corrupted, 1 - corrupted 168 * @type: object type: 0 - not corrupted, 1 - corrupted
187 */ 169 */
188void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type) 170void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
189{ 171{
190 pr_err("eraseblock attaching information dump:\n"); 172 printk(KERN_DEBUG "eraseblock scanning information dump:\n");
191 pr_err("\tec %d\n", aeb->ec); 173 printk(KERN_DEBUG "\tec %d\n", seb->ec);
192 pr_err("\tpnum %d\n", aeb->pnum); 174 printk(KERN_DEBUG "\tpnum %d\n", seb->pnum);
193 if (type == 0) { 175 if (type == 0) {
194 pr_err("\tlnum %d\n", aeb->lnum); 176 printk(KERN_DEBUG "\tlnum %d\n", seb->lnum);
195 pr_err("\tscrub %d\n", aeb->scrub); 177 printk(KERN_DEBUG "\tscrub %d\n", seb->scrub);
196 pr_err("\tsqnum %llu\n", aeb->sqnum); 178 printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum);
197 } 179 }
198} 180}
199 181
200/** 182/**
201 * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object. 183 * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
202 * @req: the object to dump 184 * @req: the object to dump
203 */ 185 */
204void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req) 186void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
205{ 187{
206 char nm[17]; 188 char nm[17];
207 189
208 pr_err("Volume creation request dump:\n"); 190 printk(KERN_DEBUG "Volume creation request dump:\n");
209 pr_err("\tvol_id %d\n", req->vol_id); 191 printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id);
210 pr_err("\talignment %d\n", req->alignment); 192 printk(KERN_DEBUG "\talignment %d\n", req->alignment);
211 pr_err("\tbytes %lld\n", (long long)req->bytes); 193 printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes);
212 pr_err("\tvol_type %d\n", req->vol_type); 194 printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type);
213 pr_err("\tname_len %d\n", req->name_len); 195 printk(KERN_DEBUG "\tname_len %d\n", req->name_len);
214 196
215 memcpy(nm, req->name, 16); 197 memcpy(nm, req->name, 16);
216 nm[16] = 0; 198 nm[16] = 0;
217 pr_err("\t1st 16 characters of name: %s\n", nm); 199 printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
200}
201
202/**
203 * ubi_dbg_dump_flash - dump a region of flash.
204 * @ubi: UBI device description object
205 * @pnum: the physical eraseblock number to dump
206 * @offset: the starting offset within the physical eraseblock to dump
207 * @len: the length of the region to dump
208 */
209void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
210{
211 int err;
212 size_t read;
213 void *buf;
214 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
215
216 buf = vmalloc(len);
217 if (!buf)
218 return;
219 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
220 if (err && err != -EUCLEAN) {
221 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
222 "read %zd bytes", err, len, pnum, offset, read);
223 goto out;
224 }
225
226 dbg_msg("dumping %d bytes of data from PEB %d, offset %d",
227 len, pnum, offset);
228 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
229out:
230 vfree(buf);
231 return;
232}
233
234/**
235 * ubi_debugging_init_dev - initialize debugging for an UBI device.
236 * @ubi: UBI device description object
237 *
238 * This function initializes debugging-related data for UBI device @ubi.
239 * Returns zero in case of success and a negative error code in case of
240 * failure.
241 */
242int ubi_debugging_init_dev(struct ubi_device *ubi)
243{
244 ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
245 if (!ubi->dbg)
246 return -ENOMEM;
247
248 return 0;
249}
250
251/**
252 * ubi_debugging_exit_dev - free debugging data for an UBI device.
253 * @ubi: UBI device description object
254 */
255void ubi_debugging_exit_dev(struct ubi_device *ubi)
256{
257 kfree(ubi->dbg);
218} 258}
219 259
220/* 260/*
@@ -231,9 +271,6 @@ static struct dentry *dfs_rootdir;
231 */ 271 */
232int ubi_debugfs_init(void) 272int ubi_debugfs_init(void)
233{ 273{
234 if (!IS_ENABLED(CONFIG_DEBUG_FS))
235 return 0;
236
237 dfs_rootdir = debugfs_create_dir("ubi", NULL); 274 dfs_rootdir = debugfs_create_dir("ubi", NULL);
238 if (IS_ERR_OR_NULL(dfs_rootdir)) { 275 if (IS_ERR_OR_NULL(dfs_rootdir)) {
239 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 276 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -251,8 +288,7 @@ int ubi_debugfs_init(void)
251 */ 288 */
252void ubi_debugfs_exit(void) 289void ubi_debugfs_exit(void)
253{ 290{
254 if (IS_ENABLED(CONFIG_DEBUG_FS)) 291 debugfs_remove(dfs_rootdir);
255 debugfs_remove(dfs_rootdir);
256} 292}
257 293
258/* Read an UBI debugfs file */ 294/* Read an UBI debugfs file */
@@ -269,7 +305,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
269 ubi = ubi_get_device(ubi_num); 305 ubi = ubi_get_device(ubi_num);
270 if (!ubi) 306 if (!ubi)
271 return -ENODEV; 307 return -ENODEV;
272 d = &ubi->dbg; 308 d = ubi->dbg;
273 309
274 if (dent == d->dfs_chk_gen) 310 if (dent == d->dfs_chk_gen)
275 val = d->chk_gen; 311 val = d->chk_gen;
@@ -315,7 +351,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
315 ubi = ubi_get_device(ubi_num); 351 ubi = ubi_get_device(ubi_num);
316 if (!ubi) 352 if (!ubi)
317 return -ENODEV; 353 return -ENODEV;
318 d = &ubi->dbg; 354 d = ubi->dbg;
319 355
320 buf_size = min_t(size_t, count, (sizeof(buf) - 1)); 356 buf_size = min_t(size_t, count, (sizeof(buf) - 1));
321 if (copy_from_user(buf, user_buf, buf_size)) { 357 if (copy_from_user(buf, user_buf, buf_size)) {
@@ -350,11 +386,19 @@ out:
350 return count; 386 return count;
351} 387}
352 388
389static int default_open(struct inode *inode, struct file *file)
390{
391 if (inode->i_private)
392 file->private_data = inode->i_private;
393
394 return 0;
395}
396
353/* File operations for all UBI debugfs files */ 397/* File operations for all UBI debugfs files */
354static const struct file_operations dfs_fops = { 398static const struct file_operations dfs_fops = {
355 .read = dfs_file_read, 399 .read = dfs_file_read,
356 .write = dfs_file_write, 400 .write = dfs_file_write,
357 .open = simple_open, 401 .open = default_open,
358 .llseek = no_llseek, 402 .llseek = no_llseek,
359 .owner = THIS_MODULE, 403 .owner = THIS_MODULE,
360}; 404};
@@ -372,10 +416,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
372 unsigned long ubi_num = ubi->ubi_num; 416 unsigned long ubi_num = ubi->ubi_num;
373 const char *fname; 417 const char *fname;
374 struct dentry *dent; 418 struct dentry *dent;
375 struct ubi_debug_info *d = &ubi->dbg; 419 struct ubi_debug_info *d = ubi->dbg;
376
377 if (!IS_ENABLED(CONFIG_DEBUG_FS))
378 return 0;
379 420
380 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, 421 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
381 ubi->ubi_num); 422 ubi->ubi_num);
@@ -444,6 +485,7 @@ out:
444 */ 485 */
445void ubi_debugfs_exit_dev(struct ubi_device *ubi) 486void ubi_debugfs_exit_dev(struct ubi_device *ubi)
446{ 487{
447 if (IS_ENABLED(CONFIG_DEBUG_FS)) 488 debugfs_remove_recursive(ubi->dbg->dfs_dir);
448 debugfs_remove_recursive(ubi->dbg.dfs_dir);
449} 489}
490
491#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 33f8f3b2c9b..64fbb002182 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,27 +21,29 @@
21#ifndef __UBI_DEBUG_H__ 21#ifndef __UBI_DEBUG_H__
22#define __UBI_DEBUG_H__ 22#define __UBI_DEBUG_H__
23 23
24void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len); 24#ifdef CONFIG_MTD_UBI_DEBUG
25void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
26void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
27
28#include <linux/random.h> 25#include <linux/random.h>
29 26
30#define ubi_assert(expr) do { \ 27#define ubi_assert(expr) do { \
31 if (unlikely(!(expr))) { \ 28 if (unlikely(!(expr))) { \
32 pr_crit("UBI assert failed in %s at %u (pid %d)\n", \ 29 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
33 __func__, __LINE__, current->pid); \ 30 __func__, __LINE__, current->pid); \
34 dump_stack(); \ 31 ubi_dbg_dump_stack(); \
35 } \ 32 } \
36} while (0) 33} while (0)
37 34
38#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \ 35#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
36
37#define ubi_dbg_dump_stack() dump_stack()
38
39#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
39 print_hex_dump(l, ps, pt, r, g, b, len, a) 40 print_hex_dump(l, ps, pt, r, g, b, len, a)
40 41
41#define ubi_dbg_msg(type, fmt, ...) \ 42#define ubi_dbg_msg(type, fmt, ...) \
42 pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \ 43 pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
43 ##__VA_ARGS__)
44 44
45/* Just a debugging messages not related to any specific UBI subsystem */
46#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
45/* General debugging messages */ 47/* General debugging messages */
46#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__) 48#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
47/* Messages from the eraseblock association sub-system */ 49/* Messages from the eraseblock association sub-system */
@@ -53,18 +55,62 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
53/* Initialization and build messages */ 55/* Initialization and build messages */
54#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__) 56#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
55 57
56void ubi_dump_vol_info(const struct ubi_volume *vol); 58void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
57void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx); 59void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
58void ubi_dump_av(const struct ubi_ainf_volume *av); 60void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
59void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type); 61void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
60void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req); 62void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
61int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, 63void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
62 int len); 64void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
65void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
66int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
67int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
68 int offset, int len);
69int ubi_debugging_init_dev(struct ubi_device *ubi);
70void ubi_debugging_exit_dev(struct ubi_device *ubi);
63int ubi_debugfs_init(void); 71int ubi_debugfs_init(void);
64void ubi_debugfs_exit(void); 72void ubi_debugfs_exit(void);
65int ubi_debugfs_init_dev(struct ubi_device *ubi); 73int ubi_debugfs_init_dev(struct ubi_device *ubi);
66void ubi_debugfs_exit_dev(struct ubi_device *ubi); 74void ubi_debugfs_exit_dev(struct ubi_device *ubi);
67 75
76/*
77 * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
78 * + 2 for the number plus 1 for the trailing zero byte.
79 */
80#define UBI_DFS_DIR_NAME "ubi%d"
81#define UBI_DFS_DIR_LEN (3 + 2 + 1)
82
83/**
84 * struct ubi_debug_info - debugging information for an UBI device.
85 *
86 * @chk_gen: if UBI general extra checks are enabled
87 * @chk_io: if UBI I/O extra checks are enabled
88 * @disable_bgt: disable the background task for testing purposes
89 * @emulate_bitflips: emulate bit-flips for testing purposes
90 * @emulate_io_failures: emulate write/erase failures for testing purposes
91 * @dfs_dir_name: name of debugfs directory containing files of this UBI device
92 * @dfs_dir: direntry object of the UBI device debugfs directory
93 * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
94 * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
95 * @dfs_disable_bgt: debugfs knob to disable the background task
96 * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
97 * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
98 */
99struct ubi_debug_info {
100 unsigned int chk_gen:1;
101 unsigned int chk_io:1;
102 unsigned int disable_bgt:1;
103 unsigned int emulate_bitflips:1;
104 unsigned int emulate_io_failures:1;
105 char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
106 struct dentry *dfs_dir;
107 struct dentry *dfs_chk_gen;
108 struct dentry *dfs_chk_io;
109 struct dentry *dfs_disable_bgt;
110 struct dentry *dfs_emulate_bitflips;
111 struct dentry *dfs_emulate_io_failures;
112};
113
68/** 114/**
69 * ubi_dbg_is_bgt_disabled - if the background thread is disabled. 115 * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
70 * @ubi: UBI device description object 116 * @ubi: UBI device description object
@@ -74,7 +120,7 @@ void ubi_debugfs_exit_dev(struct ubi_device *ubi);
74 */ 120 */
75static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) 121static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
76{ 122{
77 return ubi->dbg.disable_bgt; 123 return ubi->dbg->disable_bgt;
78} 124}
79 125
80/** 126/**
@@ -85,7 +131,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
85 */ 131 */
86static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) 132static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
87{ 133{
88 if (ubi->dbg.emulate_bitflips) 134 if (ubi->dbg->emulate_bitflips)
89 return !(random32() % 200); 135 return !(random32() % 200);
90 return 0; 136 return 0;
91} 137}
@@ -99,7 +145,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
99 */ 145 */
100static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi) 146static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
101{ 147{
102 if (ubi->dbg.emulate_io_failures) 148 if (ubi->dbg->emulate_io_failures)
103 return !(random32() % 500); 149 return !(random32() % 500);
104 return 0; 150 return 0;
105} 151}
@@ -113,18 +159,78 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
113 */ 159 */
114static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi) 160static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
115{ 161{
116 if (ubi->dbg.emulate_io_failures) 162 if (ubi->dbg->emulate_io_failures)
117 return !(random32() % 400); 163 return !(random32() % 400);
118 return 0; 164 return 0;
119} 165}
120 166
121static inline int ubi_dbg_chk_io(const struct ubi_device *ubi) 167#else
122{
123 return ubi->dbg.chk_io;
124}
125 168
126static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi) 169/* Use "if (0)" to make compiler check arguments even if debugging is off */
127{ 170#define ubi_assert(expr) do { \
128 return ubi->dbg.chk_gen; 171 if (0) { \
129} 172 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
173 __func__, __LINE__, current->pid); \
174 } \
175} while (0)
176
177#define dbg_err(fmt, ...) do { \
178 if (0) \
179 ubi_err(fmt, ##__VA_ARGS__); \
180} while (0)
181
182#define ubi_dbg_msg(fmt, ...) do { \
183 if (0) \
184 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
185} while (0)
186
187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
188#define dbg_gen(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
189#define dbg_eba(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
190#define dbg_wl(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
191#define dbg_io(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
192#define dbg_bld(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
193
194static inline void ubi_dbg_dump_stack(void) { return; }
195static inline void
196ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { return; }
197static inline void
198ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { return; }
199static inline void
200ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { return; }
201static inline void
202ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { return; }
203static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
204static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
205 int type) { return; }
206static inline void
207ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { return; }
208static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
209 int pnum, int offset, int len) { return; }
210static inline void
211ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
212 int g, const void *b, size_t len, bool a) { return; }
213static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
214 int pnum, int offset,
215 int len) { return 0; }
216static inline int ubi_dbg_check_write(struct ubi_device *ubi,
217 const void *buf, int pnum,
218 int offset, int len) { return 0; }
219
220static inline int ubi_debugging_init_dev(struct ubi_device *ubi) { return 0; }
221static inline void ubi_debugging_exit_dev(struct ubi_device *ubi) { return; }
222static inline int ubi_debugfs_init(void) { return 0; }
223static inline void ubi_debugfs_exit(void) { return; }
224static inline int ubi_debugfs_init_dev(struct ubi_device *ubi) { return 0; }
225static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi) { return; }
226
227static inline int
228ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) { return 0; }
229static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
230static inline int
231ubi_dbg_is_write_failure(const struct ubi_device *ubi) { return 0; }
232static inline int
233ubi_dbg_is_erase_failure(const struct ubi_device *ubi) { return 0; }
234
235#endif /* !CONFIG_MTD_UBI_DEBUG */
130#endif /* !__UBI_DEBUG_H__ */ 236#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 0e11671dadc..c696c9481c9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
57 * global sequence counter value. It also increases the global sequence 57 * global sequence counter value. It also increases the global sequence
58 * counter. 58 * counter.
59 */ 59 */
60unsigned long long ubi_next_sqnum(struct ubi_device *ubi) 60static unsigned long long next_sqnum(struct ubi_device *ubi)
61{ 61{
62 unsigned long long sqnum; 62 unsigned long long sqnum;
63 63
@@ -340,10 +340,8 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
340 340
341 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); 341 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
342 342
343 down_read(&ubi->fm_sem);
344 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; 343 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
345 up_read(&ubi->fm_sem); 344 err = ubi_wl_put_peb(ubi, pnum, 0);
346 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
347 345
348out_unlock: 346out_unlock:
349 leb_write_unlock(ubi, vol_id, lnum); 347 leb_write_unlock(ubi, vol_id, lnum);
@@ -422,8 +420,9 @@ retry:
422 */ 420 */
423 if (err == UBI_IO_BAD_HDR_EBADMSG || 421 if (err == UBI_IO_BAD_HDR_EBADMSG ||
424 err == UBI_IO_BAD_HDR) { 422 err == UBI_IO_BAD_HDR) {
425 ubi_warn("corrupted VID header at PEB %d, LEB %d:%d", 423 ubi_warn("corrupted VID header at PEB "
426 pnum, vol_id, lnum); 424 "%d, LEB %d:%d", pnum, vol_id,
425 lnum);
427 err = -EBADMSG; 426 err = -EBADMSG;
428 } else 427 } else
429 ubi_ro_mode(ubi); 428 ubi_ro_mode(ubi);
@@ -444,7 +443,7 @@ retry:
444 if (err == UBI_IO_BITFLIPS) { 443 if (err == UBI_IO_BITFLIPS) {
445 scrub = 1; 444 scrub = 1;
446 err = 0; 445 err = 0;
447 } else if (mtd_is_eccerr(err)) { 446 } else if (err == -EBADMSG) {
448 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 447 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
449 goto out_unlock; 448 goto out_unlock;
450 scrub = 1; 449 scrub = 1;
@@ -508,7 +507,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
508 return -ENOMEM; 507 return -ENOMEM;
509 508
510retry: 509retry:
511 new_pnum = ubi_wl_get_peb(ubi); 510 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
512 if (new_pnum < 0) { 511 if (new_pnum < 0) {
513 ubi_free_vid_hdr(ubi, vid_hdr); 512 ubi_free_vid_hdr(ubi, vid_hdr);
514 return new_pnum; 513 return new_pnum;
@@ -523,25 +522,25 @@ retry:
523 goto out_put; 522 goto out_put;
524 } 523 }
525 524
526 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 525 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
527 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); 526 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
528 if (err) 527 if (err)
529 goto write_error; 528 goto write_error;
530 529
531 data_size = offset + len; 530 data_size = offset + len;
532 mutex_lock(&ubi->buf_mutex); 531 mutex_lock(&ubi->buf_mutex);
533 memset(ubi->peb_buf + offset, 0xFF, len); 532 memset(ubi->peb_buf1 + offset, 0xFF, len);
534 533
535 /* Read everything before the area where the write failure happened */ 534 /* Read everything before the area where the write failure happened */
536 if (offset > 0) { 535 if (offset > 0) {
537 err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset); 536 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
538 if (err && err != UBI_IO_BITFLIPS) 537 if (err && err != UBI_IO_BITFLIPS)
539 goto out_unlock; 538 goto out_unlock;
540 } 539 }
541 540
542 memcpy(ubi->peb_buf + offset, buf, len); 541 memcpy(ubi->peb_buf1 + offset, buf, len);
543 542
544 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); 543 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
545 if (err) { 544 if (err) {
546 mutex_unlock(&ubi->buf_mutex); 545 mutex_unlock(&ubi->buf_mutex);
547 goto write_error; 546 goto write_error;
@@ -550,10 +549,8 @@ retry:
550 mutex_unlock(&ubi->buf_mutex); 549 mutex_unlock(&ubi->buf_mutex);
551 ubi_free_vid_hdr(ubi, vid_hdr); 550 ubi_free_vid_hdr(ubi, vid_hdr);
552 551
553 down_read(&ubi->fm_sem);
554 vol->eba_tbl[lnum] = new_pnum; 552 vol->eba_tbl[lnum] = new_pnum;
555 up_read(&ubi->fm_sem); 553 ubi_wl_put_peb(ubi, pnum, 1);
556 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
557 554
558 ubi_msg("data was successfully recovered"); 555 ubi_msg("data was successfully recovered");
559 return 0; 556 return 0;
@@ -561,7 +558,7 @@ retry:
561out_unlock: 558out_unlock:
562 mutex_unlock(&ubi->buf_mutex); 559 mutex_unlock(&ubi->buf_mutex);
563out_put: 560out_put:
564 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); 561 ubi_wl_put_peb(ubi, new_pnum, 1);
565 ubi_free_vid_hdr(ubi, vid_hdr); 562 ubi_free_vid_hdr(ubi, vid_hdr);
566 return err; 563 return err;
567 564
@@ -571,7 +568,7 @@ write_error:
571 * get another one. 568 * get another one.
572 */ 569 */
573 ubi_warn("failed to write to PEB %d", new_pnum); 570 ubi_warn("failed to write to PEB %d", new_pnum);
574 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); 571 ubi_wl_put_peb(ubi, new_pnum, 1);
575 if (++tries > UBI_IO_RETRIES) { 572 if (++tries > UBI_IO_RETRIES) {
576 ubi_free_vid_hdr(ubi, vid_hdr); 573 ubi_free_vid_hdr(ubi, vid_hdr);
577 return err; 574 return err;
@@ -588,6 +585,7 @@ write_error:
588 * @buf: the data to write 585 * @buf: the data to write
589 * @offset: offset within the logical eraseblock where to write 586 * @offset: offset within the logical eraseblock where to write
590 * @len: how many bytes to write 587 * @len: how many bytes to write
588 * @dtype: data type
591 * 589 *
592 * This function writes data to logical eraseblock @lnum of a dynamic volume 590 * This function writes data to logical eraseblock @lnum of a dynamic volume
593 * @vol. Returns zero in case of success and a negative error code in case 591 * @vol. Returns zero in case of success and a negative error code in case
@@ -595,7 +593,7 @@ write_error:
595 * written to the flash media, but may be some garbage. 593 * written to the flash media, but may be some garbage.
596 */ 594 */
597int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 595int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
598 const void *buf, int offset, int len) 596 const void *buf, int offset, int len, int dtype)
599{ 597{
600 int err, pnum, tries = 0, vol_id = vol->vol_id; 598 int err, pnum, tries = 0, vol_id = vol->vol_id;
601 struct ubi_vid_hdr *vid_hdr; 599 struct ubi_vid_hdr *vid_hdr;
@@ -636,14 +634,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
636 } 634 }
637 635
638 vid_hdr->vol_type = UBI_VID_DYNAMIC; 636 vid_hdr->vol_type = UBI_VID_DYNAMIC;
639 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 637 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
640 vid_hdr->vol_id = cpu_to_be32(vol_id); 638 vid_hdr->vol_id = cpu_to_be32(vol_id);
641 vid_hdr->lnum = cpu_to_be32(lnum); 639 vid_hdr->lnum = cpu_to_be32(lnum);
642 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 640 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
643 vid_hdr->data_pad = cpu_to_be32(vol->data_pad); 641 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
644 642
645retry: 643retry:
646 pnum = ubi_wl_get_peb(ubi); 644 pnum = ubi_wl_get_peb(ubi, dtype);
647 if (pnum < 0) { 645 if (pnum < 0) {
648 ubi_free_vid_hdr(ubi, vid_hdr); 646 ubi_free_vid_hdr(ubi, vid_hdr);
649 leb_write_unlock(ubi, vol_id, lnum); 647 leb_write_unlock(ubi, vol_id, lnum);
@@ -663,15 +661,14 @@ retry:
663 if (len) { 661 if (len) {
664 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 662 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
665 if (err) { 663 if (err) {
666 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", 664 ubi_warn("failed to write %d bytes at offset %d of "
667 len, offset, vol_id, lnum, pnum); 665 "LEB %d:%d, PEB %d", len, offset, vol_id,
666 lnum, pnum);
668 goto write_error; 667 goto write_error;
669 } 668 }
670 } 669 }
671 670
672 down_read(&ubi->fm_sem);
673 vol->eba_tbl[lnum] = pnum; 671 vol->eba_tbl[lnum] = pnum;
674 up_read(&ubi->fm_sem);
675 672
676 leb_write_unlock(ubi, vol_id, lnum); 673 leb_write_unlock(ubi, vol_id, lnum);
677 ubi_free_vid_hdr(ubi, vid_hdr); 674 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -690,7 +687,7 @@ write_error:
690 * eraseblock, so just put it and request a new one. We assume that if 687 * eraseblock, so just put it and request a new one. We assume that if
691 * this physical eraseblock went bad, the erase code will handle that. 688 * this physical eraseblock went bad, the erase code will handle that.
692 */ 689 */
693 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 690 err = ubi_wl_put_peb(ubi, pnum, 1);
694 if (err || ++tries > UBI_IO_RETRIES) { 691 if (err || ++tries > UBI_IO_RETRIES) {
695 ubi_ro_mode(ubi); 692 ubi_ro_mode(ubi);
696 leb_write_unlock(ubi, vol_id, lnum); 693 leb_write_unlock(ubi, vol_id, lnum);
@@ -698,7 +695,7 @@ write_error:
698 return err; 695 return err;
699 } 696 }
700 697
701 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 698 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
702 ubi_msg("try another PEB"); 699 ubi_msg("try another PEB");
703 goto retry; 700 goto retry;
704} 701}
@@ -710,6 +707,7 @@ write_error:
710 * @lnum: logical eraseblock number 707 * @lnum: logical eraseblock number
711 * @buf: data to write 708 * @buf: data to write
712 * @len: how many bytes to write 709 * @len: how many bytes to write
710 * @dtype: data type
713 * @used_ebs: how many logical eraseblocks will this volume contain 711 * @used_ebs: how many logical eraseblocks will this volume contain
714 * 712 *
715 * This function writes data to logical eraseblock @lnum of static volume 713 * This function writes data to logical eraseblock @lnum of static volume
@@ -726,7 +724,8 @@ write_error:
726 * code in case of failure. 724 * code in case of failure.
727 */ 725 */
728int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 726int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
729 int lnum, const void *buf, int len, int used_ebs) 727 int lnum, const void *buf, int len, int dtype,
728 int used_ebs)
730{ 729{
731 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; 730 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
732 struct ubi_vid_hdr *vid_hdr; 731 struct ubi_vid_hdr *vid_hdr;
@@ -751,7 +750,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
751 return err; 750 return err;
752 } 751 }
753 752
754 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 753 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
755 vid_hdr->vol_id = cpu_to_be32(vol_id); 754 vid_hdr->vol_id = cpu_to_be32(vol_id);
756 vid_hdr->lnum = cpu_to_be32(lnum); 755 vid_hdr->lnum = cpu_to_be32(lnum);
757 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 756 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -764,7 +763,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
764 vid_hdr->data_crc = cpu_to_be32(crc); 763 vid_hdr->data_crc = cpu_to_be32(crc);
765 764
766retry: 765retry:
767 pnum = ubi_wl_get_peb(ubi); 766 pnum = ubi_wl_get_peb(ubi, dtype);
768 if (pnum < 0) { 767 if (pnum < 0) {
769 ubi_free_vid_hdr(ubi, vid_hdr); 768 ubi_free_vid_hdr(ubi, vid_hdr);
770 leb_write_unlock(ubi, vol_id, lnum); 769 leb_write_unlock(ubi, vol_id, lnum);
@@ -789,9 +788,7 @@ retry:
789 } 788 }
790 789
791 ubi_assert(vol->eba_tbl[lnum] < 0); 790 ubi_assert(vol->eba_tbl[lnum] < 0);
792 down_read(&ubi->fm_sem);
793 vol->eba_tbl[lnum] = pnum; 791 vol->eba_tbl[lnum] = pnum;
794 up_read(&ubi->fm_sem);
795 792
796 leb_write_unlock(ubi, vol_id, lnum); 793 leb_write_unlock(ubi, vol_id, lnum);
797 ubi_free_vid_hdr(ubi, vid_hdr); 794 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -810,7 +807,7 @@ write_error:
810 return err; 807 return err;
811 } 808 }
812 809
813 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 810 err = ubi_wl_put_peb(ubi, pnum, 1);
814 if (err || ++tries > UBI_IO_RETRIES) { 811 if (err || ++tries > UBI_IO_RETRIES) {
815 ubi_ro_mode(ubi); 812 ubi_ro_mode(ubi);
816 leb_write_unlock(ubi, vol_id, lnum); 813 leb_write_unlock(ubi, vol_id, lnum);
@@ -818,7 +815,7 @@ write_error:
818 return err; 815 return err;
819 } 816 }
820 817
821 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 818 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
822 ubi_msg("try another PEB"); 819 ubi_msg("try another PEB");
823 goto retry; 820 goto retry;
824} 821}
@@ -830,6 +827,7 @@ write_error:
830 * @lnum: logical eraseblock number 827 * @lnum: logical eraseblock number
831 * @buf: data to write 828 * @buf: data to write
832 * @len: how many bytes to write 829 * @len: how many bytes to write
830 * @dtype: data type
833 * 831 *
834 * This function changes the contents of a logical eraseblock atomically. @buf 832 * This function changes the contents of a logical eraseblock atomically. @buf
835 * has to contain new logical eraseblock data, and @len - the length of the 833 * has to contain new logical eraseblock data, and @len - the length of the
@@ -841,7 +839,7 @@ write_error:
841 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 839 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
842 */ 840 */
843int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 841int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
844 int lnum, const void *buf, int len) 842 int lnum, const void *buf, int len, int dtype)
845{ 843{
846 int err, pnum, tries = 0, vol_id = vol->vol_id; 844 int err, pnum, tries = 0, vol_id = vol->vol_id;
847 struct ubi_vid_hdr *vid_hdr; 845 struct ubi_vid_hdr *vid_hdr;
@@ -858,7 +856,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
858 err = ubi_eba_unmap_leb(ubi, vol, lnum); 856 err = ubi_eba_unmap_leb(ubi, vol, lnum);
859 if (err) 857 if (err)
860 return err; 858 return err;
861 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); 859 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
862 } 860 }
863 861
864 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 862 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -870,7 +868,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
870 if (err) 868 if (err)
871 goto out_mutex; 869 goto out_mutex;
872 870
873 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 871 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
874 vid_hdr->vol_id = cpu_to_be32(vol_id); 872 vid_hdr->vol_id = cpu_to_be32(vol_id);
875 vid_hdr->lnum = cpu_to_be32(lnum); 873 vid_hdr->lnum = cpu_to_be32(lnum);
876 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 874 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -883,7 +881,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
883 vid_hdr->data_crc = cpu_to_be32(crc); 881 vid_hdr->data_crc = cpu_to_be32(crc);
884 882
885retry: 883retry:
886 pnum = ubi_wl_get_peb(ubi); 884 pnum = ubi_wl_get_peb(ubi, dtype);
887 if (pnum < 0) { 885 if (pnum < 0) {
888 err = pnum; 886 err = pnum;
889 goto out_leb_unlock; 887 goto out_leb_unlock;
@@ -907,14 +905,12 @@ retry:
907 } 905 }
908 906
909 if (vol->eba_tbl[lnum] >= 0) { 907 if (vol->eba_tbl[lnum] >= 0) {
910 err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0); 908 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
911 if (err) 909 if (err)
912 goto out_leb_unlock; 910 goto out_leb_unlock;
913 } 911 }
914 912
915 down_read(&ubi->fm_sem);
916 vol->eba_tbl[lnum] = pnum; 913 vol->eba_tbl[lnum] = pnum;
917 up_read(&ubi->fm_sem);
918 914
919out_leb_unlock: 915out_leb_unlock:
920 leb_write_unlock(ubi, vol_id, lnum); 916 leb_write_unlock(ubi, vol_id, lnum);
@@ -934,13 +930,13 @@ write_error:
934 goto out_leb_unlock; 930 goto out_leb_unlock;
935 } 931 }
936 932
937 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 933 err = ubi_wl_put_peb(ubi, pnum, 1);
938 if (err || ++tries > UBI_IO_RETRIES) { 934 if (err || ++tries > UBI_IO_RETRIES) {
939 ubi_ro_mode(ubi); 935 ubi_ro_mode(ubi);
940 goto out_leb_unlock; 936 goto out_leb_unlock;
941 } 937 }
942 938
943 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 939 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
944 ubi_msg("try another PEB"); 940 ubi_msg("try another PEB");
945 goto retry; 941 goto retry;
946} 942}
@@ -983,7 +979,7 @@ static int is_error_sane(int err)
983 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 979 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
984 * function. Returns: 980 * function. Returns:
985 * o %0 in case of success; 981 * o %0 in case of success;
986 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc; 982 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
987 * o a negative error code in case of failure. 983 * o a negative error code in case of failure.
988 */ 984 */
989int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 985int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -1048,21 +1044,22 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1048 * cancel it. 1044 * cancel it.
1049 */ 1045 */
1050 if (vol->eba_tbl[lnum] != from) { 1046 if (vol->eba_tbl[lnum] != from) {
1051 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel", 1047 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1052 vol_id, lnum, from, vol->eba_tbl[lnum]); 1048 "PEB %d, cancel", vol_id, lnum, from,
1049 vol->eba_tbl[lnum]);
1053 err = MOVE_CANCEL_RACE; 1050 err = MOVE_CANCEL_RACE;
1054 goto out_unlock_leb; 1051 goto out_unlock_leb;
1055 } 1052 }
1056 1053
1057 /* 1054 /*
1058 * OK, now the LEB is locked and we can safely start moving it. Since 1055 * OK, now the LEB is locked and we can safely start moving it. Since
1059 * this function utilizes the @ubi->peb_buf buffer which is shared 1056 * this function utilizes the @ubi->peb_buf1 buffer which is shared
1060 * with some other functions - we lock the buffer by taking the 1057 * with some other functions - we lock the buffer by taking the
1061 * @ubi->buf_mutex. 1058 * @ubi->buf_mutex.
1062 */ 1059 */
1063 mutex_lock(&ubi->buf_mutex); 1060 mutex_lock(&ubi->buf_mutex);
1064 dbg_wl("read %d bytes of data", aldata_size); 1061 dbg_wl("read %d bytes of data", aldata_size);
1065 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); 1062 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1066 if (err && err != UBI_IO_BITFLIPS) { 1063 if (err && err != UBI_IO_BITFLIPS) {
1067 ubi_warn("error %d while reading data from PEB %d", 1064 ubi_warn("error %d while reading data from PEB %d",
1068 err, from); 1065 err, from);
@@ -1082,10 +1079,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1082 */ 1079 */
1083 if (vid_hdr->vol_type == UBI_VID_DYNAMIC) 1080 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1084 aldata_size = data_size = 1081 aldata_size = data_size =
1085 ubi_calc_data_len(ubi, ubi->peb_buf, data_size); 1082 ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
1086 1083
1087 cond_resched(); 1084 cond_resched();
1088 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); 1085 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
1089 cond_resched(); 1086 cond_resched();
1090 1087
1091 /* 1088 /*
@@ -1099,7 +1096,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1099 vid_hdr->data_size = cpu_to_be32(data_size); 1096 vid_hdr->data_size = cpu_to_be32(data_size);
1100 vid_hdr->data_crc = cpu_to_be32(crc); 1097 vid_hdr->data_crc = cpu_to_be32(crc);
1101 } 1098 }
1102 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1099 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1103 1100
1104 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1101 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1105 if (err) { 1102 if (err) {
@@ -1114,17 +1111,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1114 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); 1111 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1115 if (err) { 1112 if (err) {
1116 if (err != UBI_IO_BITFLIPS) { 1113 if (err != UBI_IO_BITFLIPS) {
1117 ubi_warn("error %d while reading VID header back from PEB %d", 1114 ubi_warn("error %d while reading VID header back from "
1118 err, to); 1115 "PEB %d", err, to);
1119 if (is_error_sane(err)) 1116 if (is_error_sane(err))
1120 err = MOVE_TARGET_RD_ERR; 1117 err = MOVE_TARGET_RD_ERR;
1121 } else 1118 } else
1122 err = MOVE_TARGET_BITFLIPS; 1119 err = MOVE_CANCEL_BITFLIPS;
1123 goto out_unlock_buf; 1120 goto out_unlock_buf;
1124 } 1121 }
1125 1122
1126 if (data_size > 0) { 1123 if (data_size > 0) {
1127 err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size); 1124 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1128 if (err) { 1125 if (err) {
1129 if (err == -EIO) 1126 if (err == -EIO)
1130 err = MOVE_TARGET_WR_ERR; 1127 err = MOVE_TARGET_WR_ERR;
@@ -1137,33 +1134,31 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1137 * We've written the data and are going to read it back to make 1134 * We've written the data and are going to read it back to make
1138 * sure it was written correctly. 1135 * sure it was written correctly.
1139 */ 1136 */
1140 memset(ubi->peb_buf, 0xFF, aldata_size); 1137
1141 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); 1138 err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
1142 if (err) { 1139 if (err) {
1143 if (err != UBI_IO_BITFLIPS) { 1140 if (err != UBI_IO_BITFLIPS) {
1144 ubi_warn("error %d while reading data back from PEB %d", 1141 ubi_warn("error %d while reading data back "
1145 err, to); 1142 "from PEB %d", err, to);
1146 if (is_error_sane(err)) 1143 if (is_error_sane(err))
1147 err = MOVE_TARGET_RD_ERR; 1144 err = MOVE_TARGET_RD_ERR;
1148 } else 1145 } else
1149 err = MOVE_TARGET_BITFLIPS; 1146 err = MOVE_CANCEL_BITFLIPS;
1150 goto out_unlock_buf; 1147 goto out_unlock_buf;
1151 } 1148 }
1152 1149
1153 cond_resched(); 1150 cond_resched();
1154 1151
1155 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { 1152 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1156 ubi_warn("read data back from PEB %d and it is different", 1153 ubi_warn("read data back from PEB %d and it is "
1157 to); 1154 "different", to);
1158 err = -EINVAL; 1155 err = -EINVAL;
1159 goto out_unlock_buf; 1156 goto out_unlock_buf;
1160 } 1157 }
1161 } 1158 }
1162 1159
1163 ubi_assert(vol->eba_tbl[lnum] == from); 1160 ubi_assert(vol->eba_tbl[lnum] == from);
1164 down_read(&ubi->fm_sem);
1165 vol->eba_tbl[lnum] = to; 1161 vol->eba_tbl[lnum] = to;
1166 up_read(&ubi->fm_sem);
1167 1162
1168out_unlock_buf: 1163out_unlock_buf:
1169 mutex_unlock(&ubi->buf_mutex); 1164 mutex_unlock(&ubi->buf_mutex);
@@ -1176,7 +1171,7 @@ out_unlock_leb:
1176 * print_rsvd_warning - warn about not having enough reserved PEBs. 1171 * print_rsvd_warning - warn about not having enough reserved PEBs.
1177 * @ubi: UBI device description object 1172 * @ubi: UBI device description object
1178 * 1173 *
1179 * This is a helper function for 'ubi_eba_init()' which is called when UBI 1174 * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
1180 * cannot reserve enough PEBs for bad block handling. This function makes a 1175 * cannot reserve enough PEBs for bad block handling. This function makes a
1181 * decision whether we have to print a warning or not. The algorithm is as 1176 * decision whether we have to print a warning or not. The algorithm is as
1182 * follows: 1177 * follows:
@@ -1191,13 +1186,13 @@ out_unlock_leb:
1191 * reported by real users. 1186 * reported by real users.
1192 */ 1187 */
1193static void print_rsvd_warning(struct ubi_device *ubi, 1188static void print_rsvd_warning(struct ubi_device *ubi,
1194 struct ubi_attach_info *ai) 1189 struct ubi_scan_info *si)
1195{ 1190{
1196 /* 1191 /*
1197 * The 1 << 18 (256KiB) number is picked randomly, just a reasonably 1192 * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
1198 * large number to distinguish between newly flashed and used images. 1193 * large number to distinguish between newly flashed and used images.
1199 */ 1194 */
1200 if (ai->max_sqnum > (1 << 18)) { 1195 if (si->max_sqnum > (1 << 18)) {
1201 int min = ubi->beb_rsvd_level / 10; 1196 int min = ubi->beb_rsvd_level / 10;
1202 1197
1203 if (!min) 1198 if (!min)
@@ -1206,123 +1201,27 @@ static void print_rsvd_warning(struct ubi_device *ubi,
1206 return; 1201 return;
1207 } 1202 }
1208 1203
1209 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d", 1204 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
1210 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); 1205 " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1211 if (ubi->corr_peb_count) 1206 if (ubi->corr_peb_count)
1212 ubi_warn("%d PEBs are corrupted and not used", 1207 ubi_warn("%d PEBs are corrupted and not used",
1213 ubi->corr_peb_count); 1208 ubi->corr_peb_count);
1214}
1215
1216/**
1217 * self_check_eba - run a self check on the EBA table constructed by fastmap.
1218 * @ubi: UBI device description object
1219 * @ai_fastmap: UBI attach info object created by fastmap
1220 * @ai_scan: UBI attach info object created by scanning
1221 *
1222 * Returns < 0 in case of an internal error, 0 otherwise.
1223 * If a bad EBA table entry was found it will be printed out and
1224 * ubi_assert() triggers.
1225 */
1226int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1227 struct ubi_attach_info *ai_scan)
1228{
1229 int i, j, num_volumes, ret = 0;
1230 int **scan_eba, **fm_eba;
1231 struct ubi_ainf_volume *av;
1232 struct ubi_volume *vol;
1233 struct ubi_ainf_peb *aeb;
1234 struct rb_node *rb;
1235
1236 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1237
1238 scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
1239 if (!scan_eba)
1240 return -ENOMEM;
1241
1242 fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
1243 if (!fm_eba) {
1244 kfree(scan_eba);
1245 return -ENOMEM;
1246 }
1247
1248 for (i = 0; i < num_volumes; i++) {
1249 vol = ubi->volumes[i];
1250 if (!vol)
1251 continue;
1252
1253 scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
1254 GFP_KERNEL);
1255 if (!scan_eba[i]) {
1256 ret = -ENOMEM;
1257 goto out_free;
1258 }
1259
1260 fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
1261 GFP_KERNEL);
1262 if (!fm_eba[i]) {
1263 ret = -ENOMEM;
1264 goto out_free;
1265 }
1266
1267 for (j = 0; j < vol->reserved_pebs; j++)
1268 scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1269
1270 av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1271 if (!av)
1272 continue;
1273
1274 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1275 scan_eba[i][aeb->lnum] = aeb->pnum;
1276
1277 av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1278 if (!av)
1279 continue;
1280
1281 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1282 fm_eba[i][aeb->lnum] = aeb->pnum;
1283
1284 for (j = 0; j < vol->reserved_pebs; j++) {
1285 if (scan_eba[i][j] != fm_eba[i][j]) {
1286 if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1287 fm_eba[i][j] == UBI_LEB_UNMAPPED)
1288 continue;
1289
1290 ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
1291 vol->vol_id, i, fm_eba[i][j],
1292 scan_eba[i][j]);
1293 ubi_assert(0);
1294 }
1295 }
1296 }
1297
1298out_free:
1299 for (i = 0; i < num_volumes; i++) {
1300 if (!ubi->volumes[i])
1301 continue;
1302
1303 kfree(scan_eba[i]);
1304 kfree(fm_eba[i]);
1305 }
1306
1307 kfree(scan_eba);
1308 kfree(fm_eba);
1309 return ret;
1310} 1209}
1311 1210
1312/** 1211/**
1313 * ubi_eba_init - initialize the EBA sub-system using attaching information. 1212 * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
1314 * @ubi: UBI device description object 1213 * @ubi: UBI device description object
1315 * @ai: attaching information 1214 * @si: scanning information
1316 * 1215 *
1317 * This function returns zero in case of success and a negative error code in 1216 * This function returns zero in case of success and a negative error code in
1318 * case of failure. 1217 * case of failure.
1319 */ 1218 */
1320int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai) 1219int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1321{ 1220{
1322 int i, j, err, num_volumes; 1221 int i, j, err, num_volumes;
1323 struct ubi_ainf_volume *av; 1222 struct ubi_scan_volume *sv;
1324 struct ubi_volume *vol; 1223 struct ubi_volume *vol;
1325 struct ubi_ainf_peb *aeb; 1224 struct ubi_scan_leb *seb;
1326 struct rb_node *rb; 1225 struct rb_node *rb;
1327 1226
1328 dbg_eba("initialize EBA sub-system"); 1227 dbg_eba("initialize EBA sub-system");
@@ -1331,7 +1230,7 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1331 mutex_init(&ubi->alc_mutex); 1230 mutex_init(&ubi->alc_mutex);
1332 ubi->ltree = RB_ROOT; 1231 ubi->ltree = RB_ROOT;
1333 1232
1334 ubi->global_sqnum = ai->max_sqnum + 1; 1233 ubi->global_sqnum = si->max_sqnum + 1;
1335 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1234 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1336 1235
1337 for (i = 0; i < num_volumes; i++) { 1236 for (i = 0; i < num_volumes; i++) {
@@ -1351,18 +1250,18 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1351 for (j = 0; j < vol->reserved_pebs; j++) 1250 for (j = 0; j < vol->reserved_pebs; j++)
1352 vol->eba_tbl[j] = UBI_LEB_UNMAPPED; 1251 vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1353 1252
1354 av = ubi_find_av(ai, idx2vol_id(ubi, i)); 1253 sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
1355 if (!av) 1254 if (!sv)
1356 continue; 1255 continue;
1357 1256
1358 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { 1257 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
1359 if (aeb->lnum >= vol->reserved_pebs) 1258 if (seb->lnum >= vol->reserved_pebs)
1360 /* 1259 /*
1361 * This may happen in case of an unclean reboot 1260 * This may happen in case of an unclean reboot
1362 * during re-size. 1261 * during re-size.
1363 */ 1262 */
1364 ubi_move_aeb_to_list(av, aeb, &ai->erase); 1263 ubi_scan_move_to_list(sv, seb, &si->erase);
1365 vol->eba_tbl[aeb->lnum] = aeb->pnum; 1264 vol->eba_tbl[seb->lnum] = seb->pnum;
1366 } 1265 }
1367 } 1266 }
1368 1267
@@ -1384,7 +1283,7 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1384 if (ubi->avail_pebs < ubi->beb_rsvd_level) { 1283 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1385 /* No enough free physical eraseblocks */ 1284 /* No enough free physical eraseblocks */
1386 ubi->beb_rsvd_pebs = ubi->avail_pebs; 1285 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1387 print_rsvd_warning(ubi, ai); 1286 print_rsvd_warning(ubi, si);
1388 } else 1287 } else
1389 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; 1288 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1390 1289
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
deleted file mode 100644
index 0648c6996d4..00000000000
--- a/drivers/mtd/ubi/fastmap.c
+++ /dev/null
@@ -1,1535 +0,0 @@
1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Author: Richard Weinberger <richard@nod.at>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 */
15
16#include <linux/crc32.h>
17#include "ubi.h"
18
19/**
20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
21 * @ubi: UBI device description object
22 */
23size_t ubi_calc_fm_size(struct ubi_device *ubi)
24{
25 size_t size;
26
27 size = sizeof(struct ubi_fm_hdr) + \
28 sizeof(struct ubi_fm_scan_pool) + \
29 sizeof(struct ubi_fm_scan_pool) + \
30 (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
31 (sizeof(struct ubi_fm_eba) + \
32 (ubi->peb_count * sizeof(__be32))) + \
33 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
34 return roundup(size, ubi->leb_size);
35}
36
37
38/**
39 * new_fm_vhdr - allocate a new volume header for fastmap usage.
40 * @ubi: UBI device description object
41 * @vol_id: the VID of the new header
42 *
43 * Returns a new struct ubi_vid_hdr on success.
44 * NULL indicates out of memory.
45 */
46static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
47{
48 struct ubi_vid_hdr *new;
49
50 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
51 if (!new)
52 goto out;
53
54 new->vol_type = UBI_VID_DYNAMIC;
55 new->vol_id = cpu_to_be32(vol_id);
56
57 /* UBI implementations without fastmap support have to delete the
58 * fastmap.
59 */
60 new->compat = UBI_COMPAT_DELETE;
61
62out:
63 return new;
64}
65
66/**
67 * add_aeb - create and add a attach erase block to a given list.
68 * @ai: UBI attach info object
69 * @list: the target list
70 * @pnum: PEB number of the new attach erase block
71 * @ec: erease counter of the new LEB
72 * @scrub: scrub this PEB after attaching
73 *
74 * Returns 0 on success, < 0 indicates an internal error.
75 */
76static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
77 int pnum, int ec, int scrub)
78{
79 struct ubi_ainf_peb *aeb;
80
81 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
82 if (!aeb)
83 return -ENOMEM;
84
85 aeb->pnum = pnum;
86 aeb->ec = ec;
87 aeb->lnum = -1;
88 aeb->scrub = scrub;
89 aeb->copy_flag = aeb->sqnum = 0;
90
91 ai->ec_sum += aeb->ec;
92 ai->ec_count++;
93
94 if (ai->max_ec < aeb->ec)
95 ai->max_ec = aeb->ec;
96
97 if (ai->min_ec > aeb->ec)
98 ai->min_ec = aeb->ec;
99
100 list_add_tail(&aeb->u.list, list);
101
102 return 0;
103}
104
105/**
106 * add_vol - create and add a new volume to ubi_attach_info.
107 * @ai: ubi_attach_info object
108 * @vol_id: VID of the new volume
109 * @used_ebs: number of used EBS
110 * @data_pad: data padding value of the new volume
111 * @vol_type: volume type
112 * @last_eb_bytes: number of bytes in the last LEB
113 *
114 * Returns the new struct ubi_ainf_volume on success.
115 * NULL indicates an error.
116 */
117static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
118 int used_ebs, int data_pad, u8 vol_type,
119 int last_eb_bytes)
120{
121 struct ubi_ainf_volume *av;
122 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
123
124 while (*p) {
125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127
128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left;
130 else if (vol_id > av->vol_id)
131 p = &(*p)->rb_right;
132 }
133
134 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
135 if (!av)
136 goto out;
137
138 av->highest_lnum = av->leb_count = 0;
139 av->vol_id = vol_id;
140 av->used_ebs = used_ebs;
141 av->data_pad = data_pad;
142 av->last_data_size = last_eb_bytes;
143 av->compat = 0;
144 av->vol_type = vol_type;
145 av->root = RB_ROOT;
146
147 dbg_bld("found volume (ID %i)", vol_id);
148
149 rb_link_node(&av->rb, parent, p);
150 rb_insert_color(&av->rb, &ai->volumes);
151
152out:
153 return av;
154}
155
156/**
157 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
158 * from it's original list.
159 * @ai: ubi_attach_info object
160 * @aeb: the to be assigned SEB
161 * @av: target scan volume
162 */
163static void assign_aeb_to_av(struct ubi_attach_info *ai,
164 struct ubi_ainf_peb *aeb,
165 struct ubi_ainf_volume *av)
166{
167 struct ubi_ainf_peb *tmp_aeb;
168 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
169
170 p = &av->root.rb_node;
171 while (*p) {
172 parent = *p;
173
174 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
175 if (aeb->lnum != tmp_aeb->lnum) {
176 if (aeb->lnum < tmp_aeb->lnum)
177 p = &(*p)->rb_left;
178 else
179 p = &(*p)->rb_right;
180
181 continue;
182 } else
183 break;
184 }
185
186 list_del(&aeb->u.list);
187 av->leb_count++;
188
189 rb_link_node(&aeb->u.rb, parent, p);
190 rb_insert_color(&aeb->u.rb, &av->root);
191}
192
193/**
194 * update_vol - inserts or updates a LEB which was found a pool.
195 * @ubi: the UBI device object
196 * @ai: attach info object
197 * @av: the volume this LEB belongs to
198 * @new_vh: the volume header derived from new_aeb
199 * @new_aeb: the AEB to be examined
200 *
201 * Returns 0 on success, < 0 indicates an internal error.
202 */
203static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
204 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
205 struct ubi_ainf_peb *new_aeb)
206{
207 struct rb_node **p = &av->root.rb_node, *parent = NULL;
208 struct ubi_ainf_peb *aeb, *victim;
209 int cmp_res;
210
211 while (*p) {
212 parent = *p;
213 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
214
215 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
216 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
217 p = &(*p)->rb_left;
218 else
219 p = &(*p)->rb_right;
220
221 continue;
222 }
223
224 /* This case can happen if the fastmap gets written
225 * because of a volume change (creation, deletion, ..).
226 * Then a PEB can be within the persistent EBA and the pool.
227 */
228 if (aeb->pnum == new_aeb->pnum) {
229 ubi_assert(aeb->lnum == new_aeb->lnum);
230 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
231
232 return 0;
233 }
234
235 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
236 if (cmp_res < 0)
237 return cmp_res;
238
239 /* new_aeb is newer */
240 if (cmp_res & 1) {
241 victim = kmem_cache_alloc(ai->aeb_slab_cache,
242 GFP_KERNEL);
243 if (!victim)
244 return -ENOMEM;
245
246 victim->ec = aeb->ec;
247 victim->pnum = aeb->pnum;
248 list_add_tail(&victim->u.list, &ai->erase);
249
250 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
251 av->last_data_size = \
252 be32_to_cpu(new_vh->data_size);
253
254 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
255 av->vol_id, aeb->lnum, new_aeb->pnum);
256
257 aeb->ec = new_aeb->ec;
258 aeb->pnum = new_aeb->pnum;
259 aeb->copy_flag = new_vh->copy_flag;
260 aeb->scrub = new_aeb->scrub;
261 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
262
263 /* new_aeb is older */
264 } else {
265 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
266 av->vol_id, aeb->lnum, new_aeb->pnum);
267 list_add_tail(&new_aeb->u.list, &ai->erase);
268 }
269
270 return 0;
271 }
272 /* This LEB is new, let's add it to the volume */
273
274 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
275 av->highest_lnum = be32_to_cpu(new_vh->lnum);
276 av->last_data_size = be32_to_cpu(new_vh->data_size);
277 }
278
279 if (av->vol_type == UBI_STATIC_VOLUME)
280 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
281
282 av->leb_count++;
283
284 rb_link_node(&new_aeb->u.rb, parent, p);
285 rb_insert_color(&new_aeb->u.rb, &av->root);
286
287 return 0;
288}
289
290/**
291 * process_pool_aeb - we found a non-empty PEB in a pool.
292 * @ubi: UBI device object
293 * @ai: attach info object
294 * @new_vh: the volume header derived from new_aeb
295 * @new_aeb: the AEB to be examined
296 *
297 * Returns 0 on success, < 0 indicates an internal error.
298 */
299static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
300 struct ubi_vid_hdr *new_vh,
301 struct ubi_ainf_peb *new_aeb)
302{
303 struct ubi_ainf_volume *av, *tmp_av = NULL;
304 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
305 int found = 0;
306
307 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
308 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
309 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
310
311 return 0;
312 }
313
314 /* Find the volume this SEB belongs to */
315 while (*p) {
316 parent = *p;
317 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
318
319 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
320 p = &(*p)->rb_left;
321 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
322 p = &(*p)->rb_right;
323 else {
324 found = 1;
325 break;
326 }
327 }
328
329 if (found)
330 av = tmp_av;
331 else {
332 ubi_err("orphaned volume in fastmap pool!");
333 return UBI_BAD_FASTMAP;
334 }
335
336 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
337
338 return update_vol(ubi, ai, av, new_vh, new_aeb);
339}
340
341/**
342 * unmap_peb - unmap a PEB.
343 * If fastmap detects a free PEB in the pool it has to check whether
344 * this PEB has been unmapped after writing the fastmap.
345 *
346 * @ai: UBI attach info object
347 * @pnum: The PEB to be unmapped
348 */
349static void unmap_peb(struct ubi_attach_info *ai, int pnum)
350{
351 struct ubi_ainf_volume *av;
352 struct rb_node *node, *node2;
353 struct ubi_ainf_peb *aeb;
354
355 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
356 av = rb_entry(node, struct ubi_ainf_volume, rb);
357
358 for (node2 = rb_first(&av->root); node2;
359 node2 = rb_next(node2)) {
360 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
361 if (aeb->pnum == pnum) {
362 rb_erase(&aeb->u.rb, &av->root);
363 kmem_cache_free(ai->aeb_slab_cache, aeb);
364 return;
365 }
366 }
367 }
368}
369
370/**
371 * scan_pool - scans a pool for changed (no longer empty PEBs).
372 * @ubi: UBI device object
373 * @ai: attach info object
374 * @pebs: an array of all PEB numbers in the to be scanned pool
375 * @pool_size: size of the pool (number of entries in @pebs)
376 * @max_sqnum: pointer to the maximal sequence number
377 * @eba_orphans: list of PEBs which need to be scanned
378 * @free: list of PEBs which are most likely free (and go into @ai->free)
379 *
380 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
381 * < 0 indicates an internal error.
382 */
383static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
384 int *pebs, int pool_size, unsigned long long *max_sqnum,
385 struct list_head *eba_orphans, struct list_head *free)
386{
387 struct ubi_vid_hdr *vh;
388 struct ubi_ec_hdr *ech;
389 struct ubi_ainf_peb *new_aeb, *tmp_aeb;
390 int i, pnum, err, found_orphan, ret = 0;
391
392 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
393 if (!ech)
394 return -ENOMEM;
395
396 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
397 if (!vh) {
398 kfree(ech);
399 return -ENOMEM;
400 }
401
402 dbg_bld("scanning fastmap pool: size = %i", pool_size);
403
404 /*
405 * Now scan all PEBs in the pool to find changes which have been made
406 * after the creation of the fastmap
407 */
408 for (i = 0; i < pool_size; i++) {
409 int scrub = 0;
410
411 pnum = be32_to_cpu(pebs[i]);
412
413 if (ubi_io_is_bad(ubi, pnum)) {
414 ubi_err("bad PEB in fastmap pool!");
415 ret = UBI_BAD_FASTMAP;
416 goto out;
417 }
418
419 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
420 if (err && err != UBI_IO_BITFLIPS) {
421 ubi_err("unable to read EC header! PEB:%i err:%i",
422 pnum, err);
423 ret = err > 0 ? UBI_BAD_FASTMAP : err;
424 goto out;
425 } else if (ret == UBI_IO_BITFLIPS)
426 scrub = 1;
427
428 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
429 ubi_err("bad image seq: 0x%x, expected: 0x%x",
430 be32_to_cpu(ech->image_seq), ubi->image_seq);
431 err = UBI_BAD_FASTMAP;
432 goto out;
433 }
434
435 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
436 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
437 unsigned long long ec = be64_to_cpu(ech->ec);
438 unmap_peb(ai, pnum);
439 dbg_bld("Adding PEB to free: %i", pnum);
440 if (err == UBI_IO_FF_BITFLIPS)
441 add_aeb(ai, free, pnum, ec, 1);
442 else
443 add_aeb(ai, free, pnum, ec, 0);
444 continue;
445 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
446 dbg_bld("Found non empty PEB:%i in pool", pnum);
447
448 if (err == UBI_IO_BITFLIPS)
449 scrub = 1;
450
451 found_orphan = 0;
452 list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
453 if (tmp_aeb->pnum == pnum) {
454 found_orphan = 1;
455 break;
456 }
457 }
458 if (found_orphan) {
459 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
460 list_del(&tmp_aeb->u.list);
461 }
462
463 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
464 GFP_KERNEL);
465 if (!new_aeb) {
466 ret = -ENOMEM;
467 goto out;
468 }
469
470 new_aeb->ec = be64_to_cpu(ech->ec);
471 new_aeb->pnum = pnum;
472 new_aeb->lnum = be32_to_cpu(vh->lnum);
473 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
474 new_aeb->copy_flag = vh->copy_flag;
475 new_aeb->scrub = scrub;
476
477 if (*max_sqnum < new_aeb->sqnum)
478 *max_sqnum = new_aeb->sqnum;
479
480 err = process_pool_aeb(ubi, ai, vh, new_aeb);
481 if (err) {
482 ret = err > 0 ? UBI_BAD_FASTMAP : err;
483 goto out;
484 }
485 } else {
486 /* We are paranoid and fall back to scanning mode */
487 ubi_err("fastmap pool PEBs contains damaged PEBs!");
488 ret = err > 0 ? UBI_BAD_FASTMAP : err;
489 goto out;
490 }
491
492 }
493
494out:
495 ubi_free_vid_hdr(ubi, vh);
496 kfree(ech);
497 return ret;
498}
499
500/**
501 * count_fastmap_pebs - Counts the PEBs found by fastmap.
502 * @ai: The UBI attach info object
503 */
504static int count_fastmap_pebs(struct ubi_attach_info *ai)
505{
506 struct ubi_ainf_peb *aeb;
507 struct ubi_ainf_volume *av;
508 struct rb_node *rb1, *rb2;
509 int n = 0;
510
511 list_for_each_entry(aeb, &ai->erase, u.list)
512 n++;
513
514 list_for_each_entry(aeb, &ai->free, u.list)
515 n++;
516
517 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
518 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
519 n++;
520
521 return n;
522}
523
524/**
525 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
526 * @ubi: UBI device object
527 * @ai: UBI attach info object
528 * @fm: the fastmap to be attached
529 *
530 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
531 * < 0 indicates an internal error.
532 */
533static int ubi_attach_fastmap(struct ubi_device *ubi,
534 struct ubi_attach_info *ai,
535 struct ubi_fastmap_layout *fm)
536{
537 struct list_head used, eba_orphans, free;
538 struct ubi_ainf_volume *av;
539 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
540 struct ubi_ec_hdr *ech;
541 struct ubi_fm_sb *fmsb;
542 struct ubi_fm_hdr *fmhdr;
543 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
544 struct ubi_fm_ec *fmec;
545 struct ubi_fm_volhdr *fmvhdr;
546 struct ubi_fm_eba *fm_eba;
547 int ret, i, j, pool_size, wl_pool_size;
548 size_t fm_pos = 0, fm_size = ubi->fm_size;
549 unsigned long long max_sqnum = 0;
550 void *fm_raw = ubi->fm_buf;
551
552 INIT_LIST_HEAD(&used);
553 INIT_LIST_HEAD(&free);
554 INIT_LIST_HEAD(&eba_orphans);
555 INIT_LIST_HEAD(&ai->corr);
556 INIT_LIST_HEAD(&ai->free);
557 INIT_LIST_HEAD(&ai->erase);
558 INIT_LIST_HEAD(&ai->alien);
559 ai->volumes = RB_ROOT;
560 ai->min_ec = UBI_MAX_ERASECOUNTER;
561
562 ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
563 sizeof(struct ubi_ainf_peb),
564 0, 0, NULL);
565 if (!ai->aeb_slab_cache) {
566 ret = -ENOMEM;
567 goto fail;
568 }
569
570 fmsb = (struct ubi_fm_sb *)(fm_raw);
571 ai->max_sqnum = fmsb->sqnum;
572 fm_pos += sizeof(struct ubi_fm_sb);
573 if (fm_pos >= fm_size)
574 goto fail_bad;
575
576 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
577 fm_pos += sizeof(*fmhdr);
578 if (fm_pos >= fm_size)
579 goto fail_bad;
580
581 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
582 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
583 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
584 goto fail_bad;
585 }
586
587 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
588 fm_pos += sizeof(*fmpl1);
589 if (fm_pos >= fm_size)
590 goto fail_bad;
591 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
592 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
593 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
594 goto fail_bad;
595 }
596
597 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
598 fm_pos += sizeof(*fmpl2);
599 if (fm_pos >= fm_size)
600 goto fail_bad;
601 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
602 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
603 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
604 goto fail_bad;
605 }
606
607 pool_size = be16_to_cpu(fmpl1->size);
608 wl_pool_size = be16_to_cpu(fmpl2->size);
609 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
610 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
611
612 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
613 ubi_err("bad pool size: %i", pool_size);
614 goto fail_bad;
615 }
616
617 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
618 ubi_err("bad WL pool size: %i", wl_pool_size);
619 goto fail_bad;
620 }
621
622
623 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
624 fm->max_pool_size < 0) {
625 ubi_err("bad maximal pool size: %i", fm->max_pool_size);
626 goto fail_bad;
627 }
628
629 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
630 fm->max_wl_pool_size < 0) {
631 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
632 goto fail_bad;
633 }
634
635 /* read EC values from free list */
636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
638 fm_pos += sizeof(*fmec);
639 if (fm_pos >= fm_size)
640 goto fail_bad;
641
642 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
643 be32_to_cpu(fmec->ec), 0);
644 }
645
646 /* read EC values from used list */
647 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
648 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
649 fm_pos += sizeof(*fmec);
650 if (fm_pos >= fm_size)
651 goto fail_bad;
652
653 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
654 be32_to_cpu(fmec->ec), 0);
655 }
656
657 /* read EC values from scrub list */
658 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
659 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
660 fm_pos += sizeof(*fmec);
661 if (fm_pos >= fm_size)
662 goto fail_bad;
663
664 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
665 be32_to_cpu(fmec->ec), 1);
666 }
667
668 /* read EC values from erase list */
669 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
670 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
671 fm_pos += sizeof(*fmec);
672 if (fm_pos >= fm_size)
673 goto fail_bad;
674
675 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
676 be32_to_cpu(fmec->ec), 1);
677 }
678
679 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
680 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
681
682 /* Iterate over all volumes and read their EBA table */
683 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
684 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
685 fm_pos += sizeof(*fmvhdr);
686 if (fm_pos >= fm_size)
687 goto fail_bad;
688
689 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
690 ubi_err("bad fastmap vol header magic: 0x%x, " \
691 "expected: 0x%x",
692 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
693 goto fail_bad;
694 }
695
696 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
697 be32_to_cpu(fmvhdr->used_ebs),
698 be32_to_cpu(fmvhdr->data_pad),
699 fmvhdr->vol_type,
700 be32_to_cpu(fmvhdr->last_eb_bytes));
701
702 if (!av)
703 goto fail_bad;
704
705 ai->vols_found++;
706 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
707 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
708
709 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fm_eba);
711 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
712 if (fm_pos >= fm_size)
713 goto fail_bad;
714
715 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
716 ubi_err("bad fastmap EBA header magic: 0x%x, " \
717 "expected: 0x%x",
718 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
719 goto fail_bad;
720 }
721
722 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
723 int pnum = be32_to_cpu(fm_eba->pnum[j]);
724
725 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
726 continue;
727
728 aeb = NULL;
729 list_for_each_entry(tmp_aeb, &used, u.list) {
730 if (tmp_aeb->pnum == pnum)
731 aeb = tmp_aeb;
732 }
733
734 /* This can happen if a PEB is already in an EBA known
735 * by this fastmap but the PEB itself is not in the used
736 * list.
737 * In this case the PEB can be within the fastmap pool
738 * or while writing the fastmap it was in the protection
739 * queue.
740 */
741 if (!aeb) {
742 aeb = kmem_cache_alloc(ai->aeb_slab_cache,
743 GFP_KERNEL);
744 if (!aeb) {
745 ret = -ENOMEM;
746
747 goto fail;
748 }
749
750 aeb->lnum = j;
751 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
752 aeb->ec = -1;
753 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
754 list_add_tail(&aeb->u.list, &eba_orphans);
755 continue;
756 }
757
758 aeb->lnum = j;
759
760 if (av->highest_lnum <= aeb->lnum)
761 av->highest_lnum = aeb->lnum;
762
763 assign_aeb_to_av(ai, aeb, av);
764
765 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
766 aeb->pnum, aeb->lnum, av->vol_id);
767 }
768
769 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
770 if (!ech) {
771 ret = -ENOMEM;
772 goto fail;
773 }
774
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
776 u.list) {
777 int err;
778
779 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
780 ubi_err("bad PEB in fastmap EBA orphan list");
781 ret = UBI_BAD_FASTMAP;
782 kfree(ech);
783 goto fail;
784 }
785
786 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
787 if (err && err != UBI_IO_BITFLIPS) {
788 ubi_err("unable to read EC header! PEB:%i " \
789 "err:%i", tmp_aeb->pnum, err);
790 ret = err > 0 ? UBI_BAD_FASTMAP : err;
791 kfree(ech);
792
793 goto fail;
794 } else if (err == UBI_IO_BITFLIPS)
795 tmp_aeb->scrub = 1;
796
797 tmp_aeb->ec = be64_to_cpu(ech->ec);
798 assign_aeb_to_av(ai, tmp_aeb, av);
799 }
800
801 kfree(ech);
802 }
803
804 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
805 &eba_orphans, &free);
806 if (ret)
807 goto fail;
808
809 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
810 &eba_orphans, &free);
811 if (ret)
812 goto fail;
813
814 if (max_sqnum > ai->max_sqnum)
815 ai->max_sqnum = max_sqnum;
816
817 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
818 list_move_tail(&tmp_aeb->u.list, &ai->free);
819
820 /*
821 * If fastmap is leaking PEBs (must not happen), raise a
822 * fat warning and fall back to scanning mode.
823 * We do this here because in ubi_wl_init() it's too late
824 * and we cannot fall back to scanning.
825 */
826 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
827 ai->bad_peb_count - fm->used_blocks))
828 goto fail_bad;
829
830 return 0;
831
832fail_bad:
833 ret = UBI_BAD_FASTMAP;
834fail:
835 return ret;
836}
837
838/**
839 * ubi_scan_fastmap - scan the fastmap.
840 * @ubi: UBI device object
841 * @ai: UBI attach info to be filled
842 * @fm_anchor: The fastmap starts at this PEB
843 *
844 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
845 * UBI_BAD_FASTMAP if one was found but is not usable.
846 * < 0 indicates an internal error.
847 */
848int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
849 int fm_anchor)
850{
851 struct ubi_fm_sb *fmsb, *fmsb2;
852 struct ubi_vid_hdr *vh;
853 struct ubi_ec_hdr *ech;
854 struct ubi_fastmap_layout *fm;
855 int i, used_blocks, pnum, ret = 0;
856 size_t fm_size;
857 __be32 crc, tmp_crc;
858 unsigned long long sqnum = 0;
859
860 mutex_lock(&ubi->fm_mutex);
861 memset(ubi->fm_buf, 0, ubi->fm_size);
862
863 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
864 if (!fmsb) {
865 ret = -ENOMEM;
866 goto out;
867 }
868
869 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
870 if (!fm) {
871 ret = -ENOMEM;
872 kfree(fmsb);
873 goto out;
874 }
875
876 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
877 if (ret && ret != UBI_IO_BITFLIPS)
878 goto free_fm_sb;
879 else if (ret == UBI_IO_BITFLIPS)
880 fm->to_be_tortured[0] = 1;
881
882 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
883 ubi_err("bad super block magic: 0x%x, expected: 0x%x",
884 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
885 ret = UBI_BAD_FASTMAP;
886 goto free_fm_sb;
887 }
888
889 if (fmsb->version != UBI_FM_FMT_VERSION) {
890 ubi_err("bad fastmap version: %i, expected: %i",
891 fmsb->version, UBI_FM_FMT_VERSION);
892 ret = UBI_BAD_FASTMAP;
893 goto free_fm_sb;
894 }
895
896 used_blocks = be32_to_cpu(fmsb->used_blocks);
897 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
898 ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
899 ret = UBI_BAD_FASTMAP;
900 goto free_fm_sb;
901 }
902
903 fm_size = ubi->leb_size * used_blocks;
904 if (fm_size != ubi->fm_size) {
905 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
906 ubi->fm_size);
907 ret = UBI_BAD_FASTMAP;
908 goto free_fm_sb;
909 }
910
911 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
912 if (!ech) {
913 ret = -ENOMEM;
914 goto free_fm_sb;
915 }
916
917 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
918 if (!vh) {
919 ret = -ENOMEM;
920 goto free_hdr;
921 }
922
923 for (i = 0; i < used_blocks; i++) {
924 pnum = be32_to_cpu(fmsb->block_loc[i]);
925
926 if (ubi_io_is_bad(ubi, pnum)) {
927 ret = UBI_BAD_FASTMAP;
928 goto free_hdr;
929 }
930
931 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
932 if (ret && ret != UBI_IO_BITFLIPS) {
933 ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
934 i, pnum);
935 if (ret > 0)
936 ret = UBI_BAD_FASTMAP;
937 goto free_hdr;
938 } else if (ret == UBI_IO_BITFLIPS)
939 fm->to_be_tortured[i] = 1;
940
941 if (!ubi->image_seq)
942 ubi->image_seq = be32_to_cpu(ech->image_seq);
943
944 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
945 ret = UBI_BAD_FASTMAP;
946 goto free_hdr;
947 }
948
949 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
950 if (ret && ret != UBI_IO_BITFLIPS) {
951 ubi_err("unable to read fastmap block# %i (PEB: %i)",
952 i, pnum);
953 goto free_hdr;
954 }
955
956 if (i == 0) {
957 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
958 ubi_err("bad fastmap anchor vol_id: 0x%x," \
959 " expected: 0x%x",
960 be32_to_cpu(vh->vol_id),
961 UBI_FM_SB_VOLUME_ID);
962 ret = UBI_BAD_FASTMAP;
963 goto free_hdr;
964 }
965 } else {
966 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
967 ubi_err("bad fastmap data vol_id: 0x%x," \
968 " expected: 0x%x",
969 be32_to_cpu(vh->vol_id),
970 UBI_FM_DATA_VOLUME_ID);
971 ret = UBI_BAD_FASTMAP;
972 goto free_hdr;
973 }
974 }
975
976 if (sqnum < be64_to_cpu(vh->sqnum))
977 sqnum = be64_to_cpu(vh->sqnum);
978
979 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
980 ubi->leb_start, ubi->leb_size);
981 if (ret && ret != UBI_IO_BITFLIPS) {
982 ubi_err("unable to read fastmap block# %i (PEB: %i, " \
983 "err: %i)", i, pnum, ret);
984 goto free_hdr;
985 }
986 }
987
988 kfree(fmsb);
989 fmsb = NULL;
990
991 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
992 tmp_crc = be32_to_cpu(fmsb2->data_crc);
993 fmsb2->data_crc = 0;
994 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
995 if (crc != tmp_crc) {
996 ubi_err("fastmap data CRC is invalid");
997 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
998 ret = UBI_BAD_FASTMAP;
999 goto free_hdr;
1000 }
1001
1002 fmsb2->sqnum = sqnum;
1003
1004 fm->used_blocks = used_blocks;
1005
1006 ret = ubi_attach_fastmap(ubi, ai, fm);
1007 if (ret) {
1008 if (ret > 0)
1009 ret = UBI_BAD_FASTMAP;
1010 goto free_hdr;
1011 }
1012
1013 for (i = 0; i < used_blocks; i++) {
1014 struct ubi_wl_entry *e;
1015
1016 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1017 if (!e) {
1018 while (i--)
1019 kfree(fm->e[i]);
1020
1021 ret = -ENOMEM;
1022 goto free_hdr;
1023 }
1024
1025 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1026 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1027 fm->e[i] = e;
1028 }
1029
1030 ubi->fm = fm;
1031 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1032 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1033 ubi_msg("attached by fastmap");
1034 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1035 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1036 ubi->fm_disabled = 0;
1037
1038 ubi_free_vid_hdr(ubi, vh);
1039 kfree(ech);
1040out:
1041 mutex_unlock(&ubi->fm_mutex);
1042 if (ret == UBI_BAD_FASTMAP)
1043 ubi_err("Attach by fastmap failed, doing a full scan!");
1044 return ret;
1045
1046free_hdr:
1047 ubi_free_vid_hdr(ubi, vh);
1048 kfree(ech);
1049free_fm_sb:
1050 kfree(fmsb);
1051 kfree(fm);
1052 goto out;
1053}
1054
1055/**
1056 * ubi_write_fastmap - writes a fastmap.
1057 * @ubi: UBI device object
1058 * @new_fm: the to be written fastmap
1059 *
1060 * Returns 0 on success, < 0 indicates an internal error.
1061 */
1062static int ubi_write_fastmap(struct ubi_device *ubi,
1063 struct ubi_fastmap_layout *new_fm)
1064{
1065 size_t fm_pos = 0;
1066 void *fm_raw;
1067 struct ubi_fm_sb *fmsb;
1068 struct ubi_fm_hdr *fmh;
1069 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1070 struct ubi_fm_ec *fec;
1071 struct ubi_fm_volhdr *fvh;
1072 struct ubi_fm_eba *feba;
1073 struct rb_node *node;
1074 struct ubi_wl_entry *wl_e;
1075 struct ubi_volume *vol;
1076 struct ubi_vid_hdr *avhdr, *dvhdr;
1077 struct ubi_work *ubi_wrk;
1078 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1079 int scrub_peb_count, erase_peb_count;
1080
1081 fm_raw = ubi->fm_buf;
1082 memset(ubi->fm_buf, 0, ubi->fm_size);
1083
1084 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1085 if (!avhdr) {
1086 ret = -ENOMEM;
1087 goto out;
1088 }
1089
1090 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1091 if (!dvhdr) {
1092 ret = -ENOMEM;
1093 goto out_kfree;
1094 }
1095
1096 spin_lock(&ubi->volumes_lock);
1097 spin_lock(&ubi->wl_lock);
1098
1099 fmsb = (struct ubi_fm_sb *)fm_raw;
1100 fm_pos += sizeof(*fmsb);
1101 ubi_assert(fm_pos <= ubi->fm_size);
1102
1103 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1104 fm_pos += sizeof(*fmh);
1105 ubi_assert(fm_pos <= ubi->fm_size);
1106
1107 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1108 fmsb->version = UBI_FM_FMT_VERSION;
1109 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1110 /* the max sqnum will be filled in while *reading* the fastmap */
1111 fmsb->sqnum = 0;
1112
1113 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1114 free_peb_count = 0;
1115 used_peb_count = 0;
1116 scrub_peb_count = 0;
1117 erase_peb_count = 0;
1118 vol_count = 0;
1119
1120 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1121 fm_pos += sizeof(*fmpl1);
1122 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1123 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1124 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1125
1126 for (i = 0; i < ubi->fm_pool.size; i++)
1127 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1128
1129 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1130 fm_pos += sizeof(*fmpl2);
1131 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1132 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1133 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1134
1135 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1136 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1137
1138 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1139 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1140 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1141
1142 fec->pnum = cpu_to_be32(wl_e->pnum);
1143 fec->ec = cpu_to_be32(wl_e->ec);
1144
1145 free_peb_count++;
1146 fm_pos += sizeof(*fec);
1147 ubi_assert(fm_pos <= ubi->fm_size);
1148 }
1149 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1150
1151 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1152 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1153 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1154
1155 fec->pnum = cpu_to_be32(wl_e->pnum);
1156 fec->ec = cpu_to_be32(wl_e->ec);
1157
1158 used_peb_count++;
1159 fm_pos += sizeof(*fec);
1160 ubi_assert(fm_pos <= ubi->fm_size);
1161 }
1162 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1163
1164 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1165 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1166 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1167
1168 fec->pnum = cpu_to_be32(wl_e->pnum);
1169 fec->ec = cpu_to_be32(wl_e->ec);
1170
1171 scrub_peb_count++;
1172 fm_pos += sizeof(*fec);
1173 ubi_assert(fm_pos <= ubi->fm_size);
1174 }
1175 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1176
1177
1178 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1179 if (ubi_is_erase_work(ubi_wrk)) {
1180 wl_e = ubi_wrk->e;
1181 ubi_assert(wl_e);
1182
1183 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1184
1185 fec->pnum = cpu_to_be32(wl_e->pnum);
1186 fec->ec = cpu_to_be32(wl_e->ec);
1187
1188 erase_peb_count++;
1189 fm_pos += sizeof(*fec);
1190 ubi_assert(fm_pos <= ubi->fm_size);
1191 }
1192 }
1193 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1194
1195 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1196 vol = ubi->volumes[i];
1197
1198 if (!vol)
1199 continue;
1200
1201 vol_count++;
1202
1203 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1204 fm_pos += sizeof(*fvh);
1205 ubi_assert(fm_pos <= ubi->fm_size);
1206
1207 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1208 fvh->vol_id = cpu_to_be32(vol->vol_id);
1209 fvh->vol_type = vol->vol_type;
1210 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1211 fvh->data_pad = cpu_to_be32(vol->data_pad);
1212 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1213
1214 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1215 vol->vol_type == UBI_STATIC_VOLUME);
1216
1217 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1218 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1219 ubi_assert(fm_pos <= ubi->fm_size);
1220
1221 for (j = 0; j < vol->reserved_pebs; j++)
1222 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1223
1224 feba->reserved_pebs = cpu_to_be32(j);
1225 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1226 }
1227 fmh->vol_count = cpu_to_be32(vol_count);
1228 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1229
1230 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1231 avhdr->lnum = 0;
1232
1233 spin_unlock(&ubi->wl_lock);
1234 spin_unlock(&ubi->volumes_lock);
1235
1236 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1237 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1238 if (ret) {
1239 ubi_err("unable to write vid_hdr to fastmap SB!");
1240 goto out_kfree;
1241 }
1242
1243 for (i = 0; i < new_fm->used_blocks; i++) {
1244 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1245 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1246 }
1247
1248 fmsb->data_crc = 0;
1249 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1250 ubi->fm_size));
1251
1252 for (i = 1; i < new_fm->used_blocks; i++) {
1253 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1254 dvhdr->lnum = cpu_to_be32(i);
1255 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1256 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1257 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1258 if (ret) {
1259 ubi_err("unable to write vid_hdr to PEB %i!",
1260 new_fm->e[i]->pnum);
1261 goto out_kfree;
1262 }
1263 }
1264
1265 for (i = 0; i < new_fm->used_blocks; i++) {
1266 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1267 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1268 if (ret) {
1269 ubi_err("unable to write fastmap to PEB %i!",
1270 new_fm->e[i]->pnum);
1271 goto out_kfree;
1272 }
1273 }
1274
1275 ubi_assert(new_fm);
1276 ubi->fm = new_fm;
1277
1278 dbg_bld("fastmap written!");
1279
1280out_kfree:
1281 ubi_free_vid_hdr(ubi, avhdr);
1282 ubi_free_vid_hdr(ubi, dvhdr);
1283out:
1284 return ret;
1285}
1286
1287/**
1288 * erase_block - Manually erase a PEB.
1289 * @ubi: UBI device object
1290 * @pnum: PEB to be erased
1291 *
1292 * Returns the new EC value on success, < 0 indicates an internal error.
1293 */
1294static int erase_block(struct ubi_device *ubi, int pnum)
1295{
1296 int ret;
1297 struct ubi_ec_hdr *ec_hdr;
1298 long long ec;
1299
1300 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1301 if (!ec_hdr)
1302 return -ENOMEM;
1303
1304 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1305 if (ret < 0)
1306 goto out;
1307 else if (ret && ret != UBI_IO_BITFLIPS) {
1308 ret = -EINVAL;
1309 goto out;
1310 }
1311
1312 ret = ubi_io_sync_erase(ubi, pnum, 0);
1313 if (ret < 0)
1314 goto out;
1315
1316 ec = be64_to_cpu(ec_hdr->ec);
1317 ec += ret;
1318 if (ec > UBI_MAX_ERASECOUNTER) {
1319 ret = -EINVAL;
1320 goto out;
1321 }
1322
1323 ec_hdr->ec = cpu_to_be64(ec);
1324 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1325 if (ret < 0)
1326 goto out;
1327
1328 ret = ec;
1329out:
1330 kfree(ec_hdr);
1331 return ret;
1332}
1333
1334/**
1335 * invalidate_fastmap - destroys a fastmap.
1336 * @ubi: UBI device object
1337 * @fm: the fastmap to be destroyed
1338 *
1339 * Returns 0 on success, < 0 indicates an internal error.
1340 */
1341static int invalidate_fastmap(struct ubi_device *ubi,
1342 struct ubi_fastmap_layout *fm)
1343{
1344 int ret, i;
1345 struct ubi_vid_hdr *vh;
1346
1347 ret = erase_block(ubi, fm->e[0]->pnum);
1348 if (ret < 0)
1349 return ret;
1350
1351 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1352 if (!vh)
1353 return -ENOMEM;
1354
1355 /* deleting the current fastmap SB is not enough, an old SB may exist,
1356 * so create a (corrupted) SB such that fastmap will find it and fall
1357 * back to scanning mode in any case */
1358 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1359 ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1360
1361 for (i = 0; i < fm->used_blocks; i++)
1362 ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
1363
1364 return ret;
1365}
1366
1367/**
1368 * ubi_update_fastmap - will be called by UBI if a volume changes or
1369 * a fastmap pool becomes full.
1370 * @ubi: UBI device object
1371 *
1372 * Returns 0 on success, < 0 indicates an internal error.
1373 */
1374int ubi_update_fastmap(struct ubi_device *ubi)
1375{
1376 int ret, i;
1377 struct ubi_fastmap_layout *new_fm, *old_fm;
1378 struct ubi_wl_entry *tmp_e;
1379
1380 mutex_lock(&ubi->fm_mutex);
1381
1382 ubi_refill_pools(ubi);
1383
1384 if (ubi->ro_mode || ubi->fm_disabled) {
1385 mutex_unlock(&ubi->fm_mutex);
1386 return 0;
1387 }
1388
1389 ret = ubi_ensure_anchor_pebs(ubi);
1390 if (ret) {
1391 mutex_unlock(&ubi->fm_mutex);
1392 return ret;
1393 }
1394
1395 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1396 if (!new_fm) {
1397 mutex_unlock(&ubi->fm_mutex);
1398 return -ENOMEM;
1399 }
1400
1401 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1402
1403 for (i = 0; i < new_fm->used_blocks; i++) {
1404 new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1405 if (!new_fm->e[i]) {
1406 while (i--)
1407 kfree(new_fm->e[i]);
1408
1409 kfree(new_fm);
1410 mutex_unlock(&ubi->fm_mutex);
1411 return -ENOMEM;
1412 }
1413 }
1414
1415 old_fm = ubi->fm;
1416 ubi->fm = NULL;
1417
1418 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1419 ubi_err("fastmap too large");
1420 ret = -ENOSPC;
1421 goto err;
1422 }
1423
1424 for (i = 1; i < new_fm->used_blocks; i++) {
1425 spin_lock(&ubi->wl_lock);
1426 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1427 spin_unlock(&ubi->wl_lock);
1428
1429 if (!tmp_e && !old_fm) {
1430 int j;
1431 ubi_err("could not get any free erase block");
1432
1433 for (j = 1; j < i; j++)
1434 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1435
1436 ret = -ENOSPC;
1437 goto err;
1438 } else if (!tmp_e && old_fm) {
1439 ret = erase_block(ubi, old_fm->e[i]->pnum);
1440 if (ret < 0) {
1441 int j;
1442
1443 for (j = 1; j < i; j++)
1444 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1445 j, 0);
1446
1447 ubi_err("could not erase old fastmap PEB");
1448 goto err;
1449 }
1450
1451 new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1452 new_fm->e[i]->ec = old_fm->e[i]->ec;
1453 } else {
1454 new_fm->e[i]->pnum = tmp_e->pnum;
1455 new_fm->e[i]->ec = tmp_e->ec;
1456
1457 if (old_fm)
1458 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1459 old_fm->to_be_tortured[i]);
1460 }
1461 }
1462
1463 spin_lock(&ubi->wl_lock);
1464 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1465 spin_unlock(&ubi->wl_lock);
1466
1467 if (old_fm) {
1468 /* no fresh anchor PEB was found, reuse the old one */
1469 if (!tmp_e) {
1470 ret = erase_block(ubi, old_fm->e[0]->pnum);
1471 if (ret < 0) {
1472 int i;
1473 ubi_err("could not erase old anchor PEB");
1474
1475 for (i = 1; i < new_fm->used_blocks; i++)
1476 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1477 i, 0);
1478 goto err;
1479 }
1480
1481 new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1482 new_fm->e[0]->ec = ret;
1483 } else {
1484 /* we've got a new anchor PEB, return the old one */
1485 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1486 old_fm->to_be_tortured[0]);
1487
1488 new_fm->e[0]->pnum = tmp_e->pnum;
1489 new_fm->e[0]->ec = tmp_e->ec;
1490 }
1491 } else {
1492 if (!tmp_e) {
1493 int i;
1494 ubi_err("could not find any anchor PEB");
1495
1496 for (i = 1; i < new_fm->used_blocks; i++)
1497 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1498
1499 ret = -ENOSPC;
1500 goto err;
1501 }
1502
1503 new_fm->e[0]->pnum = tmp_e->pnum;
1504 new_fm->e[0]->ec = tmp_e->ec;
1505 }
1506
1507 down_write(&ubi->work_sem);
1508 down_write(&ubi->fm_sem);
1509 ret = ubi_write_fastmap(ubi, new_fm);
1510 up_write(&ubi->fm_sem);
1511 up_write(&ubi->work_sem);
1512
1513 if (ret)
1514 goto err;
1515
1516out_unlock:
1517 mutex_unlock(&ubi->fm_mutex);
1518 kfree(old_fm);
1519 return ret;
1520
1521err:
1522 kfree(new_fm);
1523
1524 ubi_warn("Unable to write new fastmap, err=%i", ret);
1525
1526 ret = 0;
1527 if (old_fm) {
1528 ret = invalidate_fastmap(ubi, old_fm);
1529 if (ret < 0)
1530 ubi_err("Unable to invalidiate current fastmap!");
1531 else if (ret)
1532 ret = 0;
1533 }
1534 goto out_unlock;
1535}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index b93807b4c45..941bc3c05d6 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -41,7 +41,7 @@
41#include "ubi-media.h" 41#include "ubi-media.h"
42 42
43#define err_msg(fmt, ...) \ 43#define err_msg(fmt, ...) \
44 pr_err("gluebi (pid %d): %s: " fmt "\n", \ 44 printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
45 current->pid, __func__, ##__VA_ARGS__) 45 current->pid, __func__, ##__VA_ARGS__)
46 46
47/** 47/**
@@ -171,17 +171,21 @@ static void gluebi_put_device(struct mtd_info *mtd)
171static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, 171static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
172 size_t *retlen, unsigned char *buf) 172 size_t *retlen, unsigned char *buf)
173{ 173{
174 int err = 0, lnum, offs, bytes_left; 174 int err = 0, lnum, offs, total_read;
175 struct gluebi_device *gluebi; 175 struct gluebi_device *gluebi;
176 176
177 if (len < 0 || from < 0 || from + len > mtd->size)
178 return -EINVAL;
179
177 gluebi = container_of(mtd, struct gluebi_device, mtd); 180 gluebi = container_of(mtd, struct gluebi_device, mtd);
181
178 lnum = div_u64_rem(from, mtd->erasesize, &offs); 182 lnum = div_u64_rem(from, mtd->erasesize, &offs);
179 bytes_left = len; 183 total_read = len;
180 while (bytes_left) { 184 while (total_read) {
181 size_t to_read = mtd->erasesize - offs; 185 size_t to_read = mtd->erasesize - offs;
182 186
183 if (to_read > bytes_left) 187 if (to_read > total_read)
184 to_read = bytes_left; 188 to_read = total_read;
185 189
186 err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); 190 err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
187 if (err) 191 if (err)
@@ -189,11 +193,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
189 193
190 lnum += 1; 194 lnum += 1;
191 offs = 0; 195 offs = 0;
192 bytes_left -= to_read; 196 total_read -= to_read;
193 buf += to_read; 197 buf += to_read;
194 } 198 }
195 199
196 *retlen = len - bytes_left; 200 *retlen = len - total_read;
197 return err; 201 return err;
198} 202}
199 203
@@ -211,33 +215,40 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
211static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, 215static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
212 size_t *retlen, const u_char *buf) 216 size_t *retlen, const u_char *buf)
213{ 217{
214 int err = 0, lnum, offs, bytes_left; 218 int err = 0, lnum, offs, total_written;
215 struct gluebi_device *gluebi; 219 struct gluebi_device *gluebi;
216 220
221 if (len < 0 || to < 0 || len + to > mtd->size)
222 return -EINVAL;
223
217 gluebi = container_of(mtd, struct gluebi_device, mtd); 224 gluebi = container_of(mtd, struct gluebi_device, mtd);
225
226 if (!(mtd->flags & MTD_WRITEABLE))
227 return -EROFS;
228
218 lnum = div_u64_rem(to, mtd->erasesize, &offs); 229 lnum = div_u64_rem(to, mtd->erasesize, &offs);
219 230
220 if (len % mtd->writesize || offs % mtd->writesize) 231 if (len % mtd->writesize || offs % mtd->writesize)
221 return -EINVAL; 232 return -EINVAL;
222 233
223 bytes_left = len; 234 total_written = len;
224 while (bytes_left) { 235 while (total_written) {
225 size_t to_write = mtd->erasesize - offs; 236 size_t to_write = mtd->erasesize - offs;
226 237
227 if (to_write > bytes_left) 238 if (to_write > total_written)
228 to_write = bytes_left; 239 to_write = total_written;
229 240
230 err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write); 241 err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
231 if (err) 242 if (err)
232 break; 243 break;
233 244
234 lnum += 1; 245 lnum += 1;
235 offs = 0; 246 offs = 0;
236 bytes_left -= to_write; 247 total_written -= to_write;
237 buf += to_write; 248 buf += to_write;
238 } 249 }
239 250
240 *retlen = len - bytes_left; 251 *retlen = len - total_written;
241 return err; 252 return err;
242} 253}
243 254
@@ -254,13 +265,21 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
254 int err, i, lnum, count; 265 int err, i, lnum, count;
255 struct gluebi_device *gluebi; 266 struct gluebi_device *gluebi;
256 267
268 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
269 return -EINVAL;
270 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
271 return -EINVAL;
257 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) 272 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
258 return -EINVAL; 273 return -EINVAL;
259 274
260 lnum = mtd_div_by_eb(instr->addr, mtd); 275 lnum = mtd_div_by_eb(instr->addr, mtd);
261 count = mtd_div_by_eb(instr->len, mtd); 276 count = mtd_div_by_eb(instr->len, mtd);
277
262 gluebi = container_of(mtd, struct gluebi_device, mtd); 278 gluebi = container_of(mtd, struct gluebi_device, mtd);
263 279
280 if (!(mtd->flags & MTD_WRITEABLE))
281 return -EROFS;
282
264 for (i = 0; i < count - 1; i++) { 283 for (i = 0; i < count - 1; i++) {
265 err = ubi_leb_unmap(gluebi->desc, lnum + i); 284 err = ubi_leb_unmap(gluebi->desc, lnum + i);
266 if (err) 285 if (err)
@@ -321,11 +340,11 @@ static int gluebi_create(struct ubi_device_info *di,
321 mtd->owner = THIS_MODULE; 340 mtd->owner = THIS_MODULE;
322 mtd->writesize = di->min_io_size; 341 mtd->writesize = di->min_io_size;
323 mtd->erasesize = vi->usable_leb_size; 342 mtd->erasesize = vi->usable_leb_size;
324 mtd->_read = gluebi_read; 343 mtd->read = gluebi_read;
325 mtd->_write = gluebi_write; 344 mtd->write = gluebi_write;
326 mtd->_erase = gluebi_erase; 345 mtd->erase = gluebi_erase;
327 mtd->_get_device = gluebi_get_device; 346 mtd->get_device = gluebi_get_device;
328 mtd->_put_device = gluebi_put_device; 347 mtd->put_device = gluebi_put_device;
329 348
330 /* 349 /*
331 * In case of dynamic a volume, MTD device size is just volume size. In 350 * In case of dynamic a volume, MTD device size is just volume size. In
@@ -341,8 +360,9 @@ static int gluebi_create(struct ubi_device_info *di,
341 mutex_lock(&devices_mutex); 360 mutex_lock(&devices_mutex);
342 g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 361 g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
343 if (g) 362 if (g)
344 err_msg("gluebi MTD device %d form UBI device %d volume %d already exists", 363 err_msg("gluebi MTD device %d form UBI device %d volume %d "
345 g->mtd.index, vi->ubi_num, vi->vol_id); 364 "already exists", g->mtd.index, vi->ubi_num,
365 vi->vol_id);
346 mutex_unlock(&devices_mutex); 366 mutex_unlock(&devices_mutex);
347 367
348 if (mtd_device_register(mtd, NULL, 0)) { 368 if (mtd_device_register(mtd, NULL, 0)) {
@@ -375,8 +395,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
375 mutex_lock(&devices_mutex); 395 mutex_lock(&devices_mutex);
376 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 396 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
377 if (!gluebi) { 397 if (!gluebi) {
378 err_msg("got remove notification for unknown UBI device %d volume %d", 398 err_msg("got remove notification for unknown UBI device %d "
379 vi->ubi_num, vi->vol_id); 399 "volume %d", vi->ubi_num, vi->vol_id);
380 err = -ENOENT; 400 err = -ENOENT;
381 } else if (gluebi->refcnt) 401 } else if (gluebi->refcnt)
382 err = -EBUSY; 402 err = -EBUSY;
@@ -389,8 +409,9 @@ static int gluebi_remove(struct ubi_volume_info *vi)
389 mtd = &gluebi->mtd; 409 mtd = &gluebi->mtd;
390 err = mtd_device_unregister(mtd); 410 err = mtd_device_unregister(mtd);
391 if (err) { 411 if (err) {
392 err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d", 412 err_msg("cannot remove fake MTD device %d, UBI device %d, "
393 mtd->index, gluebi->ubi_num, gluebi->vol_id, err); 413 "volume %d, error %d", mtd->index, gluebi->ubi_num,
414 gluebi->vol_id, err);
394 mutex_lock(&devices_mutex); 415 mutex_lock(&devices_mutex);
395 list_add_tail(&gluebi->list, &gluebi_devices); 416 list_add_tail(&gluebi->list, &gluebi_devices);
396 mutex_unlock(&devices_mutex); 417 mutex_unlock(&devices_mutex);
@@ -420,8 +441,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
420 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 441 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
421 if (!gluebi) { 442 if (!gluebi) {
422 mutex_unlock(&devices_mutex); 443 mutex_unlock(&devices_mutex);
423 err_msg("got update notification for unknown UBI device %d volume %d", 444 err_msg("got update notification for unknown UBI device %d "
424 vi->ubi_num, vi->vol_id); 445 "volume %d", vi->ubi_num, vi->vol_id);
425 return -ENOENT; 446 return -ENOENT;
426 } 447 }
427 448
@@ -447,8 +468,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
447 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 468 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
448 if (!gluebi) { 469 if (!gluebi) {
449 mutex_unlock(&devices_mutex); 470 mutex_unlock(&devices_mutex);
450 err_msg("got update notification for unknown UBI device %d volume %d", 471 err_msg("got update notification for unknown UBI device %d "
451 vi->ubi_num, vi->vol_id); 472 "volume %d", vi->ubi_num, vi->vol_id);
452 return -ENOENT; 473 return -ENOENT;
453 } 474 }
454 gluebi->mtd.size = vi->used_bytes; 475 gluebi->mtd.size = vi->used_bytes;
@@ -505,9 +526,9 @@ static void __exit ubi_gluebi_exit(void)
505 526
506 err = mtd_device_unregister(mtd); 527 err = mtd_device_unregister(mtd);
507 if (err) 528 if (err)
508 err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring", 529 err_msg("error %d while removing gluebi MTD device %d, "
509 err, mtd->index, gluebi->ubi_num, 530 "UBI device %d, volume %d - ignoring", err,
510 gluebi->vol_id); 531 mtd->index, gluebi->ubi_num, gluebi->vol_id);
511 kfree(mtd->name); 532 kfree(mtd->name);
512 kfree(gluebi); 533 kfree(gluebi);
513 } 534 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index bf79def4012..6ba55c23587 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,15 +91,21 @@
91#include <linux/slab.h> 91#include <linux/slab.h>
92#include "ubi.h" 92#include "ubi.h"
93 93
94static int self_check_not_bad(const struct ubi_device *ubi, int pnum); 94#ifdef CONFIG_MTD_UBI_DEBUG
95static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); 95static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
96static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, 96static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
97 const struct ubi_ec_hdr *ec_hdr); 97static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
98static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); 98 const struct ubi_ec_hdr *ec_hdr);
99static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, 99static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
100 const struct ubi_vid_hdr *vid_hdr); 100static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
101static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, 101 const struct ubi_vid_hdr *vid_hdr);
102 int offset, int len); 102#else
103#define paranoid_check_not_bad(ubi, pnum) 0
104#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
105#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
106#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
107#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
108#endif
103 109
104/** 110/**
105 * ubi_io_read - read data from a physical eraseblock. 111 * ubi_io_read - read data from a physical eraseblock.
@@ -136,7 +142,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
136 ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); 142 ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
137 ubi_assert(len > 0); 143 ubi_assert(len > 0);
138 144
139 err = self_check_not_bad(ubi, pnum); 145 err = paranoid_check_not_bad(ubi, pnum);
140 if (err) 146 if (err)
141 return err; 147 return err;
142 148
@@ -164,11 +170,11 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
164 170
165 addr = (loff_t)pnum * ubi->peb_size + offset; 171 addr = (loff_t)pnum * ubi->peb_size + offset;
166retry: 172retry:
167 err = mtd_read(ubi->mtd, addr, len, &read, buf); 173 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
168 if (err) { 174 if (err) {
169 const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : ""; 175 const char *errstr = (err == -EBADMSG) ? " (ECC error)" : "";
170 176
171 if (mtd_is_bitflip(err)) { 177 if (err == -EUCLEAN) {
172 /* 178 /*
173 * -EUCLEAN is reported if there was a bit-flip which 179 * -EUCLEAN is reported if there was a bit-flip which
174 * was corrected, so this is harmless. 180 * was corrected, so this is harmless.
@@ -177,28 +183,29 @@ retry:
177 * enabled. A corresponding message will be printed 183 * enabled. A corresponding message will be printed
178 * later, when it is has been scrubbed. 184 * later, when it is has been scrubbed.
179 */ 185 */
180 ubi_msg("fixable bit-flip detected at PEB %d", pnum); 186 dbg_msg("fixable bit-flip detected at PEB %d", pnum);
181 ubi_assert(len == read); 187 ubi_assert(len == read);
182 return UBI_IO_BITFLIPS; 188 return UBI_IO_BITFLIPS;
183 } 189 }
184 190
185 if (retries++ < UBI_IO_RETRIES) { 191 if (retries++ < UBI_IO_RETRIES) {
186 ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry", 192 dbg_io("error %d%s while reading %d bytes from PEB "
187 err, errstr, len, pnum, offset, read); 193 "%d:%d, read only %zd bytes, retry",
194 err, errstr, len, pnum, offset, read);
188 yield(); 195 yield();
189 goto retry; 196 goto retry;
190 } 197 }
191 198
192 ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes", 199 ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
193 err, errstr, len, pnum, offset, read); 200 "read %zd bytes", err, errstr, len, pnum, offset, read);
194 dump_stack(); 201 ubi_dbg_dump_stack();
195 202
196 /* 203 /*
197 * The driver should never return -EBADMSG if it failed to read 204 * The driver should never return -EBADMSG if it failed to read
198 * all the requested data. But some buggy drivers might do 205 * all the requested data. But some buggy drivers might do
199 * this, so we change it to -EIO. 206 * this, so we change it to -EIO.
200 */ 207 */
201 if (read != len && mtd_is_eccerr(err)) { 208 if (read != len && err == -EBADMSG) {
202 ubi_assert(0); 209 ubi_assert(0);
203 err = -EIO; 210 err = -EIO;
204 } 211 }
@@ -250,12 +257,14 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
250 return -EROFS; 257 return -EROFS;
251 } 258 }
252 259
253 err = self_check_not_bad(ubi, pnum); 260 /* The below has to be compiled out if paranoid checks are disabled */
261
262 err = paranoid_check_not_bad(ubi, pnum);
254 if (err) 263 if (err)
255 return err; 264 return err;
256 265
257 /* The area we are writing to has to contain all 0xFF bytes */ 266 /* The area we are writing to has to contain all 0xFF bytes */
258 err = ubi_self_check_all_ff(ubi, pnum, offset, len); 267 err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
259 if (err) 268 if (err)
260 return err; 269 return err;
261 270
@@ -264,33 +273,33 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
264 * We write to the data area of the physical eraseblock. Make 273 * We write to the data area of the physical eraseblock. Make
265 * sure it has valid EC and VID headers. 274 * sure it has valid EC and VID headers.
266 */ 275 */
267 err = self_check_peb_ec_hdr(ubi, pnum); 276 err = paranoid_check_peb_ec_hdr(ubi, pnum);
268 if (err) 277 if (err)
269 return err; 278 return err;
270 err = self_check_peb_vid_hdr(ubi, pnum); 279 err = paranoid_check_peb_vid_hdr(ubi, pnum);
271 if (err) 280 if (err)
272 return err; 281 return err;
273 } 282 }
274 283
275 if (ubi_dbg_is_write_failure(ubi)) { 284 if (ubi_dbg_is_write_failure(ubi)) {
276 ubi_err("cannot write %d bytes to PEB %d:%d (emulated)", 285 dbg_err("cannot write %d bytes to PEB %d:%d "
277 len, pnum, offset); 286 "(emulated)", len, pnum, offset);
278 dump_stack(); 287 ubi_dbg_dump_stack();
279 return -EIO; 288 return -EIO;
280 } 289 }
281 290
282 addr = (loff_t)pnum * ubi->peb_size + offset; 291 addr = (loff_t)pnum * ubi->peb_size + offset;
283 err = mtd_write(ubi->mtd, addr, len, &written, buf); 292 err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
284 if (err) { 293 if (err) {
285 ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes", 294 ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
286 err, len, pnum, offset, written); 295 "%zd bytes", err, len, pnum, offset, written);
287 dump_stack(); 296 ubi_dbg_dump_stack();
288 ubi_dump_flash(ubi, pnum, offset, len); 297 ubi_dbg_dump_flash(ubi, pnum, offset, len);
289 } else 298 } else
290 ubi_assert(written == len); 299 ubi_assert(written == len);
291 300
292 if (!err) { 301 if (!err) {
293 err = self_check_write(ubi, buf, pnum, offset, len); 302 err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
294 if (err) 303 if (err)
295 return err; 304 return err;
296 305
@@ -301,7 +310,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
301 offset += len; 310 offset += len;
302 len = ubi->peb_size - offset; 311 len = ubi->peb_size - offset;
303 if (len) 312 if (len)
304 err = ubi_self_check_all_ff(ubi, pnum, offset, len); 313 err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
305 } 314 }
306 315
307 return err; 316 return err;
@@ -352,16 +361,16 @@ retry:
352 ei.callback = erase_callback; 361 ei.callback = erase_callback;
353 ei.priv = (unsigned long)&wq; 362 ei.priv = (unsigned long)&wq;
354 363
355 err = mtd_erase(ubi->mtd, &ei); 364 err = ubi->mtd->erase(ubi->mtd, &ei);
356 if (err) { 365 if (err) {
357 if (retries++ < UBI_IO_RETRIES) { 366 if (retries++ < UBI_IO_RETRIES) {
358 ubi_warn("error %d while erasing PEB %d, retry", 367 dbg_io("error %d while erasing PEB %d, retry",
359 err, pnum); 368 err, pnum);
360 yield(); 369 yield();
361 goto retry; 370 goto retry;
362 } 371 }
363 ubi_err("cannot erase PEB %d, error %d", pnum, err); 372 ubi_err("cannot erase PEB %d, error %d", pnum, err);
364 dump_stack(); 373 ubi_dbg_dump_stack();
365 return err; 374 return err;
366 } 375 }
367 376
@@ -374,21 +383,21 @@ retry:
374 383
375 if (ei.state == MTD_ERASE_FAILED) { 384 if (ei.state == MTD_ERASE_FAILED) {
376 if (retries++ < UBI_IO_RETRIES) { 385 if (retries++ < UBI_IO_RETRIES) {
377 ubi_warn("error while erasing PEB %d, retry", pnum); 386 dbg_io("error while erasing PEB %d, retry", pnum);
378 yield(); 387 yield();
379 goto retry; 388 goto retry;
380 } 389 }
381 ubi_err("cannot erase PEB %d", pnum); 390 ubi_err("cannot erase PEB %d", pnum);
382 dump_stack(); 391 ubi_dbg_dump_stack();
383 return -EIO; 392 return -EIO;
384 } 393 }
385 394
386 err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size); 395 err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
387 if (err) 396 if (err)
388 return err; 397 return err;
389 398
390 if (ubi_dbg_is_erase_failure(ubi)) { 399 if (ubi_dbg_is_erase_failure(ubi)) {
391 ubi_err("cannot erase PEB %d (emulated)", pnum); 400 dbg_err("cannot erase PEB %d (emulated)", pnum);
392 return -EIO; 401 return -EIO;
393 } 402 }
394 403
@@ -422,11 +431,11 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
422 goto out; 431 goto out;
423 432
424 /* Make sure the PEB contains only 0xFF bytes */ 433 /* Make sure the PEB contains only 0xFF bytes */
425 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); 434 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
426 if (err) 435 if (err)
427 goto out; 436 goto out;
428 437
429 err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size); 438 err = ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
430 if (err == 0) { 439 if (err == 0) {
431 ubi_err("erased PEB %d, but a non-0xFF byte found", 440 ubi_err("erased PEB %d, but a non-0xFF byte found",
432 pnum); 441 pnum);
@@ -435,17 +444,17 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
435 } 444 }
436 445
437 /* Write a pattern and check it */ 446 /* Write a pattern and check it */
438 memset(ubi->peb_buf, patterns[i], ubi->peb_size); 447 memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
439 err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); 448 err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
440 if (err) 449 if (err)
441 goto out; 450 goto out;
442 451
443 memset(ubi->peb_buf, ~patterns[i], ubi->peb_size); 452 memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
444 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); 453 err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
445 if (err) 454 if (err)
446 goto out; 455 goto out;
447 456
448 err = ubi_check_pattern(ubi->peb_buf, patterns[i], 457 err = ubi_check_pattern(ubi->peb_buf1, patterns[i],
449 ubi->peb_size); 458 ubi->peb_size);
450 if (err == 0) { 459 if (err == 0) {
451 ubi_err("pattern %x checking failed for PEB %d", 460 ubi_err("pattern %x checking failed for PEB %d",
@@ -460,7 +469,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
460 469
461out: 470out:
462 mutex_unlock(&ubi->buf_mutex); 471 mutex_unlock(&ubi->buf_mutex);
463 if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { 472 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
464 /* 473 /*
465 * If a bit-flip or data integrity error was detected, the test 474 * If a bit-flip or data integrity error was detected, the test
466 * has not passed because it happened on a freshly erased 475 * has not passed because it happened on a freshly erased
@@ -512,13 +521,15 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
512 * It is important to first invalidate the EC header, and then the VID 521 * It is important to first invalidate the EC header, and then the VID
513 * header. Otherwise a power cut may lead to valid EC header and 522 * header. Otherwise a power cut may lead to valid EC header and
514 * invalid VID header, in which case UBI will treat this PEB as 523 * invalid VID header, in which case UBI will treat this PEB as
515 * corrupted and will try to preserve it, and print scary warnings. 524 * corrupted and will try to preserve it, and print scary warnings (see
525 * the header comment in scan.c for more information).
516 */ 526 */
517 addr = (loff_t)pnum * ubi->peb_size; 527 addr = (loff_t)pnum * ubi->peb_size;
518 err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); 528 err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
519 if (!err) { 529 if (!err) {
520 addr += ubi->vid_hdr_aloffset; 530 addr += ubi->vid_hdr_aloffset;
521 err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); 531 err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
532 (void *)&data);
522 if (!err) 533 if (!err)
523 return 0; 534 return 0;
524 } 535 }
@@ -553,7 +564,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
553 */ 564 */
554 ubi_err("cannot invalidate PEB %d, write returned %d read returned %d", 565 ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
555 pnum, err, err1); 566 pnum, err, err1);
556 ubi_dump_flash(ubi, pnum, 0, ubi->peb_size); 567 ubi_dbg_dump_flash(ubi, pnum, 0, ubi->peb_size);
557 return -EIO; 568 return -EIO;
558} 569}
559 570
@@ -579,7 +590,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
579 590
580 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 591 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
581 592
582 err = self_check_not_bad(ubi, pnum); 593 err = paranoid_check_not_bad(ubi, pnum);
583 if (err != 0) 594 if (err != 0)
584 return err; 595 return err;
585 596
@@ -624,7 +635,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
624 if (ubi->bad_allowed) { 635 if (ubi->bad_allowed) {
625 int ret; 636 int ret;
626 637
627 ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); 638 ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
628 if (ret < 0) 639 if (ret < 0)
629 ubi_err("error %d while checking if PEB %d is bad", 640 ubi_err("error %d while checking if PEB %d is bad",
630 ret, pnum); 641 ret, pnum);
@@ -659,7 +670,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
659 if (!ubi->bad_allowed) 670 if (!ubi->bad_allowed)
660 return 0; 671 return 0;
661 672
662 err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); 673 err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
663 if (err) 674 if (err)
664 ubi_err("cannot mark PEB %d bad, error %d", pnum, err); 675 ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
665 return err; 676 return err;
@@ -684,7 +695,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
684 leb_start = be32_to_cpu(ec_hdr->data_offset); 695 leb_start = be32_to_cpu(ec_hdr->data_offset);
685 696
686 if (ec_hdr->version != UBI_VERSION) { 697 if (ec_hdr->version != UBI_VERSION) {
687 ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d", 698 ubi_err("node with incompatible UBI version found: "
699 "this UBI version is %d, image version is %d",
688 UBI_VERSION, (int)ec_hdr->version); 700 UBI_VERSION, (int)ec_hdr->version);
689 goto bad; 701 goto bad;
690 } 702 }
@@ -710,8 +722,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
710 722
711bad: 723bad:
712 ubi_err("bad EC header"); 724 ubi_err("bad EC header");
713 ubi_dump_ec_hdr(ec_hdr); 725 ubi_dbg_dump_ec_hdr(ec_hdr);
714 dump_stack(); 726 ubi_dbg_dump_stack();
715 return 1; 727 return 1;
716} 728}
717 729
@@ -748,7 +760,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
748 760
749 read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 761 read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
750 if (read_err) { 762 if (read_err) {
751 if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) 763 if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
752 return read_err; 764 return read_err;
753 765
754 /* 766 /*
@@ -764,7 +776,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
764 776
765 magic = be32_to_cpu(ec_hdr->magic); 777 magic = be32_to_cpu(ec_hdr->magic);
766 if (magic != UBI_EC_HDR_MAGIC) { 778 if (magic != UBI_EC_HDR_MAGIC) {
767 if (mtd_is_eccerr(read_err)) 779 if (read_err == -EBADMSG)
768 return UBI_IO_BAD_HDR_EBADMSG; 780 return UBI_IO_BAD_HDR_EBADMSG;
769 781
770 /* 782 /*
@@ -775,10 +787,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
775 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { 787 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
776 /* The physical eraseblock is supposedly empty */ 788 /* The physical eraseblock is supposedly empty */
777 if (verbose) 789 if (verbose)
778 ubi_warn("no EC header found at PEB %d, only 0xFF bytes", 790 ubi_warn("no EC header found at PEB %d, "
779 pnum); 791 "only 0xFF bytes", pnum);
780 dbg_bld("no EC header found at PEB %d, only 0xFF bytes", 792 dbg_bld("no EC header found at PEB %d, "
781 pnum); 793 "only 0xFF bytes", pnum);
782 if (!read_err) 794 if (!read_err)
783 return UBI_IO_FF; 795 return UBI_IO_FF;
784 else 796 else
@@ -790,12 +802,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
790 * 0xFF bytes. Report that the header is corrupted. 802 * 0xFF bytes. Report that the header is corrupted.
791 */ 803 */
792 if (verbose) { 804 if (verbose) {
793 ubi_warn("bad magic number at PEB %d: %08x instead of %08x", 805 ubi_warn("bad magic number at PEB %d: %08x instead of "
794 pnum, magic, UBI_EC_HDR_MAGIC); 806 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
795 ubi_dump_ec_hdr(ec_hdr); 807 ubi_dbg_dump_ec_hdr(ec_hdr);
796 } 808 }
797 dbg_bld("bad magic number at PEB %d: %08x instead of %08x", 809 dbg_bld("bad magic number at PEB %d: %08x instead of "
798 pnum, magic, UBI_EC_HDR_MAGIC); 810 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
799 return UBI_IO_BAD_HDR; 811 return UBI_IO_BAD_HDR;
800 } 812 }
801 813
@@ -804,12 +816,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
804 816
805 if (hdr_crc != crc) { 817 if (hdr_crc != crc) {
806 if (verbose) { 818 if (verbose) {
807 ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x", 819 ubi_warn("bad EC header CRC at PEB %d, calculated "
808 pnum, crc, hdr_crc); 820 "%#08x, read %#08x", pnum, crc, hdr_crc);
809 ubi_dump_ec_hdr(ec_hdr); 821 ubi_dbg_dump_ec_hdr(ec_hdr);
810 } 822 }
811 dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x", 823 dbg_bld("bad EC header CRC at PEB %d, calculated "
812 pnum, crc, hdr_crc); 824 "%#08x, read %#08x", pnum, crc, hdr_crc);
813 825
814 if (!read_err) 826 if (!read_err)
815 return UBI_IO_BAD_HDR; 827 return UBI_IO_BAD_HDR;
@@ -863,7 +875,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
863 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); 875 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
864 ec_hdr->hdr_crc = cpu_to_be32(crc); 876 ec_hdr->hdr_crc = cpu_to_be32(crc);
865 877
866 err = self_check_ec_hdr(ubi, pnum, ec_hdr); 878 err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
867 if (err) 879 if (err)
868 return err; 880 return err;
869 881
@@ -894,40 +906,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
894 int usable_leb_size = ubi->leb_size - data_pad; 906 int usable_leb_size = ubi->leb_size - data_pad;
895 907
896 if (copy_flag != 0 && copy_flag != 1) { 908 if (copy_flag != 0 && copy_flag != 1) {
897 ubi_err("bad copy_flag"); 909 dbg_err("bad copy_flag");
898 goto bad; 910 goto bad;
899 } 911 }
900 912
901 if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || 913 if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
902 data_pad < 0) { 914 data_pad < 0) {
903 ubi_err("negative values"); 915 dbg_err("negative values");
904 goto bad; 916 goto bad;
905 } 917 }
906 918
907 if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { 919 if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
908 ubi_err("bad vol_id"); 920 dbg_err("bad vol_id");
909 goto bad; 921 goto bad;
910 } 922 }
911 923
912 if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { 924 if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
913 ubi_err("bad compat"); 925 dbg_err("bad compat");
914 goto bad; 926 goto bad;
915 } 927 }
916 928
917 if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && 929 if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
918 compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && 930 compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
919 compat != UBI_COMPAT_REJECT) { 931 compat != UBI_COMPAT_REJECT) {
920 ubi_err("bad compat"); 932 dbg_err("bad compat");
921 goto bad; 933 goto bad;
922 } 934 }
923 935
924 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { 936 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
925 ubi_err("bad vol_type"); 937 dbg_err("bad vol_type");
926 goto bad; 938 goto bad;
927 } 939 }
928 940
929 if (data_pad >= ubi->leb_size / 2) { 941 if (data_pad >= ubi->leb_size / 2) {
930 ubi_err("bad data_pad"); 942 dbg_err("bad data_pad");
931 goto bad; 943 goto bad;
932 } 944 }
933 945
@@ -939,45 +951,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
939 * mapped logical eraseblocks. 951 * mapped logical eraseblocks.
940 */ 952 */
941 if (used_ebs == 0) { 953 if (used_ebs == 0) {
942 ubi_err("zero used_ebs"); 954 dbg_err("zero used_ebs");
943 goto bad; 955 goto bad;
944 } 956 }
945 if (data_size == 0) { 957 if (data_size == 0) {
946 ubi_err("zero data_size"); 958 dbg_err("zero data_size");
947 goto bad; 959 goto bad;
948 } 960 }
949 if (lnum < used_ebs - 1) { 961 if (lnum < used_ebs - 1) {
950 if (data_size != usable_leb_size) { 962 if (data_size != usable_leb_size) {
951 ubi_err("bad data_size"); 963 dbg_err("bad data_size");
952 goto bad; 964 goto bad;
953 } 965 }
954 } else if (lnum == used_ebs - 1) { 966 } else if (lnum == used_ebs - 1) {
955 if (data_size == 0) { 967 if (data_size == 0) {
956 ubi_err("bad data_size at last LEB"); 968 dbg_err("bad data_size at last LEB");
957 goto bad; 969 goto bad;
958 } 970 }
959 } else { 971 } else {
960 ubi_err("too high lnum"); 972 dbg_err("too high lnum");
961 goto bad; 973 goto bad;
962 } 974 }
963 } else { 975 } else {
964 if (copy_flag == 0) { 976 if (copy_flag == 0) {
965 if (data_crc != 0) { 977 if (data_crc != 0) {
966 ubi_err("non-zero data CRC"); 978 dbg_err("non-zero data CRC");
967 goto bad; 979 goto bad;
968 } 980 }
969 if (data_size != 0) { 981 if (data_size != 0) {
970 ubi_err("non-zero data_size"); 982 dbg_err("non-zero data_size");
971 goto bad; 983 goto bad;
972 } 984 }
973 } else { 985 } else {
974 if (data_size == 0) { 986 if (data_size == 0) {
975 ubi_err("zero data_size of copy"); 987 dbg_err("zero data_size of copy");
976 goto bad; 988 goto bad;
977 } 989 }
978 } 990 }
979 if (used_ebs != 0) { 991 if (used_ebs != 0) {
980 ubi_err("bad used_ebs"); 992 dbg_err("bad used_ebs");
981 goto bad; 993 goto bad;
982 } 994 }
983 } 995 }
@@ -986,8 +998,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
986 998
987bad: 999bad:
988 ubi_err("bad VID header"); 1000 ubi_err("bad VID header");
989 ubi_dump_vid_hdr(vid_hdr); 1001 ubi_dbg_dump_vid_hdr(vid_hdr);
990 dump_stack(); 1002 ubi_dbg_dump_stack();
991 return 1; 1003 return 1;
992} 1004}
993 1005
@@ -1020,20 +1032,20 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1020 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1032 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1021 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1033 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1022 ubi->vid_hdr_alsize); 1034 ubi->vid_hdr_alsize);
1023 if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) 1035 if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
1024 return read_err; 1036 return read_err;
1025 1037
1026 magic = be32_to_cpu(vid_hdr->magic); 1038 magic = be32_to_cpu(vid_hdr->magic);
1027 if (magic != UBI_VID_HDR_MAGIC) { 1039 if (magic != UBI_VID_HDR_MAGIC) {
1028 if (mtd_is_eccerr(read_err)) 1040 if (read_err == -EBADMSG)
1029 return UBI_IO_BAD_HDR_EBADMSG; 1041 return UBI_IO_BAD_HDR_EBADMSG;
1030 1042
1031 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { 1043 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
1032 if (verbose) 1044 if (verbose)
1033 ubi_warn("no VID header found at PEB %d, only 0xFF bytes", 1045 ubi_warn("no VID header found at PEB %d, "
1034 pnum); 1046 "only 0xFF bytes", pnum);
1035 dbg_bld("no VID header found at PEB %d, only 0xFF bytes", 1047 dbg_bld("no VID header found at PEB %d, "
1036 pnum); 1048 "only 0xFF bytes", pnum);
1037 if (!read_err) 1049 if (!read_err)
1038 return UBI_IO_FF; 1050 return UBI_IO_FF;
1039 else 1051 else
@@ -1041,12 +1053,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1041 } 1053 }
1042 1054
1043 if (verbose) { 1055 if (verbose) {
1044 ubi_warn("bad magic number at PEB %d: %08x instead of %08x", 1056 ubi_warn("bad magic number at PEB %d: %08x instead of "
1045 pnum, magic, UBI_VID_HDR_MAGIC); 1057 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
1046 ubi_dump_vid_hdr(vid_hdr); 1058 ubi_dbg_dump_vid_hdr(vid_hdr);
1047 } 1059 }
1048 dbg_bld("bad magic number at PEB %d: %08x instead of %08x", 1060 dbg_bld("bad magic number at PEB %d: %08x instead of "
1049 pnum, magic, UBI_VID_HDR_MAGIC); 1061 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
1050 return UBI_IO_BAD_HDR; 1062 return UBI_IO_BAD_HDR;
1051 } 1063 }
1052 1064
@@ -1055,12 +1067,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1055 1067
1056 if (hdr_crc != crc) { 1068 if (hdr_crc != crc) {
1057 if (verbose) { 1069 if (verbose) {
1058 ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x", 1070 ubi_warn("bad CRC at PEB %d, calculated %#08x, "
1059 pnum, crc, hdr_crc); 1071 "read %#08x", pnum, crc, hdr_crc);
1060 ubi_dump_vid_hdr(vid_hdr); 1072 ubi_dbg_dump_vid_hdr(vid_hdr);
1061 } 1073 }
1062 dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x", 1074 dbg_bld("bad CRC at PEB %d, calculated %#08x, "
1063 pnum, crc, hdr_crc); 1075 "read %#08x", pnum, crc, hdr_crc);
1064 if (!read_err) 1076 if (!read_err)
1065 return UBI_IO_BAD_HDR; 1077 return UBI_IO_BAD_HDR;
1066 else 1078 else
@@ -1101,7 +1113,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1101 dbg_io("write VID header to PEB %d", pnum); 1113 dbg_io("write VID header to PEB %d", pnum);
1102 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 1114 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
1103 1115
1104 err = self_check_peb_ec_hdr(ubi, pnum); 1116 err = paranoid_check_peb_ec_hdr(ubi, pnum);
1105 if (err) 1117 if (err)
1106 return err; 1118 return err;
1107 1119
@@ -1110,7 +1122,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1110 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); 1122 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
1111 vid_hdr->hdr_crc = cpu_to_be32(crc); 1123 vid_hdr->hdr_crc = cpu_to_be32(crc);
1112 1124
1113 err = self_check_vid_hdr(ubi, pnum, vid_hdr); 1125 err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
1114 if (err) 1126 if (err)
1115 return err; 1127 return err;
1116 1128
@@ -1120,32 +1132,34 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1120 return err; 1132 return err;
1121} 1133}
1122 1134
1135#ifdef CONFIG_MTD_UBI_DEBUG
1136
1123/** 1137/**
1124 * self_check_not_bad - ensure that a physical eraseblock is not bad. 1138 * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
1125 * @ubi: UBI device description object 1139 * @ubi: UBI device description object
1126 * @pnum: physical eraseblock number to check 1140 * @pnum: physical eraseblock number to check
1127 * 1141 *
1128 * This function returns zero if the physical eraseblock is good, %-EINVAL if 1142 * This function returns zero if the physical eraseblock is good, %-EINVAL if
1129 * it is bad and a negative error code if an error occurred. 1143 * it is bad and a negative error code if an error occurred.
1130 */ 1144 */
1131static int self_check_not_bad(const struct ubi_device *ubi, int pnum) 1145static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
1132{ 1146{
1133 int err; 1147 int err;
1134 1148
1135 if (!ubi_dbg_chk_io(ubi)) 1149 if (!ubi->dbg->chk_io)
1136 return 0; 1150 return 0;
1137 1151
1138 err = ubi_io_is_bad(ubi, pnum); 1152 err = ubi_io_is_bad(ubi, pnum);
1139 if (!err) 1153 if (!err)
1140 return err; 1154 return err;
1141 1155
1142 ubi_err("self-check failed for PEB %d", pnum); 1156 ubi_err("paranoid check failed for PEB %d", pnum);
1143 dump_stack(); 1157 ubi_dbg_dump_stack();
1144 return err > 0 ? -EINVAL : err; 1158 return err > 0 ? -EINVAL : err;
1145} 1159}
1146 1160
1147/** 1161/**
1148 * self_check_ec_hdr - check if an erase counter header is all right. 1162 * paranoid_check_ec_hdr - check if an erase counter header is all right.
1149 * @ubi: UBI device description object 1163 * @ubi: UBI device description object
1150 * @pnum: physical eraseblock number the erase counter header belongs to 1164 * @pnum: physical eraseblock number the erase counter header belongs to
1151 * @ec_hdr: the erase counter header to check 1165 * @ec_hdr: the erase counter header to check
@@ -1153,13 +1167,13 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
1153 * This function returns zero if the erase counter header contains valid 1167 * This function returns zero if the erase counter header contains valid
1154 * values, and %-EINVAL if not. 1168 * values, and %-EINVAL if not.
1155 */ 1169 */
1156static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, 1170static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1157 const struct ubi_ec_hdr *ec_hdr) 1171 const struct ubi_ec_hdr *ec_hdr)
1158{ 1172{
1159 int err; 1173 int err;
1160 uint32_t magic; 1174 uint32_t magic;
1161 1175
1162 if (!ubi_dbg_chk_io(ubi)) 1176 if (!ubi->dbg->chk_io)
1163 return 0; 1177 return 0;
1164 1178
1165 magic = be32_to_cpu(ec_hdr->magic); 1179 magic = be32_to_cpu(ec_hdr->magic);
@@ -1171,33 +1185,33 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1171 1185
1172 err = validate_ec_hdr(ubi, ec_hdr); 1186 err = validate_ec_hdr(ubi, ec_hdr);
1173 if (err) { 1187 if (err) {
1174 ubi_err("self-check failed for PEB %d", pnum); 1188 ubi_err("paranoid check failed for PEB %d", pnum);
1175 goto fail; 1189 goto fail;
1176 } 1190 }
1177 1191
1178 return 0; 1192 return 0;
1179 1193
1180fail: 1194fail:
1181 ubi_dump_ec_hdr(ec_hdr); 1195 ubi_dbg_dump_ec_hdr(ec_hdr);
1182 dump_stack(); 1196 ubi_dbg_dump_stack();
1183 return -EINVAL; 1197 return -EINVAL;
1184} 1198}
1185 1199
1186/** 1200/**
1187 * self_check_peb_ec_hdr - check erase counter header. 1201 * paranoid_check_peb_ec_hdr - check erase counter header.
1188 * @ubi: UBI device description object 1202 * @ubi: UBI device description object
1189 * @pnum: the physical eraseblock number to check 1203 * @pnum: the physical eraseblock number to check
1190 * 1204 *
1191 * This function returns zero if the erase counter header is all right and and 1205 * This function returns zero if the erase counter header is all right and and
1192 * a negative error code if not or if an error occurred. 1206 * a negative error code if not or if an error occurred.
1193 */ 1207 */
1194static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) 1208static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
1195{ 1209{
1196 int err; 1210 int err;
1197 uint32_t crc, hdr_crc; 1211 uint32_t crc, hdr_crc;
1198 struct ubi_ec_hdr *ec_hdr; 1212 struct ubi_ec_hdr *ec_hdr;
1199 1213
1200 if (!ubi_dbg_chk_io(ubi)) 1214 if (!ubi->dbg->chk_io)
1201 return 0; 1215 return 0;
1202 1216
1203 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); 1217 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1205,21 +1219,21 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
1205 return -ENOMEM; 1219 return -ENOMEM;
1206 1220
1207 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 1221 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
1208 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1222 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
1209 goto exit; 1223 goto exit;
1210 1224
1211 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); 1225 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
1212 hdr_crc = be32_to_cpu(ec_hdr->hdr_crc); 1226 hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
1213 if (hdr_crc != crc) { 1227 if (hdr_crc != crc) {
1214 ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); 1228 ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
1215 ubi_err("self-check failed for PEB %d", pnum); 1229 ubi_err("paranoid check failed for PEB %d", pnum);
1216 ubi_dump_ec_hdr(ec_hdr); 1230 ubi_dbg_dump_ec_hdr(ec_hdr);
1217 dump_stack(); 1231 ubi_dbg_dump_stack();
1218 err = -EINVAL; 1232 err = -EINVAL;
1219 goto exit; 1233 goto exit;
1220 } 1234 }
1221 1235
1222 err = self_check_ec_hdr(ubi, pnum, ec_hdr); 1236 err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
1223 1237
1224exit: 1238exit:
1225 kfree(ec_hdr); 1239 kfree(ec_hdr);
@@ -1227,7 +1241,7 @@ exit:
1227} 1241}
1228 1242
1229/** 1243/**
1230 * self_check_vid_hdr - check that a volume identifier header is all right. 1244 * paranoid_check_vid_hdr - check that a volume identifier header is all right.
1231 * @ubi: UBI device description object 1245 * @ubi: UBI device description object
1232 * @pnum: physical eraseblock number the volume identifier header belongs to 1246 * @pnum: physical eraseblock number the volume identifier header belongs to
1233 * @vid_hdr: the volume identifier header to check 1247 * @vid_hdr: the volume identifier header to check
@@ -1235,13 +1249,13 @@ exit:
1235 * This function returns zero if the volume identifier header is all right, and 1249 * This function returns zero if the volume identifier header is all right, and
1236 * %-EINVAL if not. 1250 * %-EINVAL if not.
1237 */ 1251 */
1238static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, 1252static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1239 const struct ubi_vid_hdr *vid_hdr) 1253 const struct ubi_vid_hdr *vid_hdr)
1240{ 1254{
1241 int err; 1255 int err;
1242 uint32_t magic; 1256 uint32_t magic;
1243 1257
1244 if (!ubi_dbg_chk_io(ubi)) 1258 if (!ubi->dbg->chk_io)
1245 return 0; 1259 return 0;
1246 1260
1247 magic = be32_to_cpu(vid_hdr->magic); 1261 magic = be32_to_cpu(vid_hdr->magic);
@@ -1253,36 +1267,36 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1253 1267
1254 err = validate_vid_hdr(ubi, vid_hdr); 1268 err = validate_vid_hdr(ubi, vid_hdr);
1255 if (err) { 1269 if (err) {
1256 ubi_err("self-check failed for PEB %d", pnum); 1270 ubi_err("paranoid check failed for PEB %d", pnum);
1257 goto fail; 1271 goto fail;
1258 } 1272 }
1259 1273
1260 return err; 1274 return err;
1261 1275
1262fail: 1276fail:
1263 ubi_err("self-check failed for PEB %d", pnum); 1277 ubi_err("paranoid check failed for PEB %d", pnum);
1264 ubi_dump_vid_hdr(vid_hdr); 1278 ubi_dbg_dump_vid_hdr(vid_hdr);
1265 dump_stack(); 1279 ubi_dbg_dump_stack();
1266 return -EINVAL; 1280 return -EINVAL;
1267 1281
1268} 1282}
1269 1283
1270/** 1284/**
1271 * self_check_peb_vid_hdr - check volume identifier header. 1285 * paranoid_check_peb_vid_hdr - check volume identifier header.
1272 * @ubi: UBI device description object 1286 * @ubi: UBI device description object
1273 * @pnum: the physical eraseblock number to check 1287 * @pnum: the physical eraseblock number to check
1274 * 1288 *
1275 * This function returns zero if the volume identifier header is all right, 1289 * This function returns zero if the volume identifier header is all right,
1276 * and a negative error code if not or if an error occurred. 1290 * and a negative error code if not or if an error occurred.
1277 */ 1291 */
1278static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) 1292static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1279{ 1293{
1280 int err; 1294 int err;
1281 uint32_t crc, hdr_crc; 1295 uint32_t crc, hdr_crc;
1282 struct ubi_vid_hdr *vid_hdr; 1296 struct ubi_vid_hdr *vid_hdr;
1283 void *p; 1297 void *p;
1284 1298
1285 if (!ubi_dbg_chk_io(ubi)) 1299 if (!ubi->dbg->chk_io)
1286 return 0; 1300 return 0;
1287 1301
1288 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 1302 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1292,22 +1306,22 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1292 p = (char *)vid_hdr - ubi->vid_hdr_shift; 1306 p = (char *)vid_hdr - ubi->vid_hdr_shift;
1293 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1307 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
1294 ubi->vid_hdr_alsize); 1308 ubi->vid_hdr_alsize);
1295 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) 1309 if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
1296 goto exit; 1310 goto exit;
1297 1311
1298 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1312 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
1299 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); 1313 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1300 if (hdr_crc != crc) { 1314 if (hdr_crc != crc) {
1301 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x", 1315 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
1302 pnum, crc, hdr_crc); 1316 "read %#08x", pnum, crc, hdr_crc);
1303 ubi_err("self-check failed for PEB %d", pnum); 1317 ubi_err("paranoid check failed for PEB %d", pnum);
1304 ubi_dump_vid_hdr(vid_hdr); 1318 ubi_dbg_dump_vid_hdr(vid_hdr);
1305 dump_stack(); 1319 ubi_dbg_dump_stack();
1306 err = -EINVAL; 1320 err = -EINVAL;
1307 goto exit; 1321 goto exit;
1308 } 1322 }
1309 1323
1310 err = self_check_vid_hdr(ubi, pnum, vid_hdr); 1324 err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
1311 1325
1312exit: 1326exit:
1313 ubi_free_vid_hdr(ubi, vid_hdr); 1327 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1315,7 +1329,7 @@ exit:
1315} 1329}
1316 1330
1317/** 1331/**
1318 * self_check_write - make sure write succeeded. 1332 * ubi_dbg_check_write - make sure write succeeded.
1319 * @ubi: UBI device description object 1333 * @ubi: UBI device description object
1320 * @buf: buffer with data which were written 1334 * @buf: buffer with data which were written
1321 * @pnum: physical eraseblock number the data were written to 1335 * @pnum: physical eraseblock number the data were written to
@@ -1326,15 +1340,15 @@ exit:
1326 * the original data buffer - the data have to match. Returns zero if the data 1340 * the original data buffer - the data have to match. Returns zero if the data
1327 * match and a negative error code if not or in case of failure. 1341 * match and a negative error code if not or in case of failure.
1328 */ 1342 */
1329static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, 1343int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1330 int offset, int len) 1344 int offset, int len)
1331{ 1345{
1332 int err, i; 1346 int err, i;
1333 size_t read; 1347 size_t read;
1334 void *buf1; 1348 void *buf1;
1335 loff_t addr = (loff_t)pnum * ubi->peb_size + offset; 1349 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1336 1350
1337 if (!ubi_dbg_chk_io(ubi)) 1351 if (!ubi->dbg->chk_io)
1338 return 0; 1352 return 0;
1339 1353
1340 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); 1354 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1343,8 +1357,8 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1343 return 0; 1357 return 0;
1344 } 1358 }
1345 1359
1346 err = mtd_read(ubi->mtd, addr, len, &read, buf1); 1360 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
1347 if (err && !mtd_is_bitflip(err)) 1361 if (err && err != -EUCLEAN)
1348 goto out_free; 1362 goto out_free;
1349 1363
1350 for (i = 0; i < len; i++) { 1364 for (i = 0; i < len; i++) {
@@ -1355,7 +1369,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1355 if (c == c1) 1369 if (c == c1)
1356 continue; 1370 continue;
1357 1371
1358 ubi_err("self-check failed for PEB %d:%d, len %d", 1372 ubi_err("paranoid check failed for PEB %d:%d, len %d",
1359 pnum, offset, len); 1373 pnum, offset, len);
1360 ubi_msg("data differ at position %d", i); 1374 ubi_msg("data differ at position %d", i);
1361 dump_len = max_t(int, 128, len - i); 1375 dump_len = max_t(int, 128, len - i);
@@ -1367,7 +1381,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1367 i, i + dump_len); 1381 i, i + dump_len);
1368 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 1382 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1369 buf1 + i, dump_len, 1); 1383 buf1 + i, dump_len, 1);
1370 dump_stack(); 1384 ubi_dbg_dump_stack();
1371 err = -EINVAL; 1385 err = -EINVAL;
1372 goto out_free; 1386 goto out_free;
1373 } 1387 }
@@ -1381,7 +1395,7 @@ out_free:
1381} 1395}
1382 1396
1383/** 1397/**
1384 * ubi_self_check_all_ff - check that a region of flash is empty. 1398 * ubi_dbg_check_all_ff - check that a region of flash is empty.
1385 * @ubi: UBI device description object 1399 * @ubi: UBI device description object
1386 * @pnum: the physical eraseblock number to check 1400 * @pnum: the physical eraseblock number to check
1387 * @offset: the starting offset within the physical eraseblock to check 1401 * @offset: the starting offset within the physical eraseblock to check
@@ -1391,14 +1405,14 @@ out_free:
1391 * @offset of the physical eraseblock @pnum, and a negative error code if not 1405 * @offset of the physical eraseblock @pnum, and a negative error code if not
1392 * or if an error occurred. 1406 * or if an error occurred.
1393 */ 1407 */
1394int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) 1408int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1395{ 1409{
1396 size_t read; 1410 size_t read;
1397 int err; 1411 int err;
1398 void *buf; 1412 void *buf;
1399 loff_t addr = (loff_t)pnum * ubi->peb_size + offset; 1413 loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
1400 1414
1401 if (!ubi_dbg_chk_io(ubi)) 1415 if (!ubi->dbg->chk_io)
1402 return 0; 1416 return 0;
1403 1417
1404 buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); 1418 buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1407,17 +1421,17 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1407 return 0; 1421 return 0;
1408 } 1422 }
1409 1423
1410 err = mtd_read(ubi->mtd, addr, len, &read, buf); 1424 err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
1411 if (err && !mtd_is_bitflip(err)) { 1425 if (err && err != -EUCLEAN) {
1412 ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes", 1426 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
1413 err, len, pnum, offset, read); 1427 "read %zd bytes", err, len, pnum, offset, read);
1414 goto error; 1428 goto error;
1415 } 1429 }
1416 1430
1417 err = ubi_check_pattern(buf, 0xFF, len); 1431 err = ubi_check_pattern(buf, 0xFF, len);
1418 if (err == 0) { 1432 if (err == 0) {
1419 ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes", 1433 ubi_err("flash region at PEB %d:%d, length %d does not "
1420 pnum, offset, len); 1434 "contain all 0xFF bytes", pnum, offset, len);
1421 goto fail; 1435 goto fail;
1422 } 1436 }
1423 1437
@@ -1425,12 +1439,14 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1425 return 0; 1439 return 0;
1426 1440
1427fail: 1441fail:
1428 ubi_err("self-check failed for PEB %d", pnum); 1442 ubi_err("paranoid check failed for PEB %d", pnum);
1429 ubi_msg("hex dump of the %d-%d region", offset, offset + len); 1443 ubi_msg("hex dump of the %d-%d region", offset, offset + len);
1430 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); 1444 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
1431 err = -EINVAL; 1445 err = -EINVAL;
1432error: 1446error:
1433 dump_stack(); 1447 ubi_dbg_dump_stack();
1434 vfree(buf); 1448 vfree(buf);
1435 return err; 1449 return err;
1436} 1450}
1451
1452#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 3aac1acceeb..d39716e5b20 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -221,7 +221,7 @@ out_free:
221 kfree(desc); 221 kfree(desc);
222out_put_ubi: 222out_put_ubi:
223 ubi_put_device(ubi); 223 ubi_put_device(ubi);
224 ubi_err("cannot open device %d, volume %d, error %d", 224 dbg_err("cannot open device %d, volume %d, error %d",
225 ubi_num, vol_id, err); 225 ubi_num, vol_id, err);
226 return ERR_PTR(err); 226 return ERR_PTR(err);
227} 227}
@@ -410,7 +410,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
410 return 0; 410 return 0;
411 411
412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); 412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
413 if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { 413 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
414 ubi_warn("mark volume %d as corrupted", vol_id); 414 ubi_warn("mark volume %d as corrupted", vol_id);
415 vol->corrupted = 1; 415 vol->corrupted = 1;
416 } 416 }
@@ -426,9 +426,11 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
426 * @buf: data to write 426 * @buf: data to write
427 * @offset: offset within the logical eraseblock where to write 427 * @offset: offset within the logical eraseblock where to write
428 * @len: how many bytes to write 428 * @len: how many bytes to write
429 * @dtype: expected data type
429 * 430 *
430 * This function writes @len bytes of data from @buf to offset @offset of 431 * This function writes @len bytes of data from @buf to offset @offset of
431 * logical eraseblock @lnum. 432 * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
433 * the data.
432 * 434 *
433 * This function takes care of physical eraseblock write failures. If write to 435 * This function takes care of physical eraseblock write failures. If write to
434 * the physical eraseblock write operation fails, the logical eraseblock is 436 * the physical eraseblock write operation fails, the logical eraseblock is
@@ -445,7 +447,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
445 * returns immediately with %-EBADF code. 447 * returns immediately with %-EBADF code.
446 */ 448 */
447int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, 449int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
448 int offset, int len) 450 int offset, int len, int dtype)
449{ 451{
450 struct ubi_volume *vol = desc->vol; 452 struct ubi_volume *vol = desc->vol;
451 struct ubi_device *ubi = vol->ubi; 453 struct ubi_device *ubi = vol->ubi;
@@ -464,13 +466,17 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
464 offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) 466 offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
465 return -EINVAL; 467 return -EINVAL;
466 468
469 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
470 dtype != UBI_UNKNOWN)
471 return -EINVAL;
472
467 if (vol->upd_marker) 473 if (vol->upd_marker)
468 return -EBADF; 474 return -EBADF;
469 475
470 if (len == 0) 476 if (len == 0)
471 return 0; 477 return 0;
472 478
473 return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len); 479 return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
474} 480}
475EXPORT_SYMBOL_GPL(ubi_leb_write); 481EXPORT_SYMBOL_GPL(ubi_leb_write);
476 482
@@ -480,6 +486,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
480 * @lnum: logical eraseblock number to change 486 * @lnum: logical eraseblock number to change
481 * @buf: data to write 487 * @buf: data to write
482 * @len: how many bytes to write 488 * @len: how many bytes to write
489 * @dtype: expected data type
483 * 490 *
484 * This function changes the contents of a logical eraseblock atomically. @buf 491 * This function changes the contents of a logical eraseblock atomically. @buf
485 * has to contain new logical eraseblock data, and @len - the length of the 492 * has to contain new logical eraseblock data, and @len - the length of the
@@ -490,7 +497,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
490 * code in case of failure. 497 * code in case of failure.
491 */ 498 */
492int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, 499int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
493 int len) 500 int len, int dtype)
494{ 501{
495 struct ubi_volume *vol = desc->vol; 502 struct ubi_volume *vol = desc->vol;
496 struct ubi_device *ubi = vol->ubi; 503 struct ubi_device *ubi = vol->ubi;
@@ -508,13 +515,17 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
508 len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) 515 len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
509 return -EINVAL; 516 return -EINVAL;
510 517
518 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
519 dtype != UBI_UNKNOWN)
520 return -EINVAL;
521
511 if (vol->upd_marker) 522 if (vol->upd_marker)
512 return -EBADF; 523 return -EBADF;
513 524
514 if (len == 0) 525 if (len == 0)
515 return 0; 526 return 0;
516 527
517 return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len); 528 return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
518} 529}
519EXPORT_SYMBOL_GPL(ubi_leb_change); 530EXPORT_SYMBOL_GPL(ubi_leb_change);
520 531
@@ -551,7 +562,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
551 if (err) 562 if (err)
552 return err; 563 return err;
553 564
554 return ubi_wl_flush(ubi, vol->vol_id, lnum); 565 return ubi_wl_flush(ubi);
555} 566}
556EXPORT_SYMBOL_GPL(ubi_leb_erase); 567EXPORT_SYMBOL_GPL(ubi_leb_erase);
557 568
@@ -615,6 +626,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
615 * ubi_leb_map - map logical eraseblock to a physical eraseblock. 626 * ubi_leb_map - map logical eraseblock to a physical eraseblock.
616 * @desc: volume descriptor 627 * @desc: volume descriptor
617 * @lnum: logical eraseblock number 628 * @lnum: logical eraseblock number
629 * @dtype: expected data type
618 * 630 *
619 * This function maps an un-mapped logical eraseblock @lnum to a physical 631 * This function maps an un-mapped logical eraseblock @lnum to a physical
620 * eraseblock. This means, that after a successful invocation of this 632 * eraseblock. This means, that after a successful invocation of this
@@ -627,7 +639,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
627 * eraseblock is already mapped, and other negative error codes in case of 639 * eraseblock is already mapped, and other negative error codes in case of
628 * other failures. 640 * other failures.
629 */ 641 */
630int ubi_leb_map(struct ubi_volume_desc *desc, int lnum) 642int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
631{ 643{
632 struct ubi_volume *vol = desc->vol; 644 struct ubi_volume *vol = desc->vol;
633 struct ubi_device *ubi = vol->ubi; 645 struct ubi_device *ubi = vol->ubi;
@@ -640,13 +652,17 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
640 if (lnum < 0 || lnum >= vol->reserved_pebs) 652 if (lnum < 0 || lnum >= vol->reserved_pebs)
641 return -EINVAL; 653 return -EINVAL;
642 654
655 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
656 dtype != UBI_UNKNOWN)
657 return -EINVAL;
658
643 if (vol->upd_marker) 659 if (vol->upd_marker)
644 return -EBADF; 660 return -EBADF;
645 661
646 if (vol->eba_tbl[lnum] >= 0) 662 if (vol->eba_tbl[lnum] >= 0)
647 return -EBADMSG; 663 return -EBADMSG;
648 664
649 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); 665 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
650} 666}
651EXPORT_SYMBOL_GPL(ubi_leb_map); 667EXPORT_SYMBOL_GPL(ubi_leb_map);
652 668
@@ -698,39 +714,14 @@ int ubi_sync(int ubi_num)
698 if (!ubi) 714 if (!ubi)
699 return -ENODEV; 715 return -ENODEV;
700 716
701 mtd_sync(ubi->mtd); 717 if (ubi->mtd->sync)
718 ubi->mtd->sync(ubi->mtd);
719
702 ubi_put_device(ubi); 720 ubi_put_device(ubi);
703 return 0; 721 return 0;
704} 722}
705EXPORT_SYMBOL_GPL(ubi_sync); 723EXPORT_SYMBOL_GPL(ubi_sync);
706 724
707/**
708 * ubi_flush - flush UBI work queue.
709 * @ubi_num: UBI device to flush work queue
710 * @vol_id: volume id to flush for
711 * @lnum: logical eraseblock number to flush for
712 *
713 * This function executes all pending works for a particular volume id / logical
714 * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
715 * a wildcard for all of the corresponding volume numbers or logical
716 * eraseblock numbers. It returns zero in case of success and a negative error
717 * code in case of failure.
718 */
719int ubi_flush(int ubi_num, int vol_id, int lnum)
720{
721 struct ubi_device *ubi;
722 int err = 0;
723
724 ubi = ubi_get_device(ubi_num);
725 if (!ubi)
726 return -ENODEV;
727
728 err = ubi_wl_flush(ubi, vol_id, lnum);
729 ubi_put_device(ubi);
730 return err;
731}
732EXPORT_SYMBOL_GPL(ubi_flush);
733
734BLOCKING_NOTIFIER_HEAD(ubi_notifiers); 725BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
735 726
736/** 727/**
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index f913d701a5b..ff2a65c37f6 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -81,7 +81,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
81 81
82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); 82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
83 if (err) { 83 if (err) {
84 if (mtd_is_eccerr(err)) 84 if (err == -EBADMSG)
85 err = 1; 85 err = 1;
86 break; 86 break;
87 } 87 }
@@ -92,45 +92,16 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
92} 92}
93 93
94/** 94/**
95 * ubi_update_reserved - update bad eraseblock handling accounting data. 95 * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
96 * @ubi: UBI device description object
97 *
98 * This function calculates the gap between current number of PEBs reserved for
99 * bad eraseblock handling and the required level of PEBs that must be
100 * reserved, and if necessary, reserves more PEBs to fill that gap, according
101 * to availability. Should be called with ubi->volumes_lock held.
102 */
103void ubi_update_reserved(struct ubi_device *ubi)
104{
105 int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
106
107 if (need <= 0 || ubi->avail_pebs == 0)
108 return;
109
110 need = min_t(int, need, ubi->avail_pebs);
111 ubi->avail_pebs -= need;
112 ubi->rsvd_pebs += need;
113 ubi->beb_rsvd_pebs += need;
114 ubi_msg("reserved more %d PEBs for bad PEB handling", need);
115}
116
117/**
118 * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
119 * eraseblock handling. 96 * eraseblock handling.
120 * @ubi: UBI device description object 97 * @ubi: UBI device description object
121 */ 98 */
122void ubi_calculate_reserved(struct ubi_device *ubi) 99void ubi_calculate_reserved(struct ubi_device *ubi)
123{ 100{
124 /* 101 ubi->beb_rsvd_level = ubi->good_peb_count/100;
125 * Calculate the actual number of PEBs currently needed to be reserved 102 ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
126 * for future bad eraseblock handling. 103 if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
127 */ 104 ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
128 ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
129 if (ubi->beb_rsvd_level < 0) {
130 ubi->beb_rsvd_level = 0;
131 ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
132 ubi->bad_peb_count, ubi->bad_peb_limit);
133 }
134} 105}
135 106
136/** 107/**
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index ac2b24d1783..6fb8ec2174a 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -149,10 +149,10 @@ enum {
149 * The @image_seq field is used to validate a UBI image that has been prepared 149 * The @image_seq field is used to validate a UBI image that has been prepared
150 * for a UBI device. The @image_seq value can be any value, but it must be the 150 * for a UBI device. The @image_seq value can be any value, but it must be the
151 * same on all eraseblocks. UBI will ensure that all new erase counter headers 151 * same on all eraseblocks. UBI will ensure that all new erase counter headers
152 * also contain this value, and will check the value when attaching the flash. 152 * also contain this value, and will check the value when scanning at start-up.
153 * One way to make use of @image_seq is to increase its value by one every time 153 * One way to make use of @image_seq is to increase its value by one every time
154 * an image is flashed over an existing image, then, if the flashing does not 154 * an image is flashed over an existing image, then, if the flashing does not
155 * complete, UBI will detect the error when attaching the media. 155 * complete, UBI will detect the error when scanning.
156 */ 156 */
157struct ubi_ec_hdr { 157struct ubi_ec_hdr {
158 __be32 magic; 158 __be32 magic;
@@ -298,8 +298,8 @@ struct ubi_vid_hdr {
298#define UBI_INT_VOL_COUNT 1 298#define UBI_INT_VOL_COUNT 1
299 299
300/* 300/*
301 * Starting ID of internal volumes: 0x7fffefff. 301 * Starting ID of internal volumes. There is reserved room for 4096 internal
302 * There is reserved room for 4096 internal volumes. 302 * volumes.
303 */ 303 */
304#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096) 304#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
305 305
@@ -375,141 +375,4 @@ struct ubi_vtbl_record {
375 __be32 crc; 375 __be32 crc;
376} __packed; 376} __packed;
377 377
378/* UBI fastmap on-flash data structures */
379
380#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
381#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
382
383/* fastmap on-flash data structure format version */
384#define UBI_FM_FMT_VERSION 1
385
386#define UBI_FM_SB_MAGIC 0x7B11D69F
387#define UBI_FM_HDR_MAGIC 0xD4B82EF7
388#define UBI_FM_VHDR_MAGIC 0xFA370ED1
389#define UBI_FM_POOL_MAGIC 0x67AF4D08
390#define UBI_FM_EBA_MAGIC 0xf0c040a8
391
392/* A fastmap supber block can be located between PEB 0 and
393 * UBI_FM_MAX_START */
394#define UBI_FM_MAX_START 64
395
396/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
397#define UBI_FM_MAX_BLOCKS 32
398
399/* 5% of the total number of PEBs have to be scanned while attaching
400 * from a fastmap.
401 * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
402 * UBI_FM_MAX_POOL_SIZE */
403#define UBI_FM_MIN_POOL_SIZE 8
404#define UBI_FM_MAX_POOL_SIZE 256
405
406#define UBI_FM_WL_POOL_SIZE 25
407
408/**
409 * struct ubi_fm_sb - UBI fastmap super block
410 * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
411 * @version: format version of this fastmap
412 * @data_crc: CRC over the fastmap data
413 * @used_blocks: number of PEBs used by this fastmap
414 * @block_loc: an array containing the location of all PEBs of the fastmap
415 * @block_ec: the erase counter of each used PEB
416 * @sqnum: highest sequence number value at the time while taking the fastmap
417 *
418 */
419struct ubi_fm_sb {
420 __be32 magic;
421 __u8 version;
422 __u8 padding1[3];
423 __be32 data_crc;
424 __be32 used_blocks;
425 __be32 block_loc[UBI_FM_MAX_BLOCKS];
426 __be32 block_ec[UBI_FM_MAX_BLOCKS];
427 __be64 sqnum;
428 __u8 padding2[32];
429} __packed;
430
431/**
432 * struct ubi_fm_hdr - header of the fastmap data set
433 * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
434 * @free_peb_count: number of free PEBs known by this fastmap
435 * @used_peb_count: number of used PEBs known by this fastmap
436 * @scrub_peb_count: number of to be scrubbed PEBs known by this fastmap
437 * @bad_peb_count: number of bad PEBs known by this fastmap
438 * @erase_peb_count: number of bad PEBs which have to be erased
439 * @vol_count: number of UBI volumes known by this fastmap
440 */
441struct ubi_fm_hdr {
442 __be32 magic;
443 __be32 free_peb_count;
444 __be32 used_peb_count;
445 __be32 scrub_peb_count;
446 __be32 bad_peb_count;
447 __be32 erase_peb_count;
448 __be32 vol_count;
449 __u8 padding[4];
450} __packed;
451
452/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
453
454/**
455 * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
456 * @magic: pool magic numer (%UBI_FM_POOL_MAGIC)
457 * @size: current pool size
458 * @max_size: maximal pool size
459 * @pebs: an array containing the location of all PEBs in this pool
460 */
461struct ubi_fm_scan_pool {
462 __be32 magic;
463 __be16 size;
464 __be16 max_size;
465 __be32 pebs[UBI_FM_MAX_POOL_SIZE];
466 __be32 padding[4];
467} __packed;
468
469/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
470
471/**
472 * struct ubi_fm_ec - stores the erase counter of a PEB
473 * @pnum: PEB number
474 * @ec: ec of this PEB
475 */
476struct ubi_fm_ec {
477 __be32 pnum;
478 __be32 ec;
479} __packed;
480
481/**
482 * struct ubi_fm_volhdr - Fastmap volume header
483 * it identifies the start of an eba table
484 * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
485 * @vol_id: volume id of the fastmapped volume
486 * @vol_type: type of the fastmapped volume
487 * @data_pad: data_pad value of the fastmapped volume
488 * @used_ebs: number of used LEBs within this volume
489 * @last_eb_bytes: number of bytes used in the last LEB
490 */
491struct ubi_fm_volhdr {
492 __be32 magic;
493 __be32 vol_id;
494 __u8 vol_type;
495 __u8 padding1[3];
496 __be32 data_pad;
497 __be32 used_ebs;
498 __be32 last_eb_bytes;
499 __u8 padding2[8];
500} __packed;
501
502/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba records */
503
504/**
505 * struct ubi_fm_eba - denotes an association beween a PEB and LEB
506 * @magic: EBA table magic number
507 * @reserved_pebs: number of table entries
508 * @pnum: PEB number of LEB (LEB is the index)
509 */
510struct ubi_fm_eba {
511 __be32 magic;
512 __be32 reserved_pebs;
513 __be32 pnum[0];
514} __packed;
515#endif /* !__UBI_MEDIA_H__ */ 378#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 8ea6297a208..d51d75d3444 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -43,6 +43,7 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44 44
45#include "ubi-media.h" 45#include "ubi-media.h"
46#include "scan.h"
46 47
47/* Maximum number of supported UBI devices */ 48/* Maximum number of supported UBI devices */
48#define UBI_MAX_DEVICES 32 49#define UBI_MAX_DEVICES 32
@@ -51,21 +52,21 @@
51#define UBI_NAME_STR "ubi" 52#define UBI_NAME_STR "ubi"
52 53
53/* Normal UBI messages */ 54/* Normal UBI messages */
54#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__) 55#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
55/* UBI warning messages */ 56/* UBI warning messages */
56#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \ 57#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
57 __func__, ##__VA_ARGS__) 58 __func__, ##__VA_ARGS__)
58/* UBI error messages */ 59/* UBI error messages */
59#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \ 60#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
60 __func__, ##__VA_ARGS__) 61 __func__, ##__VA_ARGS__)
61 62
63/* Lowest number PEBs reserved for bad PEB handling */
64#define MIN_RESEVED_PEBS 2
65
62/* Background thread name pattern */ 66/* Background thread name pattern */
63#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" 67#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
64 68
65/* 69/* This marker in the EBA table means that the LEB is um-mapped */
66 * This marker in the EBA table means that the LEB is um-mapped.
67 * NOTE! It has to have the same value as %UBI_ALL.
68 */
69#define UBI_LEB_UNMAPPED -1 70#define UBI_LEB_UNMAPPED -1
70 71
71/* 72/*
@@ -81,16 +82,6 @@
81 */ 82 */
82#define UBI_PROT_QUEUE_LEN 10 83#define UBI_PROT_QUEUE_LEN 10
83 84
84/* The volume ID/LEB number/erase counter is unknown */
85#define UBI_UNKNOWN -1
86
87/*
88 * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
89 * + 2 for the number plus 1 for the trailing zero byte.
90 */
91#define UBI_DFS_DIR_NAME "ubi%d"
92#define UBI_DFS_DIR_LEN (3 + 2 + 1)
93
94/* 85/*
95 * Error codes returned by the I/O sub-system. 86 * Error codes returned by the I/O sub-system.
96 * 87 *
@@ -127,7 +118,7 @@ enum {
127 * PEB 118 * PEB
128 * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target 119 * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
129 * PEB 120 * PEB
130 * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the 121 * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
131 * target PEB 122 * target PEB
132 * MOVE_RETRY: retry scrubbing the PEB 123 * MOVE_RETRY: retry scrubbing the PEB
133 */ 124 */
@@ -136,21 +127,10 @@ enum {
136 MOVE_SOURCE_RD_ERR, 127 MOVE_SOURCE_RD_ERR,
137 MOVE_TARGET_RD_ERR, 128 MOVE_TARGET_RD_ERR,
138 MOVE_TARGET_WR_ERR, 129 MOVE_TARGET_WR_ERR,
139 MOVE_TARGET_BITFLIPS, 130 MOVE_CANCEL_BITFLIPS,
140 MOVE_RETRY, 131 MOVE_RETRY,
141}; 132};
142 133
143/*
144 * Return codes of the fastmap sub-system
145 *
146 * UBI_NO_FASTMAP: No fastmap super block was found
147 * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
148 */
149enum {
150 UBI_NO_FASTMAP = 1,
151 UBI_BAD_FASTMAP,
152};
153
154/** 134/**
155 * struct ubi_wl_entry - wear-leveling entry. 135 * struct ubi_wl_entry - wear-leveling entry.
156 * @u.rb: link in the corresponding (free/used) RB-tree 136 * @u.rb: link in the corresponding (free/used) RB-tree
@@ -217,41 +197,6 @@ struct ubi_rename_entry {
217struct ubi_volume_desc; 197struct ubi_volume_desc;
218 198
219/** 199/**
220 * struct ubi_fastmap_layout - in-memory fastmap data structure.
221 * @e: PEBs used by the current fastmap
222 * @to_be_tortured: if non-zero tortured this PEB
223 * @used_blocks: number of used PEBs
224 * @max_pool_size: maximal size of the user pool
225 * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
226 */
227struct ubi_fastmap_layout {
228 struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
229 int to_be_tortured[UBI_FM_MAX_BLOCKS];
230 int used_blocks;
231 int max_pool_size;
232 int max_wl_pool_size;
233};
234
235/**
236 * struct ubi_fm_pool - in-memory fastmap pool
237 * @pebs: PEBs in this pool
238 * @used: number of used PEBs
239 * @size: total number of PEBs in this pool
240 * @max_size: maximal size of the pool
241 *
242 * A pool gets filled with up to max_size.
243 * If all PEBs within the pool are used a new fastmap will be written
244 * to the flash and the pool gets refilled with empty PEBs.
245 *
246 */
247struct ubi_fm_pool {
248 int pebs[UBI_FM_MAX_POOL_SIZE];
249 int used;
250 int size;
251 int max_size;
252};
253
254/**
255 * struct ubi_volume - UBI volume description data structure. 200 * struct ubi_volume - UBI volume description data structure.
256 * @dev: device object to make use of the the Linux device model 201 * @dev: device object to make use of the the Linux device model
257 * @cdev: character device object to create character device 202 * @cdev: character device object to create character device
@@ -277,6 +222,8 @@ struct ubi_fm_pool {
277 * @upd_ebs: how many eraseblocks are expected to be updated 222 * @upd_ebs: how many eraseblocks are expected to be updated
278 * @ch_lnum: LEB number which is being changing by the atomic LEB change 223 * @ch_lnum: LEB number which is being changing by the atomic LEB change
279 * operation 224 * operation
225 * @ch_dtype: data persistency type which is being changing by the atomic LEB
226 * change operation
280 * @upd_bytes: how many bytes are expected to be received for volume update or 227 * @upd_bytes: how many bytes are expected to be received for volume update or
281 * atomic LEB change 228 * atomic LEB change
282 * @upd_received: how many bytes were already received for volume update or 229 * @upd_received: how many bytes were already received for volume update or
@@ -323,6 +270,7 @@ struct ubi_volume {
323 270
324 int upd_ebs; 271 int upd_ebs;
325 int ch_lnum; 272 int ch_lnum;
273 int ch_dtype;
326 long long upd_bytes; 274 long long upd_bytes;
327 long long upd_received; 275 long long upd_received;
328 void *upd_buf; 276 void *upd_buf;
@@ -349,37 +297,6 @@ struct ubi_volume_desc {
349struct ubi_wl_entry; 297struct ubi_wl_entry;
350 298
351/** 299/**
352 * struct ubi_debug_info - debugging information for an UBI device.
353 *
354 * @chk_gen: if UBI general extra checks are enabled
355 * @chk_io: if UBI I/O extra checks are enabled
356 * @disable_bgt: disable the background task for testing purposes
357 * @emulate_bitflips: emulate bit-flips for testing purposes
358 * @emulate_io_failures: emulate write/erase failures for testing purposes
359 * @dfs_dir_name: name of debugfs directory containing files of this UBI device
360 * @dfs_dir: direntry object of the UBI device debugfs directory
361 * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
362 * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
363 * @dfs_disable_bgt: debugfs knob to disable the background task
364 * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
365 * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
366 */
367struct ubi_debug_info {
368 unsigned int chk_gen:1;
369 unsigned int chk_io:1;
370 unsigned int disable_bgt:1;
371 unsigned int emulate_bitflips:1;
372 unsigned int emulate_io_failures:1;
373 char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
374 struct dentry *dfs_dir;
375 struct dentry *dfs_chk_gen;
376 struct dentry *dfs_chk_io;
377 struct dentry *dfs_disable_bgt;
378 struct dentry *dfs_emulate_bitflips;
379 struct dentry *dfs_emulate_io_failures;
380};
381
382/**
383 * struct ubi_device - UBI device description structure 300 * struct ubi_device - UBI device description structure
384 * @dev: UBI device object to use the the Linux device model 301 * @dev: UBI device object to use the the Linux device model
385 * @cdev: character device object to create character device 302 * @cdev: character device object to create character device
@@ -417,21 +334,9 @@ struct ubi_debug_info {
417 * @ltree: the lock tree 334 * @ltree: the lock tree
418 * @alc_mutex: serializes "atomic LEB change" operations 335 * @alc_mutex: serializes "atomic LEB change" operations
419 * 336 *
420 * @fm_disabled: non-zero if fastmap is disabled (default)
421 * @fm: in-memory data structure of the currently used fastmap
422 * @fm_pool: in-memory data structure of the fastmap pool
423 * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
424 * sub-system
425 * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
426 * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
427 * @fm_size: fastmap size in bytes
428 * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
429 * @fm_work: fastmap work queue
430 *
431 * @used: RB-tree of used physical eraseblocks 337 * @used: RB-tree of used physical eraseblocks
432 * @erroneous: RB-tree of erroneous used physical eraseblocks 338 * @erroneous: RB-tree of erroneous used physical eraseblocks
433 * @free: RB-tree of free physical eraseblocks 339 * @free: RB-tree of free physical eraseblocks
434 * @free_count: Contains the number of elements in @free
435 * @scrub: RB-tree of physical eraseblocks which need scrubbing 340 * @scrub: RB-tree of physical eraseblocks which need scrubbing
436 * @pq: protection queue (contain physical eraseblocks which are temporarily 341 * @pq: protection queue (contain physical eraseblocks which are temporarily
437 * protected from the wear-leveling worker) 342 * protected from the wear-leveling worker)
@@ -456,7 +361,6 @@ struct ubi_debug_info {
456 * @flash_size: underlying MTD device size (in bytes) 361 * @flash_size: underlying MTD device size (in bytes)
457 * @peb_count: count of physical eraseblocks on the MTD device 362 * @peb_count: count of physical eraseblocks on the MTD device
458 * @peb_size: physical eraseblock size 363 * @peb_size: physical eraseblock size
459 * @bad_peb_limit: top limit of expected bad physical eraseblocks
460 * @bad_peb_count: count of bad physical eraseblocks 364 * @bad_peb_count: count of bad physical eraseblocks
461 * @good_peb_count: count of good physical eraseblocks 365 * @good_peb_count: count of good physical eraseblocks
462 * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not 366 * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -483,8 +387,9 @@ struct ubi_debug_info {
483 * time (MTD write buffer size) 387 * time (MTD write buffer size)
484 * @mtd: MTD device descriptor 388 * @mtd: MTD device descriptor
485 * 389 *
486 * @peb_buf: a buffer of PEB size used for different purposes 390 * @peb_buf1: a buffer of PEB size used for different purposes
487 * @buf_mutex: protects @peb_buf 391 * @peb_buf2: another buffer of PEB size used for different purposes
392 * @buf_mutex: protects @peb_buf1 and @peb_buf2
488 * @ckvol_mutex: serializes static volume checking when opening 393 * @ckvol_mutex: serializes static volume checking when opening
489 * 394 *
490 * @dbg: debugging information for this UBI device 395 * @dbg: debugging information for this UBI device
@@ -504,7 +409,6 @@ struct ubi_device {
504 int avail_pebs; 409 int avail_pebs;
505 int beb_rsvd_pebs; 410 int beb_rsvd_pebs;
506 int beb_rsvd_level; 411 int beb_rsvd_level;
507 int bad_peb_limit;
508 412
509 int autoresize_vol_id; 413 int autoresize_vol_id;
510 int vtbl_slots; 414 int vtbl_slots;
@@ -522,22 +426,10 @@ struct ubi_device {
522 struct rb_root ltree; 426 struct rb_root ltree;
523 struct mutex alc_mutex; 427 struct mutex alc_mutex;
524 428
525 /* Fastmap stuff */
526 int fm_disabled;
527 struct ubi_fastmap_layout *fm;
528 struct ubi_fm_pool fm_pool;
529 struct ubi_fm_pool fm_wl_pool;
530 struct rw_semaphore fm_sem;
531 struct mutex fm_mutex;
532 void *fm_buf;
533 size_t fm_size;
534 struct work_struct fm_work;
535
536 /* Wear-leveling sub-system's stuff */ 429 /* Wear-leveling sub-system's stuff */
537 struct rb_root used; 430 struct rb_root used;
538 struct rb_root erroneous; 431 struct rb_root erroneous;
539 struct rb_root free; 432 struct rb_root free;
540 int free_count;
541 struct rb_root scrub; 433 struct rb_root scrub;
542 struct list_head pq[UBI_PROT_QUEUE_LEN]; 434 struct list_head pq[UBI_PROT_QUEUE_LEN];
543 int pq_head; 435 int pq_head;
@@ -579,155 +471,12 @@ struct ubi_device {
579 int max_write_size; 471 int max_write_size;
580 struct mtd_info *mtd; 472 struct mtd_info *mtd;
581 473
582 void *peb_buf; 474 void *peb_buf1;
475 void *peb_buf2;
583 struct mutex buf_mutex; 476 struct mutex buf_mutex;
584 struct mutex ckvol_mutex; 477 struct mutex ckvol_mutex;
585 478
586 struct ubi_debug_info dbg; 479 struct ubi_debug_info *dbg;
587};
588
589/**
590 * struct ubi_ainf_peb - attach information about a physical eraseblock.
591 * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
592 * @pnum: physical eraseblock number
593 * @vol_id: ID of the volume this LEB belongs to
594 * @lnum: logical eraseblock number
595 * @scrub: if this physical eraseblock needs scrubbing
596 * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
597 * @sqnum: sequence number
598 * @u: unions RB-tree or @list links
599 * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
600 * @u.list: link in one of the eraseblock lists
601 *
602 * One object of this type is allocated for each physical eraseblock when
603 * attaching an MTD device. Note, if this PEB does not belong to any LEB /
604 * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
605 */
606struct ubi_ainf_peb {
607 int ec;
608 int pnum;
609 int vol_id;
610 int lnum;
611 unsigned int scrub:1;
612 unsigned int copy_flag:1;
613 unsigned long long sqnum;
614 union {
615 struct rb_node rb;
616 struct list_head list;
617 } u;
618};
619
620/**
621 * struct ubi_ainf_volume - attaching information about a volume.
622 * @vol_id: volume ID
623 * @highest_lnum: highest logical eraseblock number in this volume
624 * @leb_count: number of logical eraseblocks in this volume
625 * @vol_type: volume type
626 * @used_ebs: number of used logical eraseblocks in this volume (only for
627 * static volumes)
628 * @last_data_size: amount of data in the last logical eraseblock of this
629 * volume (always equivalent to the usable logical eraseblock
630 * size in case of dynamic volumes)
631 * @data_pad: how many bytes at the end of logical eraseblocks of this volume
632 * are not used (due to volume alignment)
633 * @compat: compatibility flags of this volume
634 * @rb: link in the volume RB-tree
635 * @root: root of the RB-tree containing all the eraseblock belonging to this
636 * volume (&struct ubi_ainf_peb objects)
637 *
638 * One object of this type is allocated for each volume when attaching an MTD
639 * device.
640 */
641struct ubi_ainf_volume {
642 int vol_id;
643 int highest_lnum;
644 int leb_count;
645 int vol_type;
646 int used_ebs;
647 int last_data_size;
648 int data_pad;
649 int compat;
650 struct rb_node rb;
651 struct rb_root root;
652};
653
654/**
655 * struct ubi_attach_info - MTD device attaching information.
656 * @volumes: root of the volume RB-tree
657 * @corr: list of corrupted physical eraseblocks
658 * @free: list of free physical eraseblocks
659 * @erase: list of physical eraseblocks which have to be erased
660 * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
661 * those belonging to "preserve"-compatible internal volumes)
662 * @corr_peb_count: count of PEBs in the @corr list
663 * @empty_peb_count: count of PEBs which are presumably empty (contain only
664 * 0xFF bytes)
665 * @alien_peb_count: count of PEBs in the @alien list
666 * @bad_peb_count: count of bad physical eraseblocks
667 * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
668 * as bad yet, but which look like bad
669 * @vols_found: number of volumes found
670 * @highest_vol_id: highest volume ID
671 * @is_empty: flag indicating whether the MTD device is empty or not
672 * @min_ec: lowest erase counter value
673 * @max_ec: highest erase counter value
674 * @max_sqnum: highest sequence number value
675 * @mean_ec: mean erase counter value
676 * @ec_sum: a temporary variable used when calculating @mean_ec
677 * @ec_count: a temporary variable used when calculating @mean_ec
678 * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
679 *
680 * This data structure contains the result of attaching an MTD device and may
681 * be used by other UBI sub-systems to build final UBI data structures, further
682 * error-recovery and so on.
683 */
684struct ubi_attach_info {
685 struct rb_root volumes;
686 struct list_head corr;
687 struct list_head free;
688 struct list_head erase;
689 struct list_head alien;
690 int corr_peb_count;
691 int empty_peb_count;
692 int alien_peb_count;
693 int bad_peb_count;
694 int maybe_bad_peb_count;
695 int vols_found;
696 int highest_vol_id;
697 int is_empty;
698 int min_ec;
699 int max_ec;
700 unsigned long long max_sqnum;
701 int mean_ec;
702 uint64_t ec_sum;
703 int ec_count;
704 struct kmem_cache *aeb_slab_cache;
705};
706
707/**
708 * struct ubi_work - UBI work description data structure.
709 * @list: a link in the list of pending works
710 * @func: worker function
711 * @e: physical eraseblock to erase
712 * @vol_id: the volume ID on which this erasure is being performed
713 * @lnum: the logical eraseblock number
714 * @torture: if the physical eraseblock has to be tortured
715 * @anchor: produce a anchor PEB to by used by fastmap
716 *
717 * The @func pointer points to the worker function. If the @cancel argument is
718 * not zero, the worker has to free the resources and exit immediately. The
719 * worker has to return zero in case of success and a negative error code in
720 * case of failure.
721 */
722struct ubi_work {
723 struct list_head list;
724 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
725 /* The below fields are only relevant to erasure works */
726 struct ubi_wl_entry *e;
727 int vol_id;
728 int lnum;
729 int torture;
730 int anchor;
731}; 480};
732 481
733#include "debug.h" 482#include "debug.h"
@@ -740,23 +489,12 @@ extern struct class *ubi_class;
740extern struct mutex ubi_devices_mutex; 489extern struct mutex ubi_devices_mutex;
741extern struct blocking_notifier_head ubi_notifiers; 490extern struct blocking_notifier_head ubi_notifiers;
742 491
743/* attach.c */
744int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
745 int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
746struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
747 int vol_id);
748void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
749struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
750 struct ubi_attach_info *ai);
751int ubi_attach(struct ubi_device *ubi, int force_scan);
752void ubi_destroy_ai(struct ubi_attach_info *ai);
753
754/* vtbl.c */ 492/* vtbl.c */
755int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 493int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
756 struct ubi_vtbl_record *vtbl_rec); 494 struct ubi_vtbl_record *vtbl_rec);
757int ubi_vtbl_rename_volumes(struct ubi_device *ubi, 495int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
758 struct list_head *rename_list); 496 struct list_head *rename_list);
759int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai); 497int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
760 498
761/* vmt.c */ 499/* vmt.c */
762int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 500int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
@@ -780,7 +518,6 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
780int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, 518int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
781 int length); 519 int length);
782int ubi_check_volume(struct ubi_device *ubi, int vol_id); 520int ubi_check_volume(struct ubi_device *ubi, int vol_id);
783void ubi_update_reserved(struct ubi_device *ubi);
784void ubi_calculate_reserved(struct ubi_device *ubi); 521void ubi_calculate_reserved(struct ubi_device *ubi);
785int ubi_check_pattern(const void *buf, uint8_t patt, int size); 522int ubi_check_pattern(const void *buf, uint8_t patt, int size);
786 523
@@ -790,33 +527,24 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
790int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 527int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
791 void *buf, int offset, int len, int check); 528 void *buf, int offset, int len, int check);
792int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 529int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
793 const void *buf, int offset, int len); 530 const void *buf, int offset, int len, int dtype);
794int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 531int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
795 int lnum, const void *buf, int len, int used_ebs); 532 int lnum, const void *buf, int len, int dtype,
533 int used_ebs);
796int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 534int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
797 int lnum, const void *buf, int len); 535 int lnum, const void *buf, int len, int dtype);
798int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 536int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
799 struct ubi_vid_hdr *vid_hdr); 537 struct ubi_vid_hdr *vid_hdr);
800int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); 538int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
801unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
802int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
803 struct ubi_attach_info *ai_scan);
804 539
805/* wl.c */ 540/* wl.c */
806int ubi_wl_get_peb(struct ubi_device *ubi); 541int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
807int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, 542int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
808 int pnum, int torture); 543int ubi_wl_flush(struct ubi_device *ubi);
809int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
810int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 544int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
811int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); 545int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
812void ubi_wl_close(struct ubi_device *ubi); 546void ubi_wl_close(struct ubi_device *ubi);
813int ubi_thread(void *u); 547int ubi_thread(void *u);
814struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
815int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
816 int lnum, int torture);
817int ubi_is_erase_work(struct ubi_work *wrk);
818void ubi_refill_pools(struct ubi_device *ubi);
819int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
820 548
821/* io.c */ 549/* io.c */
822int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, 550int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -836,8 +564,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
836 struct ubi_vid_hdr *vid_hdr); 564 struct ubi_vid_hdr *vid_hdr);
837 565
838/* build.c */ 566/* build.c */
839int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, 567int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
840 int vid_hdr_offset, int max_beb_per1024);
841int ubi_detach_mtd_dev(int ubi_num, int anyway); 568int ubi_detach_mtd_dev(int ubi_num, int anyway);
842struct ubi_device *ubi_get_device(int ubi_num); 569struct ubi_device *ubi_get_device(int ubi_num);
843void ubi_put_device(struct ubi_device *ubi); 570void ubi_put_device(struct ubi_device *ubi);
@@ -848,21 +575,11 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
848int ubi_notify_all(struct ubi_device *ubi, int ntype, 575int ubi_notify_all(struct ubi_device *ubi, int ntype,
849 struct notifier_block *nb); 576 struct notifier_block *nb);
850int ubi_enumerate_volumes(struct notifier_block *nb); 577int ubi_enumerate_volumes(struct notifier_block *nb);
851void ubi_free_internal_volumes(struct ubi_device *ubi);
852 578
853/* kapi.c */ 579/* kapi.c */
854void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); 580void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
855void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, 581void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
856 struct ubi_volume_info *vi); 582 struct ubi_volume_info *vi);
857/* scan.c */
858int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
859 int pnum, const struct ubi_vid_hdr *vid_hdr);
860
861/* fastmap.c */
862size_t ubi_calc_fm_size(struct ubi_device *ubi);
863int ubi_update_fastmap(struct ubi_device *ubi);
864int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
865 int fm_anchor);
866 583
867/* 584/*
868 * ubi_rb_for_each_entry - walk an RB-tree. 585 * ubi_rb_for_each_entry - walk an RB-tree.
@@ -878,21 +595,6 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
878 rb = rb_next(rb), \ 595 rb = rb_next(rb), \
879 pos = (rb ? container_of(rb, typeof(*pos), member) : NULL)) 596 pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
880 597
881/*
882 * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
883 *
884 * @av: volume attaching information
885 * @aeb: attaching eraseblock information
886 * @list: the list to move to
887 */
888static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
889 struct ubi_ainf_peb *aeb,
890 struct list_head *list)
891{
892 rb_erase(&aeb->u.rb, &av->root);
893 list_add_tail(&aeb->u.list, list);
894}
895
896/** 598/**
897 * ubi_zalloc_vid_hdr - allocate a volume identifier header object. 599 * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
898 * @ubi: UBI device description object 600 * @ubi: UBI device description object
@@ -967,7 +669,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
967 if (!ubi->ro_mode) { 669 if (!ubi->ro_mode) {
968 ubi->ro_mode = 1; 670 ubi->ro_mode = 1;
969 ubi_warn("switch to read-only mode"); 671 ubi_warn("switch to read-only mode");
970 dump_stack(); 672 ubi_dbg_dump_stack();
971 } 673 }
972} 674}
973 675
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index ec2c2dc1c1c..425bf5a3edd 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -64,7 +64,8 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
64 return 0; 64 return 0;
65 } 65 }
66 66
67 vtbl_rec = ubi->vtbl[vol->vol_id]; 67 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
68 sizeof(struct ubi_vtbl_record));
68 vtbl_rec.upd_marker = 1; 69 vtbl_rec.upd_marker = 1;
69 70
70 mutex_lock(&ubi->device_mutex); 71 mutex_lock(&ubi->device_mutex);
@@ -92,7 +93,8 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
92 93
93 dbg_gen("clear update marker for volume %d", vol->vol_id); 94 dbg_gen("clear update marker for volume %d", vol->vol_id);
94 95
95 vtbl_rec = ubi->vtbl[vol->vol_id]; 96 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
97 sizeof(struct ubi_vtbl_record));
96 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); 98 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
97 vtbl_rec.upd_marker = 0; 99 vtbl_rec.upd_marker = 0;
98 100
@@ -145,7 +147,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
145 } 147 }
146 148
147 if (bytes == 0) { 149 if (bytes == 0) {
148 err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); 150 err = ubi_wl_flush(ubi);
149 if (err) 151 if (err)
150 return err; 152 return err;
151 153
@@ -184,12 +186,14 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
184 dbg_gen("start changing LEB %d:%d, %u bytes", 186 dbg_gen("start changing LEB %d:%d, %u bytes",
185 vol->vol_id, req->lnum, req->bytes); 187 vol->vol_id, req->lnum, req->bytes);
186 if (req->bytes == 0) 188 if (req->bytes == 0)
187 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0); 189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
190 req->dtype);
188 191
189 vol->upd_bytes = req->bytes; 192 vol->upd_bytes = req->bytes;
190 vol->upd_received = 0; 193 vol->upd_received = 0;
191 vol->changing_leb = 1; 194 vol->changing_leb = 1;
192 vol->ch_lnum = req->lnum; 195 vol->ch_lnum = req->lnum;
196 vol->ch_dtype = req->dtype;
193 197
194 vol->upd_buf = vmalloc(req->bytes); 198 vol->upd_buf = vmalloc(req->bytes);
195 if (!vol->upd_buf) 199 if (!vol->upd_buf)
@@ -242,7 +246,8 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
242 return 0; 246 return 0;
243 } 247 }
244 248
245 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len); 249 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
250 UBI_UNKNOWN);
246 } else { 251 } else {
247 /* 252 /*
248 * When writing static volume, and this is the last logical 253 * When writing static volume, and this is the last logical
@@ -254,7 +259,8 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
254 * contain zeros, not random trash. 259 * contain zeros, not random trash.
255 */ 260 */
256 memset(buf + len, 0, vol->usable_leb_size - len); 261 memset(buf + len, 0, vol->usable_leb_size - len);
257 err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs); 262 err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
263 UBI_UNKNOWN, used_ebs);
258 } 264 }
259 265
260 return err; 266 return err;
@@ -359,7 +365,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
359 365
360 ubi_assert(vol->upd_received <= vol->upd_bytes); 366 ubi_assert(vol->upd_received <= vol->upd_bytes);
361 if (vol->upd_received == vol->upd_bytes) { 367 if (vol->upd_received == vol->upd_bytes) {
362 err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); 368 err = ubi_wl_flush(ubi);
363 if (err) 369 if (err)
364 return err; 370 return err;
365 /* The update is finished, clear the update marker */ 371 /* The update is finished, clear the update marker */
@@ -415,7 +421,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
415 len - vol->upd_bytes); 421 len - vol->upd_bytes);
416 len = ubi_calc_data_len(ubi, vol->upd_buf, len); 422 len = ubi_calc_data_len(ubi, vol->upd_buf, len);
417 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, 423 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
418 vol->upd_buf, len); 424 vol->upd_buf, len, UBI_UNKNOWN);
419 if (err) 425 if (err)
420 return err; 426 return err;
421 } 427 }
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 8330703c098..97e093d1967 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -26,10 +26,13 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/math64.h> 27#include <linux/math64.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/export.h>
30#include "ubi.h" 29#include "ubi.h"
31 30
32static int self_check_volumes(struct ubi_device *ubi); 31#ifdef CONFIG_MTD_UBI_DEBUG
32static int paranoid_check_volumes(struct ubi_device *ubi);
33#else
34#define paranoid_check_volumes(ubi) 0
35#endif
33 36
34static ssize_t vol_attribute_show(struct device *dev, 37static ssize_t vol_attribute_show(struct device *dev,
35 struct device_attribute *attr, char *buf); 38 struct device_attribute *attr, char *buf);
@@ -223,7 +226,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
223 } 226 }
224 227
225 if (vol_id == UBI_VOL_NUM_AUTO) { 228 if (vol_id == UBI_VOL_NUM_AUTO) {
226 ubi_err("out of volume IDs"); 229 dbg_err("out of volume IDs");
227 err = -ENFILE; 230 err = -ENFILE;
228 goto out_unlock; 231 goto out_unlock;
229 } 232 }
@@ -237,7 +240,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
237 /* Ensure that this volume does not exist */ 240 /* Ensure that this volume does not exist */
238 err = -EEXIST; 241 err = -EEXIST;
239 if (ubi->volumes[vol_id]) { 242 if (ubi->volumes[vol_id]) {
240 ubi_err("volume %d already exists", vol_id); 243 dbg_err("volume %d already exists", vol_id);
241 goto out_unlock; 244 goto out_unlock;
242 } 245 }
243 246
@@ -246,7 +249,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
246 if (ubi->volumes[i] && 249 if (ubi->volumes[i] &&
247 ubi->volumes[i]->name_len == req->name_len && 250 ubi->volumes[i]->name_len == req->name_len &&
248 !strcmp(ubi->volumes[i]->name, req->name)) { 251 !strcmp(ubi->volumes[i]->name, req->name)) {
249 ubi_err("volume \"%s\" exists (ID %d)", req->name, i); 252 dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
250 goto out_unlock; 253 goto out_unlock;
251 } 254 }
252 255
@@ -257,9 +260,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
257 260
258 /* Reserve physical eraseblocks */ 261 /* Reserve physical eraseblocks */
259 if (vol->reserved_pebs > ubi->avail_pebs) { 262 if (vol->reserved_pebs > ubi->avail_pebs) {
260 ubi_err("not enough PEBs, only %d available", ubi->avail_pebs); 263 dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
261 if (ubi->corr_peb_count) 264 if (ubi->corr_peb_count)
262 ubi_err("%d PEBs are corrupted and not used", 265 dbg_err("%d PEBs are corrupted and not used",
263 ubi->corr_peb_count); 266 ubi->corr_peb_count);
264 err = -ENOSPC; 267 err = -ENOSPC;
265 goto out_unlock; 268 goto out_unlock;
@@ -280,7 +283,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
280 * Finish all pending erases because there may be some LEBs belonging 283 * Finish all pending erases because there may be some LEBs belonging
281 * to the same volume ID. 284 * to the same volume ID.
282 */ 285 */
283 err = ubi_wl_flush(ubi, vol_id, UBI_ALL); 286 err = ubi_wl_flush(ubi);
284 if (err) 287 if (err)
285 goto out_acc; 288 goto out_acc;
286 289
@@ -356,7 +359,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
356 spin_unlock(&ubi->volumes_lock); 359 spin_unlock(&ubi->volumes_lock);
357 360
358 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); 361 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
359 self_check_volumes(ubi); 362 if (paranoid_check_volumes(ubi))
363 dbg_err("check failed while creating volume %d", vol_id);
360 return err; 364 return err;
361 365
362out_sysfs: 366out_sysfs:
@@ -443,13 +447,21 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
443 spin_lock(&ubi->volumes_lock); 447 spin_lock(&ubi->volumes_lock);
444 ubi->rsvd_pebs -= reserved_pebs; 448 ubi->rsvd_pebs -= reserved_pebs;
445 ubi->avail_pebs += reserved_pebs; 449 ubi->avail_pebs += reserved_pebs;
446 ubi_update_reserved(ubi); 450 i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
451 if (i > 0) {
452 i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
453 ubi->avail_pebs -= i;
454 ubi->rsvd_pebs += i;
455 ubi->beb_rsvd_pebs += i;
456 if (i > 0)
457 ubi_msg("reserve more %d PEBs", i);
458 }
447 ubi->vol_count -= 1; 459 ubi->vol_count -= 1;
448 spin_unlock(&ubi->volumes_lock); 460 spin_unlock(&ubi->volumes_lock);
449 461
450 ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED); 462 ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
451 if (!no_vtbl) 463 if (!no_vtbl && paranoid_check_volumes(ubi))
452 self_check_volumes(ubi); 464 dbg_err("check failed while removing volume %d", vol_id);
453 465
454 return err; 466 return err;
455 467
@@ -487,7 +499,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
487 499
488 if (vol->vol_type == UBI_STATIC_VOLUME && 500 if (vol->vol_type == UBI_STATIC_VOLUME &&
489 reserved_pebs < vol->used_ebs) { 501 reserved_pebs < vol->used_ebs) {
490 ubi_err("too small size %d, %d LEBs contain data", 502 dbg_err("too small size %d, %d LEBs contain data",
491 reserved_pebs, vol->used_ebs); 503 reserved_pebs, vol->used_ebs);
492 return -EINVAL; 504 return -EINVAL;
493 } 505 }
@@ -516,10 +528,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
516 if (pebs > 0) { 528 if (pebs > 0) {
517 spin_lock(&ubi->volumes_lock); 529 spin_lock(&ubi->volumes_lock);
518 if (pebs > ubi->avail_pebs) { 530 if (pebs > ubi->avail_pebs) {
519 ubi_err("not enough PEBs: requested %d, available %d", 531 dbg_err("not enough PEBs: requested %d, available %d",
520 pebs, ubi->avail_pebs); 532 pebs, ubi->avail_pebs);
521 if (ubi->corr_peb_count) 533 if (ubi->corr_peb_count)
522 ubi_err("%d PEBs are corrupted and not used", 534 dbg_err("%d PEBs are corrupted and not used",
523 ubi->corr_peb_count); 535 ubi->corr_peb_count);
524 spin_unlock(&ubi->volumes_lock); 536 spin_unlock(&ubi->volumes_lock);
525 err = -ENOSPC; 537 err = -ENOSPC;
@@ -535,7 +547,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
535 } 547 }
536 548
537 /* Change volume table record */ 549 /* Change volume table record */
538 vtbl_rec = ubi->vtbl[vol_id]; 550 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
539 vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); 551 vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
540 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 552 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
541 if (err) 553 if (err)
@@ -550,7 +562,15 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
550 spin_lock(&ubi->volumes_lock); 562 spin_lock(&ubi->volumes_lock);
551 ubi->rsvd_pebs += pebs; 563 ubi->rsvd_pebs += pebs;
552 ubi->avail_pebs -= pebs; 564 ubi->avail_pebs -= pebs;
553 ubi_update_reserved(ubi); 565 pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
566 if (pebs > 0) {
567 pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
568 ubi->avail_pebs -= pebs;
569 ubi->rsvd_pebs += pebs;
570 ubi->beb_rsvd_pebs += pebs;
571 if (pebs > 0)
572 ubi_msg("reserve more %d PEBs", pebs);
573 }
554 for (i = 0; i < reserved_pebs; i++) 574 for (i = 0; i < reserved_pebs; i++)
555 new_mapping[i] = vol->eba_tbl[i]; 575 new_mapping[i] = vol->eba_tbl[i];
556 kfree(vol->eba_tbl); 576 kfree(vol->eba_tbl);
@@ -567,7 +587,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
567 } 587 }
568 588
569 ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED); 589 ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
570 self_check_volumes(ubi); 590 if (paranoid_check_volumes(ubi))
591 dbg_err("check failed while re-sizing volume %d", vol_id);
571 return err; 592 return err;
572 593
573out_acc: 594out_acc:
@@ -616,8 +637,8 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
616 } 637 }
617 } 638 }
618 639
619 if (!err) 640 if (!err && paranoid_check_volumes(ubi))
620 self_check_volumes(ubi); 641 ;
621 return err; 642 return err;
622} 643}
623 644
@@ -664,7 +685,8 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
664 return err; 685 return err;
665 } 686 }
666 687
667 self_check_volumes(ubi); 688 if (paranoid_check_volumes(ubi))
689 dbg_err("check failed while adding volume %d", vol_id);
668 return err; 690 return err;
669 691
670out_cdev: 692out_cdev:
@@ -689,14 +711,16 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
689 volume_sysfs_close(vol); 711 volume_sysfs_close(vol);
690} 712}
691 713
714#ifdef CONFIG_MTD_UBI_DEBUG
715
692/** 716/**
693 * self_check_volume - check volume information. 717 * paranoid_check_volume - check volume information.
694 * @ubi: UBI device description object 718 * @ubi: UBI device description object
695 * @vol_id: volume ID 719 * @vol_id: volume ID
696 * 720 *
697 * Returns zero if volume is all right and a a negative error code if not. 721 * Returns zero if volume is all right and a a negative error code if not.
698 */ 722 */
699static int self_check_volume(struct ubi_device *ubi, int vol_id) 723static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
700{ 724{
701 int idx = vol_id2idx(ubi, vol_id); 725 int idx = vol_id2idx(ubi, vol_id);
702 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; 726 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -746,7 +770,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
746 } 770 }
747 771
748 if (vol->upd_marker && vol->corrupted) { 772 if (vol->upd_marker && vol->corrupted) {
749 ubi_err("update marker and corrupted simultaneously"); 773 dbg_err("update marker and corrupted simultaneously");
750 goto fail; 774 goto fail;
751 } 775 }
752 776
@@ -828,33 +852,34 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
828 return 0; 852 return 0;
829 853
830fail: 854fail:
831 ubi_err("self-check failed for volume %d", vol_id); 855 ubi_err("paranoid check failed for volume %d", vol_id);
832 if (vol) 856 if (vol)
833 ubi_dump_vol_info(vol); 857 ubi_dbg_dump_vol_info(vol);
834 ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 858 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
835 dump_stack(); 859 dump_stack();
836 spin_unlock(&ubi->volumes_lock); 860 spin_unlock(&ubi->volumes_lock);
837 return -EINVAL; 861 return -EINVAL;
838} 862}
839 863
840/** 864/**
841 * self_check_volumes - check information about all volumes. 865 * paranoid_check_volumes - check information about all volumes.
842 * @ubi: UBI device description object 866 * @ubi: UBI device description object
843 * 867 *
844 * Returns zero if volumes are all right and a a negative error code if not. 868 * Returns zero if volumes are all right and a a negative error code if not.
845 */ 869 */
846static int self_check_volumes(struct ubi_device *ubi) 870static int paranoid_check_volumes(struct ubi_device *ubi)
847{ 871{
848 int i, err = 0; 872 int i, err = 0;
849 873
850 if (!ubi_dbg_chk_gen(ubi)) 874 if (!ubi->dbg->chk_gen)
851 return 0; 875 return 0;
852 876
853 for (i = 0; i < ubi->vtbl_slots; i++) { 877 for (i = 0; i < ubi->vtbl_slots; i++) {
854 err = self_check_volume(ubi, i); 878 err = paranoid_check_volume(ubi, i);
855 if (err) 879 if (err)
856 break; 880 break;
857 } 881 }
858 882
859 return err; 883 return err;
860} 884}
885#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index d77b1c1d7c7..4b50a3029b8 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -37,15 +37,16 @@
37 * LEB 1. This scheme guarantees recoverability from unclean reboots. 37 * LEB 1. This scheme guarantees recoverability from unclean reboots.
38 * 38 *
39 * In this UBI implementation the on-flash volume table does not contain any 39 * In this UBI implementation the on-flash volume table does not contain any
40 * information about how much data static volumes contain. 40 * information about how many data static volumes contain. This information may
41 * be found from the scanning data.
41 * 42 *
42 * But it would still be beneficial to store this information in the volume 43 * But it would still be beneficial to store this information in the volume
43 * table. For example, suppose we have a static volume X, and all its physical 44 * table. For example, suppose we have a static volume X, and all its physical
44 * eraseblocks became bad for some reasons. Suppose we are attaching the 45 * eraseblocks became bad for some reasons. Suppose we are attaching the
45 * corresponding MTD device, for some reason we find no logical eraseblocks 46 * corresponding MTD device, the scanning has found no logical eraseblocks
46 * corresponding to the volume X. According to the volume table volume X does 47 * corresponding to the volume X. According to the volume table volume X does
47 * exist. So we don't know whether it is just empty or all its physical 48 * exist. So we don't know whether it is just empty or all its physical
48 * eraseblocks went bad. So we cannot alarm the user properly. 49 * eraseblocks went bad. So we cannot alarm the user about this corruption.
49 * 50 *
50 * The volume table also stores so-called "update marker", which is used for 51 * The volume table also stores so-called "update marker", which is used for
51 * volume updates. Before updating the volume, the update marker is set, and 52 * volume updates. Before updating the volume, the update marker is set, and
@@ -61,7 +62,11 @@
61#include <asm/div64.h> 62#include <asm/div64.h>
62#include "ubi.h" 63#include "ubi.h"
63 64
64static void self_vtbl_check(const struct ubi_device *ubi); 65#ifdef CONFIG_MTD_UBI_DEBUG
66static void paranoid_vtbl_check(const struct ubi_device *ubi);
67#else
68#define paranoid_vtbl_check(ubi)
69#endif
65 70
66/* Empty volume table record */ 71/* Empty volume table record */
67static struct ubi_vtbl_record empty_vtbl_record; 72static struct ubi_vtbl_record empty_vtbl_record;
@@ -101,12 +106,12 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
101 return err; 106 return err;
102 107
103 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, 108 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
104 ubi->vtbl_size); 109 ubi->vtbl_size, UBI_LONGTERM);
105 if (err) 110 if (err)
106 return err; 111 return err;
107 } 112 }
108 113
109 self_vtbl_check(ubi); 114 paranoid_vtbl_check(ubi);
110 return 0; 115 return 0;
111} 116}
112 117
@@ -153,7 +158,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
153 return err; 158 return err;
154 159
155 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, 160 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
156 ubi->vtbl_size); 161 ubi->vtbl_size, UBI_LONGTERM);
157 if (err) 162 if (err)
158 return err; 163 return err;
159 } 164 }
@@ -192,7 +197,7 @@ static int vtbl_check(const struct ubi_device *ubi,
192 if (be32_to_cpu(vtbl[i].crc) != crc) { 197 if (be32_to_cpu(vtbl[i].crc) != crc) {
193 ubi_err("bad CRC at record %u: %#08x, not %#08x", 198 ubi_err("bad CRC at record %u: %#08x, not %#08x",
194 i, crc, be32_to_cpu(vtbl[i].crc)); 199 i, crc, be32_to_cpu(vtbl[i].crc));
195 ubi_dump_vtbl_record(&vtbl[i], i); 200 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
196 return 1; 201 return 1;
197 } 202 }
198 203
@@ -224,7 +229,7 @@ static int vtbl_check(const struct ubi_device *ubi,
224 229
225 n = ubi->leb_size % alignment; 230 n = ubi->leb_size % alignment;
226 if (data_pad != n) { 231 if (data_pad != n) {
227 ubi_err("bad data_pad, has to be %d", n); 232 dbg_err("bad data_pad, has to be %d", n);
228 err = 6; 233 err = 6;
229 goto bad; 234 goto bad;
230 } 235 }
@@ -240,7 +245,7 @@ static int vtbl_check(const struct ubi_device *ubi,
240 } 245 }
241 246
242 if (reserved_pebs > ubi->good_peb_count) { 247 if (reserved_pebs > ubi->good_peb_count) {
243 ubi_err("too large reserved_pebs %d, good PEBs %d", 248 dbg_err("too large reserved_pebs %d, good PEBs %d",
244 reserved_pebs, ubi->good_peb_count); 249 reserved_pebs, ubi->good_peb_count);
245 err = 9; 250 err = 9;
246 goto bad; 251 goto bad;
@@ -270,10 +275,10 @@ static int vtbl_check(const struct ubi_device *ubi,
270 275
271 if (len1 > 0 && len1 == len2 && 276 if (len1 > 0 && len1 == len2 &&
272 !strncmp(vtbl[i].name, vtbl[n].name, len1)) { 277 !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
273 ubi_err("volumes %d and %d have the same name \"%s\"", 278 ubi_err("volumes %d and %d have the same name"
274 i, n, vtbl[i].name); 279 " \"%s\"", i, n, vtbl[i].name);
275 ubi_dump_vtbl_record(&vtbl[i], i); 280 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
276 ubi_dump_vtbl_record(&vtbl[n], n); 281 ubi_dbg_dump_vtbl_record(&vtbl[n], n);
277 return -EINVAL; 282 return -EINVAL;
278 } 283 }
279 } 284 }
@@ -283,64 +288,65 @@ static int vtbl_check(const struct ubi_device *ubi,
283 288
284bad: 289bad:
285 ubi_err("volume table check failed: record %d, error %d", i, err); 290 ubi_err("volume table check failed: record %d, error %d", i, err);
286 ubi_dump_vtbl_record(&vtbl[i], i); 291 ubi_dbg_dump_vtbl_record(&vtbl[i], i);
287 return -EINVAL; 292 return -EINVAL;
288} 293}
289 294
290/** 295/**
291 * create_vtbl - create a copy of volume table. 296 * create_vtbl - create a copy of volume table.
292 * @ubi: UBI device description object 297 * @ubi: UBI device description object
293 * @ai: attaching information 298 * @si: scanning information
294 * @copy: number of the volume table copy 299 * @copy: number of the volume table copy
295 * @vtbl: contents of the volume table 300 * @vtbl: contents of the volume table
296 * 301 *
297 * This function returns zero in case of success and a negative error code in 302 * This function returns zero in case of success and a negative error code in
298 * case of failure. 303 * case of failure.
299 */ 304 */
300static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, 305static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
301 int copy, void *vtbl) 306 int copy, void *vtbl)
302{ 307{
303 int err, tries = 0; 308 int err, tries = 0;
304 struct ubi_vid_hdr *vid_hdr; 309 static struct ubi_vid_hdr *vid_hdr;
305 struct ubi_ainf_peb *new_aeb; 310 struct ubi_scan_leb *new_seb;
306 311
307 dbg_gen("create volume table (copy #%d)", copy + 1); 312 ubi_msg("create volume table (copy #%d)", copy + 1);
308 313
309 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 314 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
310 if (!vid_hdr) 315 if (!vid_hdr)
311 return -ENOMEM; 316 return -ENOMEM;
312 317
313retry: 318retry:
314 new_aeb = ubi_early_get_peb(ubi, ai); 319 new_seb = ubi_scan_get_free_peb(ubi, si);
315 if (IS_ERR(new_aeb)) { 320 if (IS_ERR(new_seb)) {
316 err = PTR_ERR(new_aeb); 321 err = PTR_ERR(new_seb);
317 goto out_free; 322 goto out_free;
318 } 323 }
319 324
320 vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE; 325 vid_hdr->vol_type = UBI_VID_DYNAMIC;
321 vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); 326 vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
322 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; 327 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
323 vid_hdr->data_size = vid_hdr->used_ebs = 328 vid_hdr->data_size = vid_hdr->used_ebs =
324 vid_hdr->data_pad = cpu_to_be32(0); 329 vid_hdr->data_pad = cpu_to_be32(0);
325 vid_hdr->lnum = cpu_to_be32(copy); 330 vid_hdr->lnum = cpu_to_be32(copy);
326 vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); 331 vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
327 332
328 /* The EC header is already there, write the VID header */ 333 /* The EC header is already there, write the VID header */
329 err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr); 334 err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
330 if (err) 335 if (err)
331 goto write_error; 336 goto write_error;
332 337
333 /* Write the layout volume contents */ 338 /* Write the layout volume contents */
334 err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size); 339 err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
335 if (err) 340 if (err)
336 goto write_error; 341 goto write_error;
337 342
338 /* 343 /*
339 * And add it to the attaching information. Don't delete the old version 344 * And add it to the scanning information. Don't delete the old version
340 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. 345 * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
341 */ 346 */
342 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); 347 err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
343 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 348 vid_hdr, 0);
349 kfree(new_seb);
344 ubi_free_vid_hdr(ubi, vid_hdr); 350 ubi_free_vid_hdr(ubi, vid_hdr);
345 return err; 351 return err;
346 352
@@ -350,10 +356,10 @@ write_error:
350 * Probably this physical eraseblock went bad, try to pick 356 * Probably this physical eraseblock went bad, try to pick
351 * another one. 357 * another one.
352 */ 358 */
353 list_add(&new_aeb->u.list, &ai->erase); 359 list_add(&new_seb->u.list, &si->erase);
354 goto retry; 360 goto retry;
355 } 361 }
356 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 362 kfree(new_seb);
357out_free: 363out_free:
358 ubi_free_vid_hdr(ubi, vid_hdr); 364 ubi_free_vid_hdr(ubi, vid_hdr);
359 return err; 365 return err;
@@ -363,20 +369,20 @@ out_free:
363/** 369/**
364 * process_lvol - process the layout volume. 370 * process_lvol - process the layout volume.
365 * @ubi: UBI device description object 371 * @ubi: UBI device description object
366 * @ai: attaching information 372 * @si: scanning information
367 * @av: layout volume attaching information 373 * @sv: layout volume scanning information
368 * 374 *
369 * This function is responsible for reading the layout volume, ensuring it is 375 * This function is responsible for reading the layout volume, ensuring it is
370 * not corrupted, and recovering from corruptions if needed. Returns volume 376 * not corrupted, and recovering from corruptions if needed. Returns volume
371 * table in case of success and a negative error code in case of failure. 377 * table in case of success and a negative error code in case of failure.
372 */ 378 */
373static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, 379static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
374 struct ubi_attach_info *ai, 380 struct ubi_scan_info *si,
375 struct ubi_ainf_volume *av) 381 struct ubi_scan_volume *sv)
376{ 382{
377 int err; 383 int err;
378 struct rb_node *rb; 384 struct rb_node *rb;
379 struct ubi_ainf_peb *aeb; 385 struct ubi_scan_leb *seb;
380 struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL }; 386 struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
381 int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1}; 387 int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
382 388
@@ -408,27 +414,27 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
408 dbg_gen("check layout volume"); 414 dbg_gen("check layout volume");
409 415
410 /* Read both LEB 0 and LEB 1 into memory */ 416 /* Read both LEB 0 and LEB 1 into memory */
411 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { 417 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
412 leb[aeb->lnum] = vzalloc(ubi->vtbl_size); 418 leb[seb->lnum] = vzalloc(ubi->vtbl_size);
413 if (!leb[aeb->lnum]) { 419 if (!leb[seb->lnum]) {
414 err = -ENOMEM; 420 err = -ENOMEM;
415 goto out_free; 421 goto out_free;
416 } 422 }
417 423
418 err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0, 424 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
419 ubi->vtbl_size); 425 ubi->vtbl_size);
420 if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) 426 if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
421 /* 427 /*
422 * Scrub the PEB later. Note, -EBADMSG indicates an 428 * Scrub the PEB later. Note, -EBADMSG indicates an
423 * uncorrectable ECC error, but we have our own CRC and 429 * uncorrectable ECC error, but we have our own CRC and
424 * the data will be checked later. If the data is OK, 430 * the data will be checked later. If the data is OK,
425 * the PEB will be scrubbed (because we set 431 * the PEB will be scrubbed (because we set
426 * aeb->scrub). If the data is not OK, the contents of 432 * seb->scrub). If the data is not OK, the contents of
427 * the PEB will be recovered from the second copy, and 433 * the PEB will be recovered from the second copy, and
428 * aeb->scrub will be cleared in 434 * seb->scrub will be cleared in
429 * 'ubi_add_to_av()'. 435 * 'ubi_scan_add_used()'.
430 */ 436 */
431 aeb->scrub = 1; 437 seb->scrub = 1;
432 else if (err) 438 else if (err)
433 goto out_free; 439 goto out_free;
434 } 440 }
@@ -447,7 +453,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
447 ubi->vtbl_size); 453 ubi->vtbl_size);
448 if (leb_corrupted[1]) { 454 if (leb_corrupted[1]) {
449 ubi_warn("volume table copy #2 is corrupted"); 455 ubi_warn("volume table copy #2 is corrupted");
450 err = create_vtbl(ubi, ai, 1, leb[0]); 456 err = create_vtbl(ubi, si, 1, leb[0]);
451 if (err) 457 if (err)
452 goto out_free; 458 goto out_free;
453 ubi_msg("volume table was restored"); 459 ubi_msg("volume table was restored");
@@ -470,7 +476,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
470 } 476 }
471 477
472 ubi_warn("volume table copy #1 is corrupted"); 478 ubi_warn("volume table copy #1 is corrupted");
473 err = create_vtbl(ubi, ai, 0, leb[1]); 479 err = create_vtbl(ubi, si, 0, leb[1]);
474 if (err) 480 if (err)
475 goto out_free; 481 goto out_free;
476 ubi_msg("volume table was restored"); 482 ubi_msg("volume table was restored");
@@ -488,13 +494,13 @@ out_free:
488/** 494/**
489 * create_empty_lvol - create empty layout volume. 495 * create_empty_lvol - create empty layout volume.
490 * @ubi: UBI device description object 496 * @ubi: UBI device description object
491 * @ai: attaching information 497 * @si: scanning information
492 * 498 *
493 * This function returns volume table contents in case of success and a 499 * This function returns volume table contents in case of success and a
494 * negative error code in case of failure. 500 * negative error code in case of failure.
495 */ 501 */
496static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi, 502static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
497 struct ubi_attach_info *ai) 503 struct ubi_scan_info *si)
498{ 504{
499 int i; 505 int i;
500 struct ubi_vtbl_record *vtbl; 506 struct ubi_vtbl_record *vtbl;
@@ -509,7 +515,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
509 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 515 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
510 int err; 516 int err;
511 517
512 err = create_vtbl(ubi, ai, i, vtbl); 518 err = create_vtbl(ubi, si, i, vtbl);
513 if (err) { 519 if (err) {
514 vfree(vtbl); 520 vfree(vtbl);
515 return ERR_PTR(err); 521 return ERR_PTR(err);
@@ -522,19 +528,18 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
522/** 528/**
523 * init_volumes - initialize volume information for existing volumes. 529 * init_volumes - initialize volume information for existing volumes.
524 * @ubi: UBI device description object 530 * @ubi: UBI device description object
525 * @ai: scanning information 531 * @si: scanning information
526 * @vtbl: volume table 532 * @vtbl: volume table
527 * 533 *
528 * This function allocates volume description objects for existing volumes. 534 * This function allocates volume description objects for existing volumes.
529 * Returns zero in case of success and a negative error code in case of 535 * Returns zero in case of success and a negative error code in case of
530 * failure. 536 * failure.
531 */ 537 */
532static int init_volumes(struct ubi_device *ubi, 538static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
533 const struct ubi_attach_info *ai,
534 const struct ubi_vtbl_record *vtbl) 539 const struct ubi_vtbl_record *vtbl)
535{ 540{
536 int i, reserved_pebs = 0; 541 int i, reserved_pebs = 0;
537 struct ubi_ainf_volume *av; 542 struct ubi_scan_volume *sv;
538 struct ubi_volume *vol; 543 struct ubi_volume *vol;
539 544
540 for (i = 0; i < ubi->vtbl_slots; i++) { 545 for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -562,8 +567,8 @@ static int init_volumes(struct ubi_device *ubi,
562 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { 567 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
563 /* Auto re-size flag may be set only for one volume */ 568 /* Auto re-size flag may be set only for one volume */
564 if (ubi->autoresize_vol_id != -1) { 569 if (ubi->autoresize_vol_id != -1) {
565 ubi_err("more than one auto-resize volume (%d and %d)", 570 ubi_err("more than one auto-resize volume (%d "
566 ubi->autoresize_vol_id, i); 571 "and %d)", ubi->autoresize_vol_id, i);
567 kfree(vol); 572 kfree(vol);
568 return -EINVAL; 573 return -EINVAL;
569 } 574 }
@@ -590,8 +595,8 @@ static int init_volumes(struct ubi_device *ubi,
590 } 595 }
591 596
592 /* Static volumes only */ 597 /* Static volumes only */
593 av = ubi_find_av(ai, i); 598 sv = ubi_scan_find_sv(si, i);
594 if (!av) { 599 if (!sv) {
595 /* 600 /*
596 * No eraseblocks belonging to this volume found. We 601 * No eraseblocks belonging to this volume found. We
597 * don't actually know whether this static volume is 602 * don't actually know whether this static volume is
@@ -603,22 +608,22 @@ static int init_volumes(struct ubi_device *ubi,
603 continue; 608 continue;
604 } 609 }
605 610
606 if (av->leb_count != av->used_ebs) { 611 if (sv->leb_count != sv->used_ebs) {
607 /* 612 /*
608 * We found a static volume which misses several 613 * We found a static volume which misses several
609 * eraseblocks. Treat it as corrupted. 614 * eraseblocks. Treat it as corrupted.
610 */ 615 */
611 ubi_warn("static volume %d misses %d LEBs - corrupted", 616 ubi_warn("static volume %d misses %d LEBs - corrupted",
612 av->vol_id, av->used_ebs - av->leb_count); 617 sv->vol_id, sv->used_ebs - sv->leb_count);
613 vol->corrupted = 1; 618 vol->corrupted = 1;
614 continue; 619 continue;
615 } 620 }
616 621
617 vol->used_ebs = av->used_ebs; 622 vol->used_ebs = sv->used_ebs;
618 vol->used_bytes = 623 vol->used_bytes =
619 (long long)(vol->used_ebs - 1) * vol->usable_leb_size; 624 (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
620 vol->used_bytes += av->last_data_size; 625 vol->used_bytes += sv->last_data_size;
621 vol->last_eb_bytes = av->last_data_size; 626 vol->last_eb_bytes = sv->last_data_size;
622 } 627 }
623 628
624 /* And add the layout volume */ 629 /* And add the layout volume */
@@ -627,7 +632,7 @@ static int init_volumes(struct ubi_device *ubi,
627 return -ENOMEM; 632 return -ENOMEM;
628 633
629 vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS; 634 vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
630 vol->alignment = UBI_LAYOUT_VOLUME_ALIGN; 635 vol->alignment = 1;
631 vol->vol_type = UBI_DYNAMIC_VOLUME; 636 vol->vol_type = UBI_DYNAMIC_VOLUME;
632 vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1; 637 vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
633 memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1); 638 memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
@@ -659,104 +664,105 @@ static int init_volumes(struct ubi_device *ubi,
659} 664}
660 665
661/** 666/**
662 * check_av - check volume attaching information. 667 * check_sv - check volume scanning information.
663 * @vol: UBI volume description object 668 * @vol: UBI volume description object
664 * @av: volume attaching information 669 * @sv: volume scanning information
665 * 670 *
666 * This function returns zero if the volume attaching information is consistent 671 * This function returns zero if the volume scanning information is consistent
667 * to the data read from the volume tabla, and %-EINVAL if not. 672 * to the data read from the volume tabla, and %-EINVAL if not.
668 */ 673 */
669static int check_av(const struct ubi_volume *vol, 674static int check_sv(const struct ubi_volume *vol,
670 const struct ubi_ainf_volume *av) 675 const struct ubi_scan_volume *sv)
671{ 676{
672 int err; 677 int err;
673 678
674 if (av->highest_lnum >= vol->reserved_pebs) { 679 if (sv->highest_lnum >= vol->reserved_pebs) {
675 err = 1; 680 err = 1;
676 goto bad; 681 goto bad;
677 } 682 }
678 if (av->leb_count > vol->reserved_pebs) { 683 if (sv->leb_count > vol->reserved_pebs) {
679 err = 2; 684 err = 2;
680 goto bad; 685 goto bad;
681 } 686 }
682 if (av->vol_type != vol->vol_type) { 687 if (sv->vol_type != vol->vol_type) {
683 err = 3; 688 err = 3;
684 goto bad; 689 goto bad;
685 } 690 }
686 if (av->used_ebs > vol->reserved_pebs) { 691 if (sv->used_ebs > vol->reserved_pebs) {
687 err = 4; 692 err = 4;
688 goto bad; 693 goto bad;
689 } 694 }
690 if (av->data_pad != vol->data_pad) { 695 if (sv->data_pad != vol->data_pad) {
691 err = 5; 696 err = 5;
692 goto bad; 697 goto bad;
693 } 698 }
694 return 0; 699 return 0;
695 700
696bad: 701bad:
697 ubi_err("bad attaching information, error %d", err); 702 ubi_err("bad scanning information, error %d", err);
698 ubi_dump_av(av); 703 ubi_dbg_dump_sv(sv);
699 ubi_dump_vol_info(vol); 704 ubi_dbg_dump_vol_info(vol);
700 return -EINVAL; 705 return -EINVAL;
701} 706}
702 707
703/** 708/**
704 * check_attaching_info - check that attaching information. 709 * check_scanning_info - check that scanning information.
705 * @ubi: UBI device description object 710 * @ubi: UBI device description object
706 * @ai: attaching information 711 * @si: scanning information
707 * 712 *
708 * Even though we protect on-flash data by CRC checksums, we still don't trust 713 * Even though we protect on-flash data by CRC checksums, we still don't trust
709 * the media. This function ensures that attaching information is consistent to 714 * the media. This function ensures that scanning information is consistent to
710 * the information read from the volume table. Returns zero if the attaching 715 * the information read from the volume table. Returns zero if the scanning
711 * information is OK and %-EINVAL if it is not. 716 * information is OK and %-EINVAL if it is not.
712 */ 717 */
713static int check_attaching_info(const struct ubi_device *ubi, 718static int check_scanning_info(const struct ubi_device *ubi,
714 struct ubi_attach_info *ai) 719 struct ubi_scan_info *si)
715{ 720{
716 int err, i; 721 int err, i;
717 struct ubi_ainf_volume *av; 722 struct ubi_scan_volume *sv;
718 struct ubi_volume *vol; 723 struct ubi_volume *vol;
719 724
720 if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { 725 if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
721 ubi_err("found %d volumes while attaching, maximum is %d + %d", 726 ubi_err("scanning found %d volumes, maximum is %d + %d",
722 ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); 727 si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
723 return -EINVAL; 728 return -EINVAL;
724 } 729 }
725 730
726 if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && 731 if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
727 ai->highest_vol_id < UBI_INTERNAL_VOL_START) { 732 si->highest_vol_id < UBI_INTERNAL_VOL_START) {
728 ubi_err("too large volume ID %d found", ai->highest_vol_id); 733 ubi_err("too large volume ID %d found by scanning",
734 si->highest_vol_id);
729 return -EINVAL; 735 return -EINVAL;
730 } 736 }
731 737
732 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 738 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
733 cond_resched(); 739 cond_resched();
734 740
735 av = ubi_find_av(ai, i); 741 sv = ubi_scan_find_sv(si, i);
736 vol = ubi->volumes[i]; 742 vol = ubi->volumes[i];
737 if (!vol) { 743 if (!vol) {
738 if (av) 744 if (sv)
739 ubi_remove_av(ai, av); 745 ubi_scan_rm_volume(si, sv);
740 continue; 746 continue;
741 } 747 }
742 748
743 if (vol->reserved_pebs == 0) { 749 if (vol->reserved_pebs == 0) {
744 ubi_assert(i < ubi->vtbl_slots); 750 ubi_assert(i < ubi->vtbl_slots);
745 751
746 if (!av) 752 if (!sv)
747 continue; 753 continue;
748 754
749 /* 755 /*
750 * During attaching we found a volume which does not 756 * During scanning we found a volume which does not
751 * exist according to the information in the volume 757 * exist according to the information in the volume
752 * table. This must have happened due to an unclean 758 * table. This must have happened due to an unclean
753 * reboot while the volume was being removed. Discard 759 * reboot while the volume was being removed. Discard
754 * these eraseblocks. 760 * these eraseblocks.
755 */ 761 */
756 ubi_msg("finish volume %d removal", av->vol_id); 762 ubi_msg("finish volume %d removal", sv->vol_id);
757 ubi_remove_av(ai, av); 763 ubi_scan_rm_volume(si, sv);
758 } else if (av) { 764 } else if (sv) {
759 err = check_av(vol, av); 765 err = check_sv(vol, sv);
760 if (err) 766 if (err)
761 return err; 767 return err;
762 } 768 }
@@ -768,16 +774,16 @@ static int check_attaching_info(const struct ubi_device *ubi,
768/** 774/**
769 * ubi_read_volume_table - read the volume table. 775 * ubi_read_volume_table - read the volume table.
770 * @ubi: UBI device description object 776 * @ubi: UBI device description object
771 * @ai: attaching information 777 * @si: scanning information
772 * 778 *
773 * This function reads volume table, checks it, recover from errors if needed, 779 * This function reads volume table, checks it, recover from errors if needed,
774 * or creates it if needed. Returns zero in case of success and a negative 780 * or creates it if needed. Returns zero in case of success and a negative
775 * error code in case of failure. 781 * error code in case of failure.
776 */ 782 */
777int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) 783int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
778{ 784{
779 int i, err; 785 int i, err;
780 struct ubi_ainf_volume *av; 786 struct ubi_scan_volume *sv;
781 787
782 empty_vtbl_record.crc = cpu_to_be32(0xf116c36b); 788 empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
783 789
@@ -792,8 +798,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
792 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; 798 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
793 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); 799 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
794 800
795 av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID); 801 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
796 if (!av) { 802 if (!sv) {
797 /* 803 /*
798 * No logical eraseblocks belonging to the layout volume were 804 * No logical eraseblocks belonging to the layout volume were
799 * found. This could mean that the flash is just empty. In 805 * found. This could mean that the flash is just empty. In
@@ -802,8 +808,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
802 * But if flash is not empty this must be a corruption or the 808 * But if flash is not empty this must be a corruption or the
803 * MTD device just contains garbage. 809 * MTD device just contains garbage.
804 */ 810 */
805 if (ai->is_empty) { 811 if (si->is_empty) {
806 ubi->vtbl = create_empty_lvol(ubi, ai); 812 ubi->vtbl = create_empty_lvol(ubi, si);
807 if (IS_ERR(ubi->vtbl)) 813 if (IS_ERR(ubi->vtbl))
808 return PTR_ERR(ubi->vtbl); 814 return PTR_ERR(ubi->vtbl);
809 } else { 815 } else {
@@ -811,14 +817,14 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
811 return -EINVAL; 817 return -EINVAL;
812 } 818 }
813 } else { 819 } else {
814 if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) { 820 if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
815 /* This must not happen with proper UBI images */ 821 /* This must not happen with proper UBI images */
816 ubi_err("too many LEBs (%d) in layout volume", 822 dbg_err("too many LEBs (%d) in layout volume",
817 av->leb_count); 823 sv->leb_count);
818 return -EINVAL; 824 return -EINVAL;
819 } 825 }
820 826
821 ubi->vtbl = process_lvol(ubi, ai, av); 827 ubi->vtbl = process_lvol(ubi, si, sv);
822 if (IS_ERR(ubi->vtbl)) 828 if (IS_ERR(ubi->vtbl))
823 return PTR_ERR(ubi->vtbl); 829 return PTR_ERR(ubi->vtbl);
824 } 830 }
@@ -829,15 +835,15 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
829 * The layout volume is OK, initialize the corresponding in-RAM data 835 * The layout volume is OK, initialize the corresponding in-RAM data
830 * structures. 836 * structures.
831 */ 837 */
832 err = init_volumes(ubi, ai, ubi->vtbl); 838 err = init_volumes(ubi, si, ubi->vtbl);
833 if (err) 839 if (err)
834 goto out_free; 840 goto out_free;
835 841
836 /* 842 /*
837 * Make sure that the attaching information is consistent to the 843 * Make sure that the scanning information is consistent to the
838 * information stored in the volume table. 844 * information stored in the volume table.
839 */ 845 */
840 err = check_attaching_info(ubi, ai); 846 err = check_scanning_info(ubi, si);
841 if (err) 847 if (err)
842 goto out_free; 848 goto out_free;
843 849
@@ -852,17 +858,21 @@ out_free:
852 return err; 858 return err;
853} 859}
854 860
861#ifdef CONFIG_MTD_UBI_DEBUG
862
855/** 863/**
856 * self_vtbl_check - check volume table. 864 * paranoid_vtbl_check - check volume table.
857 * @ubi: UBI device description object 865 * @ubi: UBI device description object
858 */ 866 */
859static void self_vtbl_check(const struct ubi_device *ubi) 867static void paranoid_vtbl_check(const struct ubi_device *ubi)
860{ 868{
861 if (!ubi_dbg_chk_gen(ubi)) 869 if (!ubi->dbg->chk_gen)
862 return; 870 return;
863 871
864 if (vtbl_check(ubi, ubi->vtbl)) { 872 if (vtbl_check(ubi, ubi->vtbl)) {
865 ubi_err("self-check failed"); 873 ubi_err("paranoid check failed");
866 BUG(); 874 BUG();
867 } 875 }
868} 876}
877
878#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5df49d3cb5c..0696e36b053 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * @ubi: UBI device description object
2 * Copyright (c) International Business Machines Corp., 2006 3 * Copyright (c) International Business Machines Corp., 2006
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -40,6 +41,12 @@
40 * physical eraseblocks with low erase counter to free physical eraseblocks 41 * physical eraseblocks with low erase counter to free physical eraseblocks
41 * with high erase counter. 42 * with high erase counter.
42 * 43 *
44 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
45 * an "optimal" physical eraseblock. For example, when it is known that the
46 * physical eraseblock will be "put" soon because it contains short-term data,
47 * the WL sub-system may pick a free physical eraseblock with low erase
48 * counter, and so forth.
49 *
43 * If the WL sub-system fails to erase a physical eraseblock, it marks it as 50 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
44 * bad. 51 * bad.
45 * 52 *
@@ -63,7 +70,8 @@
63 * to the user; instead, we first want to let users fill them up with data; 70 * to the user; instead, we first want to let users fill them up with data;
64 * 71 *
65 * o there is a chance that the user will put the physical eraseblock very 72 * o there is a chance that the user will put the physical eraseblock very
66 * soon, so it makes sense not to move it for some time, but wait. 73 * soon, so it makes sense not to move it for some time, but wait; this is
74 * especially important in case of "short term" physical eraseblocks.
67 * 75 *
68 * Physical eraseblocks stay protected only for limited time. But the "time" is 76 * Physical eraseblocks stay protected only for limited time. But the "time" is
69 * measured in erase cycles in this case. This is implemented with help of the 77 * measured in erase cycles in this case. This is implemented with help of the
@@ -134,46 +142,37 @@
134 */ 142 */
135#define WL_MAX_FAILURES 32 143#define WL_MAX_FAILURES 32
136 144
137static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
138static int self_check_in_wl_tree(const struct ubi_device *ubi,
139 struct ubi_wl_entry *e, struct rb_root *root);
140static int self_check_in_pq(const struct ubi_device *ubi,
141 struct ubi_wl_entry *e);
142
143#ifdef CONFIG_MTD_UBI_FASTMAP
144/** 145/**
145 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue 146 * struct ubi_work - UBI work description data structure.
146 * @wrk: the work description object 147 * @list: a link in the list of pending works
147 */ 148 * @func: worker function
148static void update_fastmap_work_fn(struct work_struct *wrk) 149 * @e: physical eraseblock to erase
149{ 150 * @torture: if the physical eraseblock has to be tortured
150 struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work); 151 *
151 ubi_update_fastmap(ubi); 152 * The @func pointer points to the worker function. If the @cancel argument is
152} 153 * not zero, the worker has to free the resources and exit immediately. The
153 154 * worker has to return zero in case of success and a negative error code in
154/** 155 * case of failure.
155 * ubi_ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
156 * @ubi: UBI device description object
157 * @pnum: the to be checked PEB
158 */ 156 */
159static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) 157struct ubi_work {
160{ 158 struct list_head list;
161 int i; 159 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
162 160 /* The below fields are only relevant to erasure works */
163 if (!ubi->fm) 161 struct ubi_wl_entry *e;
164 return 0; 162 int torture;
165 163};
166 for (i = 0; i < ubi->fm->used_blocks; i++) 164
167 if (ubi->fm->e[i]->pnum == pnum) 165#ifdef CONFIG_MTD_UBI_DEBUG
168 return 1; 166static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
169 167static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
170 return 0; 168 struct ubi_wl_entry *e,
171} 169 struct rb_root *root);
170static int paranoid_check_in_pq(const struct ubi_device *ubi,
171 struct ubi_wl_entry *e);
172#else 172#else
173static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) 173#define paranoid_check_ec(ubi, pnum, ec) 0
174{ 174#define paranoid_check_in_wl_tree(ubi, e, root)
175 return 0; 175#define paranoid_check_in_pq(ubi, e) 0
176}
177#endif 176#endif
178 177
179/** 178/**
@@ -272,16 +271,18 @@ static int produce_free_peb(struct ubi_device *ubi)
272{ 271{
273 int err; 272 int err;
274 273
274 spin_lock(&ubi->wl_lock);
275 while (!ubi->free.rb_node) { 275 while (!ubi->free.rb_node) {
276 spin_unlock(&ubi->wl_lock); 276 spin_unlock(&ubi->wl_lock);
277 277
278 dbg_wl("do one work synchronously"); 278 dbg_wl("do one work synchronously");
279 err = do_work(ubi); 279 err = do_work(ubi);
280
281 spin_lock(&ubi->wl_lock);
282 if (err) 280 if (err)
283 return err; 281 return err;
282
283 spin_lock(&ubi->wl_lock);
284 } 284 }
285 spin_unlock(&ubi->wl_lock);
285 286
286 return 0; 287 return 0;
287} 288}
@@ -348,22 +349,19 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
348 349
349/** 350/**
350 * find_wl_entry - find wear-leveling entry closest to certain erase counter. 351 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
351 * @ubi: UBI device description object
352 * @root: the RB-tree where to look for 352 * @root: the RB-tree where to look for
353 * @diff: maximum possible difference from the smallest erase counter 353 * @max: highest possible erase counter
354 * 354 *
355 * This function looks for a wear leveling entry with erase counter closest to 355 * This function looks for a wear leveling entry with erase counter closest to
356 * min + @diff, where min is the smallest erase counter. 356 * @max and less than @max.
357 */ 357 */
358static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, 358static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
359 struct rb_root *root, int diff)
360{ 359{
361 struct rb_node *p; 360 struct rb_node *p;
362 struct ubi_wl_entry *e, *prev_e = NULL; 361 struct ubi_wl_entry *e;
363 int max;
364 362
365 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); 363 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
366 max = e->ec + diff; 364 max += e->ec;
367 365
368 p = root->rb_node; 366 p = root->rb_node;
369 while (p) { 367 while (p) {
@@ -374,143 +372,39 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
374 p = p->rb_left; 372 p = p->rb_left;
375 else { 373 else {
376 p = p->rb_right; 374 p = p->rb_right;
377 prev_e = e;
378 e = e1; 375 e = e1;
379 } 376 }
380 } 377 }
381 378
382 /* If no fastmap has been written and this WL entry can be used
383 * as anchor PEB, hold it back and return the second best WL entry
384 * such that fastmap can use the anchor PEB later. */
385 if (prev_e && !ubi->fm_disabled &&
386 !ubi->fm && e->pnum < UBI_FM_MAX_START)
387 return prev_e;
388
389 return e; 379 return e;
390} 380}
391 381
392/** 382/**
393 * find_mean_wl_entry - find wear-leveling entry with medium erase counter. 383 * ubi_wl_get_peb - get a physical eraseblock.
394 * @ubi: UBI device description object 384 * @ubi: UBI device description object
395 * @root: the RB-tree where to look for 385 * @dtype: type of data which will be stored in this physical eraseblock
396 * 386 *
397 * This function looks for a wear leveling entry with medium erase counter, 387 * This function returns a physical eraseblock in case of success and a
398 * but not greater or equivalent than the lowest erase counter plus 388 * negative error code in case of failure. Might sleep.
399 * %WL_FREE_MAX_DIFF/2.
400 */ 389 */
401static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, 390int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
402 struct rb_root *root)
403{ 391{
392 int err, medium_ec;
404 struct ubi_wl_entry *e, *first, *last; 393 struct ubi_wl_entry *e, *first, *last;
405 394
406 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); 395 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
407 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); 396 dtype == UBI_UNKNOWN);
408
409 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
410 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
411
412#ifdef CONFIG_MTD_UBI_FASTMAP
413 /* If no fastmap has been written and this WL entry can be used
414 * as anchor PEB, hold it back and return the second best
415 * WL entry such that fastmap can use the anchor PEB later. */
416 if (e && !ubi->fm_disabled && !ubi->fm &&
417 e->pnum < UBI_FM_MAX_START)
418 e = rb_entry(rb_next(root->rb_node),
419 struct ubi_wl_entry, u.rb);
420#endif
421 } else
422 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
423
424 return e;
425}
426
427#ifdef CONFIG_MTD_UBI_FASTMAP
428/**
429 * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
430 * @root: the RB-tree where to look for
431 */
432static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
433{
434 struct rb_node *p;
435 struct ubi_wl_entry *e, *victim = NULL;
436 int max_ec = UBI_MAX_ERASECOUNTER;
437
438 ubi_rb_for_each_entry(p, e, root, u.rb) {
439 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
440 victim = e;
441 max_ec = e->ec;
442 }
443 }
444
445 return victim;
446}
447
448static int anchor_pebs_avalible(struct rb_root *root)
449{
450 struct rb_node *p;
451 struct ubi_wl_entry *e;
452
453 ubi_rb_for_each_entry(p, e, root, u.rb)
454 if (e->pnum < UBI_FM_MAX_START)
455 return 1;
456
457 return 0;
458}
459
460/**
461 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
462 * @ubi: UBI device description object
463 * @anchor: This PEB will be used as anchor PEB by fastmap
464 *
465 * The function returns a physical erase block with a given maximal number
466 * and removes it from the wl subsystem.
467 * Must be called with wl_lock held!
468 */
469struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
470{
471 struct ubi_wl_entry *e = NULL;
472
473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
474 goto out;
475
476 if (anchor)
477 e = find_anchor_wl_entry(&ubi->free);
478 else
479 e = find_mean_wl_entry(ubi, &ubi->free);
480
481 if (!e)
482 goto out;
483
484 self_check_in_wl_tree(ubi, e, &ubi->free);
485
486 /* remove it from the free list,
487 * the wl subsystem does no longer know this erase block */
488 rb_erase(&e->u.rb, &ubi->free);
489 ubi->free_count--;
490out:
491 return e;
492}
493#endif
494
495/**
496 * __wl_get_peb - get a physical eraseblock.
497 * @ubi: UBI device description object
498 *
499 * This function returns a physical eraseblock in case of success and a
500 * negative error code in case of failure.
501 */
502static int __wl_get_peb(struct ubi_device *ubi)
503{
504 int err;
505 struct ubi_wl_entry *e;
506 397
507retry: 398retry:
399 spin_lock(&ubi->wl_lock);
508 if (!ubi->free.rb_node) { 400 if (!ubi->free.rb_node) {
509 if (ubi->works_count == 0) { 401 if (ubi->works_count == 0) {
510 ubi_err("no free eraseblocks");
511 ubi_assert(list_empty(&ubi->works)); 402 ubi_assert(list_empty(&ubi->works));
403 ubi_err("no free eraseblocks");
404 spin_unlock(&ubi->wl_lock);
512 return -ENOSPC; 405 return -ENOSPC;
513 } 406 }
407 spin_unlock(&ubi->wl_lock);
514 408
515 err = produce_free_peb(ubi); 409 err = produce_free_peb(ubi);
516 if (err < 0) 410 if (err < 0)
@@ -518,186 +412,66 @@ retry:
518 goto retry; 412 goto retry;
519 } 413 }
520 414
521 e = find_mean_wl_entry(ubi, &ubi->free); 415 switch (dtype) {
522 if (!e) { 416 case UBI_LONGTERM:
523 ubi_err("no free eraseblocks"); 417 /*
524 return -ENOSPC; 418 * For long term data we pick a physical eraseblock with high
419 * erase counter. But the highest erase counter we can pick is
420 * bounded by the the lowest erase counter plus
421 * %WL_FREE_MAX_DIFF.
422 */
423 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
424 break;
425 case UBI_UNKNOWN:
426 /*
427 * For unknown data we pick a physical eraseblock with medium
428 * erase counter. But we by no means can pick a physical
429 * eraseblock with erase counter greater or equivalent than the
430 * lowest erase counter plus %WL_FREE_MAX_DIFF.
431 */
432 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
433 u.rb);
434 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
435
436 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
437 e = rb_entry(ubi->free.rb_node,
438 struct ubi_wl_entry, u.rb);
439 else {
440 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
441 e = find_wl_entry(&ubi->free, medium_ec);
442 }
443 break;
444 case UBI_SHORTTERM:
445 /*
446 * For short term data we pick a physical eraseblock with the
447 * lowest erase counter as we expect it will be erased soon.
448 */
449 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
450 break;
451 default:
452 BUG();
525 } 453 }
526 454
527 self_check_in_wl_tree(ubi, e, &ubi->free); 455 paranoid_check_in_wl_tree(ubi, e, &ubi->free);
528 456
529 /* 457 /*
530 * Move the physical eraseblock to the protection queue where it will 458 * Move the physical eraseblock to the protection queue where it will
531 * be protected from being moved for some time. 459 * be protected from being moved for some time.
532 */ 460 */
533 rb_erase(&e->u.rb, &ubi->free); 461 rb_erase(&e->u.rb, &ubi->free);
534 ubi->free_count--;
535 dbg_wl("PEB %d EC %d", e->pnum, e->ec); 462 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
536#ifndef CONFIG_MTD_UBI_FASTMAP
537 /* We have to enqueue e only if fastmap is disabled,
538 * is fastmap enabled prot_queue_add() will be called by
539 * ubi_wl_get_peb() after removing e from the pool. */
540 prot_queue_add(ubi, e); 463 prot_queue_add(ubi, e);
541#endif
542 return e->pnum;
543}
544
545#ifdef CONFIG_MTD_UBI_FASTMAP
546/**
547 * return_unused_pool_pebs - returns unused PEB to the free tree.
548 * @ubi: UBI device description object
549 * @pool: fastmap pool description object
550 */
551static void return_unused_pool_pebs(struct ubi_device *ubi,
552 struct ubi_fm_pool *pool)
553{
554 int i;
555 struct ubi_wl_entry *e;
556
557 for (i = pool->used; i < pool->size; i++) {
558 e = ubi->lookuptbl[pool->pebs[i]];
559 wl_tree_add(e, &ubi->free);
560 ubi->free_count++;
561 }
562}
563
564/**
565 * refill_wl_pool - refills all the fastmap pool used by the
566 * WL sub-system.
567 * @ubi: UBI device description object
568 */
569static void refill_wl_pool(struct ubi_device *ubi)
570{
571 struct ubi_wl_entry *e;
572 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
573
574 return_unused_pool_pebs(ubi, pool);
575
576 for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
577 if (!ubi->free.rb_node ||
578 (ubi->free_count - ubi->beb_rsvd_pebs < 5))
579 break;
580
581 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
582 self_check_in_wl_tree(ubi, e, &ubi->free);
583 rb_erase(&e->u.rb, &ubi->free);
584 ubi->free_count--;
585
586 pool->pebs[pool->size] = e->pnum;
587 }
588 pool->used = 0;
589}
590
591/**
592 * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
593 * @ubi: UBI device description object
594 */
595static void refill_wl_user_pool(struct ubi_device *ubi)
596{
597 struct ubi_fm_pool *pool = &ubi->fm_pool;
598
599 return_unused_pool_pebs(ubi, pool);
600
601 for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
602 if (!ubi->free.rb_node ||
603 (ubi->free_count - ubi->beb_rsvd_pebs < 1))
604 break;
605
606 pool->pebs[pool->size] = __wl_get_peb(ubi);
607 if (pool->pebs[pool->size] < 0)
608 break;
609 }
610 pool->used = 0;
611}
612
613/**
614 * ubi_refill_pools - refills all fastmap PEB pools.
615 * @ubi: UBI device description object
616 */
617void ubi_refill_pools(struct ubi_device *ubi)
618{
619 spin_lock(&ubi->wl_lock);
620 refill_wl_pool(ubi);
621 refill_wl_user_pool(ubi);
622 spin_unlock(&ubi->wl_lock);
623}
624
625/* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of
626 * the fastmap pool.
627 */
628int ubi_wl_get_peb(struct ubi_device *ubi)
629{
630 int ret;
631 struct ubi_fm_pool *pool = &ubi->fm_pool;
632 struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
633
634 if (!pool->size || !wl_pool->size || pool->used == pool->size ||
635 wl_pool->used == wl_pool->size)
636 ubi_update_fastmap(ubi);
637
638 /* we got not a single free PEB */
639 if (!pool->size)
640 ret = -ENOSPC;
641 else {
642 spin_lock(&ubi->wl_lock);
643 ret = pool->pebs[pool->used++];
644 prot_queue_add(ubi, ubi->lookuptbl[ret]);
645 spin_unlock(&ubi->wl_lock);
646 }
647
648 return ret;
649}
650
651/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
652 *
653 * @ubi: UBI device description object
654 */
655static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
656{
657 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
658 int pnum;
659
660 if (pool->used == pool->size || !pool->size) {
661 /* We cannot update the fastmap here because this
662 * function is called in atomic context.
663 * Let's fail here and refill/update it as soon as possible. */
664 schedule_work(&ubi->fm_work);
665 return NULL;
666 } else {
667 pnum = pool->pebs[pool->used++];
668 return ubi->lookuptbl[pnum];
669 }
670}
671#else
672static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
673{
674 struct ubi_wl_entry *e;
675
676 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
677 self_check_in_wl_tree(ubi, e, &ubi->free);
678 rb_erase(&e->u.rb, &ubi->free);
679
680 return e;
681}
682
683int ubi_wl_get_peb(struct ubi_device *ubi)
684{
685 int peb, err;
686
687 spin_lock(&ubi->wl_lock);
688 peb = __wl_get_peb(ubi);
689 spin_unlock(&ubi->wl_lock); 464 spin_unlock(&ubi->wl_lock);
690 465
691 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, 466 err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
692 ubi->peb_size - ubi->vid_hdr_aloffset); 467 ubi->peb_size - ubi->vid_hdr_aloffset);
693 if (err) { 468 if (err) {
694 ubi_err("new PEB %d does not contain all 0xFF bytes", peb); 469 ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
695 return err; 470 return err;
696 } 471 }
697 472
698 return peb; 473 return e->pnum;
699} 474}
700#endif
701 475
702/** 476/**
703 * prot_queue_del - remove a physical eraseblock from the protection queue. 477 * prot_queue_del - remove a physical eraseblock from the protection queue.
@@ -715,7 +489,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
715 if (!e) 489 if (!e)
716 return -ENODEV; 490 return -ENODEV;
717 491
718 if (self_check_in_pq(ubi, e)) 492 if (paranoid_check_in_pq(ubi, e))
719 return -ENODEV; 493 return -ENODEV;
720 494
721 list_del(&e->u.list); 495 list_del(&e->u.list);
@@ -741,7 +515,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
741 515
742 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); 516 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
743 517
744 err = self_check_ec(ubi, e->pnum, e->ec); 518 err = paranoid_check_ec(ubi, e->pnum, e->ec);
745 if (err) 519 if (err)
746 return -EINVAL; 520 return -EINVAL;
747 521
@@ -829,14 +603,14 @@ repeat:
829} 603}
830 604
831/** 605/**
832 * __schedule_ubi_work - schedule a work. 606 * schedule_ubi_work - schedule a work.
833 * @ubi: UBI device description object 607 * @ubi: UBI device description object
834 * @wrk: the work to schedule 608 * @wrk: the work to schedule
835 * 609 *
836 * This function adds a work defined by @wrk to the tail of the pending works 610 * This function adds a work defined by @wrk to the tail of the pending works
837 * list. Can only be used of ubi->work_sem is already held in read mode! 611 * list.
838 */ 612 */
839static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 613static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
840{ 614{
841 spin_lock(&ubi->wl_lock); 615 spin_lock(&ubi->wl_lock);
842 list_add_tail(&wrk->list, &ubi->works); 616 list_add_tail(&wrk->list, &ubi->works);
@@ -847,54 +621,23 @@ static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
847 spin_unlock(&ubi->wl_lock); 621 spin_unlock(&ubi->wl_lock);
848} 622}
849 623
850/**
851 * schedule_ubi_work - schedule a work.
852 * @ubi: UBI device description object
853 * @wrk: the work to schedule
854 *
855 * This function adds a work defined by @wrk to the tail of the pending works
856 * list.
857 */
858static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
859{
860 down_read(&ubi->work_sem);
861 __schedule_ubi_work(ubi, wrk);
862 up_read(&ubi->work_sem);
863}
864
865static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 624static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
866 int cancel); 625 int cancel);
867 626
868#ifdef CONFIG_MTD_UBI_FASTMAP
869/**
870 * ubi_is_erase_work - checks whether a work is erase work.
871 * @wrk: The work object to be checked
872 */
873int ubi_is_erase_work(struct ubi_work *wrk)
874{
875 return wrk->func == erase_worker;
876}
877#endif
878
879/** 627/**
880 * schedule_erase - schedule an erase work. 628 * schedule_erase - schedule an erase work.
881 * @ubi: UBI device description object 629 * @ubi: UBI device description object
882 * @e: the WL entry of the physical eraseblock to erase 630 * @e: the WL entry of the physical eraseblock to erase
883 * @vol_id: the volume ID that last used this PEB
884 * @lnum: the last used logical eraseblock number for the PEB
885 * @torture: if the physical eraseblock has to be tortured 631 * @torture: if the physical eraseblock has to be tortured
886 * 632 *
887 * This function returns zero in case of success and a %-ENOMEM in case of 633 * This function returns zero in case of success and a %-ENOMEM in case of
888 * failure. 634 * failure.
889 */ 635 */
890static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 636static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
891 int vol_id, int lnum, int torture) 637 int torture)
892{ 638{
893 struct ubi_work *wl_wrk; 639 struct ubi_work *wl_wrk;
894 640
895 ubi_assert(e);
896 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
897
898 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", 641 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
899 e->pnum, e->ec, torture); 642 e->pnum, e->ec, torture);
900 643
@@ -904,8 +647,6 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
904 647
905 wl_wrk->func = &erase_worker; 648 wl_wrk->func = &erase_worker;
906 wl_wrk->e = e; 649 wl_wrk->e = e;
907 wl_wrk->vol_id = vol_id;
908 wl_wrk->lnum = lnum;
909 wl_wrk->torture = torture; 650 wl_wrk->torture = torture;
910 651
911 schedule_ubi_work(ubi, wl_wrk); 652 schedule_ubi_work(ubi, wl_wrk);
@@ -913,79 +654,6 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
913} 654}
914 655
915/** 656/**
916 * do_sync_erase - run the erase worker synchronously.
917 * @ubi: UBI device description object
918 * @e: the WL entry of the physical eraseblock to erase
919 * @vol_id: the volume ID that last used this PEB
920 * @lnum: the last used logical eraseblock number for the PEB
921 * @torture: if the physical eraseblock has to be tortured
922 *
923 */
924static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
925 int vol_id, int lnum, int torture)
926{
927 struct ubi_work *wl_wrk;
928
929 dbg_wl("sync erase of PEB %i", e->pnum);
930
931 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
932 if (!wl_wrk)
933 return -ENOMEM;
934
935 wl_wrk->e = e;
936 wl_wrk->vol_id = vol_id;
937 wl_wrk->lnum = lnum;
938 wl_wrk->torture = torture;
939
940 return erase_worker(ubi, wl_wrk, 0);
941}
942
943#ifdef CONFIG_MTD_UBI_FASTMAP
944/**
945 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
946 * sub-system.
947 * see: ubi_wl_put_peb()
948 *
949 * @ubi: UBI device description object
950 * @fm_e: physical eraseblock to return
951 * @lnum: the last used logical eraseblock number for the PEB
952 * @torture: if this physical eraseblock has to be tortured
953 */
954int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
955 int lnum, int torture)
956{
957 struct ubi_wl_entry *e;
958 int vol_id, pnum = fm_e->pnum;
959
960 dbg_wl("PEB %d", pnum);
961
962 ubi_assert(pnum >= 0);
963 ubi_assert(pnum < ubi->peb_count);
964
965 spin_lock(&ubi->wl_lock);
966 e = ubi->lookuptbl[pnum];
967
968 /* This can happen if we recovered from a fastmap the very
969 * first time and writing now a new one. In this case the wl system
970 * has never seen any PEB used by the original fastmap.
971 */
972 if (!e) {
973 e = fm_e;
974 ubi_assert(e->ec >= 0);
975 ubi->lookuptbl[pnum] = e;
976 } else {
977 e->ec = fm_e->ec;
978 kfree(fm_e);
979 }
980
981 spin_unlock(&ubi->wl_lock);
982
983 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
984 return schedule_erase(ubi, e, vol_id, lnum, torture);
985}
986#endif
987
988/**
989 * wear_leveling_worker - wear-leveling worker function. 657 * wear_leveling_worker - wear-leveling worker function.
990 * @ubi: UBI device description object 658 * @ubi: UBI device description object
991 * @wrk: the work object 659 * @wrk: the work object
@@ -1000,9 +668,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1000{ 668{
1001 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; 669 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1002 int vol_id = -1, uninitialized_var(lnum); 670 int vol_id = -1, uninitialized_var(lnum);
1003#ifdef CONFIG_MTD_UBI_FASTMAP
1004 int anchor = wrk->anchor;
1005#endif
1006 struct ubi_wl_entry *e1, *e2; 671 struct ubi_wl_entry *e1, *e2;
1007 struct ubi_vid_hdr *vid_hdr; 672 struct ubi_vid_hdr *vid_hdr;
1008 673
@@ -1036,42 +701,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1036 goto out_cancel; 701 goto out_cancel;
1037 } 702 }
1038 703
1039#ifdef CONFIG_MTD_UBI_FASTMAP
1040 /* Check whether we need to produce an anchor PEB */
1041 if (!anchor)
1042 anchor = !anchor_pebs_avalible(&ubi->free);
1043
1044 if (anchor) {
1045 e1 = find_anchor_wl_entry(&ubi->used);
1046 if (!e1)
1047 goto out_cancel;
1048 e2 = get_peb_for_wl(ubi);
1049 if (!e2)
1050 goto out_cancel;
1051
1052 self_check_in_wl_tree(ubi, e1, &ubi->used);
1053 rb_erase(&e1->u.rb, &ubi->used);
1054 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
1055 } else if (!ubi->scrub.rb_node) {
1056#else
1057 if (!ubi->scrub.rb_node) { 704 if (!ubi->scrub.rb_node) {
1058#endif
1059 /* 705 /*
1060 * Now pick the least worn-out used physical eraseblock and a 706 * Now pick the least worn-out used physical eraseblock and a
1061 * highly worn-out free physical eraseblock. If the erase 707 * highly worn-out free physical eraseblock. If the erase
1062 * counters differ much enough, start wear-leveling. 708 * counters differ much enough, start wear-leveling.
1063 */ 709 */
1064 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); 710 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1065 e2 = get_peb_for_wl(ubi); 711 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
1066 if (!e2)
1067 goto out_cancel;
1068 712
1069 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 713 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
1070 dbg_wl("no WL needed: min used EC %d, max free EC %d", 714 dbg_wl("no WL needed: min used EC %d, max free EC %d",
1071 e1->ec, e2->ec); 715 e1->ec, e2->ec);
1072 goto out_cancel; 716 goto out_cancel;
1073 } 717 }
1074 self_check_in_wl_tree(ubi, e1, &ubi->used); 718 paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
1075 rb_erase(&e1->u.rb, &ubi->used); 719 rb_erase(&e1->u.rb, &ubi->used);
1076 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 720 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
1077 e1->pnum, e1->ec, e2->pnum, e2->ec); 721 e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -1079,15 +723,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1079 /* Perform scrubbing */ 723 /* Perform scrubbing */
1080 scrubbing = 1; 724 scrubbing = 1;
1081 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); 725 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
1082 e2 = get_peb_for_wl(ubi); 726 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
1083 if (!e2) 727 paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
1084 goto out_cancel;
1085
1086 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
1087 rb_erase(&e1->u.rb, &ubi->scrub); 728 rb_erase(&e1->u.rb, &ubi->scrub);
1088 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 729 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
1089 } 730 }
1090 731
732 paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
733 rb_erase(&e2->u.rb, &ubi->free);
1091 ubi->move_from = e1; 734 ubi->move_from = e1;
1092 ubi->move_to = e2; 735 ubi->move_to = e2;
1093 spin_unlock(&ubi->wl_lock); 736 spin_unlock(&ubi->wl_lock);
@@ -1156,7 +799,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1156 scrubbing = 1; 799 scrubbing = 1;
1157 goto out_not_moved; 800 goto out_not_moved;
1158 } 801 }
1159 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || 802 if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
1160 err == MOVE_TARGET_RD_ERR) { 803 err == MOVE_TARGET_RD_ERR) {
1161 /* 804 /*
1162 * Target PEB had bit-flips or write error - torture it. 805 * Target PEB had bit-flips or write error - torture it.
@@ -1204,7 +847,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1204 ubi->move_to_put = ubi->wl_scheduled = 0; 847 ubi->move_to_put = ubi->wl_scheduled = 0;
1205 spin_unlock(&ubi->wl_lock); 848 spin_unlock(&ubi->wl_lock);
1206 849
1207 err = do_sync_erase(ubi, e1, vol_id, lnum, 0); 850 err = schedule_erase(ubi, e1, 0);
1208 if (err) { 851 if (err) {
1209 kmem_cache_free(ubi_wl_entry_slab, e1); 852 kmem_cache_free(ubi_wl_entry_slab, e1);
1210 if (e2) 853 if (e2)
@@ -1219,7 +862,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1219 */ 862 */
1220 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", 863 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
1221 e2->pnum, vol_id, lnum); 864 e2->pnum, vol_id, lnum);
1222 err = do_sync_erase(ubi, e2, vol_id, lnum, 0); 865 err = schedule_erase(ubi, e2, 0);
1223 if (err) { 866 if (err) {
1224 kmem_cache_free(ubi_wl_entry_slab, e2); 867 kmem_cache_free(ubi_wl_entry_slab, e2);
1225 goto out_ro; 868 goto out_ro;
@@ -1258,7 +901,7 @@ out_not_moved:
1258 spin_unlock(&ubi->wl_lock); 901 spin_unlock(&ubi->wl_lock);
1259 902
1260 ubi_free_vid_hdr(ubi, vid_hdr); 903 ubi_free_vid_hdr(ubi, vid_hdr);
1261 err = do_sync_erase(ubi, e2, vol_id, lnum, torture); 904 err = schedule_erase(ubi, e2, torture);
1262 if (err) { 905 if (err) {
1263 kmem_cache_free(ubi_wl_entry_slab, e2); 906 kmem_cache_free(ubi_wl_entry_slab, e2);
1264 goto out_ro; 907 goto out_ro;
@@ -1299,13 +942,12 @@ out_cancel:
1299/** 942/**
1300 * ensure_wear_leveling - schedule wear-leveling if it is needed. 943 * ensure_wear_leveling - schedule wear-leveling if it is needed.
1301 * @ubi: UBI device description object 944 * @ubi: UBI device description object
1302 * @nested: set to non-zero if this function is called from UBI worker
1303 * 945 *
1304 * This function checks if it is time to start wear-leveling and schedules it 946 * This function checks if it is time to start wear-leveling and schedules it
1305 * if yes. This function returns zero in case of success and a negative error 947 * if yes. This function returns zero in case of success and a negative error
1306 * code in case of failure. 948 * code in case of failure.
1307 */ 949 */
1308static int ensure_wear_leveling(struct ubi_device *ubi, int nested) 950static int ensure_wear_leveling(struct ubi_device *ubi)
1309{ 951{
1310 int err = 0; 952 int err = 0;
1311 struct ubi_wl_entry *e1; 953 struct ubi_wl_entry *e1;
@@ -1333,7 +975,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1333 * %UBI_WL_THRESHOLD. 975 * %UBI_WL_THRESHOLD.
1334 */ 976 */
1335 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); 977 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1336 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); 978 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
1337 979
1338 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) 980 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1339 goto out_unlock; 981 goto out_unlock;
@@ -1350,12 +992,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1350 goto out_cancel; 992 goto out_cancel;
1351 } 993 }
1352 994
1353 wrk->anchor = 0;
1354 wrk->func = &wear_leveling_worker; 995 wrk->func = &wear_leveling_worker;
1355 if (nested) 996 schedule_ubi_work(ubi, wrk);
1356 __schedule_ubi_work(ubi, wrk);
1357 else
1358 schedule_ubi_work(ubi, wrk);
1359 return err; 997 return err;
1360 998
1361out_cancel: 999out_cancel:
@@ -1366,38 +1004,6 @@ out_unlock:
1366 return err; 1004 return err;
1367} 1005}
1368 1006
1369#ifdef CONFIG_MTD_UBI_FASTMAP
1370/**
1371 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
1372 * @ubi: UBI device description object
1373 */
1374int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
1375{
1376 struct ubi_work *wrk;
1377
1378 spin_lock(&ubi->wl_lock);
1379 if (ubi->wl_scheduled) {
1380 spin_unlock(&ubi->wl_lock);
1381 return 0;
1382 }
1383 ubi->wl_scheduled = 1;
1384 spin_unlock(&ubi->wl_lock);
1385
1386 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1387 if (!wrk) {
1388 spin_lock(&ubi->wl_lock);
1389 ubi->wl_scheduled = 0;
1390 spin_unlock(&ubi->wl_lock);
1391 return -ENOMEM;
1392 }
1393
1394 wrk->anchor = 1;
1395 wrk->func = &wear_leveling_worker;
1396 schedule_ubi_work(ubi, wrk);
1397 return 0;
1398}
1399#endif
1400
1401/** 1007/**
1402 * erase_worker - physical eraseblock erase worker function. 1008 * erase_worker - physical eraseblock erase worker function.
1403 * @ubi: UBI device description object 1009 * @ubi: UBI device description object
@@ -1413,10 +1019,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1413 int cancel) 1019 int cancel)
1414{ 1020{
1415 struct ubi_wl_entry *e = wl_wrk->e; 1021 struct ubi_wl_entry *e = wl_wrk->e;
1416 int pnum = e->pnum; 1022 int pnum = e->pnum, err, need;
1417 int vol_id = wl_wrk->vol_id;
1418 int lnum = wl_wrk->lnum;
1419 int err, available_consumed = 0;
1420 1023
1421 if (cancel) { 1024 if (cancel) {
1422 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1025 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1425,10 +1028,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1425 return 0; 1028 return 0;
1426 } 1029 }
1427 1030
1428 dbg_wl("erase PEB %d EC %d LEB %d:%d", 1031 dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1429 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1430
1431 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1432 1032
1433 err = sync_erase(ubi, e, wl_wrk->torture); 1033 err = sync_erase(ubi, e, wl_wrk->torture);
1434 if (!err) { 1034 if (!err) {
@@ -1437,7 +1037,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1437 1037
1438 spin_lock(&ubi->wl_lock); 1038 spin_lock(&ubi->wl_lock);
1439 wl_tree_add(e, &ubi->free); 1039 wl_tree_add(e, &ubi->free);
1440 ubi->free_count++;
1441 spin_unlock(&ubi->wl_lock); 1040 spin_unlock(&ubi->wl_lock);
1442 1041
1443 /* 1042 /*
@@ -1447,7 +1046,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1447 serve_prot_queue(ubi); 1046 serve_prot_queue(ubi);
1448 1047
1449 /* And take care about wear-leveling */ 1048 /* And take care about wear-leveling */
1450 err = ensure_wear_leveling(ubi, 1); 1049 err = ensure_wear_leveling(ubi);
1451 return err; 1050 return err;
1452 } 1051 }
1453 1052
@@ -1459,7 +1058,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1459 int err1; 1058 int err1;
1460 1059
1461 /* Re-schedule the LEB for erasure */ 1060 /* Re-schedule the LEB for erasure */
1462 err1 = schedule_erase(ubi, e, vol_id, lnum, 0); 1061 err1 = schedule_erase(ubi, e, 0);
1463 if (err1) { 1062 if (err1) {
1464 err = err1; 1063 err = err1;
1465 goto out_ro; 1064 goto out_ro;
@@ -1484,14 +1083,20 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1484 } 1083 }
1485 1084
1486 spin_lock(&ubi->volumes_lock); 1085 spin_lock(&ubi->volumes_lock);
1086 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1087 if (need > 0) {
1088 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1089 ubi->avail_pebs -= need;
1090 ubi->rsvd_pebs += need;
1091 ubi->beb_rsvd_pebs += need;
1092 if (need > 0)
1093 ubi_msg("reserve more %d PEBs", need);
1094 }
1095
1487 if (ubi->beb_rsvd_pebs == 0) { 1096 if (ubi->beb_rsvd_pebs == 0) {
1488 if (ubi->avail_pebs == 0) { 1097 spin_unlock(&ubi->volumes_lock);
1489 spin_unlock(&ubi->volumes_lock); 1098 ubi_err("no reserved physical eraseblocks");
1490 ubi_err("no reserved/available physical eraseblocks"); 1099 goto out_ro;
1491 goto out_ro;
1492 }
1493 ubi->avail_pebs -= 1;
1494 available_consumed = 1;
1495 } 1100 }
1496 spin_unlock(&ubi->volumes_lock); 1101 spin_unlock(&ubi->volumes_lock);
1497 1102
@@ -1501,36 +1106,19 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1501 goto out_ro; 1106 goto out_ro;
1502 1107
1503 spin_lock(&ubi->volumes_lock); 1108 spin_lock(&ubi->volumes_lock);
1504 if (ubi->beb_rsvd_pebs > 0) { 1109 ubi->beb_rsvd_pebs -= 1;
1505 if (available_consumed) {
1506 /*
1507 * The amount of reserved PEBs increased since we last
1508 * checked.
1509 */
1510 ubi->avail_pebs += 1;
1511 available_consumed = 0;
1512 }
1513 ubi->beb_rsvd_pebs -= 1;
1514 }
1515 ubi->bad_peb_count += 1; 1110 ubi->bad_peb_count += 1;
1516 ubi->good_peb_count -= 1; 1111 ubi->good_peb_count -= 1;
1517 ubi_calculate_reserved(ubi); 1112 ubi_calculate_reserved(ubi);
1518 if (available_consumed) 1113 if (ubi->beb_rsvd_pebs)
1519 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1520 else if (ubi->beb_rsvd_pebs)
1521 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); 1114 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1522 else 1115 else
1523 ubi_warn("last PEB from the reserve was used"); 1116 ubi_warn("last PEB from the reserved pool was used");
1524 spin_unlock(&ubi->volumes_lock); 1117 spin_unlock(&ubi->volumes_lock);
1525 1118
1526 return err; 1119 return err;
1527 1120
1528out_ro: 1121out_ro:
1529 if (available_consumed) {
1530 spin_lock(&ubi->volumes_lock);
1531 ubi->avail_pebs += 1;
1532 spin_unlock(&ubi->volumes_lock);
1533 }
1534 ubi_ro_mode(ubi); 1122 ubi_ro_mode(ubi);
1535 return err; 1123 return err;
1536} 1124}
@@ -1538,8 +1126,6 @@ out_ro:
1538/** 1126/**
1539 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. 1127 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1540 * @ubi: UBI device description object 1128 * @ubi: UBI device description object
1541 * @vol_id: the volume ID that last used this PEB
1542 * @lnum: the last used logical eraseblock number for the PEB
1543 * @pnum: physical eraseblock to return 1129 * @pnum: physical eraseblock to return
1544 * @torture: if this physical eraseblock has to be tortured 1130 * @torture: if this physical eraseblock has to be tortured
1545 * 1131 *
@@ -1548,8 +1134,7 @@ out_ro:
1548 * occurred to this @pnum and it has to be tested. This function returns zero 1134 * occurred to this @pnum and it has to be tested. This function returns zero
1549 * in case of success, and a negative error code in case of failure. 1135 * in case of success, and a negative error code in case of failure.
1550 */ 1136 */
1551int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, 1137int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1552 int pnum, int torture)
1553{ 1138{
1554 int err; 1139 int err;
1555 struct ubi_wl_entry *e; 1140 struct ubi_wl_entry *e;
@@ -1591,13 +1176,13 @@ retry:
1591 return 0; 1176 return 0;
1592 } else { 1177 } else {
1593 if (in_wl_tree(e, &ubi->used)) { 1178 if (in_wl_tree(e, &ubi->used)) {
1594 self_check_in_wl_tree(ubi, e, &ubi->used); 1179 paranoid_check_in_wl_tree(ubi, e, &ubi->used);
1595 rb_erase(&e->u.rb, &ubi->used); 1180 rb_erase(&e->u.rb, &ubi->used);
1596 } else if (in_wl_tree(e, &ubi->scrub)) { 1181 } else if (in_wl_tree(e, &ubi->scrub)) {
1597 self_check_in_wl_tree(ubi, e, &ubi->scrub); 1182 paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
1598 rb_erase(&e->u.rb, &ubi->scrub); 1183 rb_erase(&e->u.rb, &ubi->scrub);
1599 } else if (in_wl_tree(e, &ubi->erroneous)) { 1184 } else if (in_wl_tree(e, &ubi->erroneous)) {
1600 self_check_in_wl_tree(ubi, e, &ubi->erroneous); 1185 paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
1601 rb_erase(&e->u.rb, &ubi->erroneous); 1186 rb_erase(&e->u.rb, &ubi->erroneous);
1602 ubi->erroneous_peb_count -= 1; 1187 ubi->erroneous_peb_count -= 1;
1603 ubi_assert(ubi->erroneous_peb_count >= 0); 1188 ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1615,7 +1200,7 @@ retry:
1615 } 1200 }
1616 spin_unlock(&ubi->wl_lock); 1201 spin_unlock(&ubi->wl_lock);
1617 1202
1618 err = schedule_erase(ubi, e, vol_id, lnum, torture); 1203 err = schedule_erase(ubi, e, torture);
1619 if (err) { 1204 if (err) {
1620 spin_lock(&ubi->wl_lock); 1205 spin_lock(&ubi->wl_lock);
1621 wl_tree_add(e, &ubi->used); 1206 wl_tree_add(e, &ubi->used);
@@ -1639,7 +1224,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1639{ 1224{
1640 struct ubi_wl_entry *e; 1225 struct ubi_wl_entry *e;
1641 1226
1642 ubi_msg("schedule PEB %d for scrubbing", pnum); 1227 dbg_msg("schedule PEB %d for scrubbing", pnum);
1643 1228
1644retry: 1229retry:
1645 spin_lock(&ubi->wl_lock); 1230 spin_lock(&ubi->wl_lock);
@@ -1664,7 +1249,7 @@ retry:
1664 } 1249 }
1665 1250
1666 if (in_wl_tree(e, &ubi->used)) { 1251 if (in_wl_tree(e, &ubi->used)) {
1667 self_check_in_wl_tree(ubi, e, &ubi->used); 1252 paranoid_check_in_wl_tree(ubi, e, &ubi->used);
1668 rb_erase(&e->u.rb, &ubi->used); 1253 rb_erase(&e->u.rb, &ubi->used);
1669 } else { 1254 } else {
1670 int err; 1255 int err;
@@ -1685,60 +1270,29 @@ retry:
1685 * Technically scrubbing is the same as wear-leveling, so it is done 1270 * Technically scrubbing is the same as wear-leveling, so it is done
1686 * by the WL worker. 1271 * by the WL worker.
1687 */ 1272 */
1688 return ensure_wear_leveling(ubi, 0); 1273 return ensure_wear_leveling(ubi);
1689} 1274}
1690 1275
1691/** 1276/**
1692 * ubi_wl_flush - flush all pending works. 1277 * ubi_wl_flush - flush all pending works.
1693 * @ubi: UBI device description object 1278 * @ubi: UBI device description object
1694 * @vol_id: the volume id to flush for
1695 * @lnum: the logical eraseblock number to flush for
1696 * 1279 *
1697 * This function executes all pending works for a particular volume id / 1280 * This function returns zero in case of success and a negative error code in
1698 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it 1281 * case of failure.
1699 * acts as a wildcard for all of the corresponding volume numbers or logical
1700 * eraseblock numbers. It returns zero in case of success and a negative error
1701 * code in case of failure.
1702 */ 1282 */
1703int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) 1283int ubi_wl_flush(struct ubi_device *ubi)
1704{ 1284{
1705 int err = 0; 1285 int err;
1706 int found = 1;
1707 1286
1708 /* 1287 /*
1709 * Erase while the pending works queue is not empty, but not more than 1288 * Erase while the pending works queue is not empty, but not more than
1710 * the number of currently pending works. 1289 * the number of currently pending works.
1711 */ 1290 */
1712 dbg_wl("flush pending work for LEB %d:%d (%d pending works)", 1291 dbg_wl("flush (%d pending works)", ubi->works_count);
1713 vol_id, lnum, ubi->works_count); 1292 while (ubi->works_count) {
1714 1293 err = do_work(ubi);
1715 while (found) { 1294 if (err)
1716 struct ubi_work *wrk; 1295 return err;
1717 found = 0;
1718
1719 down_read(&ubi->work_sem);
1720 spin_lock(&ubi->wl_lock);
1721 list_for_each_entry(wrk, &ubi->works, list) {
1722 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1723 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1724 list_del(&wrk->list);
1725 ubi->works_count -= 1;
1726 ubi_assert(ubi->works_count >= 0);
1727 spin_unlock(&ubi->wl_lock);
1728
1729 err = wrk->func(ubi, wrk, 0);
1730 if (err) {
1731 up_read(&ubi->work_sem);
1732 return err;
1733 }
1734
1735 spin_lock(&ubi->wl_lock);
1736 found = 1;
1737 break;
1738 }
1739 }
1740 spin_unlock(&ubi->wl_lock);
1741 up_read(&ubi->work_sem);
1742 } 1296 }
1743 1297
1744 /* 1298 /*
@@ -1748,7 +1302,18 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1748 down_write(&ubi->work_sem); 1302 down_write(&ubi->work_sem);
1749 up_write(&ubi->work_sem); 1303 up_write(&ubi->work_sem);
1750 1304
1751 return err; 1305 /*
1306 * And in case last was the WL worker and it canceled the LEB
1307 * movement, flush again.
1308 */
1309 while (ubi->works_count) {
1310 dbg_wl("flush more (%d pending works)", ubi->works_count);
1311 err = do_work(ubi);
1312 if (err)
1313 return err;
1314 }
1315
1316 return 0;
1752} 1317}
1753 1318
1754/** 1319/**
@@ -1857,30 +1422,27 @@ static void cancel_pending(struct ubi_device *ubi)
1857} 1422}
1858 1423
1859/** 1424/**
1860 * ubi_wl_init - initialize the WL sub-system using attaching information. 1425 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1861 * @ubi: UBI device description object 1426 * @ubi: UBI device description object
1862 * @ai: attaching information 1427 * @si: scanning information
1863 * 1428 *
1864 * This function returns zero in case of success, and a negative error code in 1429 * This function returns zero in case of success, and a negative error code in
1865 * case of failure. 1430 * case of failure.
1866 */ 1431 */
1867int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) 1432int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1868{ 1433{
1869 int err, i, reserved_pebs, found_pebs = 0; 1434 int err, i;
1870 struct rb_node *rb1, *rb2; 1435 struct rb_node *rb1, *rb2;
1871 struct ubi_ainf_volume *av; 1436 struct ubi_scan_volume *sv;
1872 struct ubi_ainf_peb *aeb, *tmp; 1437 struct ubi_scan_leb *seb, *tmp;
1873 struct ubi_wl_entry *e; 1438 struct ubi_wl_entry *e;
1874 1439
1875 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; 1440 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1876 spin_lock_init(&ubi->wl_lock); 1441 spin_lock_init(&ubi->wl_lock);
1877 mutex_init(&ubi->move_mutex); 1442 mutex_init(&ubi->move_mutex);
1878 init_rwsem(&ubi->work_sem); 1443 init_rwsem(&ubi->work_sem);
1879 ubi->max_ec = ai->max_ec; 1444 ubi->max_ec = si->max_ec;
1880 INIT_LIST_HEAD(&ubi->works); 1445 INIT_LIST_HEAD(&ubi->works);
1881#ifdef CONFIG_MTD_UBI_FASTMAP
1882 INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1883#endif
1884 1446
1885 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1447 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1886 1448
@@ -1893,59 +1455,48 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1893 INIT_LIST_HEAD(&ubi->pq[i]); 1455 INIT_LIST_HEAD(&ubi->pq[i]);
1894 ubi->pq_head = 0; 1456 ubi->pq_head = 0;
1895 1457
1896 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { 1458 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1897 cond_resched(); 1459 cond_resched();
1898 1460
1899 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1461 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1900 if (!e) 1462 if (!e)
1901 goto out_free; 1463 goto out_free;
1902 1464
1903 e->pnum = aeb->pnum; 1465 e->pnum = seb->pnum;
1904 e->ec = aeb->ec; 1466 e->ec = seb->ec;
1905 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1906 ubi->lookuptbl[e->pnum] = e; 1467 ubi->lookuptbl[e->pnum] = e;
1907 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { 1468 if (schedule_erase(ubi, e, 0)) {
1908 kmem_cache_free(ubi_wl_entry_slab, e); 1469 kmem_cache_free(ubi_wl_entry_slab, e);
1909 goto out_free; 1470 goto out_free;
1910 } 1471 }
1911
1912 found_pebs++;
1913 } 1472 }
1914 1473
1915 ubi->free_count = 0; 1474 list_for_each_entry(seb, &si->free, u.list) {
1916 list_for_each_entry(aeb, &ai->free, u.list) {
1917 cond_resched(); 1475 cond_resched();
1918 1476
1919 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1477 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1920 if (!e) 1478 if (!e)
1921 goto out_free; 1479 goto out_free;
1922 1480
1923 e->pnum = aeb->pnum; 1481 e->pnum = seb->pnum;
1924 e->ec = aeb->ec; 1482 e->ec = seb->ec;
1925 ubi_assert(e->ec >= 0); 1483 ubi_assert(e->ec >= 0);
1926 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1927
1928 wl_tree_add(e, &ubi->free); 1484 wl_tree_add(e, &ubi->free);
1929 ubi->free_count++;
1930
1931 ubi->lookuptbl[e->pnum] = e; 1485 ubi->lookuptbl[e->pnum] = e;
1932
1933 found_pebs++;
1934 } 1486 }
1935 1487
1936 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { 1488 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1937 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { 1489 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1938 cond_resched(); 1490 cond_resched();
1939 1491
1940 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1492 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1941 if (!e) 1493 if (!e)
1942 goto out_free; 1494 goto out_free;
1943 1495
1944 e->pnum = aeb->pnum; 1496 e->pnum = seb->pnum;
1945 e->ec = aeb->ec; 1497 e->ec = seb->ec;
1946 ubi->lookuptbl[e->pnum] = e; 1498 ubi->lookuptbl[e->pnum] = e;
1947 1499 if (!seb->scrub) {
1948 if (!aeb->scrub) {
1949 dbg_wl("add PEB %d EC %d to the used tree", 1500 dbg_wl("add PEB %d EC %d to the used tree",
1950 e->pnum, e->ec); 1501 e->pnum, e->ec);
1951 wl_tree_add(e, &ubi->used); 1502 wl_tree_add(e, &ubi->used);
@@ -1954,38 +1505,22 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1954 e->pnum, e->ec); 1505 e->pnum, e->ec);
1955 wl_tree_add(e, &ubi->scrub); 1506 wl_tree_add(e, &ubi->scrub);
1956 } 1507 }
1957
1958 found_pebs++;
1959 } 1508 }
1960 } 1509 }
1961 1510
1962 dbg_wl("found %i PEBs", found_pebs); 1511 if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1963
1964 if (ubi->fm)
1965 ubi_assert(ubi->good_peb_count == \
1966 found_pebs + ubi->fm->used_blocks);
1967 else
1968 ubi_assert(ubi->good_peb_count == found_pebs);
1969
1970 reserved_pebs = WL_RESERVED_PEBS;
1971#ifdef CONFIG_MTD_UBI_FASTMAP
1972 /* Reserve enough LEBs to store two fastmaps. */
1973 reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1974#endif
1975
1976 if (ubi->avail_pebs < reserved_pebs) {
1977 ubi_err("no enough physical eraseblocks (%d, need %d)", 1512 ubi_err("no enough physical eraseblocks (%d, need %d)",
1978 ubi->avail_pebs, reserved_pebs); 1513 ubi->avail_pebs, WL_RESERVED_PEBS);
1979 if (ubi->corr_peb_count) 1514 if (ubi->corr_peb_count)
1980 ubi_err("%d PEBs are corrupted and not used", 1515 ubi_err("%d PEBs are corrupted and not used",
1981 ubi->corr_peb_count); 1516 ubi->corr_peb_count);
1982 goto out_free; 1517 goto out_free;
1983 } 1518 }
1984 ubi->avail_pebs -= reserved_pebs; 1519 ubi->avail_pebs -= WL_RESERVED_PEBS;
1985 ubi->rsvd_pebs += reserved_pebs; 1520 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1986 1521
1987 /* Schedule wear-leveling if needed */ 1522 /* Schedule wear-leveling if needed */
1988 err = ensure_wear_leveling(ubi, 0); 1523 err = ensure_wear_leveling(ubi);
1989 if (err) 1524 if (err)
1990 goto out_free; 1525 goto out_free;
1991 1526
@@ -2033,8 +1568,10 @@ void ubi_wl_close(struct ubi_device *ubi)
2033 kfree(ubi->lookuptbl); 1568 kfree(ubi->lookuptbl);
2034} 1569}
2035 1570
1571#ifdef CONFIG_MTD_UBI_DEBUG
1572
2036/** 1573/**
2037 * self_check_ec - make sure that the erase counter of a PEB is correct. 1574 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
2038 * @ubi: UBI device description object 1575 * @ubi: UBI device description object
2039 * @pnum: the physical eraseblock number to check 1576 * @pnum: the physical eraseblock number to check
2040 * @ec: the erase counter to check 1577 * @ec: the erase counter to check
@@ -2043,13 +1580,13 @@ void ubi_wl_close(struct ubi_device *ubi)
2043 * is equivalent to @ec, and a negative error code if not or if an error 1580 * is equivalent to @ec, and a negative error code if not or if an error
2044 * occurred. 1581 * occurred.
2045 */ 1582 */
2046static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) 1583static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
2047{ 1584{
2048 int err; 1585 int err;
2049 long long read_ec; 1586 long long read_ec;
2050 struct ubi_ec_hdr *ec_hdr; 1587 struct ubi_ec_hdr *ec_hdr;
2051 1588
2052 if (!ubi_dbg_chk_gen(ubi)) 1589 if (!ubi->dbg->chk_gen)
2053 return 0; 1590 return 0;
2054 1591
2055 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); 1592 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -2064,10 +1601,10 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2064 } 1601 }
2065 1602
2066 read_ec = be64_to_cpu(ec_hdr->ec); 1603 read_ec = be64_to_cpu(ec_hdr->ec);
2067 if (ec != read_ec && read_ec - ec > 1) { 1604 if (ec != read_ec) {
2068 ubi_err("self-check failed for PEB %d", pnum); 1605 ubi_err("paranoid check failed for PEB %d", pnum);
2069 ubi_err("read EC is %lld, should be %d", read_ec, ec); 1606 ubi_err("read EC is %lld, should be %d", read_ec, ec);
2070 dump_stack(); 1607 ubi_dbg_dump_stack();
2071 err = 1; 1608 err = 1;
2072 } else 1609 } else
2073 err = 0; 1610 err = 0;
@@ -2078,7 +1615,7 @@ out_free:
2078} 1615}
2079 1616
2080/** 1617/**
2081 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. 1618 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2082 * @ubi: UBI device description object 1619 * @ubi: UBI device description object
2083 * @e: the wear-leveling entry to check 1620 * @e: the wear-leveling entry to check
2084 * @root: the root of the tree 1621 * @root: the root of the tree
@@ -2086,36 +1623,37 @@ out_free:
2086 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it 1623 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2087 * is not. 1624 * is not.
2088 */ 1625 */
2089static int self_check_in_wl_tree(const struct ubi_device *ubi, 1626static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
2090 struct ubi_wl_entry *e, struct rb_root *root) 1627 struct ubi_wl_entry *e,
1628 struct rb_root *root)
2091{ 1629{
2092 if (!ubi_dbg_chk_gen(ubi)) 1630 if (!ubi->dbg->chk_gen)
2093 return 0; 1631 return 0;
2094 1632
2095 if (in_wl_tree(e, root)) 1633 if (in_wl_tree(e, root))
2096 return 0; 1634 return 0;
2097 1635
2098 ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ", 1636 ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
2099 e->pnum, e->ec, root); 1637 e->pnum, e->ec, root);
2100 dump_stack(); 1638 ubi_dbg_dump_stack();
2101 return -EINVAL; 1639 return -EINVAL;
2102} 1640}
2103 1641
2104/** 1642/**
2105 * self_check_in_pq - check if wear-leveling entry is in the protection 1643 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
2106 * queue. 1644 * queue.
2107 * @ubi: UBI device description object 1645 * @ubi: UBI device description object
2108 * @e: the wear-leveling entry to check 1646 * @e: the wear-leveling entry to check
2109 * 1647 *
2110 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not. 1648 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2111 */ 1649 */
2112static int self_check_in_pq(const struct ubi_device *ubi, 1650static int paranoid_check_in_pq(const struct ubi_device *ubi,
2113 struct ubi_wl_entry *e) 1651 struct ubi_wl_entry *e)
2114{ 1652{
2115 struct ubi_wl_entry *p; 1653 struct ubi_wl_entry *p;
2116 int i; 1654 int i;
2117 1655
2118 if (!ubi_dbg_chk_gen(ubi)) 1656 if (!ubi->dbg->chk_gen)
2119 return 0; 1657 return 0;
2120 1658
2121 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) 1659 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
@@ -2123,8 +1661,10 @@ static int self_check_in_pq(const struct ubi_device *ubi,
2123 if (p == e) 1661 if (p == e)
2124 return 0; 1662 return 0;
2125 1663
2126 ubi_err("self-check failed for PEB %d, EC %d, Protect queue", 1664 ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
2127 e->pnum, e->ec); 1665 e->pnum, e->ec);
2128 dump_stack(); 1666 ubi_dbg_dump_stack();
2129 return -EINVAL; 1667 return -EINVAL;
2130} 1668}
1669
1670#endif /* CONFIG_MTD_UBI_DEBUG */