author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-16 13:23:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-16 13:23:43 -0500
commit     60d9aa758c00f20ade0cb1951f6a934f628dd2d7 (patch)
tree       e3bdfa4ec0d3f9a29a822810b8b9188c7d613cbd
parent     b2adf0cbec4cf0934c63f48f893e0cebde380d0c (diff)
parent     2e16cfca6e17ae37ae21feca080a6f2eca9087dc (diff)
Merge git://git.infradead.org/mtd-2.6
* git://git.infradead.org/mtd-2.6: (90 commits)
  jffs2: Fix long-standing bug with symlink garbage collection.
  mtd: OneNAND: Fix test of unsigned in onenand_otp_walk()
  mtd: cfi_cmdset_0002, fix lock imbalance
  Revert "mtd: move mxcnd_remove to .exit.text"
  mtd: m25p80: add support for Macronix MX25L4005A
  kmsg_dump: fix build for CONFIG_PRINTK=n
  mtd: nandsim: add support for 4KiB pages
  mtd: mtdoops: refactor as a kmsg_dumper
  mtd: mtdoops: make record size configurable
  mtd: mtdoops: limit the maximum mtd partition size
  mtd: mtdoops: keep track of used/unused pages in an array
  mtd: mtdoops: several minor cleanups
  core: Add kernel message dumper to call on oopses and panics
  mtd: add ARM pismo support
  mtd: pxa3xx_nand: Fix PIO data transfer
  mtd: nand: fix multi-chip suspend problem
  mtd: add support for switching old SST chips into QRY mode
  mtd: fix M29W800D dev_id and uaddr
  mtd: don't use PF_MEMALLOC
  mtd: Add bad block table overrides to Davinci NAND driver
  ...

Fixed up conflicts (mostly trivial) in
	drivers/mtd/devices/m25p80.c
	drivers/mtd/maps/pcmciamtd.c
	drivers/mtd/nand/pxa3xx_nand.c
	kernel/printk.c
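As background for the kmsg_dump and mtdoops commits listed above: a minimal sketch of a client of the new kmsg_dumper facility added by "core: Add kernel message dumper to call on oopses and panics". This is not code from this merge (mtdoops is the real in-tree user), and it assumes the original two-segment dump() callback signature where the printk ring buffer is handed over as two chunks.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>

/*
 * Hypothetical example dumper, for illustration only.  s1/l1 and s2/l2
 * are the two halves of the (possibly wrapped) printk ring buffer; a
 * real dumper such as mtdoops persists them to non-volatile storage.
 */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* reason is KMSG_DUMP_OOPS or KMSG_DUMP_PANIC */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");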
-rw-r--r--  arch/arm/mach-bcmring/arch.c  10
-rw-r--r--  arch/arm/mach-bcmring/include/mach/reg_nand.h  66
-rw-r--r--  arch/arm/mach-bcmring/include/mach/reg_umi.h  237
-rw-r--r--  arch/arm/mach-davinci/include/mach/nand.h  4
-rw-r--r--  arch/arm/mach-nomadik/board-nhk8815.c  11
-rw-r--r--  arch/arm/plat-mxc/include/mach/mxc_nand.h  3
-rw-r--r--  arch/arm/plat-s3c/include/plat/nand.h  2
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  35
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  17
-rwxr-xr-x  drivers/mtd/chips/cfi_util.c  7
-rw-r--r--  drivers/mtd/chips/jedec_probe.c  8
-rw-r--r--  drivers/mtd/devices/m25p80.c  334
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c  19
-rw-r--r--  drivers/mtd/maps/Kconfig  6
-rw-r--r--  drivers/mtd/maps/Makefile  2
-rw-r--r--  drivers/mtd/maps/ipaq-flash.c  460
-rw-r--r--  drivers/mtd/maps/ixp4xx.c  6
-rw-r--r--  drivers/mtd/maps/physmap.c  21
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c  2
-rw-r--r--  drivers/mtd/maps/vmu-flash.c  9
-rw-r--r--  drivers/mtd/mtd_blkdevs.c  5
-rw-r--r--  drivers/mtd/mtdcore.c  2
-rw-r--r--  drivers/mtd/mtdoops.c  389
-rw-r--r--  drivers/mtd/nand/Kconfig  16
-rw-r--r--  drivers/mtd/nand/Makefile  1
-rw-r--r--  drivers/mtd/nand/alauda.c  11
-rw-r--r--  drivers/mtd/nand/atmel_nand.c  5
-rw-r--r--  drivers/mtd/nand/bcm_umi_bch.c  213
-rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c  581
-rw-r--r--  drivers/mtd/nand/davinci_nand.c  4
-rw-r--r--  drivers/mtd/nand/excite_nandflash.c  2
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c  86
-rw-r--r--  drivers/mtd/nand/fsl_upm.c  2
-rw-r--r--  drivers/mtd/nand/mxc_nand.c  783
-rw-r--r--  drivers/mtd/nand/nand_base.c  141
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.c  149
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.h  358
-rw-r--r--  drivers/mtd/nand/nand_ecc.c  25
-rw-r--r--  drivers/mtd/nand/nandsim.c  7
-rw-r--r--  drivers/mtd/nand/plat_nand.c  50
-rw-r--r--  drivers/mtd/nand/s3c2410.c  2
-rw-r--r--  drivers/mtd/nand/txx9ndfmc.c  3
-rw-r--r--  drivers/mtd/onenand/omap2.c  22
-rw-r--r--  drivers/mtd/onenand/onenand_base.c  745
-rw-r--r--  drivers/mtd/tests/Makefile  1
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c  87
-rw-r--r--  drivers/mtd/tests/mtd_oobtest.c  18
-rw-r--r--  drivers/mtd/tests/mtd_pagetest.c  1
-rw-r--r--  fs/jffs2/gc.c  3
-rw-r--r--  fs/jffs2/readinode.c  2
-rw-r--r--  fs/jffs2/summary.c  2
-rw-r--r--  include/linux/kmsg_dump.h  60
-rw-r--r--  include/linux/mtd/bbm.h  35
-rw-r--r--  include/linux/mtd/cfi.h  9
-rw-r--r--  include/linux/mtd/flashchip.h  9
-rw-r--r--  include/linux/mtd/nand.h  97
-rw-r--r--  include/linux/mtd/nand_ecc.h  10
-rw-r--r--  include/linux/mtd/onenand.h  23
-rw-r--r--  include/linux/mtd/onenand_regs.h  2
-rw-r--r--  kernel/panic.c  3
-rw-r--r--  kernel/printk.c  119
61 files changed, 3606 insertions, 1736 deletions
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index fbe6fa02c882..53dd2a9eecf9 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -70,9 +70,19 @@ static struct ctl_table bcmring_sysctl_reboot[] = {
70 {} 70 {}
71}; 71};
72 72
73static struct resource nand_resource[] = {
74 [0] = {
75 .start = MM_ADDR_IO_NAND,
76 .end = MM_ADDR_IO_NAND + 0x1000 - 1,
77 .flags = IORESOURCE_MEM,
78 },
79};
80
73static struct platform_device nand_device = { 81static struct platform_device nand_device = {
74 .name = "bcm-nand", 82 .name = "bcm-nand",
75 .id = -1, 83 .id = -1,
84 .resource = nand_resource,
85 .num_resources = ARRAY_SIZE(nand_resource),
76}; 86};
77 87
78static struct platform_device *devices[] __initdata = { 88static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-bcmring/include/mach/reg_nand.h b/arch/arm/mach-bcmring/include/mach/reg_nand.h
new file mode 100644
index 000000000000..387376ffb56b
--- /dev/null
+++ b/arch/arm/mach-bcmring/include/mach/reg_nand.h
@@ -0,0 +1,66 @@
1/*****************************************************************************
2* Copyright 2001 - 2008 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/*
16*
17*****************************************************************************
18*
19* REG_NAND.h
20*
21* PURPOSE:
22*
23* This file contains definitions for the nand registers:
24*
25* NOTES:
26*
27*****************************************************************************/
28
29#if !defined(__ASM_ARCH_REG_NAND_H)
30#define __ASM_ARCH_REG_NAND_H
31
32/* ---- Include Files ---------------------------------------------------- */
33#include <csp/reg.h>
34#include <mach/reg_umi.h>
35
36/* ---- Constants and Types ---------------------------------------------- */
37
38#define HW_NAND_BASE MM_IO_BASE_NAND /* NAND Flash */
39
40/* DMA accesses by the bootstrap need hard nonvirtual addresses */
41#define REG_NAND_CMD __REG16(HW_NAND_BASE + 0)
42#define REG_NAND_ADDR __REG16(HW_NAND_BASE + 4)
43
44#define REG_NAND_PHYS_DATA16 (HW_NAND_BASE + 8)
45#define REG_NAND_PHYS_DATA8 (HW_NAND_BASE + 8)
46#define REG_NAND_DATA16 __REG16(REG_NAND_PHYS_DATA16)
47#define REG_NAND_DATA8 __REG8(REG_NAND_PHYS_DATA8)
48
49/* use appropriate offset to make sure it start at the 1K boundary */
50#define REG_NAND_PHYS_DATA_DMA (HW_NAND_BASE + 0x400)
51#define REG_NAND_DATA_DMA __REG32(REG_NAND_PHYS_DATA_DMA)
52
53/* Linux DMA requires physical address of the data register */
54#define REG_NAND_DATA16_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA16)
55#define REG_NAND_DATA8_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA8)
56#define REG_NAND_DATA_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA_DMA)
57
58#define NAND_BUS_16BIT() (0)
59#define NAND_BUS_8BIT() (!NAND_BUS_16BIT())
60
61/* Register offsets */
62#define REG_NAND_CMD_OFFSET (0)
63#define REG_NAND_ADDR_OFFSET (4)
64#define REG_NAND_DATA8_OFFSET (8)
65
66#endif
diff --git a/arch/arm/mach-bcmring/include/mach/reg_umi.h b/arch/arm/mach-bcmring/include/mach/reg_umi.h
new file mode 100644
index 000000000000..06a355481ea6
--- /dev/null
+++ b/arch/arm/mach-bcmring/include/mach/reg_umi.h
@@ -0,0 +1,237 @@
1/*****************************************************************************
2* Copyright 2005 - 2008 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/*
16*
17*****************************************************************************
18*
19* REG_UMI.h
20*
21* PURPOSE:
22*
23* This file contains definitions for the nand registers:
24*
25* NOTES:
26*
27*****************************************************************************/
28
29#if !defined(__ASM_ARCH_REG_UMI_H)
30#define __ASM_ARCH_REG_UMI_H
31
32/* ---- Include Files ---------------------------------------------------- */
33#include <csp/reg.h>
34#include <mach/csp/mm_io.h>
35
36/* ---- Constants and Types ---------------------------------------------- */
37
38/* Unified Memory Interface Ctrl Register */
39#define HW_UMI_BASE MM_IO_BASE_UMI
40
41/* Flash bank 0 timing and control register */
42#define REG_UMI_FLASH0_TCR __REG32(HW_UMI_BASE + 0x00)
43/* Flash bank 1 timing and control register */
44#define REG_UMI_FLASH1_TCR __REG32(HW_UMI_BASE + 0x04)
45/* Flash bank 2 timing and control register */
46#define REG_UMI_FLASH2_TCR __REG32(HW_UMI_BASE + 0x08)
47/* MMD interface and control register */
48#define REG_UMI_MMD_ICR __REG32(HW_UMI_BASE + 0x0c)
49/* NAND timing and control register */
50#define REG_UMI_NAND_TCR __REG32(HW_UMI_BASE + 0x18)
51/* NAND ready/chip select register */
52#define REG_UMI_NAND_RCSR __REG32(HW_UMI_BASE + 0x1c)
53/* NAND ECC control & status register */
54#define REG_UMI_NAND_ECC_CSR __REG32(HW_UMI_BASE + 0x20)
55/* NAND ECC data register XXB2B1B0 */
56#define REG_UMI_NAND_ECC_DATA __REG32(HW_UMI_BASE + 0x24)
57/* BCH ECC Parameter N */
58#define REG_UMI_BCH_N __REG32(HW_UMI_BASE + 0x40)
59/* BCH ECC Parameter T */
60#define REG_UMI_BCH_K __REG32(HW_UMI_BASE + 0x44)
61/* BCH ECC Parameter K */
62#define REG_UMI_BCH_T __REG32(HW_UMI_BASE + 0x48)
63/* BCH ECC Contro Status */
64#define REG_UMI_BCH_CTRL_STATUS __REG32(HW_UMI_BASE + 0x4C)
65/* BCH WR ECC 31:0 */
66#define REG_UMI_BCH_WR_ECC_0 __REG32(HW_UMI_BASE + 0x50)
67/* BCH WR ECC 63:32 */
68#define REG_UMI_BCH_WR_ECC_1 __REG32(HW_UMI_BASE + 0x54)
69/* BCH WR ECC 95:64 */
70#define REG_UMI_BCH_WR_ECC_2 __REG32(HW_UMI_BASE + 0x58)
71/* BCH WR ECC 127:96 */
72#define REG_UMI_BCH_WR_ECC_3 __REG32(HW_UMI_BASE + 0x5c)
73/* BCH WR ECC 155:128 */
74#define REG_UMI_BCH_WR_ECC_4 __REG32(HW_UMI_BASE + 0x60)
75/* BCH Read Error Location 1,0 */
76#define REG_UMI_BCH_RD_ERR_LOC_1_0 __REG32(HW_UMI_BASE + 0x64)
77/* BCH Read Error Location 3,2 */
78#define REG_UMI_BCH_RD_ERR_LOC_3_2 __REG32(HW_UMI_BASE + 0x68)
79/* BCH Read Error Location 5,4 */
80#define REG_UMI_BCH_RD_ERR_LOC_5_4 __REG32(HW_UMI_BASE + 0x6c)
81/* BCH Read Error Location 7,6 */
82#define REG_UMI_BCH_RD_ERR_LOC_7_6 __REG32(HW_UMI_BASE + 0x70)
83/* BCH Read Error Location 9,8 */
84#define REG_UMI_BCH_RD_ERR_LOC_9_8 __REG32(HW_UMI_BASE + 0x74)
85/* BCH Read Error Location 11,10 */
86#define REG_UMI_BCH_RD_ERR_LOC_B_A __REG32(HW_UMI_BASE + 0x78)
87
88/* REG_UMI_FLASH0/1/2_TCR, REG_UMI_SRAM0/1_TCR bits */
89/* Enable wait pin during burst write or read */
90#define REG_UMI_TCR_WAITEN 0x80000000
91/* Enable mem ctrlr to work iwth ext mem of lower freq than AHB clk */
92#define REG_UMI_TCR_LOWFREQ 0x40000000
93/* 1=synch write, 0=async write */
94#define REG_UMI_TCR_MEMTYPE_SYNCWRITE 0x20000000
95/* 1=synch read, 0=async read */
96#define REG_UMI_TCR_MEMTYPE_SYNCREAD 0x10000000
97/* 1=page mode read, 0=normal mode read */
98#define REG_UMI_TCR_MEMTYPE_PAGEREAD 0x08000000
99/* page size/burst size (wrap only) */
100#define REG_UMI_TCR_MEMTYPE_PGSZ_MASK 0x07000000
101/* 4 word */
102#define REG_UMI_TCR_MEMTYPE_PGSZ_4 0x00000000
103/* 8 word */
104#define REG_UMI_TCR_MEMTYPE_PGSZ_8 0x01000000
105/* 16 word */
106#define REG_UMI_TCR_MEMTYPE_PGSZ_16 0x02000000
107/* 32 word */
108#define REG_UMI_TCR_MEMTYPE_PGSZ_32 0x03000000
109/* 64 word */
110#define REG_UMI_TCR_MEMTYPE_PGSZ_64 0x04000000
111/* 128 word */
112#define REG_UMI_TCR_MEMTYPE_PGSZ_128 0x05000000
113/* 256 word */
114#define REG_UMI_TCR_MEMTYPE_PGSZ_256 0x06000000
115/* 512 word */
116#define REG_UMI_TCR_MEMTYPE_PGSZ_512 0x07000000
117/* Page read access cycle / Burst write latency (n+2 / n+1) */
118#define REG_UMI_TCR_TPRC_TWLC_MASK 0x00f80000
119/* Bus turnaround cycle (n) */
120#define REG_UMI_TCR_TBTA_MASK 0x00070000
121/* Write pulse width cycle (n+1) */
122#define REG_UMI_TCR_TWP_MASK 0x0000f800
123/* Write recovery cycle (n+1) */
124#define REG_UMI_TCR_TWR_MASK 0x00000600
125/* Write address setup cycle (n+1) */
126#define REG_UMI_TCR_TAS_MASK 0x00000180
127/* Output enable delay cycle (n) */
128#define REG_UMI_TCR_TOE_MASK 0x00000060
129/* Read access cycle / Burst read latency (n+2 / n+1) */
130#define REG_UMI_TCR_TRC_TLC_MASK 0x0000001f
131
132/* REG_UMI_MMD_ICR bits */
133/* Flash write protection pin control */
134#define REG_UMI_MMD_ICR_FLASH_WP 0x8000
135/* Extend hold time for sram0, sram1 csn (39 MHz operation) */
136#define REG_UMI_MMD_ICR_XHCS 0x4000
137/* Enable SDRAM 2 interface control */
138#define REG_UMI_MMD_ICR_SDRAM2EN 0x2000
139/* Enable merge of flash banks 0/1 to 512 MBit bank */
140#define REG_UMI_MMD_ICR_INST512 0x1000
141/* Enable merge of flash banks 1/2 to 512 MBit bank */
142#define REG_UMI_MMD_ICR_DATA512 0x0800
143/* Enable SDRAM interface control */
144#define REG_UMI_MMD_ICR_SDRAMEN 0x0400
145/* Polarity of busy state of Burst Wait Signal */
146#define REG_UMI_MMD_ICR_WAITPOL 0x0200
147/* Enable burst clock stopped when not accessing external burst flash/sram */
148#define REG_UMI_MMD_ICR_BCLKSTOP 0x0100
149/* Enable the peri1_csn to replace flash1_csn in 512 Mb flash mode */
150#define REG_UMI_MMD_ICR_PERI1EN 0x0080
151/* Enable the peri2_csn to replace sdram_csn */
152#define REG_UMI_MMD_ICR_PERI2EN 0x0040
153/* Enable the peri3_csn to replace sdram2_csn */
154#define REG_UMI_MMD_ICR_PERI3EN 0x0020
155/* Enable sram bank1 for H/W controlled MRS */
156#define REG_UMI_MMD_ICR_MRSB1 0x0010
157/* Enable sram bank0 for H/W controlled MRS */
158#define REG_UMI_MMD_ICR_MRSB0 0x0008
159/* Polarity for assert3ed state of H/W controlled MRS */
160#define REG_UMI_MMD_ICR_MRSPOL 0x0004
161/* 0: S/W controllable ZZ/MRS/CRE/P-Mode pin */
162/* 1: H/W controlled ZZ/MRS/CRE/P-Mode, same timing as CS */
163#define REG_UMI_MMD_ICR_MRSMODE 0x0002
164/* MRS state for S/W controlled mode */
165#define REG_UMI_MMD_ICR_MRSSTATE 0x0001
166
167/* REG_UMI_NAND_TCR bits */
168/* Enable software to control CS */
169#define REG_UMI_NAND_TCR_CS_SWCTRL 0x80000000
170/* 16-bit nand wordsize if set */
171#define REG_UMI_NAND_TCR_WORD16 0x40000000
172/* Bus turnaround cycle (n) */
173#define REG_UMI_NAND_TCR_TBTA_MASK 0x00070000
174/* Write pulse width cycle (n+1) */
175#define REG_UMI_NAND_TCR_TWP_MASK 0x0000f800
176/* Write recovery cycle (n+1) */
177#define REG_UMI_NAND_TCR_TWR_MASK 0x00000600
178/* Write address setup cycle (n+1) */
179#define REG_UMI_NAND_TCR_TAS_MASK 0x00000180
180/* Output enable delay cycle (n) */
181#define REG_UMI_NAND_TCR_TOE_MASK 0x00000060
182/* Read access cycle (n+2) */
183#define REG_UMI_NAND_TCR_TRC_TLC_MASK 0x0000001f
184
185/* REG_UMI_NAND_RCSR bits */
186/* Status: Ready=1, Busy=0 */
187#define REG_UMI_NAND_RCSR_RDY 0x02
188/* Keep CS asserted during operation */
189#define REG_UMI_NAND_RCSR_CS_ASSERTED 0x01
190
191/* REG_UMI_NAND_ECC_CSR bits */
192/* Interrupt status - read-only */
193#define REG_UMI_NAND_ECC_CSR_NANDINT 0x80000000
194/* Read: Status of ECC done, Write: clear ECC interrupt */
195#define REG_UMI_NAND_ECC_CSR_ECCINT_RAW 0x00800000
196/* Read: Status of R/B, Write: clear R/B interrupt */
197#define REG_UMI_NAND_ECC_CSR_RBINT_RAW 0x00400000
198/* 1 = Enable ECC Interrupt */
199#define REG_UMI_NAND_ECC_CSR_ECCINT_ENABLE 0x00008000
200/* 1 = Assert interrupt at rising edge of R/B_ */
201#define REG_UMI_NAND_ECC_CSR_RBINT_ENABLE 0x00004000
202/* Calculate ECC by 0=512 bytes, 1=256 bytes */
203#define REG_UMI_NAND_ECC_CSR_256BYTE 0x00000080
204/* Enable ECC in hardware */
205#define REG_UMI_NAND_ECC_CSR_ECC_ENABLE 0x00000001
206
207/* REG_UMI_BCH_CTRL_STATUS bits */
208/* Shift to Indicate Number of correctable errors detected */
209#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR_SHIFT 20
210/* Indicate Number of correctable errors detected */
211#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR 0x00F00000
212/* Indicate Errors detected during read but uncorrectable */
213#define REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR 0x00080000
214/* Indicate Errors detected during read and are correctable */
215#define REG_UMI_BCH_CTRL_STATUS_CORR_ERR 0x00040000
216/* Flag indicates BCH's ECC status of read process are valid */
217#define REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID 0x00020000
218/* Flag indicates BCH's ECC status of write process are valid */
219#define REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID 0x00010000
220/* Pause ECC calculation */
221#define REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC 0x00000010
222/* Enable Interrupt */
223#define REG_UMI_BCH_CTRL_STATUS_INT_EN 0x00000004
224/* Enable ECC during read */
225#define REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN 0x00000002
226/* Enable ECC during write */
227#define REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN 0x00000001
228/* Mask for location */
229#define REG_UMI_BCH_ERR_LOC_MASK 0x00001FFF
230/* location within a byte */
231#define REG_UMI_BCH_ERR_LOC_BYTE 0x00000007
232/* location within a word */
233#define REG_UMI_BCH_ERR_LOC_WORD 0x00000018
234/* location within a page (512 byte) */
235#define REG_UMI_BCH_ERR_LOC_PAGE 0x00001FE0
236#define REG_UMI_BCH_ERR_LOC_ADDR(index) (__REG32(HW_UMI_BASE + 0x64 + (index / 2)*4) >> ((index % 2) * 16))
237#endif
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index b520c4b5678a..b2ad8090bd10 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -79,6 +79,10 @@ struct davinci_nand_pdata { /* platform_data */
79 79
80 /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */ 80 /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
81 unsigned options; 81 unsigned options;
82
83 /* Main and mirror bbt descriptor overrides */
84 struct nand_bbt_descr *bbt_td;
85 struct nand_bbt_descr *bbt_md;
82}; 86};
83 87
84#endif /* __ARCH_ARM_DAVINCI_NAND_H */ 88#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 116394484e71..9438bf6613a3 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -18,6 +18,7 @@
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
21#include <linux/mtd/onenand.h>
21#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
22#include <linux/io.h> 23#include <linux/io.h>
23#include <asm/sizes.h> 24#include <asm/sizes.h>
@@ -149,7 +150,7 @@ static struct mtd_partition nhk8815_onenand_partitions[] = {
149 } 150 }
150}; 151};
151 152
152static struct flash_platform_data nhk8815_onenand_data = { 153static struct onenand_platform_data nhk8815_onenand_data = {
153 .parts = nhk8815_onenand_partitions, 154 .parts = nhk8815_onenand_partitions,
154 .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions), 155 .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions),
155}; 156};
@@ -163,7 +164,7 @@ static struct resource nhk8815_onenand_resource[] = {
163}; 164};
164 165
165static struct platform_device nhk8815_onenand_device = { 166static struct platform_device nhk8815_onenand_device = {
166 .name = "onenand", 167 .name = "onenand-flash",
167 .id = -1, 168 .id = -1,
168 .dev = { 169 .dev = {
169 .platform_data = &nhk8815_onenand_data, 170 .platform_data = &nhk8815_onenand_data,
@@ -174,10 +175,10 @@ static struct platform_device nhk8815_onenand_device = {
174 175
175static void __init nhk8815_onenand_init(void) 176static void __init nhk8815_onenand_init(void)
176{ 177{
177#ifdef CONFIG_ONENAND 178#ifdef CONFIG_MTD_ONENAND
178 /* Set up SMCS0 for OneNand */ 179 /* Set up SMCS0 for OneNand */
179 writel(0x000030db, FSMC_BCR0); 180 writel(0x000030db, FSMC_BCR(0));
180 writel(0x02100551, FSMC_BTR0); 181 writel(0x02100551, FSMC_BTR(0));
181#endif 182#endif
182} 183}
183 184
diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h
index 2b972df22d12..5d2d21d414e0 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_nand.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h
@@ -22,6 +22,7 @@
22 22
23struct mxc_nand_platform_data { 23struct mxc_nand_platform_data {
24 int width; /* data bus width in bytes */ 24 int width; /* data bus width in bytes */
25 int hw_ecc; /* 0 if supress hardware ECC */ 25 int hw_ecc:1; /* 0 if supress hardware ECC */
26 int flash_bbt:1; /* set to 1 to use a flash based bbt */
26}; 27};
27#endif /* __ASM_ARCH_NAND_H */ 28#endif /* __ASM_ARCH_NAND_H */
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index 065985978413..226147b7e026 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -17,6 +17,7 @@
17 * Setting this flag will allow the kernel to 17 * Setting this flag will allow the kernel to
18 * look for it at boot time and also skip the NAND 18 * look for it at boot time and also skip the NAND
19 * scan. 19 * scan.
20 * @options: Default value to set into 'struct nand_chip' options.
20 * @nr_chips: Number of chips in this set 21 * @nr_chips: Number of chips in this set
21 * @nr_partitions: Number of partitions pointed to by @partitions 22 * @nr_partitions: Number of partitions pointed to by @partitions
22 * @name: Name of set (optional) 23 * @name: Name of set (optional)
@@ -31,6 +32,7 @@ struct s3c2410_nand_set {
31 unsigned int disable_ecc:1; 32 unsigned int disable_ecc:1;
32 unsigned int flash_bbt:1; 33 unsigned int flash_bbt:1;
33 34
35 unsigned int options;
34 int nr_chips; 36 int nr_chips;
35 int nr_partitions; 37 int nr_partitions;
36 char *name; 38 char *name;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e7563a9872d0..5fbf29e1e64f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -43,15 +43,17 @@
43// debugging, turns off buffer write mode if set to 1 43// debugging, turns off buffer write mode if set to 1
44#define FORCE_WORD_WRITE 0 44#define FORCE_WORD_WRITE 0
45 45
46#define MANUFACTURER_INTEL 0x0089 46/* Intel chips */
47#define I82802AB 0x00ad 47#define I82802AB 0x00ad
48#define I82802AC 0x00ac 48#define I82802AC 0x00ac
49#define PF38F4476 0x881c 49#define PF38F4476 0x881c
50#define MANUFACTURER_ST 0x0020 50/* STMicroelectronics chips */
51#define M50LPW080 0x002F 51#define M50LPW080 0x002F
52#define M50FLW080A 0x0080 52#define M50FLW080A 0x0080
53#define M50FLW080B 0x0081 53#define M50FLW080B 0x0081
54/* Atmel chips */
54#define AT49BV640D 0x02de 55#define AT49BV640D 0x02de
56#define AT49BV640DT 0x02db
55 57
56static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 58static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 59static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -199,6 +201,16 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
199 cfi->cfiq->BufWriteTimeoutMax = 0; 201 cfi->cfiq->BufWriteTimeoutMax = 0;
200} 202}
201 203
204static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
205{
206 struct map_info *map = mtd->priv;
207 struct cfi_private *cfi = map->fldrv_priv;
208 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
209
210 cfip->FeatureSupport |= (1 << 5);
211 mtd->flags |= MTD_POWERUP_LOCK;
212}
213
202#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 214#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
203/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 215/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
204static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 216static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -283,6 +295,8 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
283 295
284static struct cfi_fixup cfi_fixup_table[] = { 296static struct cfi_fixup cfi_fixup_table[] = {
285 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 297 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
298 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
299 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
286#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 300#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
287 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 301 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
288#endif 302#endif
@@ -294,16 +308,16 @@ static struct cfi_fixup cfi_fixup_table[] = {
294#endif 308#endif
295 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 309 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
296 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 310 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
297 { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, 311 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
298 { 0, 0, NULL, NULL } 312 { 0, 0, NULL, NULL }
299}; 313};
300 314
301static struct cfi_fixup jedec_fixup_table[] = { 315static struct cfi_fixup jedec_fixup_table[] = {
302 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, 316 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
303 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, 317 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
304 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, 318 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
305 { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, 319 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
306 { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, 320 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
307 { 0, 0, NULL, NULL } 321 { 0, 0, NULL, NULL }
308}; 322};
309static struct cfi_fixup fixup_table[] = { 323static struct cfi_fixup fixup_table[] = {
@@ -319,7 +333,7 @@ static struct cfi_fixup fixup_table[] = {
319static void cfi_fixup_major_minor(struct cfi_private *cfi, 333static void cfi_fixup_major_minor(struct cfi_private *cfi,
320 struct cfi_pri_intelext *extp) 334 struct cfi_pri_intelext *extp)
321{ 335{
322 if (cfi->mfr == MANUFACTURER_INTEL && 336 if (cfi->mfr == CFI_MFR_INTEL &&
323 cfi->id == PF38F4476 && extp->MinorVersion == '3') 337 cfi->id == PF38F4476 && extp->MinorVersion == '3')
324 extp->MinorVersion = '1'; 338 extp->MinorVersion = '1';
325} 339}
@@ -2235,7 +2249,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2235 2249
2236 /* Some chips have OTP located in the _top_ partition only. 2250 /* Some chips have OTP located in the _top_ partition only.
2237 For example: Intel 28F256L18T (T means top-parameter device) */ 2251 For example: Intel 28F256L18T (T means top-parameter device) */
2238 if (cfi->mfr == MANUFACTURER_INTEL) { 2252 if (cfi->mfr == CFI_MFR_INTEL) {
2239 switch (cfi->id) { 2253 switch (cfi->id) {
2240 case 0x880b: 2254 case 0x880b:
2241 case 0x880c: 2255 case 0x880c:
@@ -2564,6 +2578,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
2564 if (!ret) { 2578 if (!ret) {
2565 map_write(map, CMD(0xff), chip->start); 2579 map_write(map, CMD(0xff), chip->start);
2566 chip->state = FL_SHUTDOWN; 2580 chip->state = FL_SHUTDOWN;
2581 put_chip(map, chip, chip->start);
2567 } 2582 }
2568 spin_unlock(chip->mutex); 2583 spin_unlock(chip->mutex);
2569 } 2584 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 94bb61e19047..f3600e8d5382 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -490,10 +490,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
490 } 490 }
491#endif 491#endif
492 492
493 /* FIXME: erase-suspend-program is broken. See
494 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
495 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
496
497 __module_get(THIS_MODULE); 493 __module_get(THIS_MODULE);
498 return mtd; 494 return mtd;
499 495
@@ -573,7 +569,6 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
573 569
574 if (time_after(jiffies, timeo)) { 570 if (time_after(jiffies, timeo)) {
575 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 571 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
576 spin_unlock(chip->mutex);
577 return -EIO; 572 return -EIO;
578 } 573 }
579 spin_unlock(chip->mutex); 574 spin_unlock(chip->mutex);
@@ -589,15 +584,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
589 return 0; 584 return 0;
590 585
591 case FL_ERASING: 586 case FL_ERASING:
592 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */ 587 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
593 goto sleep; 588 !(mode == FL_READY || mode == FL_POINT ||
594 589 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
595 if (!( mode == FL_READY
596 || mode == FL_POINT
597 || !cfip
598 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
599 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
600 )))
601 goto sleep; 590 goto sleep;
602 591
603 /* We could check to see if we're trying to access the sector 592 /* We could check to see if we're trying to access the sector
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index c5a84fda5410..ca584d0380b4 100755
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -71,6 +71,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
71 cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); 71 cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
72 if (cfi_qry_present(map, base, cfi)) 72 if (cfi_qry_present(map, base, cfi))
73 return 1; 73 return 1;
74 /* some old SST chips, e.g. 39VF160x/39VF320x */
75 cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
76 cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
77 cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
78 cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
79 if (cfi_qry_present(map, base, cfi))
80 return 1;
74 /* QRY not found */ 81 /* QRY not found */
75 return 0; 82 return 0;
76} 83}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 736a3be265f2..1bec5e1ce6ac 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -142,8 +142,8 @@
142 142
143/* ST - www.st.com */ 143/* ST - www.st.com */
144#define M29F800AB 0x0058 144#define M29F800AB 0x0058
145#define M29W800DT 0x00D7 145#define M29W800DT 0x22D7
146#define M29W800DB 0x005B 146#define M29W800DB 0x225B
147#define M29W400DT 0x00EE 147#define M29W400DT 0x00EE
148#define M29W400DB 0x00EF 148#define M29W400DB 0x00EF
149#define M29W160DT 0x22C4 149#define M29W160DT 0x22C4
@@ -1575,7 +1575,7 @@ static const struct amd_flash_info jedec_table[] = {
1575 .dev_id = M29W800DT, 1575 .dev_id = M29W800DT,
1576 .name = "ST M29W800DT", 1576 .name = "ST M29W800DT",
1577 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1577 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1578 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1578 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1579 .dev_size = SIZE_1MiB, 1579 .dev_size = SIZE_1MiB,
1580 .cmd_set = P_ID_AMD_STD, 1580 .cmd_set = P_ID_AMD_STD,
1581 .nr_regions = 4, 1581 .nr_regions = 4,
@@ -1590,7 +1590,7 @@ static const struct amd_flash_info jedec_table[] = {
1590 .dev_id = M29W800DB, 1590 .dev_id = M29W800DB,
1591 .name = "ST M29W800DB", 1591 .name = "ST M29W800DB",
1592 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1592 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
1593 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1593 .uaddr = MTD_UADDR_0x0AAA_0x0555,
1594 .dev_size = SIZE_1MiB, 1594 .dev_size = SIZE_1MiB,
1595 .cmd_set = P_ID_AMD_STD, 1595 .cmd_set = P_ID_AMD_STD,
1596 .nr_regions = 4, 1596 .nr_regions = 4,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4c19269de91a..f3f4768d6e18 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -22,6 +22,7 @@
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23#include <linux/math64.h> 23#include <linux/math64.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/mod_devicetable.h>
25 26
26#include <linux/mtd/mtd.h> 27#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
@@ -29,9 +30,6 @@
29#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
30#include <linux/spi/flash.h> 31#include <linux/spi/flash.h>
31 32
32
33#define FLASH_PAGESIZE 256
34
35/* Flash opcodes. */ 33/* Flash opcodes. */
36#define OPCODE_WREN 0x06 /* Write enable */ 34#define OPCODE_WREN 0x06 /* Write enable */
37#define OPCODE_RDSR 0x05 /* Read status register */ 35#define OPCODE_RDSR 0x05 /* Read status register */
@@ -61,7 +59,7 @@
61 59
62/* Define max times to check status register before we give up. */ 60/* Define max times to check status register before we give up. */
63#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 61#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
64#define CMD_SIZE 4 62#define MAX_CMD_SIZE 4
65 63
66#ifdef CONFIG_M25PXX_USE_FAST_READ 64#ifdef CONFIG_M25PXX_USE_FAST_READ
67#define OPCODE_READ OPCODE_FAST_READ 65#define OPCODE_READ OPCODE_FAST_READ
@@ -78,8 +76,10 @@ struct m25p {
78 struct mutex lock; 76 struct mutex lock;
79 struct mtd_info mtd; 77 struct mtd_info mtd;
80 unsigned partitioned:1; 78 unsigned partitioned:1;
79 u16 page_size;
80 u16 addr_width;
81 u8 erase_opcode; 81 u8 erase_opcode;
82 u8 command[CMD_SIZE + FAST_READ_DUMMY_BYTE]; 82 u8 *command;
83}; 83};
84 84
85static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) 85static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -198,6 +198,19 @@ static int erase_chip(struct m25p *flash)
198 return 0; 198 return 0;
199} 199}
200 200
201static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
202{
203 /* opcode is in cmd[0] */
204 cmd[1] = addr >> (flash->addr_width * 8 - 8);
205 cmd[2] = addr >> (flash->addr_width * 8 - 16);
206 cmd[3] = addr >> (flash->addr_width * 8 - 24);
207}
208
209static int m25p_cmdsz(struct m25p *flash)
210{
211 return 1 + flash->addr_width;
212}
213
201/* 214/*
202 * Erase one sector of flash memory at offset ``offset'' which is any 215 * Erase one sector of flash memory at offset ``offset'' which is any
203 * address within the sector which should be erased. 216 * address within the sector which should be erased.
@@ -219,11 +232,9 @@ static int erase_sector(struct m25p *flash, u32 offset)
219 232
220 /* Set up command buffer. */ 233 /* Set up command buffer. */
221 flash->command[0] = flash->erase_opcode; 234 flash->command[0] = flash->erase_opcode;
222 flash->command[1] = offset >> 16; 235 m25p_addr2cmd(flash, offset, flash->command);
223 flash->command[2] = offset >> 8;
224 flash->command[3] = offset;
225 236
226 spi_write(flash->spi, flash->command, CMD_SIZE); 237 spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
227 238
228 return 0; 239 return 0;
229} 240}
@@ -325,7 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
325 * Should add 1 byte DUMMY_BYTE. 336 * Should add 1 byte DUMMY_BYTE.
326 */ 337 */
327 t[0].tx_buf = flash->command; 338 t[0].tx_buf = flash->command;
328 t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE; 339 t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
329 spi_message_add_tail(&t[0], &m); 340 spi_message_add_tail(&t[0], &m);
330 341
331 t[1].rx_buf = buf; 342 t[1].rx_buf = buf;
@@ -352,13 +363,11 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
352 363
353 /* Set up the write data buffer. */ 364 /* Set up the write data buffer. */
354 flash->command[0] = OPCODE_READ; 365 flash->command[0] = OPCODE_READ;
355 flash->command[1] = from >> 16; 366 m25p_addr2cmd(flash, from, flash->command);
356 flash->command[2] = from >> 8;
357 flash->command[3] = from;
358 367
359 spi_sync(flash->spi, &m); 368 spi_sync(flash->spi, &m);
360 369
361 *retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE; 370 *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
362 371
363 mutex_unlock(&flash->lock); 372 mutex_unlock(&flash->lock);
364 373
@@ -396,7 +405,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
396 memset(t, 0, (sizeof t)); 405 memset(t, 0, (sizeof t));
397 406
398 t[0].tx_buf = flash->command; 407 t[0].tx_buf = flash->command;
399 t[0].len = CMD_SIZE; 408 t[0].len = m25p_cmdsz(flash);
400 spi_message_add_tail(&t[0], &m); 409 spi_message_add_tail(&t[0], &m);
401 410
402 t[1].tx_buf = buf; 411 t[1].tx_buf = buf;
@@ -414,41 +423,36 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
414 423
415 /* Set up the opcode in the write buffer. */ 424 /* Set up the opcode in the write buffer. */
416 flash->command[0] = OPCODE_PP; 425 flash->command[0] = OPCODE_PP;
417 flash->command[1] = to >> 16; 426 m25p_addr2cmd(flash, to, flash->command);
418 flash->command[2] = to >> 8;
419 flash->command[3] = to;
420 427
421 /* what page do we start with? */ 428 page_offset = to & (flash->page_size - 1);
422 page_offset = to % FLASH_PAGESIZE;
423 429
424 /* do all the bytes fit onto one page? */ 430 /* do all the bytes fit onto one page? */
425 if (page_offset + len <= FLASH_PAGESIZE) { 431 if (page_offset + len <= flash->page_size) {
426 t[1].len = len; 432 t[1].len = len;
427 433
428 spi_sync(flash->spi, &m); 434 spi_sync(flash->spi, &m);
429 435
430 *retlen = m.actual_length - CMD_SIZE; 436 *retlen = m.actual_length - m25p_cmdsz(flash);
431 } else { 437 } else {
432 u32 i; 438 u32 i;
433 439
434 /* the size of data remaining on the first page */ 440 /* the size of data remaining on the first page */
435 page_size = FLASH_PAGESIZE - page_offset; 441 page_size = flash->page_size - page_offset;
436 442
437 t[1].len = page_size; 443 t[1].len = page_size;
438 spi_sync(flash->spi, &m); 444 spi_sync(flash->spi, &m);
439 445
440 *retlen = m.actual_length - CMD_SIZE; 446 *retlen = m.actual_length - m25p_cmdsz(flash);
441 447
442 /* write everything in PAGESIZE chunks */ 448 /* write everything in flash->page_size chunks */
443 for (i = page_size; i < len; i += page_size) { 449 for (i = page_size; i < len; i += page_size) {
444 page_size = len - i; 450 page_size = len - i;
445 if (page_size > FLASH_PAGESIZE) 451 if (page_size > flash->page_size)
446 page_size = FLASH_PAGESIZE; 452 page_size = flash->page_size;
447 453
448 /* write the next page to flash */ 454 /* write the next page to flash */
449 flash->command[1] = (to + i) >> 16; 455 m25p_addr2cmd(flash, to + i, flash->command);
450 flash->command[2] = (to + i) >> 8;
451 flash->command[3] = (to + i);
452 456
453 t[1].tx_buf = buf + i; 457 t[1].tx_buf = buf + i;
454 t[1].len = page_size; 458 t[1].len = page_size;
@@ -460,7 +464,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
460 spi_sync(flash->spi, &m); 464 spi_sync(flash->spi, &m);
461 465
462 if (retlen) 466 if (retlen)
463 *retlen += m.actual_length - CMD_SIZE; 467 *retlen += m.actual_length - m25p_cmdsz(flash);
464 } 468 }
465 } 469 }
466 470
@@ -492,7 +496,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
492 memset(t, 0, (sizeof t)); 496 memset(t, 0, (sizeof t));
493 497
494 t[0].tx_buf = flash->command; 498 t[0].tx_buf = flash->command;
495 t[0].len = CMD_SIZE; 499 t[0].len = m25p_cmdsz(flash);
496 spi_message_add_tail(&t[0], &m); 500 spi_message_add_tail(&t[0], &m);
497 501
498 t[1].tx_buf = buf; 502 t[1].tx_buf = buf;
@@ -511,9 +515,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
511 /* Start write from odd address. */ 515 /* Start write from odd address. */
512 if (actual) { 516 if (actual) {
513 flash->command[0] = OPCODE_BP; 517 flash->command[0] = OPCODE_BP;
514 flash->command[1] = to >> 16; 518 m25p_addr2cmd(flash, to, flash->command);
515 flash->command[2] = to >> 8;
516 flash->command[3] = to;
517 519
518 /* write one byte. */ 520 /* write one byte. */
519 t[1].len = 1; 521 t[1].len = 1;
@@ -521,17 +523,15 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
521 ret = wait_till_ready(flash); 523 ret = wait_till_ready(flash);
522 if (ret) 524 if (ret)
523 goto time_out; 525 goto time_out;
524 *retlen += m.actual_length - CMD_SIZE; 526 *retlen += m.actual_length - m25p_cmdsz(flash);
525 } 527 }
526 to += actual; 528 to += actual;
527 529
528 flash->command[0] = OPCODE_AAI_WP; 530 flash->command[0] = OPCODE_AAI_WP;
529 flash->command[1] = to >> 16; 531 m25p_addr2cmd(flash, to, flash->command);
530 flash->command[2] = to >> 8;
531 flash->command[3] = to;
532 532
533 /* Write out most of the data here. */ 533 /* Write out most of the data here. */
534 cmd_sz = CMD_SIZE; 534 cmd_sz = m25p_cmdsz(flash);
535 for (; actual < len - 1; actual += 2) { 535 for (; actual < len - 1; actual += 2) {
536 t[0].len = cmd_sz; 536 t[0].len = cmd_sz;
537 /* write two bytes. */ 537 /* write two bytes. */
@@ -555,10 +555,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
555 if (actual != len) { 555 if (actual != len) {
556 write_enable(flash); 556 write_enable(flash);
557 flash->command[0] = OPCODE_BP; 557 flash->command[0] = OPCODE_BP;
558 flash->command[1] = to >> 16; 558 m25p_addr2cmd(flash, to, flash->command);
559 flash->command[2] = to >> 8; 559 t[0].len = m25p_cmdsz(flash);
560 flash->command[3] = to;
561 t[0].len = CMD_SIZE;
562 t[1].len = 1; 560 t[1].len = 1;
563 t[1].tx_buf = buf + actual; 561 t[1].tx_buf = buf + actual;
564 562
@@ -566,7 +564,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
566 ret = wait_till_ready(flash); 564 ret = wait_till_ready(flash);
567 if (ret) 565 if (ret)
568 goto time_out; 566 goto time_out;
569 *retlen += m.actual_length - CMD_SIZE; 567 *retlen += m.actual_length - m25p_cmdsz(flash);
570 write_disable(flash); 568 write_disable(flash);
571 } 569 }
572 570
@@ -582,8 +580,6 @@ time_out:
582 */ 580 */
583 581
584struct flash_info { 582struct flash_info {
585 char *name;
586
587 /* JEDEC id zero means "no ID" (most older chips); otherwise it has 583 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
588 * a high byte of zero plus three data bytes: the manufacturer id, 584 * a high byte of zero plus three data bytes: the manufacturer id,
589 * then a two byte device id. 585 * then a two byte device id.
@@ -597,87 +593,119 @@ struct flash_info {
597 unsigned sector_size; 593 unsigned sector_size;
598 u16 n_sectors; 594 u16 n_sectors;
599 595
596 u16 page_size;
597 u16 addr_width;
598
600 u16 flags; 599 u16 flags;
601#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ 600#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
601#define M25P_NO_ERASE 0x02 /* No erase command needed */
602}; 602};
603 603
604#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
605 ((kernel_ulong_t)&(struct flash_info) { \
606 .jedec_id = (_jedec_id), \
607 .ext_id = (_ext_id), \
608 .sector_size = (_sector_size), \
609 .n_sectors = (_n_sectors), \
610 .page_size = 256, \
611 .addr_width = 3, \
612 .flags = (_flags), \
613 })
614
615#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
616 ((kernel_ulong_t)&(struct flash_info) { \
617 .sector_size = (_sector_size), \
618 .n_sectors = (_n_sectors), \
619 .page_size = (_page_size), \
620 .addr_width = (_addr_width), \
621 .flags = M25P_NO_ERASE, \
622 })
604 623
605/* NOTE: double check command sets and memory organization when you add 624/* NOTE: double check command sets and memory organization when you add
606 * more flash chips. This current list focusses on newer chips, which 625 * more flash chips. This current list focusses on newer chips, which
607 * have been converging on command sets which including JEDEC ID. 626 * have been converging on command sets which including JEDEC ID.
608 */ 627 */
609static struct flash_info __devinitdata m25p_data [] = { 628static const struct spi_device_id m25p_ids[] = {
610
611 /* Atmel -- some are (confusingly) marketed as "DataFlash" */ 629 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
612 { "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, }, 630 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
613 { "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, }, 631 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
614 632
615 { "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, }, 633 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
616 { "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, }, 634 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
617 635
618 { "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, }, 636 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
619 { "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, }, 637 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
620 { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, }, 638 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
621 { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, 639 { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
622 640
623 /* Macronix */ 641 /* Macronix */
624 { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, }, 642 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
625 { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, }, 643 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
626 { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, }, 644 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
627 { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, }, 645 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
646 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
628 647
629 /* Spansion -- single (large) sector size only, at least 648 /* Spansion -- single (large) sector size only, at least
630 * for the chips listed here (without boot sectors). 649 * for the chips listed here (without boot sectors).
631 */ 650 */
632 { "s25sl004a", 0x010212, 0, 64 * 1024, 8, }, 651 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
633 { "s25sl008a", 0x010213, 0, 64 * 1024, 16, }, 652 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
634 { "s25sl016a", 0x010214, 0, 64 * 1024, 32, }, 653 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
635 { "s25sl032a", 0x010215, 0, 64 * 1024, 64, }, 654 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
636 { "s25sl064a", 0x010216, 0, 64 * 1024, 128, }, 655 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
637 { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, 656 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
638 { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, }, 657 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
639 { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, }, 658 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
640 { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, }, 659 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
641 660
642 /* SST -- large erase sizes are "overlays", "sectors" are 4K */ 661 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
643 { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, }, 662 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
644 { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, }, 663 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
645 { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, }, 664 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
646 { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, }, 665 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
647 { "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, }, 666 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
648 { "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, }, 667 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
649 { "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, }, 668 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
650 { "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, }, 669 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
651 670
652 /* ST Microelectronics -- newer production may have feature updates */ 671 /* ST Microelectronics -- newer production may have feature updates */
653 { "m25p05", 0x202010, 0, 32 * 1024, 2, }, 672 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
654 { "m25p10", 0x202011, 0, 32 * 1024, 4, }, 673 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
655 { "m25p20", 0x202012, 0, 64 * 1024, 4, }, 674 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
656 { "m25p40", 0x202013, 0, 64 * 1024, 8, }, 675 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
657 { "m25p80", 0, 0, 64 * 1024, 16, }, 676 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
658 { "m25p16", 0x202015, 0, 64 * 1024, 32, }, 677 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
659 { "m25p32", 0x202016, 0, 64 * 1024, 64, }, 678 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
660 { "m25p64", 0x202017, 0, 64 * 1024, 128, }, 679 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
661 { "m25p128", 0x202018, 0, 256 * 1024, 64, }, 680 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
662 681
663 { "m45pe10", 0x204011, 0, 64 * 1024, 2, }, 682 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
664 { "m45pe80", 0x204014, 0, 64 * 1024, 16, }, 683 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
665 { "m45pe16", 0x204015, 0, 64 * 1024, 32, }, 684 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
666 685
667 { "m25pe80", 0x208014, 0, 64 * 1024, 16, }, 686 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
668 { "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, }, 687 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
669 688
670 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ 689 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
671 { "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, }, 690 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
672 { "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, }, 691 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
673 { "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, }, 692 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
674 { "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, }, 693 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
675 { "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, }, 694 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
676 { "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, }, 695 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
677 { "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, }, 696 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
697
698 /* Catalyst / On Semiconductor -- non-JEDEC */
699 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
700 { "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
701 { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
702 { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
703 { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
704 { },
678}; 705};
706MODULE_DEVICE_TABLE(spi, m25p_ids);
679 707
680static struct flash_info *__devinit jedec_probe(struct spi_device *spi) 708static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
681{ 709{
682 int tmp; 710 int tmp;
683 u8 code = OPCODE_RDID; 711 u8 code = OPCODE_RDID;
@@ -702,18 +730,24 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
702 jedec = jedec << 8; 730 jedec = jedec << 8;
703 jedec |= id[2]; 731 jedec |= id[2];
704 732
733 /*
734 * Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants,
735 * which depend on technology process. Officially RDID command doesn't
736 * exist for non-JEDEC chips, but for compatibility they return ID 0.
737 */
738 if (jedec == 0)
739 return NULL;
740
705 ext_jedec = id[3] << 8 | id[4]; 741 ext_jedec = id[3] << 8 | id[4];
706 742
707 for (tmp = 0, info = m25p_data; 743 for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
708 tmp < ARRAY_SIZE(m25p_data); 744 info = (void *)m25p_ids[tmp].driver_data;
709 tmp++, info++) {
710 if (info->jedec_id == jedec) { 745 if (info->jedec_id == jedec) {
711 if (info->ext_id != 0 && info->ext_id != ext_jedec) 746 if (info->ext_id != 0 && info->ext_id != ext_jedec)
712 continue; 747 continue;
713 return info; 748 return &m25p_ids[tmp];
714 } 749 }
715 } 750 }
716 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
717 return NULL; 751 return NULL;
718} 752}
719 753
@@ -725,6 +759,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
725 */ 759 */
726static int __devinit m25p_probe(struct spi_device *spi) 760static int __devinit m25p_probe(struct spi_device *spi)
727{ 761{
762 const struct spi_device_id *id = spi_get_device_id(spi);
728 struct flash_platform_data *data; 763 struct flash_platform_data *data;
729 struct m25p *flash; 764 struct m25p *flash;
730 struct flash_info *info; 765 struct flash_info *info;
@@ -737,50 +772,65 @@ static int __devinit m25p_probe(struct spi_device *spi)
737 */ 772 */
738 data = spi->dev.platform_data; 773 data = spi->dev.platform_data;
739 if (data && data->type) { 774 if (data && data->type) {
740 for (i = 0, info = m25p_data; 775 const struct spi_device_id *plat_id;
741 i < ARRAY_SIZE(m25p_data);
742 i++, info++) {
743 if (strcmp(data->type, info->name) == 0)
744 break;
745 }
746 776
747 /* unrecognized chip? */ 777 for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
748 if (i == ARRAY_SIZE(m25p_data)) { 778 plat_id = &m25p_ids[i];
749 DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n", 779 if (strcmp(data->type, plat_id->name))
750 dev_name(&spi->dev), data->type); 780 continue;
751 info = NULL; 781 break;
752
753 /* recognized; is that chip really what's there? */
754 } else if (info->jedec_id) {
755 struct flash_info *chip = jedec_probe(spi);
756
757 if (!chip || chip != info) {
758 dev_warn(&spi->dev, "found %s, expected %s\n",
759 chip ? chip->name : "UNKNOWN",
760 info->name);
761 info = NULL;
762 }
763 } 782 }
764 } else
765 info = jedec_probe(spi);
766 783
767 if (!info) 784 if (plat_id)
768 return -ENODEV; 785 id = plat_id;
786 else
787 dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
788 }
789
790 info = (void *)id->driver_data;
791
792 if (info->jedec_id) {
793 const struct spi_device_id *jid;
794
795 jid = jedec_probe(spi);
796 if (!jid) {
797 dev_info(&spi->dev, "non-JEDEC variant of %s\n",
798 id->name);
799 } else if (jid != id) {
800 /*
801 * JEDEC knows better, so overwrite platform ID. We
802 * can't trust partitions any longer, but we'll let
803 * mtd apply them anyway, since some partitions may be
804 * marked read-only, and we don't want to lose that
805 * information, even if it's not 100% accurate.
806 */
807 dev_warn(&spi->dev, "found %s, expected %s\n",
808 jid->name, id->name);
809 id = jid;
810 info = (void *)jid->driver_data;
811 }
812 }
769 813
770 flash = kzalloc(sizeof *flash, GFP_KERNEL); 814 flash = kzalloc(sizeof *flash, GFP_KERNEL);
771 if (!flash) 815 if (!flash)
772 return -ENOMEM; 816 return -ENOMEM;
817 flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
818 if (!flash->command) {
819 kfree(flash);
820 return -ENOMEM;
821 }
773 822
774 flash->spi = spi; 823 flash->spi = spi;
775 mutex_init(&flash->lock); 824 mutex_init(&flash->lock);
776 dev_set_drvdata(&spi->dev, flash); 825 dev_set_drvdata(&spi->dev, flash);
777 826
778 /* 827 /*
779 * Atmel serial flash tend to power up 828 * Atmel and SST serial flash tend to power
780 * with the software protection bits set 829 * up with the software protection bits set
781 */ 830 */
782 831
783 if (info->jedec_id >> 16 == 0x1f) { 832 if (info->jedec_id >> 16 == 0x1f ||
833 info->jedec_id >> 16 == 0xbf) {
784 write_enable(flash); 834 write_enable(flash);
785 write_sr(flash, 0); 835 write_sr(flash, 0);
786 } 836 }
@@ -812,9 +862,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
812 flash->mtd.erasesize = info->sector_size; 862 flash->mtd.erasesize = info->sector_size;
813 } 863 }
814 864
865 if (info->flags & M25P_NO_ERASE)
866 flash->mtd.flags |= MTD_NO_ERASE;
867
815 flash->mtd.dev.parent = &spi->dev; 868 flash->mtd.dev.parent = &spi->dev;
869 flash->page_size = info->page_size;
870 flash->addr_width = info->addr_width;
816 871
817 dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name, 872 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
818 (long long)flash->mtd.size >> 10); 873 (long long)flash->mtd.size >> 10);
819 874
820 DEBUG(MTD_DEBUG_LEVEL2, 875 DEBUG(MTD_DEBUG_LEVEL2,
@@ -888,8 +943,10 @@ static int __devexit m25p_remove(struct spi_device *spi)
888 status = del_mtd_partitions(&flash->mtd); 943 status = del_mtd_partitions(&flash->mtd);
889 else 944 else
890 status = del_mtd_device(&flash->mtd); 945 status = del_mtd_device(&flash->mtd);
891 if (status == 0) 946 if (status == 0) {
947 kfree(flash->command);
892 kfree(flash); 948 kfree(flash);
949 }
893 return 0; 950 return 0;
894} 951}
895 952
@@ -900,6 +957,7 @@ static struct spi_driver m25p80_driver = {
900 .bus = &spi_bus_type, 957 .bus = &spi_bus_type,
901 .owner = THIS_MODULE, 958 .owner = THIS_MODULE,
902 }, 959 },
960 .id_table = m25p_ids,
903 .probe = m25p_probe, 961 .probe = m25p_probe,
904 .remove = __devexit_p(m25p_remove), 962 .remove = __devexit_p(m25p_remove),
905 963
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 93e3627be74c..19817404ce7d 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -636,6 +636,7 @@ add_dataflash_otp(struct spi_device *spi, char *name,
636 struct mtd_info *device; 636 struct mtd_info *device;
637 struct flash_platform_data *pdata = spi->dev.platform_data; 637 struct flash_platform_data *pdata = spi->dev.platform_data;
638 char *otp_tag = ""; 638 char *otp_tag = "";
639 int err = 0;
639 640
640 priv = kzalloc(sizeof *priv, GFP_KERNEL); 641 priv = kzalloc(sizeof *priv, GFP_KERNEL);
641 if (!priv) 642 if (!priv)
@@ -693,13 +694,23 @@ add_dataflash_otp(struct spi_device *spi, char *name,
693 694
694 if (nr_parts > 0) { 695 if (nr_parts > 0) {
695 priv->partitioned = 1; 696 priv->partitioned = 1;
696 return add_mtd_partitions(device, parts, nr_parts); 697 err = add_mtd_partitions(device, parts, nr_parts);
698 goto out;
697 } 699 }
698 } else if (pdata && pdata->nr_parts) 700 } else if (pdata && pdata->nr_parts)
699 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 701 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
700 pdata->nr_parts, device->name); 702 pdata->nr_parts, device->name);
701 703
702 return add_mtd_device(device) == 1 ? -ENODEV : 0; 704 if (add_mtd_device(device) == 1)
705 err = -ENODEV;
706
707out:
708 if (!err)
709 return 0;
710
711 dev_set_drvdata(&spi->dev, NULL);
712 kfree(priv);
713 return err;
703} 714}
704 715
705static inline int __devinit 716static inline int __devinit
@@ -932,8 +943,10 @@ static int __devexit dataflash_remove(struct spi_device *spi)
932 status = del_mtd_partitions(&flash->mtd); 943 status = del_mtd_partitions(&flash->mtd);
933 else 944 else
934 status = del_mtd_device(&flash->mtd); 945 status = del_mtd_device(&flash->mtd);
935 if (status == 0) 946 if (status == 0) {
947 dev_set_drvdata(&spi->dev, NULL);
936 kfree(flash); 948 kfree(flash);
949 }
937 return status; 950 return status;
938} 951}
939 952
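The add_dataflash_otp() change above funnels the partitioned and unpartitioned paths through a single out: label so the priv allocation and drvdata are unwound on any failure. A generic sketch of that single-exit cleanup idiom, with example_* names and the stubbed registration helpers standing in for add_mtd_partitions()/add_mtd_device():

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

struct example_ctx { int partitioned; };

/* Stubs standing in for the two registration paths. */
static int example_register_parts(struct example_ctx *ctx) { return 0; }
static int example_register_whole(struct example_ctx *ctx) { return 0; }

static int example_setup(struct device *dev, bool use_parts)
{
	struct example_ctx *ctx;
	int err = 0;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	dev_set_drvdata(dev, ctx);

	if (use_parts) {
		ctx->partitioned = 1;
		err = example_register_parts(ctx);
		goto out;		/* success and failure share one exit */
	}

	err = example_register_whole(ctx);
out:
	if (!err)
		return 0;

	/* Any failure unwinds the drvdata pointer and the allocation. */
	dev_set_drvdata(dev, NULL);
	kfree(ctx);
	return err;
}

The matching dataflash_remove() hunk applies the same rule in reverse: clear drvdata before freeing, so nothing else can pick up a stale pointer.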
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 847e214ade59..4c364d44ad59 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -359,12 +359,6 @@ config MTD_SA1100
359 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ. 359 the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
360 If you have such a board, say 'Y'. 360 If you have such a board, say 'Y'.
361 361
362config MTD_IPAQ
363 tristate "CFI Flash device mapped on Compaq/HP iPAQ"
364 depends on IPAQ_HANDHELD && MTD_CFI
365 help
366 This provides a driver for the on-board flash of the iPAQ.
367
368config MTD_DC21285 362config MTD_DC21285
369 tristate "CFI Flash device mapped on DC21285 Footbridge" 363 tristate "CFI Flash device mapped on DC21285 Footbridge"
370 depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS 364 depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ae2f6dbe43c3..ce315214ff2b 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -24,12 +24,12 @@ obj-$(CONFIG_MTD_CEIVA) += ceiva.o
24obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o 24obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
25obj-$(CONFIG_MTD_PHYSMAP) += physmap.o 25obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 26obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
27obj-$(CONFIG_MTD_PISMO) += pismo.o
27obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 28obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
28obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 29obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
29obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 30obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
30obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o 31obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
31obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o 32obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
32obj-$(CONFIG_MTD_IPAQ) += ipaq-flash.o
33obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o 33obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
34obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o 34obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
35obj-$(CONFIG_MTD_NETSC520) += netsc520.o 35obj-$(CONFIG_MTD_NETSC520) += netsc520.o
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
deleted file mode 100644
index 76708e796b70..000000000000
--- a/drivers/mtd/maps/ipaq-flash.c
+++ /dev/null
@@ -1,460 +0,0 @@
1/*
2 * Flash memory access on iPAQ Handhelds (either SA1100 or PXA250 based)
3 *
4 * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
5 * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
6 * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
7 */
8
9#include <linux/module.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/spinlock.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <asm/page.h>
16#include <asm/mach-types.h>
17#include <asm/system.h>
18#include <asm/errno.h>
19
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23#ifdef CONFIG_MTD_CONCAT
24#include <linux/mtd/concat.h>
25#endif
26
27#include <mach/hardware.h>
28#include <mach/h3600.h>
29#include <asm/io.h>
30
31
32#ifndef CONFIG_IPAQ_HANDHELD
33#error This is for iPAQ Handhelds only
34#endif
35#ifdef CONFIG_SA1100_JORNADA56X
36
37static void jornada56x_set_vpp(struct map_info *map, int vpp)
38{
39 if (vpp)
40 GPSR = GPIO_GPIO26;
41 else
42 GPCR = GPIO_GPIO26;
43 GPDR |= GPIO_GPIO26;
44}
45
46#endif
47
48#ifdef CONFIG_SA1100_JORNADA720
49
50static void jornada720_set_vpp(struct map_info *map, int vpp)
51{
52 if (vpp)
53 PPSR |= 0x80;
54 else
55 PPSR &= ~0x80;
56 PPDR |= 0x80;
57}
58
59#endif
60
61#define MAX_IPAQ_CS 2 /* Number of CS we are going to test */
62
63#define IPAQ_MAP_INIT(X) \
64 { \
65 name: "IPAQ flash " X, \
66 }
67
68
69static struct map_info ipaq_map[MAX_IPAQ_CS] = {
70 IPAQ_MAP_INIT("bank 1"),
71 IPAQ_MAP_INIT("bank 2")
72};
73
74static struct mtd_info *my_sub_mtd[MAX_IPAQ_CS] = {
75 NULL,
76 NULL
77};
78
79/*
80 * Here are partition information for all known IPAQ-based devices.
81 * See include/linux/mtd/partitions.h for definition of the mtd_partition
82 * structure.
83 *
84 * The *_max_flash_size is the maximum possible mapped flash size which
85 * is not necessarily the actual flash size. It must be no more than
86 * the value specified in the "struct map_desc *_io_desc" mapping
87 * definition for the corresponding machine.
88 *
89 * Please keep these in alphabetical order, and formatted as per existing
90 * entries. Thanks.
91 */
92
93#ifdef CONFIG_IPAQ_HANDHELD
94static unsigned long h3xxx_max_flash_size = 0x04000000;
95static struct mtd_partition h3xxx_partitions[] = {
96 {
97 name: "H3XXX boot firmware",
98#ifndef CONFIG_LAB
99 size: 0x00040000,
100#else
101 size: 0x00080000,
102#endif
103 offset: 0,
104#ifndef CONFIG_LAB
105 mask_flags: MTD_WRITEABLE, /* force read-only */
106#endif
107 },
108 {
109 name: "H3XXX root jffs2",
110#ifndef CONFIG_LAB
111 size: 0x2000000 - 2*0x40000, /* Warning, this is fixed later */
112 offset: 0x00040000,
113#else
114 size: 0x2000000 - 0x40000 - 0x80000, /* Warning, this is fixed later */
115 offset: 0x00080000,
116#endif
117 },
118 {
119 name: "asset",
120 size: 0x40000,
121 offset: 0x2000000 - 0x40000, /* Warning, this is fixed later */
122 mask_flags: MTD_WRITEABLE, /* force read-only */
123 }
124};
125
126#ifndef CONFIG_MTD_CONCAT
127static struct mtd_partition h3xxx_partitions_bank2[] = {
128 /* this is used only on 2 CS machines when concat is not present */
129 {
130 name: "second H3XXX root jffs2",
131 size: 0x1000000 - 0x40000, /* Warning, this is fixed later */
132 offset: 0x00000000,
133 },
134 {
135 name: "second asset",
136 size: 0x40000,
137 offset: 0x1000000 - 0x40000, /* Warning, this is fixed later */
138 mask_flags: MTD_WRITEABLE, /* force read-only */
139 }
140};
141#endif
142
143static DEFINE_SPINLOCK(ipaq_vpp_lock);
144
145static void h3xxx_set_vpp(struct map_info *map, int vpp)
146{
147 static int nest = 0;
148
149 spin_lock(&ipaq_vpp_lock);
150 if (vpp)
151 nest++;
152 else
153 nest--;
154 if (nest)
155 assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 1);
156 else
157 assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 0);
158 spin_unlock(&ipaq_vpp_lock);
159}
160
161#endif
162
163#if defined(CONFIG_SA1100_JORNADA56X) || defined(CONFIG_SA1100_JORNADA720)
164static unsigned long jornada_max_flash_size = 0x02000000;
165static struct mtd_partition jornada_partitions[] = {
166 {
167 name: "Jornada boot firmware",
168 size: 0x00040000,
169 offset: 0,
170 mask_flags: MTD_WRITEABLE, /* force read-only */
171 }, {
172 name: "Jornada root jffs2",
173 size: MTDPART_SIZ_FULL,
174 offset: 0x00040000,
175 }
176};
177#endif
178
179
180static struct mtd_partition *parsed_parts;
181static struct mtd_info *mymtd;
182
183static unsigned long cs_phys[] = {
184#ifdef CONFIG_ARCH_SA1100
185 SA1100_CS0_PHYS,
186 SA1100_CS1_PHYS,
187 SA1100_CS2_PHYS,
188 SA1100_CS3_PHYS,
189 SA1100_CS4_PHYS,
190 SA1100_CS5_PHYS,
191#else
192 PXA_CS0_PHYS,
193 PXA_CS1_PHYS,
194 PXA_CS2_PHYS,
195 PXA_CS3_PHYS,
196 PXA_CS4_PHYS,
197 PXA_CS5_PHYS,
198#endif
199};
200
201static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
202
203static int __init h1900_special_case(void);
204
205static int __init ipaq_mtd_init(void)
206{
207 struct mtd_partition *parts = NULL;
208 int nb_parts = 0;
209 int parsed_nr_parts = 0;
210 const char *part_type;
211 int i; /* used when we have >1 flash chips */
212 unsigned long tot_flashsize = 0; /* used when we have >1 flash chips */
213
214 /* Default flash bankwidth */
215 // ipaq_map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
216
217 if (machine_is_h1900())
218 {
219 /* For our intents, the h1900 is not a real iPAQ, so we special-case it. */
220 return h1900_special_case();
221 }
222
223 if (machine_is_h3100() || machine_is_h1900())
224 for(i=0; i<MAX_IPAQ_CS; i++)
225 ipaq_map[i].bankwidth = 2;
226 else
227 for(i=0; i<MAX_IPAQ_CS; i++)
228 ipaq_map[i].bankwidth = 4;
229
230 /*
231 * Static partition definition selection
232 */
233 part_type = "static";
234
235 simple_map_init(&ipaq_map[0]);
236 simple_map_init(&ipaq_map[1]);
237
238#ifdef CONFIG_IPAQ_HANDHELD
239 if (machine_is_ipaq()) {
240 parts = h3xxx_partitions;
241 nb_parts = ARRAY_SIZE(h3xxx_partitions);
242 for(i=0; i<MAX_IPAQ_CS; i++) {
243 ipaq_map[i].size = h3xxx_max_flash_size;
244 ipaq_map[i].set_vpp = h3xxx_set_vpp;
245 ipaq_map[i].phys = cs_phys[i];
246 ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
247 if (machine_is_h3100 () || machine_is_h1900())
248 ipaq_map[i].bankwidth = 2;
249 }
250 if (machine_is_h3600()) {
251 /* No asset partition here */
252 h3xxx_partitions[1].size += 0x40000;
253 nb_parts--;
254 }
255 }
256#endif
257#ifdef CONFIG_ARCH_H5400
258 if (machine_is_h5400()) {
259 ipaq_map[0].size = 0x02000000;
260 ipaq_map[1].size = 0x02000000;
261 ipaq_map[1].phys = 0x02000000;
262 ipaq_map[1].virt = ipaq_map[0].virt + 0x02000000;
263 }
264#endif
265#ifdef CONFIG_ARCH_H1900
266 if (machine_is_h1900()) {
267 ipaq_map[0].size = 0x00400000;
268 ipaq_map[1].size = 0x02000000;
269 ipaq_map[1].phys = 0x00080000;
270 ipaq_map[1].virt = ipaq_map[0].virt + 0x00080000;
271 }
272#endif
273
274#ifdef CONFIG_SA1100_JORNADA56X
275 if (machine_is_jornada56x()) {
276 parts = jornada_partitions;
277 nb_parts = ARRAY_SIZE(jornada_partitions);
278 ipaq_map[0].size = jornada_max_flash_size;
279 ipaq_map[0].set_vpp = jornada56x_set_vpp;
280 ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
281 }
282#endif
283#ifdef CONFIG_SA1100_JORNADA720
284 if (machine_is_jornada720()) {
285 parts = jornada_partitions;
286 nb_parts = ARRAY_SIZE(jornada_partitions);
287 ipaq_map[0].size = jornada_max_flash_size;
288 ipaq_map[0].set_vpp = jornada720_set_vpp;
289 }
290#endif
291
292
293 if (machine_is_ipaq()) { /* for iPAQs only */
294 for(i=0; i<MAX_IPAQ_CS; i++) {
295 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with CFI.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
296 my_sub_mtd[i] = do_map_probe("cfi_probe", &ipaq_map[i]);
297 if (!my_sub_mtd[i]) {
298 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
299 my_sub_mtd[i] = do_map_probe("jedec_probe", &ipaq_map[i]);
300 }
301 if (!my_sub_mtd[i]) {
302 printk(KERN_NOTICE "iPAQ flash: failed to find flash.\n");
303 if (i)
304 break;
305 else
306 return -ENXIO;
307 } else
308 printk(KERN_NOTICE "iPAQ flash: found %d bytes\n", my_sub_mtd[i]->size);
309
310 /* do we really need this debugging? --joshua 20030703 */
311 // printk("my_sub_mtd[%d]=%p\n", i, my_sub_mtd[i]);
312 my_sub_mtd[i]->owner = THIS_MODULE;
313 tot_flashsize += my_sub_mtd[i]->size;
314 }
315#ifdef CONFIG_MTD_CONCAT
316 /* fix the asset location */
317# ifdef CONFIG_LAB
318 h3xxx_partitions[1].size = tot_flashsize - 0x40000 - 0x80000 /* extra big boot block */;
319# else
320 h3xxx_partitions[1].size = tot_flashsize - 2 * 0x40000;
321# endif
322 h3xxx_partitions[2].offset = tot_flashsize - 0x40000;
323 /* and concat the devices */
324 mymtd = mtd_concat_create(&my_sub_mtd[0], i,
325 "ipaq");
326 if (!mymtd) {
327 printk("Cannot create iPAQ concat device\n");
328 return -ENXIO;
329 }
330#else
331 mymtd = my_sub_mtd[0];
332
333 /*
334 *In the very near future, command line partition parsing
335 * will use the device name as 'mtd-id' instead of a value
336 * passed to the parse_cmdline_partitions() routine. Since
337 * the bootldr says 'ipaq', make sure it continues to work.
338 */
339 mymtd->name = "ipaq";
340
341 if ((machine_is_h3600())) {
342# ifdef CONFIG_LAB
343 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x80000;
344# else
345 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000;
346# endif
347 nb_parts = 2;
348 } else {
349# ifdef CONFIG_LAB
350 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000 - 0x80000; /* extra big boot block */
351# else
352 h3xxx_partitions[1].size = my_sub_mtd[0]->size - 2*0x40000;
353# endif
354 h3xxx_partitions[2].offset = my_sub_mtd[0]->size - 0x40000;
355 }
356
357 if (my_sub_mtd[1]) {
358# ifdef CONFIG_LAB
359 h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x80000;
360# else
361 h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x40000;
362# endif
363 h3xxx_partitions_bank2[1].offset = my_sub_mtd[1]->size - 0x40000;
364 }
365#endif
366 }
367 else {
368 /*
369 * Now let's probe for the actual flash. Do it here since
370 * specific machine settings might have been set above.
371 */
372 printk(KERN_NOTICE "IPAQ flash: probing %d-bit flash bus, window=%lx\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
373 mymtd = do_map_probe("cfi_probe", &ipaq_map[0]);
374 if (!mymtd)
375 return -ENXIO;
376 mymtd->owner = THIS_MODULE;
377 }
378
379
380 /*
381 * Dynamic partition selection stuff (might override the static ones)
382 */
383
384 i = parse_mtd_partitions(mymtd, part_probes, &parsed_parts, 0);
385
386 if (i > 0) {
387 nb_parts = parsed_nr_parts = i;
388 parts = parsed_parts;
389 part_type = "dynamic";
390 }
391
392 if (!parts) {
393 printk(KERN_NOTICE "IPAQ flash: no partition info available, registering whole flash at once\n");
394 add_mtd_device(mymtd);
395#ifndef CONFIG_MTD_CONCAT
396 if (my_sub_mtd[1])
397 add_mtd_device(my_sub_mtd[1]);
398#endif
399 } else {
400 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
401 add_mtd_partitions(mymtd, parts, nb_parts);
402#ifndef CONFIG_MTD_CONCAT
403 if (my_sub_mtd[1])
404 add_mtd_partitions(my_sub_mtd[1], h3xxx_partitions_bank2, ARRAY_SIZE(h3xxx_partitions_bank2));
405#endif
406 }
407
408 return 0;
409}
410
411static void __exit ipaq_mtd_cleanup(void)
412{
413 int i;
414
415 if (mymtd) {
416 del_mtd_partitions(mymtd);
417#ifndef CONFIG_MTD_CONCAT
418 if (my_sub_mtd[1])
419 del_mtd_partitions(my_sub_mtd[1]);
420#endif
421 map_destroy(mymtd);
422#ifdef CONFIG_MTD_CONCAT
423 for(i=0; i<MAX_IPAQ_CS; i++)
424#else
425 for(i=1; i<MAX_IPAQ_CS; i++)
426#endif
427 {
428 if (my_sub_mtd[i])
429 map_destroy(my_sub_mtd[i]);
430 }
431 kfree(parsed_parts);
432 }
433}
434
435static int __init h1900_special_case(void)
436{
437 /* The iPAQ h1900 is a special case - it has weird ROM. */
438 simple_map_init(&ipaq_map[0]);
439 ipaq_map[0].size = 0x80000;
440 ipaq_map[0].set_vpp = h3xxx_set_vpp;
441 ipaq_map[0].phys = 0x0;
442 ipaq_map[0].virt = ioremap(0x0, 0x04000000);
443 ipaq_map[0].bankwidth = 2;
444
445 printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
446 mymtd = do_map_probe("jedec_probe", &ipaq_map[0]);
447 if (!mymtd)
448 return -ENODEV;
449 add_mtd_device(mymtd);
450 printk(KERN_NOTICE "iPAQ flash: registered h1910 flash\n");
451
452 return 0;
453}
454
455module_init(ipaq_mtd_init);
456module_exit(ipaq_mtd_cleanup);
457
458MODULE_AUTHOR("Jamey Hicks");
459MODULE_DESCRIPTION("IPAQ CFI map driver");
460MODULE_LICENSE("MIT");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7214b876feba..7b0515297411 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -210,7 +210,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
210 * not attempt to do a direct access on us. 210 * not attempt to do a direct access on us.
211 */ 211 */
212 info->map.phys = NO_XIP; 212 info->map.phys = NO_XIP;
213 info->map.size = dev->resource->end - dev->resource->start + 1; 213 info->map.size = resource_size(dev->resource);
214 214
215 /* 215 /*
216 * We only support 16-bit accesses for now. If and when 216 * We only support 16-bit accesses for now. If and when
@@ -224,7 +224,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
224 info->map.copy_from = ixp4xx_copy_from, 224 info->map.copy_from = ixp4xx_copy_from,
225 225
226 info->res = request_mem_region(dev->resource->start, 226 info->res = request_mem_region(dev->resource->start,
227 dev->resource->end - dev->resource->start + 1, 227 resource_size(dev->resource),
228 "IXP4XXFlash"); 228 "IXP4XXFlash");
229 if (!info->res) { 229 if (!info->res) {
230 printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n"); 230 printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
@@ -233,7 +233,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
233 } 233 }
234 234
235 info->map.virt = ioremap(dev->resource->start, 235 info->map.virt = ioremap(dev->resource->start,
236 dev->resource->end - dev->resource->start + 1); 236 resource_size(dev->resource));
237 if (!info->map.virt) { 237 if (!info->map.virt) {
238 printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n"); 238 printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
239 err = -EIO; 239 err = -EIO;
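resource_size() is the generic helper for end - start + 1, which the ixp4xx map driver now uses for both the region reservation and the ioremap length. A small sketch of the usual pairing; the function name and the use of platform resource 0 are illustrative:

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map_resource(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base;

	if (!res)
		return NULL;

	/* resource_size(res) == res->end - res->start + 1 */
	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return NULL;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		release_mem_region(res->start, resource_size(res));

	return base;
}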
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 380648e9051a..d9603f7f9652 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -48,23 +48,22 @@ static int physmap_flash_remove(struct platform_device *dev)
48 48
49 if (info->cmtd) { 49 if (info->cmtd) {
50#ifdef CONFIG_MTD_PARTITIONS 50#ifdef CONFIG_MTD_PARTITIONS
51 if (info->nr_parts || physmap_data->nr_parts) 51 if (info->nr_parts || physmap_data->nr_parts) {
52 del_mtd_partitions(info->cmtd); 52 del_mtd_partitions(info->cmtd);
53 else 53
54 if (info->nr_parts)
55 kfree(info->parts);
56 } else {
54 del_mtd_device(info->cmtd); 57 del_mtd_device(info->cmtd);
58 }
55#else 59#else
56 del_mtd_device(info->cmtd); 60 del_mtd_device(info->cmtd);
57#endif 61#endif
58 }
59#ifdef CONFIG_MTD_PARTITIONS
60 if (info->nr_parts)
61 kfree(info->parts);
62#endif
63
64#ifdef CONFIG_MTD_CONCAT 62#ifdef CONFIG_MTD_CONCAT
65 if (info->cmtd != info->mtd[0]) 63 if (info->cmtd != info->mtd[0])
66 mtd_concat_destroy(info->cmtd); 64 mtd_concat_destroy(info->cmtd);
67#endif 65#endif
66 }
68 67
69 for (i = 0; i < MAX_RESOURCES; i++) { 68 for (i = 0; i < MAX_RESOURCES; i++) {
70 if (info->mtd[i] != NULL) 69 if (info->mtd[i] != NULL)
@@ -130,7 +129,7 @@ static int physmap_flash_probe(struct platform_device *dev)
130 info->map[i].size); 129 info->map[i].size);
131 if (info->map[i].virt == NULL) { 130 if (info->map[i].virt == NULL) {
132 dev_err(&dev->dev, "Failed to ioremap flash region\n"); 131 dev_err(&dev->dev, "Failed to ioremap flash region\n");
133 err = EIO; 132 err = -EIO;
134 goto err_out; 133 goto err_out;
135 } 134 }
136 135
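The physmap remove-path fix above frees the partition array only when it was produced by a parser, because parse_mtd_partitions() allocates that array and leaves ownership with the caller. A sketch of the attach/detach pairing under assumed names (example_attach/example_detach are illustrative):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Command-line/RedBoot parsers kmalloc the partition array, so the caller
 * of parse_mtd_partitions() must kfree it after del_mtd_partitions(). */
static const char *probe_types[] = { "cmdlinepart", "RedBoot", NULL };

static int example_attach(struct mtd_info *mtd, struct mtd_partition **parts,
			  int *nr)
{
	*nr = parse_mtd_partitions(mtd, probe_types, parts, 0);
	if (*nr > 0)
		return add_mtd_partitions(mtd, *parts, *nr);

	/* add_mtd_device() returns 1 on failure, not a negative errno */
	return add_mtd_device(mtd) == 1 ? -ENODEV : 0;
}

static void example_detach(struct mtd_info *mtd, struct mtd_partition *parts,
			   int nr)
{
	if (nr > 0) {
		del_mtd_partitions(mtd);
		kfree(parts);	/* array allocated by the parser */
	} else {
		del_mtd_device(mtd);
	}
}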
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index d7a47574d21e..f3af87e08ecd 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -248,7 +248,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
248 plat->exit(); 248 plat->exit();
249} 249}
250 250
251static struct sa_info *__init 251static struct sa_info *__devinit
252sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) 252sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
253{ 253{
254 struct sa_info *info; 254 struct sa_info *info;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 1f73297e7776..82afad0ddd72 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -612,16 +612,15 @@ static int __devinit vmu_connect(struct maple_device *mdev)
612 612
613 test_flash_data = be32_to_cpu(mdev->devinfo.function); 613 test_flash_data = be32_to_cpu(mdev->devinfo.function);
614 /* Need to count how many bits are set - to find out which 614 /* Need to count how many bits are set - to find out which
615 * function_data element has details of the memory card: 615 * function_data element has details of the memory card
616 * using Brian Kernighan's/Peter Wegner's method */ 616 */
617 for (c = 0; test_flash_data; c++) 617 c = hweight_long(test_flash_data);
618 test_flash_data &= test_flash_data - 1;
619 618
620 basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]); 619 basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
621 620
622 card = kmalloc(sizeof(struct memcard), GFP_KERNEL); 621 card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
623 if (!card) { 622 if (!card) {
624 error = ENOMEM; 623 error = -ENOMEM;
625 goto fail_nomem; 624 goto fail_nomem;
626 } 625 }
627 626
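The vmu-flash hunk above (and the alauda change further down) replaces an open-coded population count with the hweight helpers from <linux/bitops.h>; here it counts the set bits in the Maple function mask. A tiny sketch of the equivalence, with arbitrary test values:

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Open-coded Kernighan count, shown only as what hweight replaces. */
static unsigned int count_bits_kernighan(unsigned long x)
{
	unsigned int c;

	for (c = 0; x; c++)
		x &= x - 1;	/* clear the lowest set bit each pass */
	return c;
}

static void example_popcount(void)
{
	unsigned long funcs = 0x0000000e;	/* three function bits set */

	/* hweight_long()/hweight8() compute the same Hamming weight. */
	WARN_ON(hweight_long(funcs) != count_bits_kernighan(funcs));
	WARN_ON(hweight8(0xb5) != 5);
}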
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 64e2b379a350..c82e09bbc5fd 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -84,9 +84,6 @@ static int mtd_blktrans_thread(void *arg)
84 struct request_queue *rq = tr->blkcore_priv->rq; 84 struct request_queue *rq = tr->blkcore_priv->rq;
85 struct request *req = NULL; 85 struct request *req = NULL;
86 86
87 /* we might get involved when memory gets low, so use PF_MEMALLOC */
88 current->flags |= PF_MEMALLOC;
89
90 spin_lock_irq(rq->queue_lock); 87 spin_lock_irq(rq->queue_lock);
91 88
92 while (!kthread_should_stop()) { 89 while (!kthread_should_stop()) {
@@ -381,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
381 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, 378 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
382 "%sd", tr->name); 379 "%sd", tr->name);
383 if (IS_ERR(tr->blkcore_priv->thread)) { 380 if (IS_ERR(tr->blkcore_priv->thread)) {
384 int ret = PTR_ERR(tr->blkcore_priv->thread); 381 ret = PTR_ERR(tr->blkcore_priv->thread);
385 blk_cleanup_queue(tr->blkcore_priv->rq); 382 blk_cleanup_queue(tr->blkcore_priv->rq);
386 unregister_blkdev(tr->major, tr->name); 383 unregister_blkdev(tr->major, tr->name);
387 kfree(tr->blkcore_priv); 384 kfree(tr->blkcore_priv);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 467a4f177bfb..c356c0a30c3e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -447,7 +447,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
447 for (i=0; i< MAX_MTD_DEVICES; i++) 447 for (i=0; i< MAX_MTD_DEVICES; i++)
448 if (mtd_table[i] == mtd) 448 if (mtd_table[i] == mtd)
449 ret = mtd_table[i]; 449 ret = mtd_table[i];
450 } else if (num < MAX_MTD_DEVICES) { 450 } else if (num >= 0 && num < MAX_MTD_DEVICES) {
451 ret = mtd_table[num]; 451 ret = mtd_table[num];
452 if (mtd && mtd != ret) 452 if (mtd && mtd != ret)
453 ret = NULL; 453 ret = NULL;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1060337c06df..a714ec482761 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -29,14 +29,34 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/spinlock.h>
33#include <linux/interrupt.h> 32#include <linux/interrupt.h>
34#include <linux/mtd/mtd.h> 33#include <linux/mtd/mtd.h>
34#include <linux/kmsg_dump.h>
35
36/* Maximum MTD partition size */
37#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
35 38
36#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 39#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
37#define OOPS_PAGE_SIZE 4096 40#define MTDOOPS_HEADER_SIZE 8
41
42static unsigned long record_size = 4096;
43module_param(record_size, ulong, 0400);
44MODULE_PARM_DESC(record_size,
45 "record size for MTD OOPS pages in bytes (default 4096)");
46
47static char mtddev[80];
48module_param_string(mtddev, mtddev, 80, 0400);
49MODULE_PARM_DESC(mtddev,
50 "name or index number of the MTD device to use");
51
52static int dump_oops = 1;
53module_param(dump_oops, int, 0600);
54MODULE_PARM_DESC(dump_oops,
55 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
38 56
39static struct mtdoops_context { 57static struct mtdoops_context {
58 struct kmsg_dumper dump;
59
40 int mtd_index; 60 int mtd_index;
41 struct work_struct work_erase; 61 struct work_struct work_erase;
42 struct work_struct work_write; 62 struct work_struct work_write;
@@ -44,28 +64,43 @@ static struct mtdoops_context {
44 int oops_pages; 64 int oops_pages;
45 int nextpage; 65 int nextpage;
46 int nextcount; 66 int nextcount;
47 char *name; 67 unsigned long *oops_page_used;
48 68
49 void *oops_buf; 69 void *oops_buf;
50
51 /* writecount and disabling ready are spin lock protected */
52 spinlock_t writecount_lock;
53 int ready;
54 int writecount;
55} oops_cxt; 70} oops_cxt;
56 71
72static void mark_page_used(struct mtdoops_context *cxt, int page)
73{
74 set_bit(page, cxt->oops_page_used);
75}
76
77static void mark_page_unused(struct mtdoops_context *cxt, int page)
78{
79 clear_bit(page, cxt->oops_page_used);
80}
81
82static int page_is_used(struct mtdoops_context *cxt, int page)
83{
84 return test_bit(page, cxt->oops_page_used);
85}
86
57static void mtdoops_erase_callback(struct erase_info *done) 87static void mtdoops_erase_callback(struct erase_info *done)
58{ 88{
59 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; 89 wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
60 wake_up(wait_q); 90 wake_up(wait_q);
61} 91}
62 92
63static int mtdoops_erase_block(struct mtd_info *mtd, int offset) 93static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
64{ 94{
95 struct mtd_info *mtd = cxt->mtd;
96 u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
97 u32 start_page = start_page_offset / record_size;
98 u32 erase_pages = mtd->erasesize / record_size;
65 struct erase_info erase; 99 struct erase_info erase;
66 DECLARE_WAITQUEUE(wait, current); 100 DECLARE_WAITQUEUE(wait, current);
67 wait_queue_head_t wait_q; 101 wait_queue_head_t wait_q;
68 int ret; 102 int ret;
103 int page;
69 104
70 init_waitqueue_head(&wait_q); 105 init_waitqueue_head(&wait_q);
71 erase.mtd = mtd; 106 erase.mtd = mtd;
@@ -81,25 +116,24 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
81 if (ret) { 116 if (ret) {
82 set_current_state(TASK_RUNNING); 117 set_current_state(TASK_RUNNING);
83 remove_wait_queue(&wait_q, &wait); 118 remove_wait_queue(&wait_q, &wait);
84 printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " 119 printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
85 "on \"%s\" failed\n", 120 (unsigned long long)erase.addr,
86 (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); 121 (unsigned long long)erase.len, mtddev);
87 return ret; 122 return ret;
88 } 123 }
89 124
90 schedule(); /* Wait for erase to finish. */ 125 schedule(); /* Wait for erase to finish. */
91 remove_wait_queue(&wait_q, &wait); 126 remove_wait_queue(&wait_q, &wait);
92 127
128 /* Mark pages as unused */
129 for (page = start_page; page < start_page + erase_pages; page++)
130 mark_page_unused(cxt, page);
131
93 return 0; 132 return 0;
94} 133}
95 134
96static void mtdoops_inc_counter(struct mtdoops_context *cxt) 135static void mtdoops_inc_counter(struct mtdoops_context *cxt)
97{ 136{
98 struct mtd_info *mtd = cxt->mtd;
99 size_t retlen;
100 u32 count;
101 int ret;
102
103 cxt->nextpage++; 137 cxt->nextpage++;
104 if (cxt->nextpage >= cxt->oops_pages) 138 if (cxt->nextpage >= cxt->oops_pages)
105 cxt->nextpage = 0; 139 cxt->nextpage = 0;
@@ -107,25 +141,13 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
107 if (cxt->nextcount == 0xffffffff) 141 if (cxt->nextcount == 0xffffffff)
108 cxt->nextcount = 0; 142 cxt->nextcount = 0;
109 143
110 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 144 if (page_is_used(cxt, cxt->nextpage)) {
111 &retlen, (u_char *) &count);
112 if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
113 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
114 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
115 retlen, ret);
116 schedule_work(&cxt->work_erase); 145 schedule_work(&cxt->work_erase);
117 return; 146 return;
118 } 147 }
119 148
120 /* See if we need to erase the next block */ 149 printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
121 if (count != 0xffffffff) { 150 cxt->nextpage, cxt->nextcount);
122 schedule_work(&cxt->work_erase);
123 return;
124 }
125
126 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
127 cxt->nextpage, cxt->nextcount);
128 cxt->ready = 1;
129} 151}
130 152
131/* Scheduled work - when we can't proceed without erasing a block */ 153/* Scheduled work - when we can't proceed without erasing a block */
@@ -140,47 +162,47 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
140 if (!mtd) 162 if (!mtd)
141 return; 163 return;
142 164
143 mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize; 165 mod = (cxt->nextpage * record_size) % mtd->erasesize;
144 if (mod != 0) { 166 if (mod != 0) {
145 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE); 167 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
146 if (cxt->nextpage >= cxt->oops_pages) 168 if (cxt->nextpage >= cxt->oops_pages)
147 cxt->nextpage = 0; 169 cxt->nextpage = 0;
148 } 170 }
149 171
150 while (mtd->block_isbad) { 172 while (mtd->block_isbad) {
151 ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 173 ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
152 if (!ret) 174 if (!ret)
153 break; 175 break;
154 if (ret < 0) { 176 if (ret < 0) {
155 printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); 177 printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
156 return; 178 return;
157 } 179 }
158badblock: 180badblock:
159 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 181 printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
160 cxt->nextpage * OOPS_PAGE_SIZE); 182 cxt->nextpage * record_size);
161 i++; 183 i++;
162 cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); 184 cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
163 if (cxt->nextpage >= cxt->oops_pages) 185 if (cxt->nextpage >= cxt->oops_pages)
164 cxt->nextpage = 0; 186 cxt->nextpage = 0;
165 if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) { 187 if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
166 printk(KERN_ERR "mtdoops: All blocks bad!\n"); 188 printk(KERN_ERR "mtdoops: all blocks bad!\n");
167 return; 189 return;
168 } 190 }
169 } 191 }
170 192
171 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 193 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
172 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 194 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
173 195
174 if (ret >= 0) { 196 if (ret >= 0) {
175 printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 197 printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
176 cxt->ready = 1; 198 cxt->nextpage, cxt->nextcount);
177 return; 199 return;
178 } 200 }
179 201
180 if (mtd->block_markbad && (ret == -EIO)) { 202 if (mtd->block_markbad && ret == -EIO) {
181 ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 203 ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
182 if (ret < 0) { 204 if (ret < 0) {
183 printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); 205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
184 return; 206 return;
185 } 207 }
186 } 208 }
@@ -191,36 +213,37 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
191{ 213{
192 struct mtd_info *mtd = cxt->mtd; 214 struct mtd_info *mtd = cxt->mtd;
193 size_t retlen; 215 size_t retlen;
216 u32 *hdr;
194 int ret; 217 int ret;
195 218
196 if (cxt->writecount < OOPS_PAGE_SIZE) 219 /* Add mtdoops header to the buffer */
197 memset(cxt->oops_buf + cxt->writecount, 0xff, 220 hdr = cxt->oops_buf;
198 OOPS_PAGE_SIZE - cxt->writecount); 221 hdr[0] = cxt->nextcount;
222 hdr[1] = MTDOOPS_KERNMSG_MAGIC;
199 223
200 if (panic) 224 if (panic)
201 ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 225 ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
202 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 226 record_size, &retlen, cxt->oops_buf);
203 else 227 else
204 ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 228 ret = mtd->write(mtd, cxt->nextpage * record_size,
205 OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 229 record_size, &retlen, cxt->oops_buf);
206
207 cxt->writecount = 0;
208 230
209 if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 231 if (retlen != record_size || ret < 0)
210 printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", 232 printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
211 cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); 233 cxt->nextpage * record_size, retlen, record_size, ret);
234 mark_page_used(cxt, cxt->nextpage);
235 memset(cxt->oops_buf, 0xff, record_size);
212 236
213 mtdoops_inc_counter(cxt); 237 mtdoops_inc_counter(cxt);
214} 238}
215 239
216
217static void mtdoops_workfunc_write(struct work_struct *work) 240static void mtdoops_workfunc_write(struct work_struct *work)
218{ 241{
219 struct mtdoops_context *cxt = 242 struct mtdoops_context *cxt =
220 container_of(work, struct mtdoops_context, work_write); 243 container_of(work, struct mtdoops_context, work_write);
221 244
222 mtdoops_write(cxt, 0); 245 mtdoops_write(cxt, 0);
223} 246}
224 247
225static void find_next_position(struct mtdoops_context *cxt) 248static void find_next_position(struct mtdoops_context *cxt)
226{ 249{
@@ -230,28 +253,33 @@ static void find_next_position(struct mtdoops_context *cxt)
230 size_t retlen; 253 size_t retlen;
231 254
232 for (page = 0; page < cxt->oops_pages; page++) { 255 for (page = 0; page < cxt->oops_pages; page++) {
233 ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); 256 /* Assume the page is used */
234 if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) { 257 mark_page_used(cxt, page);
235 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)" 258 ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
236 ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); 259 &retlen, (u_char *) &count[0]);
260 if (retlen != MTDOOPS_HEADER_SIZE ||
261 (ret < 0 && ret != -EUCLEAN)) {
262 printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
263 page * record_size, retlen,
264 MTDOOPS_HEADER_SIZE, ret);
237 continue; 265 continue;
238 } 266 }
239 267
240 if (count[1] != MTDOOPS_KERNMSG_MAGIC) 268 if (count[0] == 0xffffffff && count[1] == 0xffffffff)
241 continue; 269 mark_page_unused(cxt, page);
242 if (count[0] == 0xffffffff) 270 if (count[0] == 0xffffffff)
243 continue; 271 continue;
244 if (maxcount == 0xffffffff) { 272 if (maxcount == 0xffffffff) {
245 maxcount = count[0]; 273 maxcount = count[0];
246 maxpos = page; 274 maxpos = page;
247 } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) { 275 } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
248 maxcount = count[0]; 276 maxcount = count[0];
249 maxpos = page; 277 maxpos = page;
250 } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) { 278 } else if (count[0] > maxcount && count[0] < 0xc0000000) {
251 maxcount = count[0]; 279 maxcount = count[0];
252 maxpos = page; 280 maxpos = page;
253 } else if ((count[0] > maxcount) && (count[0] > 0xc0000000) 281 } else if (count[0] > maxcount && count[0] > 0xc0000000
254 && (maxcount > 0x80000000)) { 282 && maxcount > 0x80000000) {
255 maxcount = count[0]; 283 maxcount = count[0];
256 maxpos = page; 284 maxpos = page;
257 } 285 }
@@ -269,187 +297,170 @@ static void find_next_position(struct mtdoops_context *cxt)
269 mtdoops_inc_counter(cxt); 297 mtdoops_inc_counter(cxt);
270} 298}
271 299
272 300static void mtdoops_do_dump(struct kmsg_dumper *dumper,
273static void mtdoops_notify_add(struct mtd_info *mtd) 301 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
302 const char *s2, unsigned long l2)
274{ 303{
275 struct mtdoops_context *cxt = &oops_cxt; 304 struct mtdoops_context *cxt = container_of(dumper,
305 struct mtdoops_context, dump);
306 unsigned long s1_start, s2_start;
307 unsigned long l1_cpy, l2_cpy;
308 char *dst;
309
310 /* Only dump oopses if dump_oops is set */
311 if (reason == KMSG_DUMP_OOPS && !dump_oops)
312 return;
276 313
277 if (cxt->name && !strcmp(mtd->name, cxt->name)) 314 dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
278 cxt->mtd_index = mtd->index; 315 l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
316 l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
279 317
280 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 318 s2_start = l2 - l2_cpy;
281 return; 319 s1_start = l1 - l1_cpy;
282 320
283 if (mtd->size < (mtd->erasesize * 2)) { 321 memcpy(dst, s1 + s1_start, l1_cpy);
284 printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n", 322 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
285 mtd->index);
286 return;
287 }
288 323
289 if (mtd->erasesize < OOPS_PAGE_SIZE) { 324 /* Panics must be written immediately */
290 printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", 325 if (reason == KMSG_DUMP_PANIC) {
291 mtd->index); 326 if (!cxt->mtd->panic_write)
327 printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
328 else
329 mtdoops_write(cxt, 1);
292 return; 330 return;
293 } 331 }
294 332
295 cxt->mtd = mtd; 333 /* For other cases, schedule work to write it "nicely" */
296 if (mtd->size > INT_MAX) 334 schedule_work(&cxt->work_write);
297 cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
298 else
299 cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
300
301 find_next_position(cxt);
302
303 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
304} 335}
305 336
306static void mtdoops_notify_remove(struct mtd_info *mtd) 337static void mtdoops_notify_add(struct mtd_info *mtd)
307{ 338{
308 struct mtdoops_context *cxt = &oops_cxt; 339 struct mtdoops_context *cxt = &oops_cxt;
340 u64 mtdoops_pages = div_u64(mtd->size, record_size);
341 int err;
309 342
310 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 343 if (!strcmp(mtd->name, mtddev))
311 return; 344 cxt->mtd_index = mtd->index;
312
313 cxt->mtd = NULL;
314 flush_scheduled_work();
315}
316
317static void mtdoops_console_sync(void)
318{
319 struct mtdoops_context *cxt = &oops_cxt;
320 struct mtd_info *mtd = cxt->mtd;
321 unsigned long flags;
322 345
323 if (!cxt->ready || !mtd || cxt->writecount == 0) 346 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
324 return; 347 return;
325 348
326 /* 349 if (mtd->size < mtd->erasesize * 2) {
327 * Once ready is 0 and we've held the lock no further writes to the 350 printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
328 * buffer will happen 351 mtd->index);
329 */
330 spin_lock_irqsave(&cxt->writecount_lock, flags);
331 if (!cxt->ready) {
332 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
333 return; 352 return;
334 } 353 }
335 cxt->ready = 0; 354 if (mtd->erasesize < record_size) {
336 spin_unlock_irqrestore(&cxt->writecount_lock, flags); 355 printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
337 356 mtd->index);
338 if (mtd->panic_write && in_interrupt())
339 /* Interrupt context, we're going to panic so try and log */
340 mtdoops_write(cxt, 1);
341 else
342 schedule_work(&cxt->work_write);
343}
344
345static void
346mtdoops_console_write(struct console *co, const char *s, unsigned int count)
347{
348 struct mtdoops_context *cxt = co->data;
349 struct mtd_info *mtd = cxt->mtd;
350 unsigned long flags;
351
352 if (!oops_in_progress) {
353 mtdoops_console_sync();
354 return; 357 return;
355 } 358 }
356 359 if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
357 if (!cxt->ready || !mtd) 360 printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
361 mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
358 return; 362 return;
363 }
359 364
360 /* Locking on writecount ensures sequential writes to the buffer */ 365 /* oops_page_used is a bit field */
361 spin_lock_irqsave(&cxt->writecount_lock, flags); 366 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
362 367 BITS_PER_LONG));
363 /* Check ready status didn't change whilst waiting for the lock */ 368 if (!cxt->oops_page_used) {
364 if (!cxt->ready) { 369 printk(KERN_ERR "mtdoops: could not allocate page array\n");
365 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
366 return; 370 return;
367 } 371 }
368 372
369 if (cxt->writecount == 0) { 373 cxt->dump.dump = mtdoops_do_dump;
370 u32 *stamp = cxt->oops_buf; 374 err = kmsg_dump_register(&cxt->dump);
371 *stamp++ = cxt->nextcount; 375 if (err) {
372 *stamp = MTDOOPS_KERNMSG_MAGIC; 376 printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
373 cxt->writecount = 8; 377 vfree(cxt->oops_page_used);
378 cxt->oops_page_used = NULL;
379 return;
374 } 380 }
375 381
376 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 382 cxt->mtd = mtd;
377 count = OOPS_PAGE_SIZE - cxt->writecount; 383 cxt->oops_pages = (int)mtd->size / record_size;
378 384 find_next_position(cxt);
379 memcpy(cxt->oops_buf + cxt->writecount, s, count); 385 printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
380 cxt->writecount += count;
381
382 spin_unlock_irqrestore(&cxt->writecount_lock, flags);
383
384 if (cxt->writecount == OOPS_PAGE_SIZE)
385 mtdoops_console_sync();
386} 386}
387 387
388static int __init mtdoops_console_setup(struct console *co, char *options) 388static void mtdoops_notify_remove(struct mtd_info *mtd)
389{ 389{
390 struct mtdoops_context *cxt = co->data; 390 struct mtdoops_context *cxt = &oops_cxt;
391 391
392 if (cxt->mtd_index != -1 || cxt->name) 392 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
393 return -EBUSY; 393 return;
394 if (options) {
395 cxt->name = kstrdup(options, GFP_KERNEL);
396 return 0;
397 }
398 if (co->index == -1)
399 return -EINVAL;
400 394
401 cxt->mtd_index = co->index; 395 if (kmsg_dump_unregister(&cxt->dump) < 0)
402 return 0; 396 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
397
398 cxt->mtd = NULL;
399 flush_scheduled_work();
403} 400}
404 401
402
405static struct mtd_notifier mtdoops_notifier = { 403static struct mtd_notifier mtdoops_notifier = {
406 .add = mtdoops_notify_add, 404 .add = mtdoops_notify_add,
407 .remove = mtdoops_notify_remove, 405 .remove = mtdoops_notify_remove,
408}; 406};
409 407
410static struct console mtdoops_console = { 408static int __init mtdoops_init(void)
411 .name = "ttyMTD",
412 .write = mtdoops_console_write,
413 .setup = mtdoops_console_setup,
414 .unblank = mtdoops_console_sync,
415 .index = -1,
416 .data = &oops_cxt,
417};
418
419static int __init mtdoops_console_init(void)
420{ 409{
421 struct mtdoops_context *cxt = &oops_cxt; 410 struct mtdoops_context *cxt = &oops_cxt;
411 int mtd_index;
412 char *endp;
422 413
414 if (strlen(mtddev) == 0) {
415 printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
416 return -EINVAL;
417 }
418 if ((record_size & 4095) != 0) {
419 printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
420 return -EINVAL;
421 }
422 if (record_size < 4096) {
423 printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
424 return -EINVAL;
425 }
426
427 /* Setup the MTD device to use */
423 cxt->mtd_index = -1; 428 cxt->mtd_index = -1;
424 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 429 mtd_index = simple_strtoul(mtddev, &endp, 0);
425 spin_lock_init(&cxt->writecount_lock); 430 if (*endp == '\0')
431 cxt->mtd_index = mtd_index;
432 if (cxt->mtd_index > MAX_MTD_DEVICES) {
433 printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
434 mtd_index);
435 return -EINVAL;
436 }
426 437
438 cxt->oops_buf = vmalloc(record_size);
427 if (!cxt->oops_buf) { 439 if (!cxt->oops_buf) {
428 printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); 440 printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
429 return -ENOMEM; 441 return -ENOMEM;
430 } 442 }
443 memset(cxt->oops_buf, 0xff, record_size);
431 444
432 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); 445 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
433 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); 446 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
434 447
435 register_console(&mtdoops_console);
436 register_mtd_user(&mtdoops_notifier); 448 register_mtd_user(&mtdoops_notifier);
437 return 0; 449 return 0;
438} 450}
439 451
440static void __exit mtdoops_console_exit(void) 452static void __exit mtdoops_exit(void)
441{ 453{
442 struct mtdoops_context *cxt = &oops_cxt; 454 struct mtdoops_context *cxt = &oops_cxt;
443 455
444 unregister_mtd_user(&mtdoops_notifier); 456 unregister_mtd_user(&mtdoops_notifier);
445 unregister_console(&mtdoops_console);
446 kfree(cxt->name);
447 vfree(cxt->oops_buf); 457 vfree(cxt->oops_buf);
458 vfree(cxt->oops_page_used);
448} 459}
449 460
450 461
451subsys_initcall(mtdoops_console_init); 462module_init(mtdoops_init);
452module_exit(mtdoops_console_exit); 463module_exit(mtdoops_exit);
453 464
454MODULE_LICENSE("GPL"); 465MODULE_LICENSE("GPL");
455MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); 466MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
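The mtdoops rewrite above drops the ttyMTD console in favour of the new kmsg_dump infrastructure: a kmsg_dumper's dump() callback receives the tail of the kernel log in two segments (s1/l1 and s2/l2, because the log buffer is a ring) plus the dump reason, and is registered with kmsg_dump_register(). A minimal sketch of another dumper using the same callback signature; the example_* names and 4 KiB buffer are illustrative:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
#include <linux/string.h>

#define EXAMPLE_BUF_SIZE 4096

static char example_buf[EXAMPLE_BUF_SIZE];

/* Called on oops/panic with the newest log data split across s1 and s2. */
static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason,
			    const char *s1, unsigned long l1,
			    const char *s2, unsigned long l2)
{
	unsigned long l2_cpy = min(l2, (unsigned long)EXAMPLE_BUF_SIZE);
	unsigned long l1_cpy = min(l1, (unsigned long)EXAMPLE_BUF_SIZE - l2_cpy);

	/* Keep the newest bytes: take the tails of s1 and s2. */
	memcpy(example_buf, s1 + (l1 - l1_cpy), l1_cpy);
	memcpy(example_buf + l1_cpy, s2 + (l2 - l2_cpy), l2_cpy);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_do_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
module_init(example_init);

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

mtdoops does the same l1_cpy/l2_cpy arithmetic but first reserves MTDOOPS_HEADER_SIZE bytes for its counter and magic, writes panics synchronously through panic_write, and is pointed at a flash partition via the mtddev= and record_size= module parameters added above.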
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 0e35e1aefd22..7678538344f4 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -201,6 +201,22 @@ config MTD_NAND_S3C2410_CLKSTOP
201 when the is NAND chip selected or released, but will save 201 when the is NAND chip selected or released, but will save
202 approximately 5mA of power when there is nothing happening. 202 approximately 5mA of power when there is nothing happening.
203 203
204config MTD_NAND_BCM_UMI
205 tristate "NAND Flash support for BCM Reference Boards"
206 depends on ARCH_BCMRING && MTD_NAND
207 help
208 This enables the NAND flash controller on the BCM UMI block.
209
210 No board specific support is done by this driver; each board
211 must advertise a platform_device for the driver to attach.
212
213config MTD_NAND_BCM_UMI_HWCS
214 bool "BCM UMI NAND Hardware CS"
215 depends on MTD_NAND_BCM_UMI
216 help
217 Enable the use of the BCM UMI block's internal CS using NAND.
218 This should only be used if you know the external NAND CS can toggle.
219
204config MTD_NAND_DISKONCHIP 220config MTD_NAND_DISKONCHIP
205 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" 221 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
206 depends on EXPERIMENTAL 222 depends on EXPERIMENTAL
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6950d3dabf10..460a1f39a8d1 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
42obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 42obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
43obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o 43obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
44obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 44obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
45obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
45 46
46nand-objs := nand_base.o nand_bbt.o 47nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6d9649159a18..2d6773281fd9 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -372,15 +372,6 @@ static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
372 return __alauda_read_page(mtd, from, ignore_buf, oob); 372 return __alauda_read_page(mtd, from, ignore_buf, oob);
373} 373}
374 374
375static int popcount8(u8 c)
376{
377 int ret = 0;
378
379 for ( ; c; c>>=1)
380 ret += c & 1;
381 return ret;
382}
383
384static int alauda_isbad(struct mtd_info *mtd, loff_t ofs) 375static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
385{ 376{
386 u8 oob[16]; 377 u8 oob[16];
@@ -391,7 +382,7 @@ static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
391 return err; 382 return err;
392 383
393 /* A block is marked bad if two or more bits are zero */ 384 /* A block is marked bad if two or more bits are zero */
394 return popcount8(oob[5]) >= 7 ? 0 : 1; 385 return hweight8(oob[5]) >= 7 ? 0 : 1;
395} 386}
396 387
397static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len, 388static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index f8e9975c86e5..524e6c9e0672 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -192,7 +192,6 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
192{ 192{
193 struct nand_chip *nand_chip = mtd->priv; 193 struct nand_chip *nand_chip = mtd->priv;
194 struct atmel_nand_host *host = nand_chip->priv; 194 struct atmel_nand_host *host = nand_chip->priv;
195 uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
196 unsigned int ecc_value; 195 unsigned int ecc_value;
197 196
198 /* get the first 2 ECC bytes */ 197 /* get the first 2 ECC bytes */
@@ -464,7 +463,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
464 if (host->board->det_pin) { 463 if (host->board->det_pin) {
465 if (gpio_get_value(host->board->det_pin)) { 464 if (gpio_get_value(host->board->det_pin)) {
466 printk(KERN_INFO "No SmartMedia card inserted.\n"); 465 printk(KERN_INFO "No SmartMedia card inserted.\n");
467 res = ENXIO; 466 res = -ENXIO;
468 goto err_no_card; 467 goto err_no_card;
469 } 468 }
470 } 469 }
@@ -535,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
535 534
536 if ((!partitions) || (num_partitions == 0)) { 535 if ((!partitions) || (num_partitions == 0)) {
537 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); 536 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
538 res = ENXIO; 537 res = -ENXIO;
539 goto err_no_partitions; 538 goto err_no_partitions;
540 } 539 }
541 540
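The atmel_nand hunks above (like the physmap and vmu-flash ones earlier) flip bare error constants to their negative forms, since only a negative errno is treated as failure by the callers. A one-function sketch of the convention; example_probe and the platform_data check are illustrative:

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Probe handlers must return a *negative* errno on failure; a bare
 * ENXIO/EIO is positive and would be treated as a successful probe. */
static int example_probe(struct platform_device *pdev)
{
	if (!pdev->dev.platform_data)
		return -ENXIO;

	return 0;
}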
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
new file mode 100644
index 000000000000..a930666d0687
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -0,0 +1,213 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include "nand_bcm_umi.h"
17
18/* ---- External Variable Declarations ----------------------------------- */
19/* ---- External Function Prototypes ------------------------------------- */
20/* ---- Public Variables ------------------------------------------------- */
21/* ---- Private Constants and Types -------------------------------------- */
22
23/* ---- Private Function Prototypes -------------------------------------- */
24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
25 struct nand_chip *chip, uint8_t *buf, int page);
26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
27 struct nand_chip *chip, const uint8_t *buf);
28
29/* ---- Private Variables ------------------------------------------------ */
30
31/*
32** nand_hw_eccoob
33** New oob placement block for use with hardware ecc generation.
34*/
35static struct nand_ecclayout nand_hw_eccoob_512 = {
36 /* Reserve 5 for BI indicator */
37 .oobfree = {
38#if (NAND_ECC_NUM_BYTES > 3)
39 {.offset = 0, .length = 2}
40#else
41 {.offset = 0, .length = 5},
42 {.offset = 6, .length = 7}
43#endif
44 }
45};
46
47/*
48** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
49** except the BI is at byte 0.
50*/
51static struct nand_ecclayout nand_hw_eccoob_2048 = {
52 /* Reserve 0 as BI indicator */
53 .oobfree = {
54#if (NAND_ECC_NUM_BYTES > 10)
55 {.offset = 1, .length = 2},
56#elif (NAND_ECC_NUM_BYTES > 7)
57 {.offset = 1, .length = 5},
58 {.offset = 16, .length = 6},
59 {.offset = 32, .length = 6},
60 {.offset = 48, .length = 6}
61#else
62 {.offset = 1, .length = 8},
63 {.offset = 16, .length = 9},
64 {.offset = 32, .length = 9},
65 {.offset = 48, .length = 9}
66#endif
67 }
68};
69
70/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
71 * except the BI is at byte 0. */
72static struct nand_ecclayout nand_hw_eccoob_4096 = {
73 /* Reserve 0 as BI indicator */
74 .oobfree = {
75#if (NAND_ECC_NUM_BYTES > 10)
76 {.offset = 1, .length = 2},
77 {.offset = 16, .length = 3},
78 {.offset = 32, .length = 3},
79 {.offset = 48, .length = 3},
80 {.offset = 64, .length = 3},
81 {.offset = 80, .length = 3},
82 {.offset = 96, .length = 3},
83 {.offset = 112, .length = 3}
84#else
85 {.offset = 1, .length = 5},
86 {.offset = 16, .length = 6},
87 {.offset = 32, .length = 6},
88 {.offset = 48, .length = 6},
89 {.offset = 64, .length = 6},
90 {.offset = 80, .length = 6},
91 {.offset = 96, .length = 6},
92 {.offset = 112, .length = 6}
93#endif
94 }
95};
96
97/* ---- Private Functions ------------------------------------------------ */
98/* ==== Public Functions ================================================= */
99
100/****************************************************************************
101*
102* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
103* @mtd: mtd info structure
104* @chip: nand chip info structure
105* @buf: buffer to store read data
106*
107***************************************************************************/
108static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
109 struct nand_chip *chip, uint8_t * buf,
110 int page)
111{
112 int sectorIdx = 0;
113 int eccsize = chip->ecc.size;
114 int eccsteps = chip->ecc.steps;
115 uint8_t *datap = buf;
116 uint8_t eccCalc[NAND_ECC_NUM_BYTES];
117 int sectorOobSize = mtd->oobsize / eccsteps;
118 int stat;
119
120 for (sectorIdx = 0; sectorIdx < eccsteps;
121 sectorIdx++, datap += eccsize) {
122 if (sectorIdx > 0) {
123 /* Seek to page location within sector */
124 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
125 -1);
126 }
127
128 /* Enable hardware ECC before reading the buf */
129 nand_bcm_umi_bch_enable_read_hwecc();
130
131 /* Read in data */
132 bcm_umi_nand_read_buf(mtd, datap, eccsize);
133
134 /* Pause hardware ECC after reading the buf */
135 nand_bcm_umi_bch_pause_read_ecc_calc();
136
137 /* Read the OOB ECC */
138 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
139 mtd->writesize + sectorIdx * sectorOobSize, -1);
140 nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
141 NAND_ECC_NUM_BYTES,
142 chip->oob_poi +
143 sectorIdx * sectorOobSize);
144
145 /* Correct any ECC detected errors */
146 stat =
147 nand_bcm_umi_bch_correct_page(datap, eccCalc,
148 NAND_ECC_NUM_BYTES);
149
150 /* Update Stats */
151 if (stat < 0) {
152#if defined(NAND_BCM_UMI_DEBUG)
153 printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
154 __func__, sectorIdx);
155 printk(KERN_WARNING
156 "%s data %02x %02x %02x %02x "
157 "%02x %02x %02x %02x\n",
158 __func__, datap[0], datap[1], datap[2], datap[3],
159 datap[4], datap[5], datap[6], datap[7]);
160 printk(KERN_WARNING
161 "%s ecc %02x %02x %02x %02x "
162 "%02x %02x %02x %02x %02x %02x "
163 "%02x %02x %02x\n",
164 __func__, eccCalc[0], eccCalc[1], eccCalc[2],
165 eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
166 eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
167 eccCalc[11], eccCalc[12]);
168 BUG();
169#endif
170 mtd->ecc_stats.failed++;
171 } else {
172#if defined(NAND_BCM_UMI_DEBUG)
173 if (stat > 0) {
174 printk(KERN_INFO
175 "%s %d correctable_errors detected\n",
176 __func__, stat);
177 }
178#endif
179 mtd->ecc_stats.corrected += stat;
180 }
181 }
182 return 0;
183}
184
185/****************************************************************************
186*
187* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
188* @mtd: mtd info structure
189* @chip: nand chip info structure
190* @buf: data buffer
191*
192***************************************************************************/
193static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
194 struct nand_chip *chip, const uint8_t *buf)
195{
196 int sectorIdx = 0;
197 int eccsize = chip->ecc.size;
198 int eccsteps = chip->ecc.steps;
199 const uint8_t *datap = buf;
200 uint8_t *oobp = chip->oob_poi;
201 int sectorOobSize = mtd->oobsize / eccsteps;
202
203 for (sectorIdx = 0; sectorIdx < eccsteps;
204 sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
205 /* Enable hardware ECC before writing the buf */
206 nand_bcm_umi_bch_enable_write_hwecc();
207 bcm_umi_nand_write_buf(mtd, datap, eccsize);
208 nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
209 NAND_ECC_NUM_BYTES);
210 }
211
212 bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
213}
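For orientation, the BCH read path above works in 512-byte ECC sectors: chip->ecc.size is 512, chip->ecc.steps follows from the page size, and sectorOobSize is the per-sector slice of the OOB. A minimal, self-contained sketch of that arithmetic (the 2048-byte page with 64-byte OOB is an assumed example, not something the driver requires):

#include <stdio.h>

/* Illustrative only: how the per-sector geometry used by
 * bcm_umi_bch_read_page_hwecc() falls out of the page geometry.
 * The 2048/64 page layout is an assumed example. */
int main(void)
{
	int writesize = 2048;                  /* page size in bytes */
	int oobsize = 64;                      /* OOB bytes per page */
	int ecc_size = 512;                    /* chip->ecc.size */
	int eccsteps = writesize / ecc_size;   /* chip->ecc.steps = 4 */
	int sector_oob = oobsize / eccsteps;   /* sectorOobSize = 16 */

	printf("eccsteps=%d sectorOobSize=%d\n", eccsteps, sector_oob);
	return 0;
}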
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
new file mode 100644
index 000000000000..087bcd745bb7
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -0,0 +1,581 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <linux/version.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/ioport.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26#include <linux/io.h>
27#include <linux/platform_device.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/nand_ecc.h>
31#include <linux/mtd/partitions.h>
32
33#include <asm/mach-types.h>
34#include <asm/system.h>
35
36#include <mach/reg_nand.h>
37#include <mach/reg_umi.h>
38
39#include "nand_bcm_umi.h"
40
41#include <mach/memory_settings.h>
42
43#define USE_DMA 1
44#include <mach/dma.h>
45#include <linux/dma-mapping.h>
46#include <linux/completion.h>
47
48/* ---- External Variable Declarations ----------------------------------- */
49/* ---- External Function Prototypes ------------------------------------- */
50/* ---- Public Variables ------------------------------------------------- */
51/* ---- Private Constants and Types -------------------------------------- */
52static const __devinitconst char gBanner[] = KERN_INFO \
53 "BCM UMI MTD NAND Driver: 1.00\n";
54
55#ifdef CONFIG_MTD_PARTITIONS
56const char *part_probes[] = { "cmdlinepart", NULL };
57#endif
58
59#if NAND_ECC_BCH
60static uint8_t scan_ff_pattern[] = { 0xff };
61
62static struct nand_bbt_descr largepage_bbt = {
63 .options = 0,
64 .offs = 0,
65 .len = 1,
66 .pattern = scan_ff_pattern
67};
68#endif
69
70/*
71** Preallocate a DMA buffer to avoid allocating one for every dma operation.
72** This is the size of the preallocated coherent DMA buffer.
73*/
74#if USE_DMA
75#define DMA_MIN_BUFLEN 512
76#define DMA_MAX_BUFLEN PAGE_SIZE
77#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
78 ((len) > DMA_MAX_BUFLEN))
79
80/*
81 * The current NAND data space goes from 0x80001900 to 0x80001FFF,
82 * which is only 0x700 = 1792 bytes long. This is too small for 2K or 4K page
83 * size NAND flash, so DMA transfers are broken into multiple 1K chunks.
84 *
85 * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
86 */
87#define DMA_MAX_LEN 1024
88
89#else /* !USE_DMA */
90#define DMA_MIN_BUFLEN 0
91#define DMA_MAX_BUFLEN 0
92#define USE_DIRECT_IO(len) 1
93#endif
94/* ---- Private Function Prototypes -------------------------------------- */
95static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
96static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
97 int len);
98
99/* ---- Private Variables ------------------------------------------------ */
100static struct mtd_info *board_mtd;
101static void __iomem *bcm_umi_io_base;
102static void *virtPtr;
103static dma_addr_t physPtr;
104static struct completion nand_comp;
105
106/* ---- Private Functions ------------------------------------------------ */
107#if NAND_ECC_BCH
108#include "bcm_umi_bch.c"
109#else
110#include "bcm_umi_hamming.c"
111#endif
112
113#if USE_DMA
114
115/* Handler called when the DMA finishes. */
116static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
117{
118 complete(&nand_comp);
119}
120
121static int nand_dma_init(void)
122{
123 int rc;
124
125 rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
126 nand_dma_handler, NULL);
127 if (rc != 0) {
128 printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
129 return rc;
130 }
131
132 virtPtr =
133 dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
134 if (virtPtr == NULL) {
135 printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
136 return -ENOMEM;
137 }
138
139 return 0;
140}
141
142static void nand_dma_term(void)
143{
144 if (virtPtr != NULL)
145 dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
146}
147
148static void nand_dma_read(void *buf, int len)
149{
150 int offset = 0;
151 int tmp_len = 0;
152 int len_left = len;
153 DMA_Handle_t hndl;
154
155 if (virtPtr == NULL)
156 panic("nand_dma_read: virtPtr == NULL\n");
157
158 if ((void *)physPtr == NULL)
159 panic("nand_dma_read: physPtr == NULL\n");
160
161 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
162 if (hndl < 0) {
163 printk(KERN_ERR
164 "nand_dma_read: unable to allocate dma channel: %d\n",
165 (int)hndl);
166 panic("\n");
167 }
168
169 while (len_left > 0) {
170 if (len_left > DMA_MAX_LEN) {
171 tmp_len = DMA_MAX_LEN;
172 len_left -= DMA_MAX_LEN;
173 } else {
174 tmp_len = len_left;
175 len_left = 0;
176 }
177
178 init_completion(&nand_comp);
179 dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
180 physPtr + offset, tmp_len);
181 wait_for_completion(&nand_comp);
182
183 offset += tmp_len;
184 }
185
186 dma_free_channel(hndl);
187
188 if (buf != NULL)
189 memcpy(buf, virtPtr, len);
190}
191
192static void nand_dma_write(const void *buf, int len)
193{
194 int offset = 0;
195 int tmp_len = 0;
196 int len_left = len;
197 DMA_Handle_t hndl;
198
199 if (buf == NULL)
200 panic("nand_dma_write: buf == NULL\n");
201
202 if (virtPtr == NULL)
203 panic("nand_dma_write: virtPtr == NULL\n");
204
205 if ((void *)physPtr == NULL)
206 panic("nand_dma_write: physPtr == NULL\n");
207
208 memcpy(virtPtr, buf, len);
209
210
211 hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
212 if (hndl < 0) {
213 printk(KERN_ERR
214 "nand_dma_write: unable to allocate dma channel: %d\n",
215 (int)hndl);
216 panic("\n");
217 }
218
219 while (len_left > 0) {
220 if (len_left > DMA_MAX_LEN) {
221 tmp_len = DMA_MAX_LEN;
222 len_left -= DMA_MAX_LEN;
223 } else {
224 tmp_len = len_left;
225 len_left = 0;
226 }
227
228 init_completion(&nand_comp);
229 dma_transfer_mem_to_mem(hndl, physPtr + offset,
230 REG_NAND_DATA_PADDR, tmp_len);
231 wait_for_completion(&nand_comp);
232
233 offset += tmp_len;
234 }
235
236 dma_free_channel(hndl);
237}
238
239#endif
240
241static int nand_dev_ready(struct mtd_info *mtd)
242{
243 return nand_bcm_umi_dev_ready();
244}
245
246/****************************************************************************
247*
248* bcm_umi_nand_inithw
249*
250* This routine does the necessary hardware (board-specific)
251* initializations. This includes setting up the timings, etc.
252*
253***************************************************************************/
254int bcm_umi_nand_inithw(void)
255{
256 /* Configure nand timing parameters */
257 REG_UMI_NAND_TCR &= ~0x7ffff;
258 REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
259
260#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
261 /* enable software control of CS */
262 REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
263#endif
264
265 /* keep NAND chip select asserted */
266 REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
267
268 REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
269 /* enable writes to flash */
270 REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
271
272 writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
273 nand_bcm_umi_wait_till_ready();
274
275#if NAND_ECC_BCH
276 nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
277#endif
278
279 return 0;
280}
281
282/* Used to latch the proper register for access. */
283static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
284 unsigned int ctrl)
285{
286 /* send command to hardware */
287 struct nand_chip *chip = mtd->priv;
288 if (ctrl & NAND_CTRL_CHANGE) {
289 if (ctrl & NAND_CLE) {
290 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
291 goto CMD;
292 }
293 if (ctrl & NAND_ALE) {
294 chip->IO_ADDR_W =
295 bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
296 goto CMD;
297 }
298 chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
299 }
300
301CMD:
302 /* Send command to chip directly */
303 if (cmd != NAND_CMD_NONE)
304 writeb(cmd, chip->IO_ADDR_W);
305}
306
307static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
308 int len)
309{
310 if (USE_DIRECT_IO(len)) {
311 /* Do it the old way if the buffer is small or too large.
312 * Probably quicker than starting and checking dma. */
313 int i;
314 struct nand_chip *this = mtd->priv;
315
316 for (i = 0; i < len; i++)
317 writeb(buf[i], this->IO_ADDR_W);
318 }
319#if USE_DMA
320 else
321 nand_dma_write(buf, len);
322#endif
323}
324
325static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
326{
327 if (USE_DIRECT_IO(len)) {
328 int i;
329 struct nand_chip *this = mtd->priv;
330
331 for (i = 0; i < len; i++)
332 buf[i] = readb(this->IO_ADDR_R);
333 }
334#if USE_DMA
335 else
336 nand_dma_read(buf, len);
337#endif
338}
339
340static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
341static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
342 int len)
343{
344 /*
345 * Try to read back the page with ECC correction. This is necessary
346 * for MLC parts which may have permanently stuck bits.
347 */
348 struct nand_chip *chip = mtd->priv;
349 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
350 if (ret < 0)
351 return -EFAULT;
352 else {
353 if (memcmp(readbackbuf, buf, len) == 0)
354 return 0;
355
356 return -EFAULT;
357 }
358 return 0;
359}
360
361static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
362{
363 struct nand_chip *this;
364 struct resource *r;
365 int err = 0;
366
367 printk(gBanner);
368
369 /* Allocate memory for MTD device structure and private data */
370 board_mtd =
371 kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
372 GFP_KERNEL);
373 if (!board_mtd) {
374 printk(KERN_WARNING
375 "Unable to allocate NAND MTD device structure.\n");
376 return -ENOMEM;
377 }
378
379 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
380
381 if (!r)
382 return -ENXIO;
383
384 /* map physical address */
385 bcm_umi_io_base = ioremap(r->start, r->end - r->start + 1);
386
387 if (!bcm_umi_io_base) {
388 printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
389 kfree(board_mtd);
390 return -EIO;
391 }
392
393 /* Get pointer to private data */
394 this = (struct nand_chip *)(&board_mtd[1]);
395
396 /* Initialize structures */
397 memset((char *)board_mtd, 0, sizeof(struct mtd_info));
398 memset((char *)this, 0, sizeof(struct nand_chip));
399
400 /* Link the private data with the MTD structure */
401 board_mtd->priv = this;
402
403 /* Initialize the NAND hardware. */
404 if (bcm_umi_nand_inithw() < 0) {
405 printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
406 iounmap(bcm_umi_io_base);
407 kfree(board_mtd);
408 return -EIO;
409 }
410
411 /* Set address of NAND IO lines */
412 this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
413 this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
414
415 /* Set command delay time, see datasheet for correct value */
416 this->chip_delay = 0;
417 /* Assign the device ready function, if available */
418 this->dev_ready = nand_dev_ready;
419 this->options = 0;
420
421 this->write_buf = bcm_umi_nand_write_buf;
422 this->read_buf = bcm_umi_nand_read_buf;
423 this->verify_buf = bcm_umi_nand_verify_buf;
424
425 this->cmd_ctrl = bcm_umi_nand_hwcontrol;
426 this->ecc.mode = NAND_ECC_HW;
427 this->ecc.size = 512;
428 this->ecc.bytes = NAND_ECC_NUM_BYTES;
429#if NAND_ECC_BCH
430 this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
431 this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
432#else
433 this->ecc.correct = nand_correct_data512;
434 this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
435 this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
436#endif
437
438#if USE_DMA
439 err = nand_dma_init();
440 if (err != 0)
441 return err;
442#endif
443
444 /* Determine the size of the device that we have.
445 * We need this to figure out which ECC
446 * layout we'll be using.
447 */
448
449 err = nand_scan_ident(board_mtd, 1);
450 if (err) {
451 printk(KERN_ERR "nand_scan failed: %d\n", err);
452 iounmap(bcm_umi_io_base);
453 kfree(board_mtd);
454 return err;
455 }
456
457 /* Now that we know the nand size, we can set up the ECC layout */
458
459 switch (board_mtd->writesize) { /* writesize is the pagesize */
460 case 4096:
461 this->ecc.layout = &nand_hw_eccoob_4096;
462 break;
463 case 2048:
464 this->ecc.layout = &nand_hw_eccoob_2048;
465 break;
466 case 512:
467 this->ecc.layout = &nand_hw_eccoob_512;
468 break;
469 default:
470 {
471 printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
472 board_mtd->writesize);
473 return -EINVAL;
474 }
475 }
476
477#if NAND_ECC_BCH
478 if (board_mtd->writesize > 512) {
479 if (this->options & NAND_USE_FLASH_BBT)
480 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
481 this->badblock_pattern = &largepage_bbt;
482 }
483#endif
484
485 /* Finish off the scan, now that ecc.layout has been initialized. */
486
487 err = nand_scan_tail(board_mtd);
488 if (err) {
489 printk(KERN_ERR "nand_scan failed: %d\n", err);
490 iounmap(bcm_umi_io_base);
491 kfree(board_mtd);
492 return err;
493 }
494
495 /* Register the partitions */
496 {
497 int nr_partitions;
498 struct mtd_partition *partition_info;
499
500 board_mtd->name = "bcm_umi-nand";
501 nr_partitions =
502 parse_mtd_partitions(board_mtd, part_probes,
503 &partition_info, 0);
504
505 if (nr_partitions <= 0) {
506 printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
507 nr_partitions);
508 iounmap(bcm_umi_io_base);
509 kfree(board_mtd);
510 return -EIO;
511 }
512 add_mtd_partitions(board_mtd, partition_info, nr_partitions);
513 }
514
515 /* Return happy */
516 return 0;
517}
518
519static int bcm_umi_nand_remove(struct platform_device *pdev)
520{
521#if USE_DMA
522 nand_dma_term();
523#endif
524
525 /* Release resources, unregister device */
526 nand_release(board_mtd);
527
528 /* unmap physical address */
529 iounmap(bcm_umi_io_base);
530
531 /* Free the MTD device structure */
532 kfree(board_mtd);
533
534 return 0;
535}
536
537#ifdef CONFIG_PM
538static int bcm_umi_nand_suspend(struct platform_device *pdev,
539 pm_message_t state)
540{
541 printk(KERN_ERR "MTD NAND suspend is being called\n");
542 return 0;
543}
544
545static int bcm_umi_nand_resume(struct platform_device *pdev)
546{
547 printk(KERN_ERR "MTD NAND resume is being called\n");
548 return 0;
549}
550#else
551#define bcm_umi_nand_suspend NULL
552#define bcm_umi_nand_resume NULL
553#endif
554
555static struct platform_driver nand_driver = {
556 .driver = {
557 .name = "bcm-nand",
558 .owner = THIS_MODULE,
559 },
560 .probe = bcm_umi_nand_probe,
561 .remove = bcm_umi_nand_remove,
562 .suspend = bcm_umi_nand_suspend,
563 .resume = bcm_umi_nand_resume,
564};
565
566static int __init nand_init(void)
567{
568 return platform_driver_register(&nand_driver);
569}
570
571static void __exit nand_exit(void)
572{
573 platform_driver_unregister(&nand_driver);
574}
575
576module_init(nand_init);
577module_exit(nand_exit);
578
579MODULE_LICENSE("GPL");
580MODULE_AUTHOR("Broadcom");
581MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
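As the comment above REG_NAND_DATA_PADDR explains, each transfer is capped at DMA_MAX_LEN (1024 bytes), so nand_dma_read() and nand_dma_write() loop over a request in 1K pieces. A minimal user-space sketch of the same chunking loop; do_dma_chunk() is a placeholder standing in for dma_transfer_mem_to_mem() plus the completion wait:

#include <stdio.h>
#include <stddef.h>

#define DMA_MAX_LEN 1024	/* per-transfer cap, as in the driver */

/* Stand-in for "program one DMA descriptor and wait for completion"
 * (dma_transfer_mem_to_mem() + wait_for_completion() in the driver). */
static void do_dma_chunk(size_t offset, size_t len)
{
	printf("dma chunk: offset=%zu len=%zu\n", offset, len);
}

/* Split a transfer into DMA_MAX_LEN pieces, mirroring nand_dma_read/write. */
static void dma_in_chunks(size_t total)
{
	size_t offset = 0;

	while (total > 0) {
		size_t chunk = total > DMA_MAX_LEN ? DMA_MAX_LEN : total;

		do_dma_chunk(offset, chunk);
		offset += chunk;
		total -= chunk;
	}
}

int main(void)
{
	dma_in_chunks(2048 + 64);	/* e.g. a 2K page plus its OOB */
	return 0;
}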
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index f13f5b9afaf7..fe3eba87de40 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -591,6 +591,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
591 591
592 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ 592 /* options such as NAND_USE_FLASH_BBT or 16-bit widths */
593 info->chip.options = pdata->options; 593 info->chip.options = pdata->options;
594 info->chip.bbt_td = pdata->bbt_td;
595 info->chip.bbt_md = pdata->bbt_md;
594 596
595 info->ioaddr = (uint32_t __force) vaddr; 597 info->ioaddr = (uint32_t __force) vaddr;
596 598
@@ -599,7 +601,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
599 info->mask_chipsel = pdata->mask_chipsel; 601 info->mask_chipsel = pdata->mask_chipsel;
600 602
601 /* use nandboot-capable ALE/CLE masks by default */ 603 /* use nandboot-capable ALE/CLE masks by default */
602 info->mask_ale = pdata->mask_cle ? : MASK_ALE; 604 info->mask_ale = pdata->mask_ale ? : MASK_ALE;
603 info->mask_cle = pdata->mask_cle ? : MASK_CLE; 605 info->mask_cle = pdata->mask_cle ? : MASK_CLE;
604 606
605 /* Set address of hardware control function */ 607 /* Set address of hardware control function */
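The two added lines simply hand board-supplied bad block table descriptors through to the NAND core. A hedged sketch of what such platform data could look like in a board file (the descriptor values and the <mach/nand.h> include path are illustrative assumptions, not taken from any real board):

#include <linux/mtd/nand.h>	/* struct nand_bbt_descr, NAND_BBT_* flags */
#include <mach/nand.h>		/* struct davinci_nand_pdata (assumed path) */

/* Illustrative BBT pattern and descriptor; values are placeholders. */
static uint8_t board_bbt_pattern[] = { 'B', 'b', 't', '0' };

static struct nand_bbt_descr board_bbt_main = {
	.options	= NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
			  NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs		= 2,
	.len		= 4,
	.veroffs	= 6,
	.maxblocks	= 4,
	.pattern	= board_bbt_pattern,
};

/* Board platform data: the probe code above copies bbt_td/bbt_md
 * straight into info->chip. */
static struct davinci_nand_pdata board_nand_data = {
	.options	= NAND_USE_FLASH_BBT,
	.bbt_td		= &board_bbt_main,
	.bbt_md		= NULL,		/* no mirror table in this sketch */
};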
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
index 72446fb48d4b..af6a6a5399e1 100644
--- a/drivers/mtd/nand/excite_nandflash.c
+++ b/drivers/mtd/nand/excite_nandflash.c
@@ -128,7 +128,7 @@ static int excite_nand_devready(struct mtd_info *mtd)
128 * The binding to the mtd and all allocated 128 * The binding to the mtd and all allocated
129 * resources are released. 129 * resources are released.
130 */ 130 */
131static int __exit excite_nand_remove(struct platform_device *dev) 131static int __devexit excite_nand_remove(struct platform_device *dev)
132{ 132{
133 struct excite_nand_drvdata * const this = platform_get_drvdata(dev); 133 struct excite_nand_drvdata * const this = platform_get_drvdata(dev);
134 134
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ddd37d2554ed..ae30fb6eed97 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -237,12 +237,15 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
237 237
238 ctrl->use_mdr = 0; 238 ctrl->use_mdr = 0;
239 239
240 dev_vdbg(ctrl->dev, 240 if (ctrl->status != LTESR_CC) {
241 "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n", 241 dev_info(ctrl->dev,
242 ctrl->status, ctrl->mdr, in_be32(&lbc->fmr)); 242 "command failed: fir %x fcr %x status %x mdr %x\n",
243 in_be32(&lbc->fir), in_be32(&lbc->fcr),
244 ctrl->status, ctrl->mdr);
245 return -EIO;
246 }
243 247
244 /* returns 0 on success otherwise non-zero) */ 248 return 0;
245 return ctrl->status == LTESR_CC ? 0 : -EIO;
246} 249}
247 250
248static void fsl_elbc_do_read(struct nand_chip *chip, int oob) 251static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
@@ -253,17 +256,17 @@ static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
253 256
254 if (priv->page_size) { 257 if (priv->page_size) {
255 out_be32(&lbc->fir, 258 out_be32(&lbc->fir,
256 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 259 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
257 (FIR_OP_CA << FIR_OP1_SHIFT) | 260 (FIR_OP_CA << FIR_OP1_SHIFT) |
258 (FIR_OP_PA << FIR_OP2_SHIFT) | 261 (FIR_OP_PA << FIR_OP2_SHIFT) |
259 (FIR_OP_CW1 << FIR_OP3_SHIFT) | 262 (FIR_OP_CM1 << FIR_OP3_SHIFT) |
260 (FIR_OP_RBW << FIR_OP4_SHIFT)); 263 (FIR_OP_RBW << FIR_OP4_SHIFT));
261 264
262 out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | 265 out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
263 (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); 266 (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
264 } else { 267 } else {
265 out_be32(&lbc->fir, 268 out_be32(&lbc->fir,
266 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 269 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
267 (FIR_OP_CA << FIR_OP1_SHIFT) | 270 (FIR_OP_CA << FIR_OP1_SHIFT) |
268 (FIR_OP_PA << FIR_OP2_SHIFT) | 271 (FIR_OP_PA << FIR_OP2_SHIFT) |
269 (FIR_OP_RBW << FIR_OP3_SHIFT)); 272 (FIR_OP_RBW << FIR_OP3_SHIFT));
@@ -332,7 +335,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
332 case NAND_CMD_READID: 335 case NAND_CMD_READID:
333 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); 336 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
334 337
335 out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) | 338 out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
336 (FIR_OP_UA << FIR_OP1_SHIFT) | 339 (FIR_OP_UA << FIR_OP1_SHIFT) |
337 (FIR_OP_RBW << FIR_OP2_SHIFT)); 340 (FIR_OP_RBW << FIR_OP2_SHIFT));
338 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
@@ -359,16 +362,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
359 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); 362 dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
360 363
361 out_be32(&lbc->fir, 364 out_be32(&lbc->fir,
362 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 365 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
363 (FIR_OP_PA << FIR_OP1_SHIFT) | 366 (FIR_OP_PA << FIR_OP1_SHIFT) |
364 (FIR_OP_CM1 << FIR_OP2_SHIFT)); 367 (FIR_OP_CM2 << FIR_OP2_SHIFT) |
368 (FIR_OP_CW1 << FIR_OP3_SHIFT) |
369 (FIR_OP_RS << FIR_OP4_SHIFT));
365 370
366 out_be32(&lbc->fcr, 371 out_be32(&lbc->fcr,
367 (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | 372 (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
368 (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT)); 373 (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
374 (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
369 375
370 out_be32(&lbc->fbcr, 0); 376 out_be32(&lbc->fbcr, 0);
371 ctrl->read_bytes = 0; 377 ctrl->read_bytes = 0;
378 ctrl->use_mdr = 1;
372 379
373 fsl_elbc_run_command(mtd); 380 fsl_elbc_run_command(mtd);
374 return; 381 return;
@@ -383,40 +390,41 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
383 390
384 ctrl->column = column; 391 ctrl->column = column;
385 ctrl->oob = 0; 392 ctrl->oob = 0;
393 ctrl->use_mdr = 1;
386 394
387 if (priv->page_size) { 395 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
388 fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) | 396 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
389 (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT); 397 (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
390 398
399 if (priv->page_size) {
391 out_be32(&lbc->fir, 400 out_be32(&lbc->fir,
392 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 401 (FIR_OP_CM2 << FIR_OP0_SHIFT) |
393 (FIR_OP_CA << FIR_OP1_SHIFT) | 402 (FIR_OP_CA << FIR_OP1_SHIFT) |
394 (FIR_OP_PA << FIR_OP2_SHIFT) | 403 (FIR_OP_PA << FIR_OP2_SHIFT) |
395 (FIR_OP_WB << FIR_OP3_SHIFT) | 404 (FIR_OP_WB << FIR_OP3_SHIFT) |
396 (FIR_OP_CW1 << FIR_OP4_SHIFT)); 405 (FIR_OP_CM3 << FIR_OP4_SHIFT) |
406 (FIR_OP_CW1 << FIR_OP5_SHIFT) |
407 (FIR_OP_RS << FIR_OP6_SHIFT));
397 } else { 408 } else {
398 fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
399 (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
400
401 out_be32(&lbc->fir, 409 out_be32(&lbc->fir,
402 (FIR_OP_CW0 << FIR_OP0_SHIFT) | 410 (FIR_OP_CM0 << FIR_OP0_SHIFT) |
403 (FIR_OP_CM2 << FIR_OP1_SHIFT) | 411 (FIR_OP_CM2 << FIR_OP1_SHIFT) |
404 (FIR_OP_CA << FIR_OP2_SHIFT) | 412 (FIR_OP_CA << FIR_OP2_SHIFT) |
405 (FIR_OP_PA << FIR_OP3_SHIFT) | 413 (FIR_OP_PA << FIR_OP3_SHIFT) |
406 (FIR_OP_WB << FIR_OP4_SHIFT) | 414 (FIR_OP_WB << FIR_OP4_SHIFT) |
407 (FIR_OP_CW1 << FIR_OP5_SHIFT)); 415 (FIR_OP_CM3 << FIR_OP5_SHIFT) |
416 (FIR_OP_CW1 << FIR_OP6_SHIFT) |
417 (FIR_OP_RS << FIR_OP7_SHIFT));
408 418
409 if (column >= mtd->writesize) { 419 if (column >= mtd->writesize) {
410 /* OOB area --> READOOB */ 420 /* OOB area --> READOOB */
411 column -= mtd->writesize; 421 column -= mtd->writesize;
412 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; 422 fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
413 ctrl->oob = 1; 423 ctrl->oob = 1;
414 } else if (column < 256) { 424 } else {
425 WARN_ON(column != 0);
415 /* First 256 bytes --> READ0 */ 426 /* First 256 bytes --> READ0 */
416 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 427 fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
417 } else {
418 /* Second 256 bytes --> READ1 */
419 fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
420 } 428 }
421 } 429 }
422 430
@@ -628,22 +636,6 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
628{ 636{
629 struct fsl_elbc_mtd *priv = chip->priv; 637 struct fsl_elbc_mtd *priv = chip->priv;
630 struct fsl_elbc_ctrl *ctrl = priv->ctrl; 638 struct fsl_elbc_ctrl *ctrl = priv->ctrl;
631 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
632
633 if (ctrl->status != LTESR_CC)
634 return NAND_STATUS_FAIL;
635
636 /* Use READ_STATUS command, but wait for the device to be ready */
637 ctrl->use_mdr = 0;
638 out_be32(&lbc->fir,
639 (FIR_OP_CW0 << FIR_OP0_SHIFT) |
640 (FIR_OP_RBW << FIR_OP1_SHIFT));
641 out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
642 out_be32(&lbc->fbcr, 1);
643 set_addr(mtd, 0, 0, 0);
644 ctrl->read_bytes = 1;
645
646 fsl_elbc_run_command(mtd);
647 639
648 if (ctrl->status != LTESR_CC) 640 if (ctrl->status != LTESR_CC)
649 return NAND_STATUS_FAIL; 641 return NAND_STATUS_FAIL;
@@ -651,8 +643,7 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
651 /* The chip always seems to report that it is 643 /* The chip always seems to report that it is
652 * write-protected, even when it is not. 644 * write-protected, even when it is not.
653 */ 645 */
654 setbits8(ctrl->addr, NAND_STATUS_WP); 646 return (ctrl->mdr & 0xff) | NAND_STATUS_WP;
655 return fsl_elbc_read_byte(mtd);
656} 647}
657 648
658static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) 649static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
@@ -946,6 +937,13 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
946{ 937{
947 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 938 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
948 939
940 /*
941 * NAND transactions can tie up the bus for a long time, so set the
942 * bus timeout to max by clearing LBCR[BMT] (highest base counter
943 * value) and setting LBCR[BMTPS] to the highest prescaler value.
944 */
945 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15);
946
949 /* clear event registers */ 947 /* clear event registers */
950 setbits32(&lbc->ltesr, LTESR_NAND_MASK); 948 setbits32(&lbc->ltesr, LTESR_NAND_MASK);
951 out_be32(&lbc->lteatr, 0); 949 out_be32(&lbc->lteatr, 0);
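With the reworked program and erase sequences above, the chip's status byte is latched into the controller's MDR by the trailing FIR_OP_RS step, and fsl_elbc_wait() now returns that byte with NAND_STATUS_WP forced on. A hedged sketch of how a caller might interpret such a return value (status_to_errno() is a hypothetical helper, not part of the driver):

#include <linux/errno.h>
#include <linux/mtd/nand.h>

/* Hypothetical helper: interpret the status byte returned by a
 * driver ->waitfunc() such as fsl_elbc_wait() above. */
static int status_to_errno(int status)
{
	if (status & NAND_STATUS_FAIL)		/* program/erase failed */
		return -EIO;
	if (!(status & NAND_STATUS_WP))		/* part reports write protect */
		return -EROFS;
	return 0;
}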
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index d120cd8d7267..071a60cb4204 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -112,7 +112,7 @@ static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
112 112
113 if (mchip_nr == -1) { 113 if (mchip_nr == -1) {
114 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); 114 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
115 } else if (mchip_nr >= 0) { 115 } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
116 fun->mchip_number = mchip_nr; 116 fun->mchip_number = mchip_nr;
117 chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr]; 117 chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
118 chip->IO_ADDR_W = chip->IO_ADDR_R; 118 chip->IO_ADDR_W = chip->IO_ADDR_R;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 65b26d5a5c0d..45dec5770da0 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -33,9 +33,13 @@
33 33
34#include <asm/mach/flash.h> 34#include <asm/mach/flash.h>
35#include <mach/mxc_nand.h> 35#include <mach/mxc_nand.h>
36#include <mach/hardware.h>
36 37
37#define DRIVER_NAME "mxc_nand" 38#define DRIVER_NAME "mxc_nand"
38 39
40#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
41#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
42
39/* Addresses for NFC registers */ 43/* Addresses for NFC registers */
40#define NFC_BUF_SIZE 0xE00 44#define NFC_BUF_SIZE 0xE00
41#define NFC_BUF_ADDR 0xE04 45#define NFC_BUF_ADDR 0xE04
@@ -46,24 +50,14 @@
46#define NFC_RSLTMAIN_AREA 0xE0E 50#define NFC_RSLTMAIN_AREA 0xE0E
47#define NFC_RSLTSPARE_AREA 0xE10 51#define NFC_RSLTSPARE_AREA 0xE10
48#define NFC_WRPROT 0xE12 52#define NFC_WRPROT 0xE12
49#define NFC_UNLOCKSTART_BLKADDR 0xE14 53#define NFC_V1_UNLOCKSTART_BLKADDR 0xe14
50#define NFC_UNLOCKEND_BLKADDR 0xE16 54#define NFC_V1_UNLOCKEND_BLKADDR 0xe16
55#define NFC_V21_UNLOCKSTART_BLKADDR 0xe20
56#define NFC_V21_UNLOCKEND_BLKADDR 0xe22
51#define NFC_NF_WRPRST 0xE18 57#define NFC_NF_WRPRST 0xE18
52#define NFC_CONFIG1 0xE1A 58#define NFC_CONFIG1 0xE1A
53#define NFC_CONFIG2 0xE1C 59#define NFC_CONFIG2 0xE1C
54 60
55/* Addresses for NFC RAM BUFFER Main area 0 */
56#define MAIN_AREA0 0x000
57#define MAIN_AREA1 0x200
58#define MAIN_AREA2 0x400
59#define MAIN_AREA3 0x600
60
61/* Addresses for NFC SPARE BUFFER Spare area 0 */
62#define SPARE_AREA0 0x800
63#define SPARE_AREA1 0x810
64#define SPARE_AREA2 0x820
65#define SPARE_AREA3 0x830
66
67/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register 61/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
68 * for Command operation */ 62 * for Command operation */
69#define NFC_CMD 0x1 63#define NFC_CMD 0x1
@@ -106,48 +100,66 @@ struct mxc_nand_host {
106 struct mtd_partition *parts; 100 struct mtd_partition *parts;
107 struct device *dev; 101 struct device *dev;
108 102
103 void *spare0;
104 void *main_area0;
105 void *main_area1;
106
107 void __iomem *base;
109 void __iomem *regs; 108 void __iomem *regs;
110 int spare_only;
111 int status_request; 109 int status_request;
112 int pagesize_2k;
113 uint16_t col_addr;
114 struct clk *clk; 110 struct clk *clk;
115 int clk_act; 111 int clk_act;
116 int irq; 112 int irq;
117 113
118 wait_queue_head_t irq_waitq; 114 wait_queue_head_t irq_waitq;
119};
120
121/* Define delays in microsec for NAND device operations */
122#define TROP_US_DELAY 2000
123/* Macros to get byte and bit positions of ECC */
124#define COLPOS(x) ((x) >> 3)
125#define BITPOS(x) ((x) & 0xf)
126 115
127/* Define single bit Error positions in Main & Spare area */ 116 uint8_t *data_buf;
128#define MAIN_SINGLEBIT_ERROR 0x4 117 unsigned int buf_start;
129#define SPARE_SINGLEBIT_ERROR 0x1 118 int spare_len;
130
131/* OOB placement block for use with hardware ecc generation */
132static struct nand_ecclayout nand_hw_eccoob_8 = {
133 .eccbytes = 5,
134 .eccpos = {6, 7, 8, 9, 10},
135 .oobfree = {{0, 5}, {11, 5}, }
136}; 119};
137 120
138static struct nand_ecclayout nand_hw_eccoob_16 = { 121/* OOB placement block for use with hardware ecc generation */
122static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
139 .eccbytes = 5, 123 .eccbytes = 5,
140 .eccpos = {6, 7, 8, 9, 10}, 124 .eccpos = {6, 7, 8, 9, 10},
141 .oobfree = {{0, 5}, {11, 5}, } 125 .oobfree = {{0, 5}, {12, 4}, }
142}; 126};
143 127
144static struct nand_ecclayout nand_hw_eccoob_64 = { 128static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
145 .eccbytes = 20, 129 .eccbytes = 20,
146 .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 130 .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
147 38, 39, 40, 41, 42, 54, 55, 56, 57, 58}, 131 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
148 .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, } 132 .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
149}; 133};
150 134
135/* OOB description for 512 byte pages with 16 byte OOB */
136static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
137 .eccbytes = 1 * 9,
138 .eccpos = {
139 7, 8, 9, 10, 11, 12, 13, 14, 15
140 },
141 .oobfree = {
142 {.offset = 0, .length = 5}
143 }
144};
145
146/* OOB description for 2048 byte pages with 64 byte OOB */
147static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
148 .eccbytes = 4 * 9,
149 .eccpos = {
150 7, 8, 9, 10, 11, 12, 13, 14, 15,
151 23, 24, 25, 26, 27, 28, 29, 30, 31,
152 39, 40, 41, 42, 43, 44, 45, 46, 47,
153 55, 56, 57, 58, 59, 60, 61, 62, 63
154 },
155 .oobfree = {
156 {.offset = 2, .length = 4},
157 {.offset = 16, .length = 7},
158 {.offset = 32, .length = 7},
159 {.offset = 48, .length = 7}
160 }
161};
162
151#ifdef CONFIG_MTD_PARTITIONS 163#ifdef CONFIG_MTD_PARTITIONS
152static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; 164static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
153#endif 165#endif
@@ -170,10 +182,10 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
170/* This function polls the NANDFC to wait for the basic operation to 182/* This function polls the NANDFC to wait for the basic operation to
171 * complete by checking the INT bit of config2 register. 183 * complete by checking the INT bit of config2 register.
172 */ 184 */
173static void wait_op_done(struct mxc_nand_host *host, int max_retries, 185static void wait_op_done(struct mxc_nand_host *host, int useirq)
174 uint16_t param, int useirq)
175{ 186{
176 uint32_t tmp; 187 uint32_t tmp;
188 int max_retries = 2000;
177 189
178 if (useirq) { 190 if (useirq) {
179 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { 191 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
@@ -200,8 +212,8 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
200 udelay(1); 212 udelay(1);
201 } 213 }
202 if (max_retries < 0) 214 if (max_retries < 0)
203 DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n", 215 DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
204 __func__, param); 216 __func__);
205 } 217 }
206} 218}
207 219
@@ -215,7 +227,7 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
215 writew(NFC_CMD, host->regs + NFC_CONFIG2); 227 writew(NFC_CMD, host->regs + NFC_CONFIG2);
216 228
217 /* Wait for operation to complete */ 229 /* Wait for operation to complete */
218 wait_op_done(host, TROP_US_DELAY, cmd, useirq); 230 wait_op_done(host, useirq);
219} 231}
220 232
221/* This function sends an address (or partial address) to the 233/* This function sends an address (or partial address) to the
@@ -229,82 +241,47 @@ static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast)
229 writew(NFC_ADDR, host->regs + NFC_CONFIG2); 241 writew(NFC_ADDR, host->regs + NFC_CONFIG2);
230 242
231 /* Wait for operation to complete */ 243 /* Wait for operation to complete */
232 wait_op_done(host, TROP_US_DELAY, addr, islast); 244 wait_op_done(host, islast);
233} 245}
234 246
235/* This function requests the NANDFC to initate the transfer 247static void send_page(struct mtd_info *mtd, unsigned int ops)
236 * of data currently in the NANDFC RAM buffer to the NAND device. */
237static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
238 int spare_only)
239{ 248{
240 DEBUG(MTD_DEBUG_LEVEL3, "send_prog_page (%d)\n", spare_only); 249 struct nand_chip *nand_chip = mtd->priv;
241 250 struct mxc_nand_host *host = nand_chip->priv;
242 /* NANDFC buffer 0 is used for page read/write */ 251 int bufs, i;
243 writew(buf_id, host->regs + NFC_BUF_ADDR);
244
245 /* Configure spare or page+spare access */
246 if (!host->pagesize_2k) {
247 uint16_t config1 = readw(host->regs + NFC_CONFIG1);
248 if (spare_only)
249 config1 |= NFC_SP_EN;
250 else
251 config1 &= ~(NFC_SP_EN);
252 writew(config1, host->regs + NFC_CONFIG1);
253 }
254 252
255 writew(NFC_INPUT, host->regs + NFC_CONFIG2); 253 if (nfc_is_v1() && mtd->writesize > 512)
254 bufs = 4;
255 else
256 bufs = 1;
256 257
257 /* Wait for operation to complete */ 258 for (i = 0; i < bufs; i++) {
258 wait_op_done(host, TROP_US_DELAY, spare_only, true);
259}
260 259
261/* Requests NANDFC to initated the transfer of data from the 260 /* NANDFC buffer 0 is used for page read/write */
262 * NAND device into in the NANDFC ram buffer. */ 261 writew(i, host->regs + NFC_BUF_ADDR);
263static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
264 int spare_only)
265{
266 DEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only);
267 262
268 /* NANDFC buffer 0 is used for page read/write */ 263 writew(ops, host->regs + NFC_CONFIG2);
269 writew(buf_id, host->regs + NFC_BUF_ADDR);
270 264
271 /* Configure spare or page+spare access */ 265 /* Wait for operation to complete */
272 if (!host->pagesize_2k) { 266 wait_op_done(host, true);
273 uint32_t config1 = readw(host->regs + NFC_CONFIG1);
274 if (spare_only)
275 config1 |= NFC_SP_EN;
276 else
277 config1 &= ~NFC_SP_EN;
278 writew(config1, host->regs + NFC_CONFIG1);
279 } 267 }
280
281 writew(NFC_OUTPUT, host->regs + NFC_CONFIG2);
282
283 /* Wait for operation to complete */
284 wait_op_done(host, TROP_US_DELAY, spare_only, true);
285} 268}
286 269
287/* Request the NANDFC to perform a read of the NAND device ID. */ 270/* Request the NANDFC to perform a read of the NAND device ID. */
288static void send_read_id(struct mxc_nand_host *host) 271static void send_read_id(struct mxc_nand_host *host)
289{ 272{
290 struct nand_chip *this = &host->nand; 273 struct nand_chip *this = &host->nand;
291 uint16_t tmp;
292 274
293 /* NANDFC buffer 0 is used for device ID output */ 275 /* NANDFC buffer 0 is used for device ID output */
294 writew(0x0, host->regs + NFC_BUF_ADDR); 276 writew(0x0, host->regs + NFC_BUF_ADDR);
295 277
296 /* Read ID into main buffer */
297 tmp = readw(host->regs + NFC_CONFIG1);
298 tmp &= ~NFC_SP_EN;
299 writew(tmp, host->regs + NFC_CONFIG1);
300
301 writew(NFC_ID, host->regs + NFC_CONFIG2); 278 writew(NFC_ID, host->regs + NFC_CONFIG2);
302 279
303 /* Wait for operation to complete */ 280 /* Wait for operation to complete */
304 wait_op_done(host, TROP_US_DELAY, 0, true); 281 wait_op_done(host, true);
305 282
306 if (this->options & NAND_BUSWIDTH_16) { 283 if (this->options & NAND_BUSWIDTH_16) {
307 void __iomem *main_buf = host->regs + MAIN_AREA0; 284 void __iomem *main_buf = host->main_area0;
308 /* compress the ID info */ 285 /* compress the ID info */
309 writeb(readb(main_buf + 2), main_buf + 1); 286 writeb(readb(main_buf + 2), main_buf + 1);
310 writeb(readb(main_buf + 4), main_buf + 2); 287 writeb(readb(main_buf + 4), main_buf + 2);
@@ -312,15 +289,16 @@ static void send_read_id(struct mxc_nand_host *host)
312 writeb(readb(main_buf + 8), main_buf + 4); 289 writeb(readb(main_buf + 8), main_buf + 4);
313 writeb(readb(main_buf + 10), main_buf + 5); 290 writeb(readb(main_buf + 10), main_buf + 5);
314 } 291 }
292 memcpy(host->data_buf, host->main_area0, 16);
315} 293}
316 294
317/* This function requests the NANDFC to perform a read of the 295/* This function requests the NANDFC to perform a read of the
318 * NAND device status and returns the current status. */ 296 * NAND device status and returns the current status. */
319static uint16_t get_dev_status(struct mxc_nand_host *host) 297static uint16_t get_dev_status(struct mxc_nand_host *host)
320{ 298{
321 void __iomem *main_buf = host->regs + MAIN_AREA1; 299 void __iomem *main_buf = host->main_area1;
322 uint32_t store; 300 uint32_t store;
323 uint16_t ret, tmp; 301 uint16_t ret;
324 /* Issue status request to NAND device */ 302 /* Issue status request to NAND device */
325 303
326 /* store the main area1 first word, later do recovery */ 304 /* store the main area1 first word, later do recovery */
@@ -329,15 +307,10 @@ static uint16_t get_dev_status(struct mxc_nand_host *host)
329 * corruption of read/write buffer on status requests. */ 307 * corruption of read/write buffer on status requests. */
330 writew(1, host->regs + NFC_BUF_ADDR); 308 writew(1, host->regs + NFC_BUF_ADDR);
331 309
332 /* Read status into main buffer */
333 tmp = readw(host->regs + NFC_CONFIG1);
334 tmp &= ~NFC_SP_EN;
335 writew(tmp, host->regs + NFC_CONFIG1);
336
337 writew(NFC_STATUS, host->regs + NFC_CONFIG2); 310 writew(NFC_STATUS, host->regs + NFC_CONFIG2);
338 311
339 /* Wait for operation to complete */ 312 /* Wait for operation to complete */
340 wait_op_done(host, TROP_US_DELAY, 0, true); 313 wait_op_done(host, true);
341 314
342 /* Status is placed in first word of main buffer */ 315 /* Status is placed in first word of main buffer */
343 /* get status, then recovery area 1 data */ 316 /* get status, then recovery area 1 data */
@@ -397,32 +370,14 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
397{ 370{
398 struct nand_chip *nand_chip = mtd->priv; 371 struct nand_chip *nand_chip = mtd->priv;
399 struct mxc_nand_host *host = nand_chip->priv; 372 struct mxc_nand_host *host = nand_chip->priv;
400 uint8_t ret = 0; 373 uint8_t ret;
401 uint16_t col, rd_word;
402 uint16_t __iomem *main_buf = host->regs + MAIN_AREA0;
403 uint16_t __iomem *spare_buf = host->regs + SPARE_AREA0;
404 374
405 /* Check for status request */ 375 /* Check for status request */
406 if (host->status_request) 376 if (host->status_request)
407 return get_dev_status(host) & 0xFF; 377 return get_dev_status(host) & 0xFF;
408 378
409 /* Get column for 16-bit access */ 379 ret = *(uint8_t *)(host->data_buf + host->buf_start);
410 col = host->col_addr >> 1; 380 host->buf_start++;
411
412 /* If we are accessing the spare region */
413 if (host->spare_only)
414 rd_word = readw(&spare_buf[col]);
415 else
416 rd_word = readw(&main_buf[col]);
417
418 /* Pick upper/lower byte of word from RAM buffer */
419 if (host->col_addr & 0x1)
420 ret = (rd_word >> 8) & 0xFF;
421 else
422 ret = rd_word & 0xFF;
423
424 /* Update saved column address */
425 host->col_addr++;
426 381
427 return ret; 382 return ret;
428} 383}
@@ -431,33 +386,10 @@ static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
431{ 386{
432 struct nand_chip *nand_chip = mtd->priv; 387 struct nand_chip *nand_chip = mtd->priv;
433 struct mxc_nand_host *host = nand_chip->priv; 388 struct mxc_nand_host *host = nand_chip->priv;
434 uint16_t col, rd_word, ret; 389 uint16_t ret;
435 uint16_t __iomem *p;
436
437 DEBUG(MTD_DEBUG_LEVEL3,
438 "mxc_nand_read_word(col = %d)\n", host->col_addr);
439
440 col = host->col_addr;
441 /* Adjust saved column address */
442 if (col < mtd->writesize && host->spare_only)
443 col += mtd->writesize;
444 390
445 if (col < mtd->writesize) 391 ret = *(uint16_t *)(host->data_buf + host->buf_start);
446 p = (host->regs + MAIN_AREA0) + (col >> 1); 392 host->buf_start += 2;
447 else
448 p = (host->regs + SPARE_AREA0) + ((col - mtd->writesize) >> 1);
449
450 if (col & 1) {
451 rd_word = readw(p);
452 ret = (rd_word >> 8) & 0xff;
453 rd_word = readw(&p[1]);
454 ret |= (rd_word << 8) & 0xff00;
455
456 } else
457 ret = readw(p);
458
459 /* Update saved column address */
460 host->col_addr = col + 2;
461 393
462 return ret; 394 return ret;
463} 395}
@@ -470,94 +402,14 @@ static void mxc_nand_write_buf(struct mtd_info *mtd,
470{ 402{
471 struct nand_chip *nand_chip = mtd->priv; 403 struct nand_chip *nand_chip = mtd->priv;
472 struct mxc_nand_host *host = nand_chip->priv; 404 struct mxc_nand_host *host = nand_chip->priv;
473 int n, col, i = 0; 405 u16 col = host->buf_start;
474 406 int n = mtd->oobsize + mtd->writesize - col;
475 DEBUG(MTD_DEBUG_LEVEL3,
476 "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
477 len);
478
479 col = host->col_addr;
480 407
481 /* Adjust saved column address */ 408 n = min(n, len);
482 if (col < mtd->writesize && host->spare_only)
483 col += mtd->writesize;
484 409
485 n = mtd->writesize + mtd->oobsize - col; 410 memcpy(host->data_buf + col, buf, n);
486 n = min(len, n);
487
488 DEBUG(MTD_DEBUG_LEVEL3,
489 "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
490
491 while (n) {
492 void __iomem *p;
493
494 if (col < mtd->writesize)
495 p = host->regs + MAIN_AREA0 + (col & ~3);
496 else
497 p = host->regs + SPARE_AREA0 -
498 mtd->writesize + (col & ~3);
499
500 DEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__,
501 __LINE__, p);
502
503 if (((col | (int)&buf[i]) & 3) || n < 16) {
504 uint32_t data = 0;
505
506 if (col & 3 || n < 4)
507 data = readl(p);
508
509 switch (col & 3) {
510 case 0:
511 if (n) {
512 data = (data & 0xffffff00) |
513 (buf[i++] << 0);
514 n--;
515 col++;
516 }
517 case 1:
518 if (n) {
519 data = (data & 0xffff00ff) |
520 (buf[i++] << 8);
521 n--;
522 col++;
523 }
524 case 2:
525 if (n) {
526 data = (data & 0xff00ffff) |
527 (buf[i++] << 16);
528 n--;
529 col++;
530 }
531 case 3:
532 if (n) {
533 data = (data & 0x00ffffff) |
534 (buf[i++] << 24);
535 n--;
536 col++;
537 }
538 }
539
540 writel(data, p);
541 } else {
542 int m = mtd->writesize - col;
543 411
544 if (col >= mtd->writesize) 412 host->buf_start += n;
545 m += mtd->oobsize;
546
547 m = min(n, m) & ~3;
548
549 DEBUG(MTD_DEBUG_LEVEL3,
550 "%s:%d: n = %d, m = %d, i = %d, col = %d\n",
551 __func__, __LINE__, n, m, i, col);
552
553 memcpy(p, &buf[i], m);
554 col += m;
555 i += m;
556 n -= m;
557 }
558 }
559 /* Update saved column address */
560 host->col_addr = col;
561} 413}
562 414
563/* Read the data buffer from the NAND Flash. To read the data from NAND 415/* Read the data buffer from the NAND Flash. To read the data from NAND
@@ -568,75 +420,14 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
568{ 420{
569 struct nand_chip *nand_chip = mtd->priv; 421 struct nand_chip *nand_chip = mtd->priv;
570 struct mxc_nand_host *host = nand_chip->priv; 422 struct mxc_nand_host *host = nand_chip->priv;
571 int n, col, i = 0; 423 u16 col = host->buf_start;
572 424 int n = mtd->oobsize + mtd->writesize - col;
573 DEBUG(MTD_DEBUG_LEVEL3,
574 "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len);
575
576 col = host->col_addr;
577 425
578 /* Adjust saved column address */ 426 n = min(n, len);
579 if (col < mtd->writesize && host->spare_only)
580 col += mtd->writesize;
581 427
582 n = mtd->writesize + mtd->oobsize - col; 428 memcpy(buf, host->data_buf + col, len);
583 n = min(len, n);
584
585 while (n) {
586 void __iomem *p;
587
588 if (col < mtd->writesize)
589 p = host->regs + MAIN_AREA0 + (col & ~3);
590 else
591 p = host->regs + SPARE_AREA0 -
592 mtd->writesize + (col & ~3);
593
594 if (((col | (int)&buf[i]) & 3) || n < 16) {
595 uint32_t data;
596
597 data = readl(p);
598 switch (col & 3) {
599 case 0:
600 if (n) {
601 buf[i++] = (uint8_t) (data);
602 n--;
603 col++;
604 }
605 case 1:
606 if (n) {
607 buf[i++] = (uint8_t) (data >> 8);
608 n--;
609 col++;
610 }
611 case 2:
612 if (n) {
613 buf[i++] = (uint8_t) (data >> 16);
614 n--;
615 col++;
616 }
617 case 3:
618 if (n) {
619 buf[i++] = (uint8_t) (data >> 24);
620 n--;
621 col++;
622 }
623 }
624 } else {
625 int m = mtd->writesize - col;
626
627 if (col >= mtd->writesize)
628 m += mtd->oobsize;
629
630 m = min(n, m) & ~3;
631 memcpy(&buf[i], p, m);
632 col += m;
633 i += m;
634 n -= m;
635 }
636 }
637 /* Update saved column address */
638 host->col_addr = col;
639 429
430 host->buf_start += len;
640} 431}
641 432
642/* Used by the upper layer to verify the data in NAND Flash 433/* Used by the upper layer to verify the data in NAND Flash
@@ -654,23 +445,6 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
654 struct nand_chip *nand_chip = mtd->priv; 445 struct nand_chip *nand_chip = mtd->priv;
655 struct mxc_nand_host *host = nand_chip->priv; 446 struct mxc_nand_host *host = nand_chip->priv;
656 447
657#ifdef CONFIG_MTD_NAND_MXC_FORCE_CE
658 if (chip > 0) {
659 DEBUG(MTD_DEBUG_LEVEL0,
660 "ERROR: Illegal chip select (chip = %d)\n", chip);
661 return;
662 }
663
664 if (chip == -1) {
665 writew(readw(host->regs + NFC_CONFIG1) & ~NFC_CE,
666 host->regs + NFC_CONFIG1);
667 return;
668 }
669
670 writew(readw(host->regs + NFC_CONFIG1) | NFC_CE,
671 host->regs + NFC_CONFIG1);
672#endif
673
674 switch (chip) { 448 switch (chip) {
675 case -1: 449 case -1:
676 /* Disable the NFC clock */ 450 /* Disable the NFC clock */
@@ -692,94 +466,40 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
692 } 466 }
693} 467}
694 468
695/* Used by the upper layer to write command to NAND Flash for 469/*
696 * different operations to be carried out on NAND Flash */ 470 * Function to transfer data to/from spare area.
697static void mxc_nand_command(struct mtd_info *mtd, unsigned command, 471 */
698 int column, int page_addr) 472static void copy_spare(struct mtd_info *mtd, bool bfrom)
699{ 473{
700 struct nand_chip *nand_chip = mtd->priv; 474 struct nand_chip *this = mtd->priv;
701 struct mxc_nand_host *host = nand_chip->priv; 475 struct mxc_nand_host *host = this->priv;
702 int useirq = true; 476 u16 i, j;
703 477 u16 n = mtd->writesize >> 9;
704 DEBUG(MTD_DEBUG_LEVEL3, 478 u8 *d = host->data_buf + mtd->writesize;
705 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", 479 u8 *s = host->spare0;
706 command, column, page_addr); 480 u16 t = host->spare_len;
707 481
708 /* Reset command state information */ 482 j = (mtd->oobsize / n >> 1) << 1;
709 host->status_request = false; 483
710 484 if (bfrom) {
711 /* Command pre-processing step */ 485 for (i = 0; i < n - 1; i++)
712 switch (command) { 486 memcpy(d + i * j, s + i * t, j);
713 487
714 case NAND_CMD_STATUS: 488 /* the last section */
715 host->col_addr = 0; 489 memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
716 host->status_request = true; 490 } else {
717 break; 491 for (i = 0; i < n - 1; i++)
718 492 memcpy(&s[i * t], &d[i * j], j);
719 case NAND_CMD_READ0:
720 host->col_addr = column;
721 host->spare_only = false;
722 useirq = false;
723 break;
724
725 case NAND_CMD_READOOB:
726 host->col_addr = column;
727 host->spare_only = true;
728 useirq = false;
729 if (host->pagesize_2k)
730 command = NAND_CMD_READ0; /* only READ0 is valid */
731 break;
732
733 case NAND_CMD_SEQIN:
734 if (column >= mtd->writesize) {
735 /*
736 * FIXME: before send SEQIN command for write OOB,
737 * We must read one page out.
738 * For K9F1GXX has no READ1 command to set current HW
739 * pointer to spare area, we must write the whole page
740 * including OOB together.
741 */
742 if (host->pagesize_2k)
743 /* call ourself to read a page */
744 mxc_nand_command(mtd, NAND_CMD_READ0, 0,
745 page_addr);
746
747 host->col_addr = column - mtd->writesize;
748 host->spare_only = true;
749
750 /* Set program pointer to spare region */
751 if (!host->pagesize_2k)
752 send_cmd(host, NAND_CMD_READOOB, false);
753 } else {
754 host->spare_only = false;
755 host->col_addr = column;
756
757 /* Set program pointer to page start */
758 if (!host->pagesize_2k)
759 send_cmd(host, NAND_CMD_READ0, false);
760 }
761 useirq = false;
762 break;
763
764 case NAND_CMD_PAGEPROG:
765 send_prog_page(host, 0, host->spare_only);
766
767 if (host->pagesize_2k) {
768 /* data in 4 areas datas */
769 send_prog_page(host, 1, host->spare_only);
770 send_prog_page(host, 2, host->spare_only);
771 send_prog_page(host, 3, host->spare_only);
772 }
773
774 break;
775 493
776 case NAND_CMD_ERASE1: 494 /* the last section */
777 useirq = false; 495 memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
778 break;
779 } 496 }
497}
780 498
781 /* Write out the command to the device. */ 499static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
782 send_cmd(host, command, useirq); 500{
501 struct nand_chip *nand_chip = mtd->priv;
502 struct mxc_nand_host *host = nand_chip->priv;
783 503
784 /* Write out column address, if necessary */ 504 /* Write out column address, if necessary */
785 if (column != -1) { 505 if (column != -1) {
@@ -791,7 +511,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
791 * the full page. 511 * the full page.
792 */ 512 */
793 send_addr(host, 0, page_addr == -1); 513 send_addr(host, 0, page_addr == -1);
794 if (host->pagesize_2k) 514 if (mtd->writesize > 512)
795 /* another col addr cycle for 2k page */ 515 /* another col addr cycle for 2k page */
796 send_addr(host, 0, false); 516 send_addr(host, 0, false);
797 } 517 }
@@ -801,7 +521,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
801 /* paddr_0 - p_addr_7 */ 521 /* paddr_0 - p_addr_7 */
802 send_addr(host, (page_addr & 0xff), false); 522 send_addr(host, (page_addr & 0xff), false);
803 523
804 if (host->pagesize_2k) { 524 if (mtd->writesize > 512) {
805 if (mtd->size >= 0x10000000) { 525 if (mtd->size >= 0x10000000) {
806 /* paddr_8 - paddr_15 */ 526 /* paddr_8 - paddr_15 */
807 send_addr(host, (page_addr >> 8) & 0xff, false); 527 send_addr(host, (page_addr >> 8) & 0xff, false);
@@ -820,52 +540,136 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
820 send_addr(host, (page_addr >> 8) & 0xff, true); 540 send_addr(host, (page_addr >> 8) & 0xff, true);
821 } 541 }
822 } 542 }
543}
544
545/* Used by the upper layer to write command to NAND Flash for
546 * different operations to be carried out on NAND Flash */
547static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
548 int column, int page_addr)
549{
550 struct nand_chip *nand_chip = mtd->priv;
551 struct mxc_nand_host *host = nand_chip->priv;
552
553 DEBUG(MTD_DEBUG_LEVEL3,
554 "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
555 command, column, page_addr);
556
557 /* Reset command state information */
558 host->status_request = false;
823 559
824 /* Command post-processing step */ 560 /* Command pre-processing step */
825 switch (command) { 561 switch (command) {
826 562
827 case NAND_CMD_RESET: 563 case NAND_CMD_STATUS:
564 host->buf_start = 0;
565 host->status_request = true;
566
567 send_cmd(host, command, true);
568 mxc_do_addr_cycle(mtd, column, page_addr);
828 break; 569 break;
829 570
830 case NAND_CMD_READOOB:
831 case NAND_CMD_READ0: 571 case NAND_CMD_READ0:
832 if (host->pagesize_2k) { 572 case NAND_CMD_READOOB:
833 /* send read confirm command */ 573 if (command == NAND_CMD_READ0)
574 host->buf_start = column;
575 else
576 host->buf_start = column + mtd->writesize;
577
578 if (mtd->writesize > 512)
579 command = NAND_CMD_READ0; /* only READ0 is valid */
580
581 send_cmd(host, command, false);
582 mxc_do_addr_cycle(mtd, column, page_addr);
583
584 if (mtd->writesize > 512)
834 send_cmd(host, NAND_CMD_READSTART, true); 585 send_cmd(host, NAND_CMD_READSTART, true);
835 /* read for each AREA */ 586
836 send_read_page(host, 0, host->spare_only); 587 send_page(mtd, NFC_OUTPUT);
837 send_read_page(host, 1, host->spare_only); 588
838 send_read_page(host, 2, host->spare_only); 589 memcpy(host->data_buf, host->main_area0, mtd->writesize);
839 send_read_page(host, 3, host->spare_only); 590 copy_spare(mtd, true);
840 } else
841 send_read_page(host, 0, host->spare_only);
842 break; 591 break;
843 592
844 case NAND_CMD_READID: 593 case NAND_CMD_SEQIN:
845 host->col_addr = 0; 594 if (column >= mtd->writesize) {
846 send_read_id(host); 595 /*
 596 * FIXME: before sending the SEQIN command for an OOB write,
 597 * we must read one page out first.
 598 * Since the K9F1GXX has no READ1 command to set the current HW
 599 * pointer to the spare area, we must write the whole page
 600 * including OOB together.
601 */
602 if (mtd->writesize > 512)
603 /* call ourself to read a page */
604 mxc_nand_command(mtd, NAND_CMD_READ0, 0,
605 page_addr);
606
607 host->buf_start = column;
608
609 /* Set program pointer to spare region */
610 if (mtd->writesize == 512)
611 send_cmd(host, NAND_CMD_READOOB, false);
612 } else {
613 host->buf_start = column;
614
615 /* Set program pointer to page start */
616 if (mtd->writesize == 512)
617 send_cmd(host, NAND_CMD_READ0, false);
618 }
619
620 send_cmd(host, command, false);
621 mxc_do_addr_cycle(mtd, column, page_addr);
847 break; 622 break;
848 623
849 case NAND_CMD_PAGEPROG: 624 case NAND_CMD_PAGEPROG:
625 memcpy(host->main_area0, host->data_buf, mtd->writesize);
626 copy_spare(mtd, false);
627 send_page(mtd, NFC_INPUT);
628 send_cmd(host, command, true);
629 mxc_do_addr_cycle(mtd, column, page_addr);
850 break; 630 break;
851 631
852 case NAND_CMD_STATUS: 632 case NAND_CMD_READID:
633 send_cmd(host, command, true);
634 mxc_do_addr_cycle(mtd, column, page_addr);
635 send_read_id(host);
636 host->buf_start = column;
853 break; 637 break;
854 638
639 case NAND_CMD_ERASE1:
855 case NAND_CMD_ERASE2: 640 case NAND_CMD_ERASE2:
641 send_cmd(host, command, false);
642 mxc_do_addr_cycle(mtd, column, page_addr);
643
856 break; 644 break;
857 } 645 }
858} 646}
859 647
860/* Define some generic bad / good block scan pattern which are used 648/*
861 * while scanning a device for factory marked good / bad blocks. */ 649 * The generic flash bbt descriptors overlap with our ecc
862static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 650 * hardware, so define some i.MX specific ones.
651 */
652static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
653static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
654
655static struct nand_bbt_descr bbt_main_descr = {
656 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
657 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
658 .offs = 0,
659 .len = 4,
660 .veroffs = 4,
661 .maxblocks = 4,
662 .pattern = bbt_pattern,
663};
863 664
864static struct nand_bbt_descr smallpage_memorybased = { 665static struct nand_bbt_descr bbt_mirror_descr = {
865 .options = NAND_BBT_SCAN2NDPAGE, 666 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
866 .offs = 5, 667 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
867 .len = 1, 668 .offs = 0,
868 .pattern = scan_ff_pattern 669 .len = 4,
670 .veroffs = 4,
671 .maxblocks = 4,
672 .pattern = mirror_pattern,
869}; 673};
870 674
871static int __init mxcnd_probe(struct platform_device *pdev) 675static int __init mxcnd_probe(struct platform_device *pdev)
@@ -877,12 +681,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
877 struct resource *res; 681 struct resource *res;
878 uint16_t tmp; 682 uint16_t tmp;
879 int err = 0, nr_parts = 0; 683 int err = 0, nr_parts = 0;
684 struct nand_ecclayout *oob_smallpage, *oob_largepage;
880 685
881 /* Allocate memory for MTD device structure and private data */ 686 /* Allocate memory for MTD device structure and private data */
882 host = kzalloc(sizeof(struct mxc_nand_host), GFP_KERNEL); 687 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
688 NAND_MAX_OOBSIZE, GFP_KERNEL);
883 if (!host) 689 if (!host)
884 return -ENOMEM; 690 return -ENOMEM;
885 691
692 host->data_buf = (uint8_t *)(host + 1);
693
886 host->dev = &pdev->dev; 694 host->dev = &pdev->dev;
887 /* structures must be linked */ 695 /* structures must be linked */
888 this = &host->nand; 696 this = &host->nand;
@@ -890,7 +698,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
890 mtd->priv = this; 698 mtd->priv = this;
891 mtd->owner = THIS_MODULE; 699 mtd->owner = THIS_MODULE;
892 mtd->dev.parent = &pdev->dev; 700 mtd->dev.parent = &pdev->dev;
893 mtd->name = "mxc_nand"; 701 mtd->name = DRIVER_NAME;
894 702
895 /* 50 us command delay time */ 703 /* 50 us command delay time */
896 this->chip_delay = 5; 704 this->chip_delay = 5;
@@ -920,62 +728,93 @@ static int __init mxcnd_probe(struct platform_device *pdev)
920 goto eres; 728 goto eres;
921 } 729 }
922 730
923 host->regs = ioremap(res->start, res->end - res->start + 1); 731 host->base = ioremap(res->start, resource_size(res));
924 if (!host->regs) { 732 if (!host->base) {
925 err = -ENOMEM; 733 err = -ENOMEM;
926 goto eres; 734 goto eres;
927 } 735 }
928 736
737 host->main_area0 = host->base;
738 host->main_area1 = host->base + 0x200;
739
740 if (nfc_is_v21()) {
741 host->regs = host->base + 0x1000;
742 host->spare0 = host->base + 0x1000;
743 host->spare_len = 64;
744 oob_smallpage = &nandv2_hw_eccoob_smallpage;
745 oob_largepage = &nandv2_hw_eccoob_largepage;
746 } else if (nfc_is_v1()) {
747 host->regs = host->base;
748 host->spare0 = host->base + 0x800;
749 host->spare_len = 16;
750 oob_smallpage = &nandv1_hw_eccoob_smallpage;
751 oob_largepage = &nandv1_hw_eccoob_largepage;
752 } else
753 BUG();
754
755 /* disable interrupt and spare enable */
929 tmp = readw(host->regs + NFC_CONFIG1); 756 tmp = readw(host->regs + NFC_CONFIG1);
930 tmp |= NFC_INT_MSK; 757 tmp |= NFC_INT_MSK;
758 tmp &= ~NFC_SP_EN;
931 writew(tmp, host->regs + NFC_CONFIG1); 759 writew(tmp, host->regs + NFC_CONFIG1);
932 760
933 init_waitqueue_head(&host->irq_waitq); 761 init_waitqueue_head(&host->irq_waitq);
934 762
935 host->irq = platform_get_irq(pdev, 0); 763 host->irq = platform_get_irq(pdev, 0);
936 764
937 err = request_irq(host->irq, mxc_nfc_irq, 0, "mxc_nd", host); 765 err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
938 if (err) 766 if (err)
939 goto eirq; 767 goto eirq;
940 768
769 /* Reset NAND */
770 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
771
772 /* preset operation */
773 /* Unlock the internal RAM Buffer */
774 writew(0x2, host->regs + NFC_CONFIG);
775
776 /* Blocks to be unlocked */
777 if (nfc_is_v21()) {
778 writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
779 writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
780 this->ecc.bytes = 9;
781 } else if (nfc_is_v1()) {
782 writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
783 writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
784 this->ecc.bytes = 3;
785 } else
786 BUG();
787
788 /* Unlock Block Command for given address range */
789 writew(0x4, host->regs + NFC_WRPROT);
790
791 this->ecc.size = 512;
792 this->ecc.layout = oob_smallpage;
793
941 if (pdata->hw_ecc) { 794 if (pdata->hw_ecc) {
942 this->ecc.calculate = mxc_nand_calculate_ecc; 795 this->ecc.calculate = mxc_nand_calculate_ecc;
943 this->ecc.hwctl = mxc_nand_enable_hwecc; 796 this->ecc.hwctl = mxc_nand_enable_hwecc;
944 this->ecc.correct = mxc_nand_correct_data; 797 this->ecc.correct = mxc_nand_correct_data;
945 this->ecc.mode = NAND_ECC_HW; 798 this->ecc.mode = NAND_ECC_HW;
946 this->ecc.size = 512;
947 this->ecc.bytes = 3;
948 tmp = readw(host->regs + NFC_CONFIG1); 799 tmp = readw(host->regs + NFC_CONFIG1);
949 tmp |= NFC_ECC_EN; 800 tmp |= NFC_ECC_EN;
950 writew(tmp, host->regs + NFC_CONFIG1); 801 writew(tmp, host->regs + NFC_CONFIG1);
951 } else { 802 } else {
952 this->ecc.size = 512;
953 this->ecc.bytes = 3;
954 this->ecc.layout = &nand_hw_eccoob_8;
955 this->ecc.mode = NAND_ECC_SOFT; 803 this->ecc.mode = NAND_ECC_SOFT;
956 tmp = readw(host->regs + NFC_CONFIG1); 804 tmp = readw(host->regs + NFC_CONFIG1);
957 tmp &= ~NFC_ECC_EN; 805 tmp &= ~NFC_ECC_EN;
958 writew(tmp, host->regs + NFC_CONFIG1); 806 writew(tmp, host->regs + NFC_CONFIG1);
959 } 807 }
960 808
961 /* Reset NAND */
962 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
963
964 /* preset operation */
965 /* Unlock the internal RAM Buffer */
966 writew(0x2, host->regs + NFC_CONFIG);
967
968 /* Blocks to be unlocked */
969 writew(0x0, host->regs + NFC_UNLOCKSTART_BLKADDR);
970 writew(0x4000, host->regs + NFC_UNLOCKEND_BLKADDR);
971
972 /* Unlock Block Command for given address range */
973 writew(0x4, host->regs + NFC_WRPROT);
974
975 /* NAND bus width determines access funtions used by upper layer */ 809 /* NAND bus width determines access funtions used by upper layer */
976 if (pdata->width == 2) { 810 if (pdata->width == 2)
977 this->options |= NAND_BUSWIDTH_16; 811 this->options |= NAND_BUSWIDTH_16;
978 this->ecc.layout = &nand_hw_eccoob_16; 812
813 if (pdata->flash_bbt) {
814 this->bbt_td = &bbt_main_descr;
815 this->bbt_md = &bbt_mirror_descr;
816 /* update flash based bbt */
817 this->options |= NAND_USE_FLASH_BBT;
979 } 818 }
980 819
981 /* first scan to find the device and get the page size */ 820 /* first scan to find the device and get the page size */
@@ -984,38 +823,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
984 goto escan; 823 goto escan;
985 } 824 }
986 825
987 if (mtd->writesize == 2048) { 826 if (mtd->writesize == 2048)
988 host->pagesize_2k = 1; 827 this->ecc.layout = oob_largepage;
989 this->badblock_pattern = &smallpage_memorybased;
990 }
991
992 if (this->ecc.mode == NAND_ECC_HW) {
993 switch (mtd->oobsize) {
994 case 8:
995 this->ecc.layout = &nand_hw_eccoob_8;
996 break;
997 case 16:
998 this->ecc.layout = &nand_hw_eccoob_16;
999 break;
1000 case 64:
1001 this->ecc.layout = &nand_hw_eccoob_64;
1002 break;
1003 default:
1004 /* page size not handled by HW ECC */
1005 /* switching back to soft ECC */
1006 this->ecc.size = 512;
1007 this->ecc.bytes = 3;
1008 this->ecc.layout = &nand_hw_eccoob_8;
1009 this->ecc.mode = NAND_ECC_SOFT;
1010 this->ecc.calculate = NULL;
1011 this->ecc.correct = NULL;
1012 this->ecc.hwctl = NULL;
1013 tmp = readw(host->regs + NFC_CONFIG1);
1014 tmp &= ~NFC_ECC_EN;
1015 writew(tmp, host->regs + NFC_CONFIG1);
1016 break;
1017 }
1018 }
1019 828
1020 /* second phase scan */ 829 /* second phase scan */
1021 if (nand_scan_tail(mtd)) { 830 if (nand_scan_tail(mtd)) {
@@ -1043,7 +852,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1043escan: 852escan:
1044 free_irq(host->irq, host); 853 free_irq(host->irq, host);
1045eirq: 854eirq:
1046 iounmap(host->regs); 855 iounmap(host->base);
1047eres: 856eres:
1048 clk_put(host->clk); 857 clk_put(host->clk);
1049eclk: 858eclk:
@@ -1062,7 +871,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1062 871
1063 nand_release(&host->mtd); 872 nand_release(&host->mtd);
1064 free_irq(host->irq, host); 873 free_irq(host->irq, host);
1065 iounmap(host->regs); 874 iounmap(host->base);
1066 kfree(host); 875 kfree(host);
1067 876
1068 return 0; 877 return 0;
@@ -1113,7 +922,7 @@ static struct platform_driver mxcnd_driver = {
1113 .driver = { 922 .driver = {
1114 .name = DRIVER_NAME, 923 .name = DRIVER_NAME,
1115 }, 924 },
1116 .remove = __exit_p(mxcnd_remove), 925 .remove = __devexit_p(mxcnd_remove),
1117 .suspend = mxcnd_suspend, 926 .suspend = mxcnd_suspend,
1118 .resume = mxcnd_resume, 927 .resume = mxcnd_resume,
1119}; 928};
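
The rewritten mxcnd_probe() above reads three platform data fields: width, hw_ecc and the new flash_bbt flag that selects the flash-based bad block table via bbt_main_descr and bbt_mirror_descr. A minimal sketch of board-side platform data, assuming the structure is the mxc_nand_platform_data declared in mach/mxc_nand.h (the variable name and values are illustrative; only the field names come from the probe code above):

#include <mach/mxc_nand.h>

static struct mxc_nand_platform_data example_mxc_nand_pdata = {
	.width		= 1,	/* 8-bit bus; 2 would set NAND_BUSWIDTH_16 */
	.hw_ecc		= 1,	/* take the NFC hardware ECC path */
	.flash_bbt	= 1,	/* keep the bad block table in flash */
};
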
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 2957cc70da3d..8f2958fe2148 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -428,6 +428,28 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
428 return nand_isbad_bbt(mtd, ofs, allowbbt); 428 return nand_isbad_bbt(mtd, ofs, allowbbt);
429} 429}
430 430
431/**
432 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
433 * @mtd: MTD device structure
434 * @timeo: Timeout
435 *
436 * Helper function for nand_wait_ready used when needing to wait in interrupt
437 * context.
438 */
439static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
440{
441 struct nand_chip *chip = mtd->priv;
442 int i;
443
444 /* Wait for the device to get ready */
445 for (i = 0; i < timeo; i++) {
446 if (chip->dev_ready(mtd))
447 break;
448 touch_softlockup_watchdog();
449 mdelay(1);
450 }
451}
452
431/* 453/*
432 * Wait for the ready pin, after a command 454 * Wait for the ready pin, after a command
433 * The timeout is catched later. 455 * The timeout is catched later.
@@ -437,6 +459,10 @@ void nand_wait_ready(struct mtd_info *mtd)
437 struct nand_chip *chip = mtd->priv; 459 struct nand_chip *chip = mtd->priv;
438 unsigned long timeo = jiffies + 2; 460 unsigned long timeo = jiffies + 2;
439 461
462 /* 400ms timeout */
463 if (in_interrupt() || oops_in_progress)
464 return panic_nand_wait_ready(mtd, 400);
465
440 led_trigger_event(nand_led_trigger, LED_FULL); 466 led_trigger_event(nand_led_trigger, LED_FULL);
441 /* wait until command is processed or timeout occures */ 467 /* wait until command is processed or timeout occures */
442 do { 468 do {
@@ -672,6 +698,22 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
672} 698}
673 699
674/** 700/**
701 * panic_nand_get_device - [GENERIC] Get chip for selected access
702 * @chip: the nand chip descriptor
703 * @mtd: MTD device structure
704 * @new_state: the state which is requested
705 *
706 * Used when in panic, no locks are taken.
707 */
708static void panic_nand_get_device(struct nand_chip *chip,
709 struct mtd_info *mtd, int new_state)
710{
 711 /* Hardware controller shared among independent devices */
712 chip->controller->active = chip;
713 chip->state = new_state;
714}
715
716/**
675 * nand_get_device - [GENERIC] Get chip for selected access 717 * nand_get_device - [GENERIC] Get chip for selected access
676 * @chip: the nand chip descriptor 718 * @chip: the nand chip descriptor
677 * @mtd: MTD device structure 719 * @mtd: MTD device structure
@@ -698,8 +740,14 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
698 return 0; 740 return 0;
699 } 741 }
700 if (new_state == FL_PM_SUSPENDED) { 742 if (new_state == FL_PM_SUSPENDED) {
701 spin_unlock(lock); 743 if (chip->controller->active->state == FL_PM_SUSPENDED) {
702 return (chip->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN; 744 chip->state = FL_PM_SUSPENDED;
745 spin_unlock(lock);
746 return 0;
747 } else {
748 spin_unlock(lock);
749 return -EAGAIN;
750 }
703 } 751 }
704 set_current_state(TASK_UNINTERRUPTIBLE); 752 set_current_state(TASK_UNINTERRUPTIBLE);
705 add_wait_queue(wq, &wait); 753 add_wait_queue(wq, &wait);
@@ -710,6 +758,32 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
710} 758}
711 759
712/** 760/**
761 * panic_nand_wait - [GENERIC] wait until the command is done
762 * @mtd: MTD device structure
763 * @chip: NAND chip structure
764 * @timeo: Timeout
765 *
766 * Wait for command done. This is a helper function for nand_wait used when
767 * we are in interrupt context. May happen when in panic and trying to write
 768 * an oops through mtdoops.
769 */
770static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
771 unsigned long timeo)
772{
773 int i;
774 for (i = 0; i < timeo; i++) {
775 if (chip->dev_ready) {
776 if (chip->dev_ready(mtd))
777 break;
778 } else {
779 if (chip->read_byte(mtd) & NAND_STATUS_READY)
780 break;
781 }
782 mdelay(1);
783 }
784}
785
786/**
713 * nand_wait - [DEFAULT] wait until the command is done 787 * nand_wait - [DEFAULT] wait until the command is done
714 * @mtd: MTD device structure 788 * @mtd: MTD device structure
715 * @chip: NAND chip structure 789 * @chip: NAND chip structure
@@ -740,15 +814,19 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
740 else 814 else
741 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 815 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
742 816
743 while (time_before(jiffies, timeo)) { 817 if (in_interrupt() || oops_in_progress)
744 if (chip->dev_ready) { 818 panic_nand_wait(mtd, chip, timeo);
745 if (chip->dev_ready(mtd)) 819 else {
746 break; 820 while (time_before(jiffies, timeo)) {
747 } else { 821 if (chip->dev_ready) {
748 if (chip->read_byte(mtd) & NAND_STATUS_READY) 822 if (chip->dev_ready(mtd))
749 break; 823 break;
824 } else {
825 if (chip->read_byte(mtd) & NAND_STATUS_READY)
826 break;
827 }
828 cond_resched();
750 } 829 }
751 cond_resched();
752 } 830 }
753 led_trigger_event(nand_led_trigger, LED_OFF); 831 led_trigger_event(nand_led_trigger, LED_OFF);
754 832
@@ -1949,6 +2027,45 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1949} 2027}
1950 2028
1951/** 2029/**
2030 * panic_nand_write - [MTD Interface] NAND write with ECC
2031 * @mtd: MTD device structure
2032 * @to: offset to write to
2033 * @len: number of bytes to write
2034 * @retlen: pointer to variable to store the number of written bytes
2035 * @buf: the data to write
2036 *
2037 * NAND write with ECC. Used when performing writes in interrupt context, this
2038 * may for example be called by mtdoops when writing an oops while in panic.
2039 */
2040static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2041 size_t *retlen, const uint8_t *buf)
2042{
2043 struct nand_chip *chip = mtd->priv;
2044 int ret;
2045
 2046 /* Do not allow writes past end of device */
2047 if ((to + len) > mtd->size)
2048 return -EINVAL;
2049 if (!len)
2050 return 0;
2051
2052 /* Wait for the device to get ready. */
2053 panic_nand_wait(mtd, chip, 400);
2054
2055 /* Grab the device. */
2056 panic_nand_get_device(chip, mtd, FL_WRITING);
2057
2058 chip->ops.len = len;
2059 chip->ops.datbuf = (uint8_t *)buf;
2060 chip->ops.oobbuf = NULL;
2061
2062 ret = nand_do_write_ops(mtd, to, &chip->ops);
2063
2064 *retlen = chip->ops.retlen;
2065 return ret;
2066}
2067
2068/**
1952 * nand_write - [MTD Interface] NAND write with ECC 2069 * nand_write - [MTD Interface] NAND write with ECC
1953 * @mtd: MTD device structure 2070 * @mtd: MTD device structure
1954 * @to: offset to write to 2071 * @to: offset to write to
@@ -2645,7 +2762,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
2645 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); 2762 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
2646 2763
2647 if (IS_ERR(type)) { 2764 if (IS_ERR(type)) {
2648 printk(KERN_WARNING "No NAND device found!!!\n"); 2765 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
2766 printk(KERN_WARNING "No NAND device found.\n");
2649 chip->select_chip(mtd, -1); 2767 chip->select_chip(mtd, -1);
2650 return PTR_ERR(type); 2768 return PTR_ERR(type);
2651 } 2769 }
@@ -2877,6 +2995,7 @@ int nand_scan_tail(struct mtd_info *mtd)
2877 mtd->unpoint = NULL; 2995 mtd->unpoint = NULL;
2878 mtd->read = nand_read; 2996 mtd->read = nand_read;
2879 mtd->write = nand_write; 2997 mtd->write = nand_write;
2998 mtd->panic_write = panic_nand_write;
2880 mtd->read_oob = nand_read_oob; 2999 mtd->read_oob = nand_read_oob;
2881 mtd->write_oob = nand_write_oob; 3000 mtd->write_oob = nand_write_oob;
2882 mtd->sync = nand_sync; 3001 mtd->sync = nand_sync;
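
Since nand_scan_tail() now installs panic_nand_write as mtd->panic_write, a panic-context client such as mtdoops can write through the standard MTD interface without sleeping or taking locks. A minimal sketch of such a caller, assuming only the hook signature shown above (the function name and message are illustrative):

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

/* Illustrative panic-path writer; only mtd->panic_write comes from the patch */
static void example_panic_dump(struct mtd_info *mtd, loff_t to,
			       const u_char *buf, size_t len)
{
	size_t retlen = 0;

	if (!mtd->panic_write)
		return;	/* no interrupt-safe write path for this device */

	/* panic_nand_write() polls dev_ready instead of sleeping or locking */
	if (mtd->panic_write(mtd, to, len, &retlen, buf) || retlen != len)
		printk(KERN_ERR "panic write failed at 0x%llx\n",
		       (unsigned long long)to);
}
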
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
new file mode 100644
index 000000000000..46a6bc9c4b74
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.c
@@ -0,0 +1,149 @@
1/*****************************************************************************
2* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14
15/* ---- Include Files ---------------------------------------------------- */
16#include <mach/reg_umi.h>
17#include "nand_bcm_umi.h"
18#ifdef BOOT0_BUILD
19#include <uart.h>
20#endif
21
22/* ---- External Variable Declarations ----------------------------------- */
23/* ---- External Function Prototypes ------------------------------------- */
24/* ---- Public Variables ------------------------------------------------- */
25/* ---- Private Constants and Types -------------------------------------- */
26/* ---- Private Function Prototypes -------------------------------------- */
27/* ---- Private Variables ------------------------------------------------ */
28/* ---- Private Functions ------------------------------------------------ */
29
30#if NAND_ECC_BCH
31/****************************************************************************
 32* nand_bcm_umi_bch_ecc_flip_bit - Routine to flip an errored bit
33*
34* PURPOSE:
35* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
36* errored bit specified
37*
38* PARAMETERS:
39* datap - Container that holds the 512 byte data
40* errorLocation - Location of the bit that needs to be flipped
41*
42* RETURNS:
43* None
44****************************************************************************/
45static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
46{
47 int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
48 int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
49 int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
50
51 uint8_t errorByte = 0;
52 uint8_t byteMask = 1 << locWithinAByte;
53
54 /* BCH uses big endian, need to change the location
55 * bits to little endian */
56 locWithinAWord = 3 - locWithinAWord;
57
58 errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
59
60#ifdef BOOT0_BUILD
61 puthexs("\nECC Correct Offset: ",
62 locWithinAPage * sizeof(uint32_t) + locWithinAWord);
63 puthexs(" errorByte:", errorByte);
64 puthex8(" Bit: ", locWithinAByte);
65#endif
66
67 if (errorByte & byteMask) {
68 /* bit needs to be cleared */
69 errorByte &= ~byteMask;
70 } else {
71 /* bit needs to be set */
72 errorByte |= byteMask;
73 }
74
75 /* write back the value with the fixed bit */
76 datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
77}
78
79/****************************************************************************
 80* nand_bcm_umi_bch_correct_page - Routine to correct bit errors when reading NAND
81*
82* PURPOSE:
83* This routine reads the BCH registers to determine if there are any bit
84* errors during the read of the last 512 bytes of data + ECC bytes. If
 85* errors exist, the routine fixes them.
86*
87* PARAMETERS:
88* datap - Container that holds the 512 byte data
89*
90* RETURNS:
91* 0 or greater = Number of errors corrected
92* (No errors are found or errors have been fixed)
93* -1 = Error(s) cannot be fixed
94****************************************************************************/
95int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
96 int numEccBytes)
97{
98 int numErrors;
99 int errorLocation;
100 int idx;
101 uint32_t regValue;
102
103 /* wait for read ECC to be valid */
104 regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
105
106 /*
107 * read the control status register to determine if there
 108 * are errored bits
 109 * see if errors are correctable
110 */
111 if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
112 int i;
113
114 for (i = 0; i < numEccBytes; i++) {
115 if (readEccData[i] != 0xff) {
116 /* errors cannot be fixed, return -1 */
117 return -1;
118 }
119 }
120 /* If ECC is unprogrammed then we can't correct,
121 * assume everything OK */
122 return 0;
123 }
124
125 if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
126 /* no errors */
127 return 0;
128 }
129
130 /*
131 * Fix errored bits by doing the following:
132 * 1. Read the number of errors in the control and status register
 133 * 2. Read the error location registers that correspond to the number
134 * of errors reported
135 * 3. Invert the bit in the data
136 */
137 numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
138
139 for (idx = 0; idx < numErrors; idx++) {
140 errorLocation =
141 REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
142
143 /* Flip bit */
144 nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
145 }
146 /* Errors corrected */
147 return numErrors;
148}
149#endif
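
The shifts in nand_bcm_umi_bch_ecc_flip_bit() imply that each reported error location packs the bit-within-byte in bits [2:0], the byte-within-word in bits [4:3] and the 32-bit word index above that, with the byte index then mirrored because the BCH engine reports big-endian byte order. A standalone worked example under that assumption (the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t errorLocation = 0x2a;            /* 0b101010, example only */
	int bit  = (errorLocation >> 0) & 0x7;    /* bit within byte  -> 2 */
	int word = (errorLocation >> 3) & 0x3;    /* byte within word -> 1 */
	int page = errorLocation >> 5;            /* word within page -> 1 */

	word = 3 - word;                          /* big- to little-endian byte index */

	/* prints: flip bit 2 of data byte 6 */
	printf("flip bit %d of data byte %d\n", bit, page * 4 + word);
	return 0;
}
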
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
new file mode 100644
index 000000000000..7cec2cd97854
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -0,0 +1,358 @@
1/*****************************************************************************
2* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
3*
4* Unless you and Broadcom execute a separate written software license
5* agreement governing use of this software, this software is licensed to you
6* under the terms of the GNU General Public License version 2, available at
7* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
8*
9* Notwithstanding the above, under no circumstances may you combine this
10* software in any way with any other Broadcom software provided under a
11* license other than the GPL, without Broadcom's express prior written
12* consent.
13*****************************************************************************/
14#ifndef NAND_BCM_UMI_H
15#define NAND_BCM_UMI_H
16
17/* ---- Include Files ---------------------------------------------------- */
18#include <mach/reg_umi.h>
19#include <mach/reg_nand.h>
20#include <cfg_global.h>
21
22/* ---- Constants and Types ---------------------------------------------- */
23#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
24#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
25#else
26#define NAND_ECC_BCH 0
27#endif
28
29#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
30
31#if NAND_ECC_BCH
32#ifdef BOOT0_BUILD
33#define NAND_ECC_NUM_BYTES 13
34#else
35#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
36#endif
37#else
38#define NAND_ECC_NUM_BYTES 3
39#endif
40
41#define NAND_DATA_ACCESS_SIZE 512
42
43/* ---- Variable Externs ------------------------------------------ */
44/* ---- Function Prototypes --------------------------------------- */
45int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
46 int numEccBytes);
47
 48/* Check if device is ready */
49static inline int nand_bcm_umi_dev_ready(void)
50{
51 return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
52}
53
54/* Wait until device is ready */
55static inline void nand_bcm_umi_wait_till_ready(void)
56{
57 while (nand_bcm_umi_dev_ready() == 0)
58 ;
59}
60
61/* Enable Hamming ECC */
62static inline void nand_bcm_umi_hamming_enable_hwecc(void)
63{
64 /* disable and reset ECC, 512 byte page */
65 REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
66 REG_UMI_NAND_ECC_CSR_256BYTE);
67 /* enable ECC */
68 REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
69}
70
71#if NAND_ECC_BCH
72/* BCH ECC specifics */
73#define ECC_BITS_PER_CORRECTABLE_BIT 13
74
75/* Enable BCH Read ECC */
76static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
77{
78 /* disable and reset ECC */
79 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
80 /* Turn on ECC */
81 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
82}
83
84/* Enable BCH Write ECC */
85static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
86{
87 /* disable and reset ECC */
88 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
89 /* Turn on ECC */
90 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
91}
92
93/* Config number of BCH ECC bytes */
94static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
95{
96 uint32_t nValue;
97 uint32_t tValue;
98 uint32_t kValue;
99 uint32_t numBits = numEccBytes * 8;
100
101 /* disable and reset ECC */
102 REG_UMI_BCH_CTRL_STATUS =
103 REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
104 REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
105
 106 /* Every correctable bit requires 13 ECC bits */
107 tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
108
109 /* Total data in number of bits for generating and computing BCH ECC */
110 nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
111
112 /* K parameter is used internally. K = N - (T * 13) */
113 kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
114
115 /* Write the settings */
116 REG_UMI_BCH_N = nValue;
117 REG_UMI_BCH_T = tValue;
118 REG_UMI_BCH_K = kValue;
119}
120
121/* Pause during ECC read calculation to skip bytes in OOB */
122static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
123{
124 REG_UMI_BCH_CTRL_STATUS =
125 REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
126 REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
127}
128
129/* Resume during ECC read calculation after skipping bytes in OOB */
130static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
131{
132 REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
133}
134
135/* Poll read ECC calc to check when hardware completes */
136static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
137{
138 uint32_t regVal;
139
140 do {
141 /* wait for ECC to be valid */
142 regVal = REG_UMI_BCH_CTRL_STATUS;
143 } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
144
145 return regVal;
146}
147
148/* Poll write ECC calc to check when hardware completes */
149static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
150{
151 /* wait for ECC to be valid */
152 while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
153 == 0)
154 ;
155}
156
 157/* Read the OOB and ECC; for kernel builds, also copy the OOB to a buffer */
158#if defined(__KERNEL__) && !defined(STANDALONE)
159static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
160 uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
161#else
162static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
163 uint8_t *eccCalc, int numEccBytes)
164#endif
165{
166 int eccPos = 0;
167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */
168
169 /* ECC is already paused when this function is called */
170
171 if (pageSize == NAND_DATA_ACCESS_SIZE) {
172 while (numToRead > numEccBytes) {
173 /* skip free oob region */
174#if defined(__KERNEL__) && !defined(STANDALONE)
175 *oobp++ = REG_NAND_DATA8;
176#else
177 REG_NAND_DATA8;
178#endif
179 numToRead--;
180 }
181
182 /* read ECC bytes before BI */
183 nand_bcm_umi_bch_resume_read_ecc_calc();
184
185 while (numToRead > 11) {
186#if defined(__KERNEL__) && !defined(STANDALONE)
187 *oobp = REG_NAND_DATA8;
188 eccCalc[eccPos++] = *oobp;
189 oobp++;
190#else
191 eccCalc[eccPos++] = REG_NAND_DATA8;
192#endif
193 }
194
195 nand_bcm_umi_bch_pause_read_ecc_calc();
196
197 if (numToRead == 11) {
198 /* read BI */
199#if defined(__KERNEL__) && !defined(STANDALONE)
200 *oobp++ = REG_NAND_DATA8;
201#else
202 REG_NAND_DATA8;
203#endif
204 numToRead--;
205 }
206
207 /* read ECC bytes */
208 nand_bcm_umi_bch_resume_read_ecc_calc();
209 while (numToRead) {
210#if defined(__KERNEL__) && !defined(STANDALONE)
211 *oobp = REG_NAND_DATA8;
212 eccCalc[eccPos++] = *oobp;
213 oobp++;
214#else
215 eccCalc[eccPos++] = REG_NAND_DATA8;
216#endif
217 numToRead--;
218 }
219 } else {
220 /* skip BI */
221#if defined(__KERNEL__) && !defined(STANDALONE)
222 *oobp++ = REG_NAND_DATA8;
223#else
224 REG_NAND_DATA8;
225#endif
226 numToRead--;
227
228 while (numToRead > numEccBytes) {
229 /* skip free oob region */
230#if defined(__KERNEL__) && !defined(STANDALONE)
231 *oobp++ = REG_NAND_DATA8;
232#else
233 REG_NAND_DATA8;
234#endif
235 numToRead--;
236 }
237
238 /* read ECC bytes */
239 nand_bcm_umi_bch_resume_read_ecc_calc();
240 while (numToRead) {
241#if defined(__KERNEL__) && !defined(STANDALONE)
242 *oobp = REG_NAND_DATA8;
243 eccCalc[eccPos++] = *oobp;
244 oobp++;
245#else
246 eccCalc[eccPos++] = REG_NAND_DATA8;
247#endif
248 numToRead--;
249 }
250 }
251}
252
253/* Helper function to write ECC */
254static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
255 uint8_t *oobp, uint8_t eccVal)
256{
257 if (eccBytePos <= numEccBytes)
258 *oobp = eccVal;
259}
260
261/* Write OOB with ECC */
262static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
263 uint8_t *oobp, int numEccBytes)
264{
265 uint32_t eccVal = 0xffffffff;
266
267 /* wait for write ECC to be valid */
268 nand_bcm_umi_bch_poll_write_ecc_calc();
269
270 /*
271 ** Get the hardware ecc from the 32-bit result registers.
272 ** Read after 512 byte accesses. Format B3B2B1B0
273 ** where B3 = ecc3, etc.
274 */
275
276 if (pageSize == NAND_DATA_ACCESS_SIZE) {
277 /* Now fill in the ECC bytes */
278 if (numEccBytes >= 13)
279 eccVal = REG_UMI_BCH_WR_ECC_3;
280
281 /* Usually we skip CM in oob[0,1] */
282 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
283 (eccVal >> 16) & 0xff);
284 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
285 (eccVal >> 8) & 0xff);
286
287 /* Write ECC in oob[2,3,4] */
288 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
289 eccVal & 0xff); /* ECC 12 */
290
291 if (numEccBytes >= 9)
292 eccVal = REG_UMI_BCH_WR_ECC_2;
293
294 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
295 (eccVal >> 24) & 0xff); /* ECC11 */
296 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
297 (eccVal >> 16) & 0xff); /* ECC10 */
298
299 /* Always Skip BI in oob[5] */
300 } else {
301 /* Always Skip BI in oob[0] */
302
303 /* Now fill in the ECC bytes */
304 if (numEccBytes >= 13)
305 eccVal = REG_UMI_BCH_WR_ECC_3;
306
307 /* Usually skip CM in oob[1,2] */
308 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
309 (eccVal >> 16) & 0xff);
310 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
311 (eccVal >> 8) & 0xff);
312
313 /* Write ECC in oob[3-15] */
314 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
315 eccVal & 0xff); /* ECC12 */
316
317 if (numEccBytes >= 9)
318 eccVal = REG_UMI_BCH_WR_ECC_2;
319
320 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
321 (eccVal >> 24) & 0xff); /* ECC11 */
322 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
323 (eccVal >> 16) & 0xff); /* ECC10 */
324 }
325
326 /* Fill in the remainder of ECC locations */
327 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
328 (eccVal >> 8) & 0xff); /* ECC9 */
329 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
330 eccVal & 0xff); /* ECC8 */
331
332 if (numEccBytes >= 5)
333 eccVal = REG_UMI_BCH_WR_ECC_1;
334
335 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
336 (eccVal >> 24) & 0xff); /* ECC7 */
337 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
338 (eccVal >> 16) & 0xff); /* ECC6 */
339 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
340 (eccVal >> 8) & 0xff); /* ECC5 */
341 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
342 eccVal & 0xff); /* ECC4 */
343
344 if (numEccBytes >= 1)
345 eccVal = REG_UMI_BCH_WR_ECC_0;
346
347 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
348 (eccVal >> 24) & 0xff); /* ECC3 */
349 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
350 (eccVal >> 16) & 0xff); /* ECC2 */
351 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
352 (eccVal >> 8) & 0xff); /* ECC1 */
353 NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
354 eccVal & 0xff); /* ECC0 */
355}
356#endif
357
358#endif /* NAND_BCM_UMI_H */
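
As a worked check of nand_bcm_umi_bch_config_ecc() above (an illustration, not part of the patch): with the default 13 ECC bytes, numBits = 13 * 8 = 104, so T = 104 / 13 = 8 correctable bits per step, N = (512 + 13) * 8 = 4200 total code bits, and K = 4200 - 8 * 13 = 4096 bits, which is exactly the 512 data bytes covered by each ECC step.
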
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 92320a643275..271b8e735e8f 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -150,20 +150,19 @@ static const char addressbits[256] = {
150}; 150};
151 151
152/** 152/**
153 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte 153 * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
154 * block 154 * block
155 * @mtd: MTD block structure
156 * @buf: input buffer with raw data 155 * @buf: input buffer with raw data
156 * @eccsize: data bytes per ecc step (256 or 512)
157 * @code: output buffer with ECC 157 * @code: output buffer with ECC
158 */ 158 */
159int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, 159void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
160 unsigned char *code) 160 unsigned char *code)
161{ 161{
162 int i; 162 int i;
163 const uint32_t *bp = (uint32_t *)buf; 163 const uint32_t *bp = (uint32_t *)buf;
164 /* 256 or 512 bytes/ecc */ 164 /* 256 or 512 bytes/ecc */
165 const uint32_t eccsize_mult = 165 const uint32_t eccsize_mult = eccsize >> 8;
166 (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
167 uint32_t cur; /* current value in buffer */ 166 uint32_t cur; /* current value in buffer */
168 /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ 167 /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
169 uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; 168 uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
@@ -412,6 +411,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
412 (invparity[par & 0x55] << 2) | 411 (invparity[par & 0x55] << 2) |
413 (invparity[rp17] << 1) | 412 (invparity[rp17] << 1) |
414 (invparity[rp16] << 0); 413 (invparity[rp16] << 0);
414}
415EXPORT_SYMBOL(__nand_calculate_ecc);
416
417/**
418 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
419 * block
420 * @mtd: MTD block structure
421 * @buf: input buffer with raw data
422 * @code: output buffer with ECC
423 */
424int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
425 unsigned char *code)
426{
427 __nand_calculate_ecc(buf,
428 ((struct nand_chip *)mtd->priv)->ecc.size, code);
429
415 return 0; 430 return 0;
416} 431}
417EXPORT_SYMBOL(nand_calculate_ecc); 432EXPORT_SYMBOL(nand_calculate_ecc);
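
The split above lets callers run the software ECC math without a struct mtd_info by passing the 256- or 512-byte step size directly. A minimal sketch of the newly exported helper in use, assuming its prototype is made available through linux/mtd/nand_ecc.h alongside nand_calculate_ecc():

#include <linux/mtd/nand_ecc.h>

/* Illustrative use only: compute the 3 ECC bytes for one 256-byte step */
static void example_soft_ecc(const unsigned char *step_buf, unsigned char ecc[3])
{
	__nand_calculate_ecc(step_buf, 256, ecc);
}
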
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cd0711b83ac4..7281000fef2d 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
161MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); 161MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
162 162
163/* The largest possible page size */ 163/* The largest possible page size */
164#define NS_LARGEST_PAGE_SIZE 2048 164#define NS_LARGEST_PAGE_SIZE 4096
165 165
166/* The prefix for simulator output */ 166/* The prefix for simulator output */
167#define NS_OUTPUT_PREFIX "[nandsim]" 167#define NS_OUTPUT_PREFIX "[nandsim]"
@@ -259,7 +259,8 @@ MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of mem
259#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ 259#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
260#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */ 260#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
261#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 261#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
262#define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */ 262#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
263#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
263#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */ 264#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
264 265
265/* Remove action bits from state */ 266/* Remove action bits from state */
@@ -588,6 +589,8 @@ static int init_nandsim(struct mtd_info *mtd)
588 ns->options |= OPT_PAGE512_8BIT; 589 ns->options |= OPT_PAGE512_8BIT;
589 } else if (ns->geom.pgsz == 2048) { 590 } else if (ns->geom.pgsz == 2048) {
590 ns->options |= OPT_PAGE2048; 591 ns->options |= OPT_PAGE2048;
592 } else if (ns->geom.pgsz == 4096) {
593 ns->options |= OPT_PAGE4096;
591 } else { 594 } else {
592 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz); 595 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
593 return -EIO; 596 return -EIO;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 4e16c6f5bdd5..8d467315f02b 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -34,7 +34,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
34{ 34{
35 struct platform_nand_data *pdata = pdev->dev.platform_data; 35 struct platform_nand_data *pdata = pdev->dev.platform_data;
36 struct plat_nand_data *data; 36 struct plat_nand_data *data;
37 int res = 0; 37 struct resource *res;
38 int err = 0;
39
40 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
41 if (!res)
42 return -ENXIO;
38 43
39 /* Allocate memory for the device structure (and zero it) */ 44 /* Allocate memory for the device structure (and zero it) */
40 data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL); 45 data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
@@ -43,12 +48,18 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
43 return -ENOMEM; 48 return -ENOMEM;
44 } 49 }
45 50
46 data->io_base = ioremap(pdev->resource[0].start, 51 if (!request_mem_region(res->start, resource_size(res),
47 pdev->resource[0].end - pdev->resource[0].start + 1); 52 dev_name(&pdev->dev))) {
53 dev_err(&pdev->dev, "request_mem_region failed\n");
54 err = -EBUSY;
55 goto out_free;
56 }
57
58 data->io_base = ioremap(res->start, resource_size(res));
48 if (data->io_base == NULL) { 59 if (data->io_base == NULL) {
49 dev_err(&pdev->dev, "ioremap failed\n"); 60 dev_err(&pdev->dev, "ioremap failed\n");
50 kfree(data); 61 err = -EIO;
51 return -EIO; 62 goto out_release_io;
52 } 63 }
53 64
54 data->chip.priv = &data; 65 data->chip.priv = &data;
@@ -74,24 +85,24 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
74 85
75 /* Handle any platform specific setup */ 86 /* Handle any platform specific setup */
76 if (pdata->ctrl.probe) { 87 if (pdata->ctrl.probe) {
77 res = pdata->ctrl.probe(pdev); 88 err = pdata->ctrl.probe(pdev);
78 if (res) 89 if (err)
79 goto out; 90 goto out;
80 } 91 }
81 92
82 /* Scan to find existence of the device */ 93 /* Scan to find existence of the device */
83 if (nand_scan(&data->mtd, 1)) { 94 if (nand_scan(&data->mtd, 1)) {
84 res = -ENXIO; 95 err = -ENXIO;
85 goto out; 96 goto out;
86 } 97 }
87 98
88#ifdef CONFIG_MTD_PARTITIONS 99#ifdef CONFIG_MTD_PARTITIONS
89 if (pdata->chip.part_probe_types) { 100 if (pdata->chip.part_probe_types) {
90 res = parse_mtd_partitions(&data->mtd, 101 err = parse_mtd_partitions(&data->mtd,
91 pdata->chip.part_probe_types, 102 pdata->chip.part_probe_types,
92 &data->parts, 0); 103 &data->parts, 0);
93 if (res > 0) { 104 if (err > 0) {
94 add_mtd_partitions(&data->mtd, data->parts, res); 105 add_mtd_partitions(&data->mtd, data->parts, err);
95 return 0; 106 return 0;
96 } 107 }
97 } 108 }
@@ -99,14 +110,14 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
99 pdata->chip.set_parts(data->mtd.size, &pdata->chip); 110 pdata->chip.set_parts(data->mtd.size, &pdata->chip);
100 if (pdata->chip.partitions) { 111 if (pdata->chip.partitions) {
101 data->parts = pdata->chip.partitions; 112 data->parts = pdata->chip.partitions;
102 res = add_mtd_partitions(&data->mtd, data->parts, 113 err = add_mtd_partitions(&data->mtd, data->parts,
103 pdata->chip.nr_partitions); 114 pdata->chip.nr_partitions);
104 } else 115 } else
105#endif 116#endif
106 res = add_mtd_device(&data->mtd); 117 err = add_mtd_device(&data->mtd);
107 118
108 if (!res) 119 if (!err)
109 return res; 120 return err;
110 121
111 nand_release(&data->mtd); 122 nand_release(&data->mtd);
112out: 123out:
@@ -114,8 +125,11 @@ out:
114 pdata->ctrl.remove(pdev); 125 pdata->ctrl.remove(pdev);
115 platform_set_drvdata(pdev, NULL); 126 platform_set_drvdata(pdev, NULL);
116 iounmap(data->io_base); 127 iounmap(data->io_base);
128out_release_io:
129 release_mem_region(res->start, resource_size(res));
130out_free:
117 kfree(data); 131 kfree(data);
118 return res; 132 return err;
119} 133}
120 134
121/* 135/*
@@ -125,6 +139,9 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
125{ 139{
126 struct plat_nand_data *data = platform_get_drvdata(pdev); 140 struct plat_nand_data *data = platform_get_drvdata(pdev);
127 struct platform_nand_data *pdata = pdev->dev.platform_data; 141 struct platform_nand_data *pdata = pdev->dev.platform_data;
142 struct resource *res;
143
144 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
128 145
129 nand_release(&data->mtd); 146 nand_release(&data->mtd);
130#ifdef CONFIG_MTD_PARTITIONS 147#ifdef CONFIG_MTD_PARTITIONS
@@ -134,6 +151,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
134 if (pdata->ctrl.remove) 151 if (pdata->ctrl.remove)
135 pdata->ctrl.remove(pdev); 152 pdata->ctrl.remove(pdev);
136 iounmap(data->io_base); 153 iounmap(data->io_base);
154 release_mem_region(res->start, resource_size(res));
137 kfree(data); 155 kfree(data);
138 156
139 return 0; 157 return 0;
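
Because plat_nand_probe() now returns -ENXIO unless the platform device carries a memory resource, and claims that region with request_mem_region(), board code has to describe the NAND window as an IORESOURCE_MEM resource. A minimal sketch with an invented base address ("gen_nand" is assumed here to be the driver's platform name):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_nand_resource = {
	.start	= 0x40000000,		/* illustrative chip-select base */
	.end	= 0x40000fff,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device example_nand_device = {
	.name		= "gen_nand",
	.id		= -1,
	.num_resources	= 1,
	.resource	= &example_nand_resource,
	/* .dev.platform_data would point at a struct platform_nand_data */
};
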
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 68b5b3a486a9..fa6e9c7fe511 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -774,7 +774,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
774 chip->select_chip = s3c2410_nand_select_chip; 774 chip->select_chip = s3c2410_nand_select_chip;
775 chip->chip_delay = 50; 775 chip->chip_delay = 50;
776 chip->priv = nmtd; 776 chip->priv = nmtd;
777 chip->options = 0; 777 chip->options = set->options;
778 chip->controller = &info->controller; 778 chip->controller = &info->controller;
779 779
780 switch (info->cpu_type) { 780 switch (info->cpu_type) {
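
With chip->options now taken from the per-set platform data, an s3c2410 board can pass chip options such as the NAND_SCAN_SILENT_NODEV flag introduced in nand_base.c above for sets whose chip may be unpopulated. A sketch under that assumption (set names are illustrative; only the .options passthrough comes from the patch):

#include <linux/mtd/nand.h>
#include <plat/nand.h>

static struct s3c2410_nand_set example_nand_sets[] = {
	[0] = {
		.name		= "boot",
		.nr_chips	= 1,
		.options	= 0,
	},
	[1] = {
		.name		= "extra",
		.nr_chips	= 1,
		.options	= NAND_SCAN_SILENT_NODEV,	/* chip may be absent */
	},
};
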
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 73af8324d0d0..863513c3b69a 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -429,11 +429,10 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
429 chip = mtd->priv; 429 chip = mtd->priv;
430 txx9_priv = chip->priv; 430 txx9_priv = chip->priv;
431 431
432 nand_release(mtd);
432#ifdef CONFIG_MTD_PARTITIONS 433#ifdef CONFIG_MTD_PARTITIONS
433 del_mtd_partitions(mtd);
434 kfree(drvdata->parts[i]); 434 kfree(drvdata->parts[i]);
435#endif 435#endif
436 del_mtd_device(mtd);
437 kfree(txx9_priv->mtdname); 436 kfree(txx9_priv->mtdname);
438 kfree(txx9_priv); 437 kfree(txx9_priv);
439 } 438 }
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 86c4f6dcdc65..75f38b95811e 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
112 unsigned long timeout; 112 unsigned long timeout;
113 u32 syscfg; 113 u32 syscfg;
114 114
115 if (state == FL_RESETING) { 115 if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
116 int i; 116 state == FL_VERIFYING_ERASE) {
117 int i = 21;
118 unsigned int intr_flags = ONENAND_INT_MASTER;
119
120 switch (state) {
121 case FL_RESETING:
122 intr_flags |= ONENAND_INT_RESET;
123 break;
124 case FL_PREPARING_ERASE:
125 intr_flags |= ONENAND_INT_ERASE;
126 break;
127 case FL_VERIFYING_ERASE:
128 i = 101;
129 break;
130 }
117 131
118 for (i = 0; i < 20; i++) { 132 while (--i) {
119 udelay(1); 133 udelay(1);
120 intr = read_reg(c, ONENAND_REG_INTERRUPT); 134 intr = read_reg(c, ONENAND_REG_INTERRUPT);
121 if (intr & ONENAND_INT_MASTER) 135 if (intr & ONENAND_INT_MASTER)
@@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
126 wait_err("controller error", state, ctrl, intr); 140 wait_err("controller error", state, ctrl, intr);
127 return -EIO; 141 return -EIO;
128 } 142 }
129 if (!(intr & ONENAND_INT_RESET)) { 143 if ((intr & intr_flags) != intr_flags) {
130 wait_err("timeout", state, ctrl, intr); 144 wait_err("timeout", state, ctrl, intr);
131 return -EIO; 145 return -EIO;
132 } 146 }
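
For reference, the reworked loop above still polls in 1 microsecond steps, so the new initial counts allow roughly 20 microseconds for reset and erase preparation and roughly 100 microseconds for erase verification before the timeout branch is taken.
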
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ff66e4330aa7..f63b1db3ffb3 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1,17 +1,19 @@
1/* 1/*
2 * linux/drivers/mtd/onenand/onenand_base.c 2 * linux/drivers/mtd/onenand/onenand_base.c
3 * 3 *
4 * Copyright (C) 2005-2007 Samsung Electronics 4 * Copyright © 2005-2009 Samsung Electronics
5 * Copyright © 2007 Nokia Corporation
6 *
5 * Kyungmin Park <kyungmin.park@samsung.com> 7 * Kyungmin Park <kyungmin.park@samsung.com>
6 * 8 *
7 * Credits: 9 * Credits:
8 * Adrian Hunter <ext-adrian.hunter@nokia.com>: 10 * Adrian Hunter <ext-adrian.hunter@nokia.com>:
9 * auto-placement support, read-while load support, various fixes 11 * auto-placement support, read-while load support, various fixes
10 * Copyright (C) Nokia Corporation, 2007
11 * 12 *
12 * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com> 13 * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
13 * Flex-OneNAND support 14 * Flex-OneNAND support
14 * Copyright (C) Samsung Electronics, 2008 15 * Amul Kumar Saha <amul.saha at samsung.com>
16 * OTP support
15 * 17 *
16 * This program is free software; you can redistribute it and/or modify 18 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as 19 * it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
32 34
33#include <asm/io.h> 35#include <asm/io.h>
34 36
37/*
38 * Multiblock erase if number of blocks to erase is 2 or more.
39 * Maximum number of blocks for simultaneous erase is 64.
40 */
41#define MB_ERASE_MIN_BLK_COUNT 2
42#define MB_ERASE_MAX_BLK_COUNT 64
43
35/* Default Flex-OneNAND boundary and lock respectively */ 44/* Default Flex-OneNAND boundary and lock respectively */
36static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 }; 45static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
37 46
@@ -43,6 +52,18 @@ MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
43 " : 0->Set boundary in unlocked status" 52 " : 0->Set boundary in unlocked status"
44 " : 1->Set boundary in locked status"); 53 " : 1->Set boundary in locked status");
45 54
55/* Default OneNAND/Flex-OneNAND OTP options*/
56static int otp;
57
58module_param(otp, int, 0400);
59MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
60 "Syntax : otp=LOCK_TYPE"
61 "LOCK_TYPE : Keys issued, for specific OTP Lock type"
62 " : 0 -> Default (No Blocks Locked)"
63 " : 1 -> OTP Block lock"
64 " : 2 -> 1st Block lock"
65 " : 3 -> BOTH OTP Block and 1st Block lock");
66
46/** 67/**
47 * onenand_oob_128 - oob info for Flex-Onenand with 4KB page 68 * onenand_oob_128 - oob info for Flex-Onenand with 4KB page
48 * For now, we expose only 64 out of 80 ecc bytes 69 * For now, we expose only 64 out of 80 ecc bytes
@@ -339,6 +360,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
339 break; 360 break;
340 361
341 case ONENAND_CMD_ERASE: 362 case ONENAND_CMD_ERASE:
363 case ONENAND_CMD_MULTIBLOCK_ERASE:
364 case ONENAND_CMD_ERASE_VERIFY:
342 case ONENAND_CMD_BUFFERRAM: 365 case ONENAND_CMD_BUFFERRAM:
343 case ONENAND_CMD_OTP_ACCESS: 366 case ONENAND_CMD_OTP_ACCESS:
344 block = onenand_block(this, addr); 367 block = onenand_block(this, addr);
@@ -483,7 +506,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
483 if (interrupt & flags) 506 if (interrupt & flags)
484 break; 507 break;
485 508
486 if (state != FL_READING) 509 if (state != FL_READING && state != FL_PREPARING_ERASE)
487 cond_resched(); 510 cond_resched();
488 } 511 }
489 /* To get correct interrupt status in timeout case */ 512 /* To get correct interrupt status in timeout case */
@@ -500,25 +523,40 @@ static int onenand_wait(struct mtd_info *mtd, int state)
500 int ecc = onenand_read_ecc(this); 523 int ecc = onenand_read_ecc(this);
501 if (ecc) { 524 if (ecc) {
502 if (ecc & ONENAND_ECC_2BIT_ALL) { 525 if (ecc & ONENAND_ECC_2BIT_ALL) {
503 printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc); 526 printk(KERN_ERR "%s: ECC error = 0x%04x\n",
527 __func__, ecc);
504 mtd->ecc_stats.failed++; 528 mtd->ecc_stats.failed++;
505 return -EBADMSG; 529 return -EBADMSG;
506 } else if (ecc & ONENAND_ECC_1BIT_ALL) { 530 } else if (ecc & ONENAND_ECC_1BIT_ALL) {
507 printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc); 531 printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
532 __func__, ecc);
508 mtd->ecc_stats.corrected++; 533 mtd->ecc_stats.corrected++;
509 } 534 }
510 } 535 }
511 } else if (state == FL_READING) { 536 } else if (state == FL_READING) {
512 printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); 537 printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
538 __func__, ctrl, interrupt);
539 return -EIO;
540 }
541
542 if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
543 printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
544 __func__, ctrl, interrupt);
545 return -EIO;
546 }
547
548 if (!(interrupt & ONENAND_INT_MASTER)) {
549 printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
550 __func__, ctrl, interrupt);
513 return -EIO; 551 return -EIO;
514 } 552 }
515 553
516 /* If there's controller error, it's a real error */ 554 /* If there's controller error, it's a real error */
517 if (ctrl & ONENAND_CTRL_ERROR) { 555 if (ctrl & ONENAND_CTRL_ERROR) {
518 printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", 556 printk(KERN_ERR "%s: controller error = 0x%04x\n",
519 ctrl); 557 __func__, ctrl);
520 if (ctrl & ONENAND_CTRL_LOCK) 558 if (ctrl & ONENAND_CTRL_LOCK)
521 printk(KERN_ERR "onenand_wait: it's locked error.\n"); 559 printk(KERN_ERR "%s: it's locked error.\n", __func__);
522 return -EIO; 560 return -EIO;
523 } 561 }
524 562
@@ -1015,7 +1053,8 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
1015 /* We are attempting to reread, so decrement stats.failed 1053 /* We are attempting to reread, so decrement stats.failed
1016 * which was incremented by onenand_wait due to read failure 1054 * which was incremented by onenand_wait due to read failure
1017 */ 1055 */
1018 printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n"); 1056 printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
1057 __func__);
1019 mtd->ecc_stats.failed--; 1058 mtd->ecc_stats.failed--;
1020 1059
1021 /* Issue the LSB page recovery command */ 1060 /* Issue the LSB page recovery command */
@@ -1046,7 +1085,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1046 int ret = 0; 1085 int ret = 0;
1047 int writesize = this->writesize; 1086 int writesize = this->writesize;
1048 1087
1049 DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1088 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1089 __func__, (unsigned int) from, (int) len);
1050 1090
1051 if (ops->mode == MTD_OOB_AUTO) 1091 if (ops->mode == MTD_OOB_AUTO)
1052 oobsize = this->ecclayout->oobavail; 1092 oobsize = this->ecclayout->oobavail;
@@ -1057,7 +1097,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1057 1097
1058 /* Do not allow reads past end of device */ 1098 /* Do not allow reads past end of device */
1059 if (from + len > mtd->size) { 1099 if (from + len > mtd->size) {
1060 printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n"); 1100 printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1101 __func__);
1061 ops->retlen = 0; 1102 ops->retlen = 0;
1062 ops->oobretlen = 0; 1103 ops->oobretlen = 0;
1063 return -EINVAL; 1104 return -EINVAL;
@@ -1146,7 +1187,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1146 int ret = 0, boundary = 0; 1187 int ret = 0, boundary = 0;
1147 int writesize = this->writesize; 1188 int writesize = this->writesize;
1148 1189
1149 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1190 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1191 __func__, (unsigned int) from, (int) len);
1150 1192
1151 if (ops->mode == MTD_OOB_AUTO) 1193 if (ops->mode == MTD_OOB_AUTO)
1152 oobsize = this->ecclayout->oobavail; 1194 oobsize = this->ecclayout->oobavail;
@@ -1157,7 +1199,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1157 1199
1158 /* Do not allow reads past end of device */ 1200 /* Do not allow reads past end of device */
1159 if ((from + len) > mtd->size) { 1201 if ((from + len) > mtd->size) {
1160 printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n"); 1202 printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1203 __func__);
1161 ops->retlen = 0; 1204 ops->retlen = 0;
1162 ops->oobretlen = 0; 1205 ops->oobretlen = 0;
1163 return -EINVAL; 1206 return -EINVAL;
@@ -1275,7 +1318,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1275 1318
1276 from += ops->ooboffs; 1319 from += ops->ooboffs;
1277 1320
1278 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1321 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
1322 __func__, (unsigned int) from, (int) len);
1279 1323
1280 /* Initialize return length value */ 1324 /* Initialize return length value */
1281 ops->oobretlen = 0; 1325 ops->oobretlen = 0;
@@ -1288,7 +1332,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1288 column = from & (mtd->oobsize - 1); 1332 column = from & (mtd->oobsize - 1);
1289 1333
1290 if (unlikely(column >= oobsize)) { 1334 if (unlikely(column >= oobsize)) {
1291 printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n"); 1335 printk(KERN_ERR "%s: Attempted to start read outside oob\n",
1336 __func__);
1292 return -EINVAL; 1337 return -EINVAL;
1293 } 1338 }
1294 1339
@@ -1296,7 +1341,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1296 if (unlikely(from >= mtd->size || 1341 if (unlikely(from >= mtd->size ||
1297 column + len > ((mtd->size >> this->page_shift) - 1342 column + len > ((mtd->size >> this->page_shift) -
1298 (from >> this->page_shift)) * oobsize)) { 1343 (from >> this->page_shift)) * oobsize)) {
1299 printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n"); 1344 printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
1345 __func__);
1300 return -EINVAL; 1346 return -EINVAL;
1301 } 1347 }
1302 1348
@@ -1319,7 +1365,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1319 ret = onenand_recover_lsb(mtd, from, ret); 1365 ret = onenand_recover_lsb(mtd, from, ret);
1320 1366
1321 if (ret && ret != -EBADMSG) { 1367 if (ret && ret != -EBADMSG) {
1322 printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 1368 printk(KERN_ERR "%s: read failed = 0x%x\n",
1369 __func__, ret);
1323 break; 1370 break;
1324 } 1371 }
1325 1372
@@ -1450,20 +1497,21 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1450 if (interrupt & ONENAND_INT_READ) { 1497 if (interrupt & ONENAND_INT_READ) {
1451 int ecc = onenand_read_ecc(this); 1498 int ecc = onenand_read_ecc(this);
1452 if (ecc & ONENAND_ECC_2BIT_ALL) { 1499 if (ecc & ONENAND_ECC_2BIT_ALL) {
1453 printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x" 1500 printk(KERN_WARNING "%s: ecc error = 0x%04x, "
1454 ", controller error 0x%04x\n", ecc, ctrl); 1501 "controller error 0x%04x\n",
1502 __func__, ecc, ctrl);
1455 return ONENAND_BBT_READ_ECC_ERROR; 1503 return ONENAND_BBT_READ_ECC_ERROR;
1456 } 1504 }
1457 } else { 1505 } else {
1458 printk(KERN_ERR "onenand_bbt_wait: read timeout!" 1506 printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
1459 "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); 1507 __func__, ctrl, interrupt);
1460 return ONENAND_BBT_READ_FATAL_ERROR; 1508 return ONENAND_BBT_READ_FATAL_ERROR;
1461 } 1509 }
1462 1510
1463 /* Initial bad block case: 0x2400 or 0x0400 */ 1511 /* Initial bad block case: 0x2400 or 0x0400 */
1464 if (ctrl & ONENAND_CTRL_ERROR) { 1512 if (ctrl & ONENAND_CTRL_ERROR) {
1465 printk(KERN_DEBUG "onenand_bbt_wait: " 1513 printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
1466 "controller error = 0x%04x\n", ctrl); 1514 __func__, ctrl);
1467 return ONENAND_BBT_READ_ERROR; 1515 return ONENAND_BBT_READ_ERROR;
1468 } 1516 }
1469 1517
@@ -1487,14 +1535,16 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1487 size_t len = ops->ooblen; 1535 size_t len = ops->ooblen;
1488 u_char *buf = ops->oobbuf; 1536 u_char *buf = ops->oobbuf;
1489 1537
1490 DEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len); 1538 DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
1539 __func__, (unsigned int) from, len);
1491 1540
1492 /* Initialize return value */ 1541 /* Initialize return value */
1493 ops->oobretlen = 0; 1542 ops->oobretlen = 0;
1494 1543
1495 /* Do not allow reads past end of device */ 1544 /* Do not allow reads past end of device */
1496 if (unlikely((from + len) > mtd->size)) { 1545 if (unlikely((from + len) > mtd->size)) {
1497 printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n"); 1546 printk(KERN_ERR "%s: Attempt read beyond end of device\n",
1547 __func__);
1498 return ONENAND_BBT_READ_FATAL_ERROR; 1548 return ONENAND_BBT_READ_FATAL_ERROR;
1499 } 1549 }
1500 1550
@@ -1661,21 +1711,23 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1661 /* Wait for any existing operation to clear */ 1711 /* Wait for any existing operation to clear */
1662 onenand_panic_wait(mtd); 1712 onenand_panic_wait(mtd);
1663 1713
1664 DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n", 1714 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1665 (unsigned int) to, (int) len); 1715 __func__, (unsigned int) to, (int) len);
1666 1716
1667 /* Initialize retlen, in case of early exit */ 1717 /* Initialize retlen, in case of early exit */
1668 *retlen = 0; 1718 *retlen = 0;
1669 1719
1670 /* Do not allow writes past end of device */ 1720 /* Do not allow writes past end of device */
1671 if (unlikely((to + len) > mtd->size)) { 1721 if (unlikely((to + len) > mtd->size)) {
1672 printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n"); 1722 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1723 __func__);
1673 return -EINVAL; 1724 return -EINVAL;
1674 } 1725 }
1675 1726
1676 /* Reject writes, which are not page aligned */ 1727 /* Reject writes, which are not page aligned */
1677 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1728 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1678 printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n"); 1729 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
1730 __func__);
1679 return -EINVAL; 1731 return -EINVAL;
1680 } 1732 }
1681 1733
@@ -1711,7 +1763,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1711 } 1763 }
1712 1764
1713 if (ret) { 1765 if (ret) {
1714 printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret); 1766 printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
1715 break; 1767 break;
1716 } 1768 }
1717 1769
@@ -1792,7 +1844,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1792 u_char *oobbuf; 1844 u_char *oobbuf;
1793 int ret = 0; 1845 int ret = 0;
1794 1846
1795 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 1847 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1848 __func__, (unsigned int) to, (int) len);
1796 1849
1797 /* Initialize retlen, in case of early exit */ 1850 /* Initialize retlen, in case of early exit */
1798 ops->retlen = 0; 1851 ops->retlen = 0;
@@ -1800,13 +1853,15 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1800 1853
1801 /* Do not allow writes past end of device */ 1854 /* Do not allow writes past end of device */
1802 if (unlikely((to + len) > mtd->size)) { 1855 if (unlikely((to + len) > mtd->size)) {
1803 printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n"); 1856 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1857 __func__);
1804 return -EINVAL; 1858 return -EINVAL;
1805 } 1859 }
1806 1860
1807 /* Reject writes, which are not page aligned */ 1861 /* Reject writes, which are not page aligned */
1808 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1862 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1809 printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n"); 1863 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
1864 __func__);
1810 return -EINVAL; 1865 return -EINVAL;
1811 } 1866 }
1812 1867
@@ -1879,7 +1934,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1879 onenand_update_bufferram(mtd, prev, !ret && !prev_subpage); 1934 onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
1880 if (ret) { 1935 if (ret) {
1881 written -= prevlen; 1936 written -= prevlen;
1882 printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); 1937 printk(KERN_ERR "%s: write failed %d\n",
1938 __func__, ret);
1883 break; 1939 break;
1884 } 1940 }
1885 1941
@@ -1887,7 +1943,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1887 /* Only check verify write turn on */ 1943 /* Only check verify write turn on */
1888 ret = onenand_verify(mtd, buf - len, to - len, len); 1944 ret = onenand_verify(mtd, buf - len, to - len, len);
1889 if (ret) 1945 if (ret)
1890 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1946 printk(KERN_ERR "%s: verify failed %d\n",
1947 __func__, ret);
1891 break; 1948 break;
1892 } 1949 }
1893 1950
@@ -1905,14 +1962,16 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1905 /* In partial page write we don't update bufferram */ 1962 /* In partial page write we don't update bufferram */
1906 onenand_update_bufferram(mtd, to, !ret && !subpage); 1963 onenand_update_bufferram(mtd, to, !ret && !subpage);
1907 if (ret) { 1964 if (ret) {
1908 printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); 1965 printk(KERN_ERR "%s: write failed %d\n",
1966 __func__, ret);
1909 break; 1967 break;
1910 } 1968 }
1911 1969
1912 /* Only check verify write turn on */ 1970 /* Only check verify write turn on */
1913 ret = onenand_verify(mtd, buf, to, thislen); 1971 ret = onenand_verify(mtd, buf, to, thislen);
1914 if (ret) { 1972 if (ret) {
1915 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1973 printk(KERN_ERR "%s: verify failed %d\n",
1974 __func__, ret);
1916 break; 1975 break;
1917 } 1976 }
1918 1977
@@ -1968,7 +2027,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1968 2027
1969 to += ops->ooboffs; 2028 to += ops->ooboffs;
1970 2029
1971 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 2030 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
2031 __func__, (unsigned int) to, (int) len);
1972 2032
1973 /* Initialize retlen, in case of early exit */ 2033 /* Initialize retlen, in case of early exit */
1974 ops->oobretlen = 0; 2034 ops->oobretlen = 0;
@@ -1981,14 +2041,15 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1981 column = to & (mtd->oobsize - 1); 2041 column = to & (mtd->oobsize - 1);
1982 2042
1983 if (unlikely(column >= oobsize)) { 2043 if (unlikely(column >= oobsize)) {
1984 printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n"); 2044 printk(KERN_ERR "%s: Attempted to start write outside oob\n",
2045 __func__);
1985 return -EINVAL; 2046 return -EINVAL;
1986 } 2047 }
1987 2048
1988 /* For compatibility with NAND: Do not allow write past end of page */ 2049 /* For compatibility with NAND: Do not allow write past end of page */
1989 if (unlikely(column + len > oobsize)) { 2050 if (unlikely(column + len > oobsize)) {
1990 printk(KERN_ERR "onenand_write_oob_nolock: " 2051 printk(KERN_ERR "%s: Attempt to write past end of page\n",
1991 "Attempt to write past end of page\n"); 2052 __func__);
1992 return -EINVAL; 2053 return -EINVAL;
1993 } 2054 }
1994 2055
@@ -1996,7 +2057,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
1996 if (unlikely(to >= mtd->size || 2057 if (unlikely(to >= mtd->size ||
1997 column + len > ((mtd->size >> this->page_shift) - 2058 column + len > ((mtd->size >> this->page_shift) -
1998 (to >> this->page_shift)) * oobsize)) { 2059 (to >> this->page_shift)) * oobsize)) {
1999 printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n"); 2060 printk(KERN_ERR "%s: Attempted to write past end of device\n",
2061 __func__);
2000 return -EINVAL; 2062 return -EINVAL;
2001 } 2063 }
2002 2064
@@ -2038,13 +2100,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2038 2100
2039 ret = this->wait(mtd, FL_WRITING); 2101 ret = this->wait(mtd, FL_WRITING);
2040 if (ret) { 2102 if (ret) {
2041 printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret); 2103 printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
2042 break; 2104 break;
2043 } 2105 }
2044 2106
2045 ret = onenand_verify_oob(mtd, oobbuf, to); 2107 ret = onenand_verify_oob(mtd, oobbuf, to);
2046 if (ret) { 2108 if (ret) {
2047 printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret); 2109 printk(KERN_ERR "%s: verify failed %d\n",
2110 __func__, ret);
2048 break; 2111 break;
2049 } 2112 }
2050 2113
@@ -2140,78 +2203,186 @@ static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allo
2140 return bbm->isbad_bbt(mtd, ofs, allowbbt); 2203 return bbm->isbad_bbt(mtd, ofs, allowbbt);
2141} 2204}
2142 2205
2206
2207static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
2208 struct erase_info *instr)
2209{
2210 struct onenand_chip *this = mtd->priv;
2211 loff_t addr = instr->addr;
2212 int len = instr->len;
2213 unsigned int block_size = (1 << this->erase_shift);
2214 int ret = 0;
2215
2216 while (len) {
2217 this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
2218 ret = this->wait(mtd, FL_VERIFYING_ERASE);
2219 if (ret) {
2220 printk(KERN_ERR "%s: Failed verify, block %d\n",
2221 __func__, onenand_block(this, addr));
2222 instr->state = MTD_ERASE_FAILED;
2223 instr->fail_addr = addr;
2224 return -1;
2225 }
2226 len -= block_size;
2227 addr += block_size;
2228 }
2229 return 0;
2230}
2231
2143/** 2232/**
2144 * onenand_erase - [MTD Interface] erase block(s) 2233 * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
2145 * @param mtd MTD device structure 2234 * @param mtd MTD device structure
2146 * @param instr erase instruction 2235 * @param instr erase instruction
 2236 * @param block_size erase block size
2147 * 2237 *
 2148 * Erase one ore more blocks 2238 * Erase one or more blocks up to 64 blocks at a time
2149 */ 2239 */
2150static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) 2240static int onenand_multiblock_erase(struct mtd_info *mtd,
2241 struct erase_info *instr,
2242 unsigned int block_size)
2151{ 2243{
2152 struct onenand_chip *this = mtd->priv; 2244 struct onenand_chip *this = mtd->priv;
2153 unsigned int block_size;
2154 loff_t addr = instr->addr; 2245 loff_t addr = instr->addr;
2155 loff_t len = instr->len; 2246 int len = instr->len;
2156 int ret = 0, i; 2247 int eb_count = 0;
2157 struct mtd_erase_region_info *region = NULL; 2248 int ret = 0;
2158 loff_t region_end = 0; 2249 int bdry_block = 0;
2159 2250
2160 DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len); 2251 instr->state = MTD_ERASING;
2161 2252
2162 /* Do not allow erase past end of device */ 2253 if (ONENAND_IS_DDP(this)) {
2163 if (unlikely((len + addr) > mtd->size)) { 2254 loff_t bdry_addr = this->chipsize >> 1;
2164 printk(KERN_ERR "onenand_erase: Erase past end of device\n"); 2255 if (addr < bdry_addr && (addr + len) > bdry_addr)
2165 return -EINVAL; 2256 bdry_block = bdry_addr >> this->erase_shift;
2166 } 2257 }
2167 2258
2168 if (FLEXONENAND(this)) { 2259 /* Pre-check bbs */
2169 /* Find the eraseregion of this address */ 2260 while (len) {
2170 i = flexonenand_region(mtd, addr); 2261 /* Check if we have a bad block, we do not erase bad blocks */
2171 region = &mtd->eraseregions[i]; 2262 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
2263 printk(KERN_WARNING "%s: attempt to erase a bad block "
2264 "at addr 0x%012llx\n",
2265 __func__, (unsigned long long) addr);
2266 instr->state = MTD_ERASE_FAILED;
2267 return -EIO;
2268 }
2269 len -= block_size;
2270 addr += block_size;
2271 }
2172 2272
2173 block_size = region->erasesize; 2273 len = instr->len;
2174 region_end = region->offset + region->erasesize * region->numblocks; 2274 addr = instr->addr;
2175 2275
2176 /* Start address within region must align on block boundary. 2276 /* loop over 64 eb batches */
2177 * Erase region's start offset is always block start address. 2277 while (len) {
2178 */ 2278 struct erase_info verify_instr = *instr;
2179 if (unlikely((addr - region->offset) & (block_size - 1))) { 2279 int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
2180 printk(KERN_ERR "onenand_erase: Unaligned address\n"); 2280
2181 return -EINVAL; 2281 verify_instr.addr = addr;
2282 verify_instr.len = 0;
2283
2284 /* do not cross chip boundary */
2285 if (bdry_block) {
2286 int this_block = (addr >> this->erase_shift);
2287
2288 if (this_block < bdry_block) {
2289 max_eb_count = min(max_eb_count,
2290 (bdry_block - this_block));
2291 }
2182 } 2292 }
2183 } else {
2184 block_size = 1 << this->erase_shift;
2185 2293
2186 /* Start address must align on block boundary */ 2294 eb_count = 0;
2187 if (unlikely(addr & (block_size - 1))) { 2295
2188 printk(KERN_ERR "onenand_erase: Unaligned address\n"); 2296 while (len > block_size && eb_count < (max_eb_count - 1)) {
2189 return -EINVAL; 2297 this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
2298 addr, block_size);
2299 onenand_invalidate_bufferram(mtd, addr, block_size);
2300
2301 ret = this->wait(mtd, FL_PREPARING_ERASE);
2302 if (ret) {
2303 printk(KERN_ERR "%s: Failed multiblock erase, "
2304 "block %d\n", __func__,
2305 onenand_block(this, addr));
2306 instr->state = MTD_ERASE_FAILED;
2307 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2308 return -EIO;
2309 }
2310
2311 len -= block_size;
2312 addr += block_size;
2313 eb_count++;
2314 }
2315
2316 /* last block of 64-eb series */
2317 cond_resched();
2318 this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
2319 onenand_invalidate_bufferram(mtd, addr, block_size);
2320
2321 ret = this->wait(mtd, FL_ERASING);
2322 /* Check if it is write protected */
2323 if (ret) {
2324 printk(KERN_ERR "%s: Failed erase, block %d\n",
2325 __func__, onenand_block(this, addr));
2326 instr->state = MTD_ERASE_FAILED;
2327 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2328 return -EIO;
2329 }
2330
2331 len -= block_size;
2332 addr += block_size;
2333 eb_count++;
2334
2335 /* verify */
2336 verify_instr.len = eb_count * block_size;
2337 if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
2338 instr->state = verify_instr.state;
2339 instr->fail_addr = verify_instr.fail_addr;
2340 return -EIO;
2190 } 2341 }
2191 }
2192 2342
2193 /* Length must align on block boundary */
2194 if (unlikely(len & (block_size - 1))) {
2195 printk(KERN_ERR "onenand_erase: Length not block aligned\n");
2196 return -EINVAL;
2197 } 2343 }
2344 return 0;
2345}
2198 2346
2199 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2200 2347
2201 /* Grab the lock and see if the device is available */ 2348/**
2202 onenand_get_device(mtd, FL_ERASING); 2349 * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
2350 * @param mtd MTD device structure
2351 * @param instr erase instruction
2352 * @param region erase region
2353 * @param block_size erase block size
2354 *
2355 * Erase one or more blocks one block at a time
2356 */
2357static int onenand_block_by_block_erase(struct mtd_info *mtd,
2358 struct erase_info *instr,
2359 struct mtd_erase_region_info *region,
2360 unsigned int block_size)
2361{
2362 struct onenand_chip *this = mtd->priv;
2363 loff_t addr = instr->addr;
2364 int len = instr->len;
2365 loff_t region_end = 0;
2366 int ret = 0;
2367
2368 if (region) {
2369 /* region is set for Flex-OneNAND */
2370 region_end = region->offset + region->erasesize * region->numblocks;
2371 }
2203 2372
2204 /* Loop through the blocks */
2205 instr->state = MTD_ERASING; 2373 instr->state = MTD_ERASING;
2206 2374
2375 /* Loop through the blocks */
2207 while (len) { 2376 while (len) {
2208 cond_resched(); 2377 cond_resched();
2209 2378
2210 /* Check if we have a bad block, we do not erase bad blocks */ 2379 /* Check if we have a bad block, we do not erase bad blocks */
2211 if (onenand_block_isbad_nolock(mtd, addr, 0)) { 2380 if (onenand_block_isbad_nolock(mtd, addr, 0)) {
2212 printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr); 2381 printk(KERN_WARNING "%s: attempt to erase a bad block "
2382 "at addr 0x%012llx\n",
2383 __func__, (unsigned long long) addr);
2213 instr->state = MTD_ERASE_FAILED; 2384 instr->state = MTD_ERASE_FAILED;
2214 goto erase_exit; 2385 return -EIO;
2215 } 2386 }
2216 2387
2217 this->command(mtd, ONENAND_CMD_ERASE, addr, block_size); 2388 this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
@@ -2221,11 +2392,11 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2221 ret = this->wait(mtd, FL_ERASING); 2392 ret = this->wait(mtd, FL_ERASING);
2222 /* Check, if it is write protected */ 2393 /* Check, if it is write protected */
2223 if (ret) { 2394 if (ret) {
2224 printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", 2395 printk(KERN_ERR "%s: Failed erase, block %d\n",
2225 onenand_block(this, addr)); 2396 __func__, onenand_block(this, addr));
2226 instr->state = MTD_ERASE_FAILED; 2397 instr->state = MTD_ERASE_FAILED;
2227 instr->fail_addr = addr; 2398 instr->fail_addr = addr;
2228 goto erase_exit; 2399 return -EIO;
2229 } 2400 }
2230 2401
2231 len -= block_size; 2402 len -= block_size;
@@ -2241,25 +2412,88 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2241 2412
2242 if (len & (block_size - 1)) { 2413 if (len & (block_size - 1)) {
2243 /* FIXME: This should be handled at MTD partitioning level. */ 2414 /* FIXME: This should be handled at MTD partitioning level. */
2244 printk(KERN_ERR "onenand_erase: Unaligned address\n"); 2415 printk(KERN_ERR "%s: Unaligned address\n",
2245 goto erase_exit; 2416 __func__);
2417 return -EIO;
2246 } 2418 }
2247 } 2419 }
2420 }
2421 return 0;
2422}
2423
2424/**
2425 * onenand_erase - [MTD Interface] erase block(s)
2426 * @param mtd MTD device structure
2427 * @param instr erase instruction
2428 *
2429 * Erase one or more blocks
2430 */
2431static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2432{
2433 struct onenand_chip *this = mtd->priv;
2434 unsigned int block_size;
2435 loff_t addr = instr->addr;
2436 loff_t len = instr->len;
2437 int ret = 0;
2438 struct mtd_erase_region_info *region = NULL;
2439 loff_t region_offset = 0;
2440
2441 DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
2442 (unsigned long long) instr->addr, (unsigned long long) instr->len);
2443
2444 /* Do not allow erase past end of device */
2445 if (unlikely((len + addr) > mtd->size)) {
2446 printk(KERN_ERR "%s: Erase past end of device\n", __func__);
2447 return -EINVAL;
2448 }
2449
2450 if (FLEXONENAND(this)) {
2451 /* Find the eraseregion of this address */
2452 int i = flexonenand_region(mtd, addr);
2453
2454 region = &mtd->eraseregions[i];
2455 block_size = region->erasesize;
2456
2457 /* Start address within region must align on block boundary.
2458 * Erase region's start offset is always block start address.
2459 */
2460 region_offset = region->offset;
2461 } else
2462 block_size = 1 << this->erase_shift;
2463
2464 /* Start address must align on block boundary */
2465 if (unlikely((addr - region_offset) & (block_size - 1))) {
2466 printk(KERN_ERR "%s: Unaligned address\n", __func__);
2467 return -EINVAL;
2468 }
2248 2469
2470 /* Length must align on block boundary */
2471 if (unlikely(len & (block_size - 1))) {
2472 printk(KERN_ERR "%s: Length not block aligned\n", __func__);
2473 return -EINVAL;
2249 } 2474 }
2250 2475
2251 instr->state = MTD_ERASE_DONE; 2476 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2252 2477
2253erase_exit: 2478 /* Grab the lock and see if the device is available */
2479 onenand_get_device(mtd, FL_ERASING);
2254 2480
2255 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2481 if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
2482 /* region is set for Flex-OneNAND (no mb erase) */
2483 ret = onenand_block_by_block_erase(mtd, instr,
2484 region, block_size);
2485 } else {
2486 ret = onenand_multiblock_erase(mtd, instr, block_size);
2487 }
2256 2488
2257 /* Deselect and wake up anyone waiting on the device */ 2489 /* Deselect and wake up anyone waiting on the device */
2258 onenand_release_device(mtd); 2490 onenand_release_device(mtd);
2259 2491
2260 /* Do call back function */ 2492 /* Do call back function */
2261 if (!ret) 2493 if (!ret) {
2494 instr->state = MTD_ERASE_DONE;
2262 mtd_erase_callback(instr); 2495 mtd_erase_callback(instr);
2496 }
2263 2497
2264 return ret; 2498 return ret;
2265} 2499}
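
The erase path is now split in two: onenand_multiblock_erase() pre-scans the range for bad blocks, then works in batches, issuing ONENAND_CMD_MULTIBLOCK_ERASE for all but the last block of each batch and finishing the batch with a plain ONENAND_CMD_ERASE plus an ERASE_VERIFY pass, while onenand_block_by_block_erase() keeps the old one-block loop for Flex-OneNAND regions and short requests. The standalone sketch below restates only the batching arithmetic; it is plain user-space C, and the value 64 for MB_ERASE_MAX_BLK_COUNT is an assumption read off the "64-eb series" comment rather than a constant shown in this hunk.

#include <stdio.h>

#define MB_ERASE_MAX_BLK_COUNT 64	/* assumed; not defined in this hunk */

static void plan_erase(unsigned int nblocks)
{
	unsigned int done = 0;

	while (done < nblocks) {
		unsigned int batch = nblocks - done;

		if (batch > MB_ERASE_MAX_BLK_COUNT)
			batch = MB_ERASE_MAX_BLK_COUNT;

		/* all but the last block of the batch: multiblock-erase command */
		if (batch > 1)
			printf("MULTIBLOCK_ERASE blocks %u..%u\n",
			       done, done + batch - 2);

		/* last block of the batch: regular erase, then verify the batch */
		printf("ERASE block %u, then ERASE_VERIFY blocks %u..%u\n",
		       done + batch - 1, done, done + batch - 1);

		done += batch;
	}
}

int main(void)
{
	plan_erase(130);	/* two full 64-block batches plus a 2-block tail */
	return 0;
}
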
@@ -2272,7 +2506,7 @@ erase_exit:
2272 */ 2506 */
2273static void onenand_sync(struct mtd_info *mtd) 2507static void onenand_sync(struct mtd_info *mtd)
2274{ 2508{
2275 DEBUG(MTD_DEBUG_LEVEL3, "onenand_sync: called\n"); 2509 DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
2276 2510
2277 /* Grab the lock and see if the device is available */ 2511 /* Grab the lock and see if the device is available */
2278 onenand_get_device(mtd, FL_SYNCING); 2512 onenand_get_device(mtd, FL_SYNCING);
@@ -2406,7 +2640,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2406 /* Check lock status */ 2640 /* Check lock status */
2407 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2641 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2408 if (!(status & wp_status_mask)) 2642 if (!(status & wp_status_mask))
2409 printk(KERN_ERR "wp status = 0x%x\n", status); 2643 printk(KERN_ERR "%s: wp status = 0x%x\n",
2644 __func__, status);
2410 2645
2411 return 0; 2646 return 0;
2412 } 2647 }
@@ -2435,7 +2670,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
2435 /* Check lock status */ 2670 /* Check lock status */
2436 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2671 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2437 if (!(status & wp_status_mask)) 2672 if (!(status & wp_status_mask))
2438 printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status); 2673 printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
2674 __func__, block, status);
2439 } 2675 }
2440 2676
2441 return 0; 2677 return 0;
@@ -2502,7 +2738,8 @@ static int onenand_check_lock_status(struct onenand_chip *this)
2502 /* Check lock status */ 2738 /* Check lock status */
2503 status = this->read_word(this->base + ONENAND_REG_WP_STATUS); 2739 status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
2504 if (!(status & ONENAND_WP_US)) { 2740 if (!(status & ONENAND_WP_US)) {
2505 printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status); 2741 printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
2742 __func__, block, status);
2506 return 0; 2743 return 0;
2507 } 2744 }
2508 } 2745 }
@@ -2557,6 +2794,208 @@ static void onenand_unlock_all(struct mtd_info *mtd)
2557 2794
2558#ifdef CONFIG_MTD_ONENAND_OTP 2795#ifdef CONFIG_MTD_ONENAND_OTP
2559 2796
2797/**
2798 * onenand_otp_command - Send OTP specific command to OneNAND device
2799 * @param mtd MTD device structure
2800 * @param cmd the command to be sent
2801 * @param addr offset to read from or write to
2802 * @param len number of bytes to read or write
2803 */
2804static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
2805 size_t len)
2806{
2807 struct onenand_chip *this = mtd->priv;
2808 int value, block, page;
2809
2810 /* Address translation */
2811 switch (cmd) {
2812 case ONENAND_CMD_OTP_ACCESS:
2813 block = (int) (addr >> this->erase_shift);
2814 page = -1;
2815 break;
2816
2817 default:
2818 block = (int) (addr >> this->erase_shift);
2819 page = (int) (addr >> this->page_shift);
2820
2821 if (ONENAND_IS_2PLANE(this)) {
2822 /* Make the even block number */
2823 block &= ~1;
2824 /* Is it the odd plane? */
2825 if (addr & this->writesize)
2826 block++;
2827 page >>= 1;
2828 }
2829 page &= this->page_mask;
2830 break;
2831 }
2832
2833 if (block != -1) {
2834 /* Write 'DFS, FBA' of Flash */
2835 value = onenand_block_address(this, block);
2836 this->write_word(value, this->base +
2837 ONENAND_REG_START_ADDRESS1);
2838 }
2839
2840 if (page != -1) {
2841 /* Now we use page size operation */
2842 int sectors = 4, count = 4;
2843 int dataram;
2844
2845 switch (cmd) {
2846 default:
2847 if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
2848 cmd = ONENAND_CMD_2X_PROG;
2849 dataram = ONENAND_CURRENT_BUFFERRAM(this);
2850 break;
2851 }
2852
2853 /* Write 'FPA, FSA' of Flash */
2854 value = onenand_page_address(page, sectors);
2855 this->write_word(value, this->base +
2856 ONENAND_REG_START_ADDRESS8);
2857
2858 /* Write 'BSA, BSC' of DataRAM */
2859 value = onenand_buffer_address(dataram, sectors, count);
2860 this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
2861 }
2862
2863 /* Interrupt clear */
2864 this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
2865
2866 /* Write command */
2867 this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
2868
2869 return 0;
2870}
2871
2872/**
2873 * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
2874 * @param mtd MTD device structure
2875 * @param to offset to write to
2876 * @param len number of bytes to write
2877 * @param retlen pointer to variable to store the number of written bytes
2878 * @param buf the data to write
2879 *
2880 * OneNAND write out-of-band only for OTP
2881 */
2882static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2883 struct mtd_oob_ops *ops)
2884{
2885 struct onenand_chip *this = mtd->priv;
2886 int column, ret = 0, oobsize;
2887 int written = 0;
2888 u_char *oobbuf;
2889 size_t len = ops->ooblen;
2890 const u_char *buf = ops->oobbuf;
2891 int block, value, status;
2892
2893 to += ops->ooboffs;
2894
2895 /* Initialize retlen, in case of early exit */
2896 ops->oobretlen = 0;
2897
2898 oobsize = mtd->oobsize;
2899
2900 column = to & (mtd->oobsize - 1);
2901
2902 oobbuf = this->oob_buf;
2903
2904 /* Loop until all data write */
2905 while (written < len) {
2906 int thislen = min_t(int, oobsize, len - written);
2907
2908 cond_resched();
2909
2910 block = (int) (to >> this->erase_shift);
2911 /*
2912 * Write 'DFS, FBA' of Flash
2913 * Add: F100h DQ=DFS, FBA
2914 */
2915
2916 value = onenand_block_address(this, block);
2917 this->write_word(value, this->base +
2918 ONENAND_REG_START_ADDRESS1);
2919
2920 /*
2921 * Select DataRAM for DDP
2922 * Add: F101h DQ=DBS
2923 */
2924
2925 value = onenand_bufferram_address(this, block);
2926 this->write_word(value, this->base +
2927 ONENAND_REG_START_ADDRESS2);
2928 ONENAND_SET_NEXT_BUFFERRAM(this);
2929
2930 /*
2931 * Enter OTP access mode
2932 */
2933 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2934 this->wait(mtd, FL_OTPING);
2935
2936 /* We send data to spare ram with oobsize
2937 * to prevent byte access */
2938 memcpy(oobbuf + column, buf, thislen);
2939
2940 /*
2941 * Write Data into DataRAM
2942 * Add: 8th Word
2943 * in sector0/spare/page0
2944 * DQ=XXFCh
2945 */
2946 this->write_bufferram(mtd, ONENAND_SPARERAM,
2947 oobbuf, 0, mtd->oobsize);
2948
2949 onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
2950 onenand_update_bufferram(mtd, to, 0);
2951 if (ONENAND_IS_2PLANE(this)) {
2952 ONENAND_SET_BUFFERRAM1(this);
2953 onenand_update_bufferram(mtd, to + this->writesize, 0);
2954 }
2955
2956 ret = this->wait(mtd, FL_WRITING);
2957 if (ret) {
2958 printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
2959 break;
2960 }
2961
2962 /* Exit OTP access mode */
2963 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
2964 this->wait(mtd, FL_RESETING);
2965
2966 status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
2967 status &= 0x60;
2968
2969 if (status == 0x60) {
2970 printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
2971 printk(KERN_DEBUG "1st Block\tLOCKED\n");
2972 printk(KERN_DEBUG "OTP Block\tLOCKED\n");
2973 } else if (status == 0x20) {
2974 printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
2975 printk(KERN_DEBUG "1st Block\tLOCKED\n");
2976 printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
2977 } else if (status == 0x40) {
2978 printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
2979 printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
2980 printk(KERN_DEBUG "OTP Block\tLOCKED\n");
2981 } else {
2982 printk(KERN_DEBUG "Reboot to check\n");
2983 }
2984
2985 written += thislen;
2986 if (written == len)
2987 break;
2988
2989 to += mtd->writesize;
2990 buf += thislen;
2991 column = 0;
2992 }
2993
2994 ops->oobretlen = written;
2995
2996 return ret;
2997}
2998
2560/* Internal OTP operation */ 2999/* Internal OTP operation */
2561typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, 3000typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len,
2562 size_t *retlen, u_char *buf); 3001 size_t *retlen, u_char *buf);
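
The lock-status report near the end of the new onenand_otp_write_oob_nolock() masks the controller status register with 0x60 and prints one of three tables. Reading those cases back, bit 5 appears to track the 1st block and bit 6 the OTP block; the snippet below is a plain-C restatement of that inference, not something the hunk states directly.

#include <stdio.h>

static void report_otp_lock(unsigned int ctrl_status)
{
	unsigned int bits = ctrl_status & 0x60;	/* same mask as the driver */

	if (!bits) {
		printf("Reboot to check\n");	/* neither lock bit set */
		return;
	}
	/* 0x20 is set in the LOCKED/UN-LOCKED and LOCKED/LOCKED cases above */
	printf("1st Block\t%s\n", (bits & 0x20) ? "LOCKED" : "UN-LOCKED");
	/* 0x40 is set in the UN-LOCKED/LOCKED and LOCKED/LOCKED cases above */
	printf("OTP Block\t%s\n", (bits & 0x40) ? "LOCKED" : "UN-LOCKED");
}
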
@@ -2659,11 +3098,11 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
2659 struct mtd_oob_ops ops; 3098 struct mtd_oob_ops ops;
2660 int ret; 3099 int ret;
2661 3100
2662 /* Enter OTP access mode */
2663 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
2664 this->wait(mtd, FL_OTPING);
2665
2666 if (FLEXONENAND(this)) { 3101 if (FLEXONENAND(this)) {
3102
3103 /* Enter OTP access mode */
3104 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
3105 this->wait(mtd, FL_OTPING);
2667 /* 3106 /*
2668 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of 3107 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
2669 * main area of page 49. 3108 * main area of page 49.
@@ -2674,19 +3113,19 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
2674 ops.oobbuf = NULL; 3113 ops.oobbuf = NULL;
2675 ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops); 3114 ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
2676 *retlen = ops.retlen; 3115 *retlen = ops.retlen;
3116
3117 /* Exit OTP access mode */
3118 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
3119 this->wait(mtd, FL_RESETING);
2677 } else { 3120 } else {
2678 ops.mode = MTD_OOB_PLACE; 3121 ops.mode = MTD_OOB_PLACE;
2679 ops.ooblen = len; 3122 ops.ooblen = len;
2680 ops.oobbuf = buf; 3123 ops.oobbuf = buf;
2681 ops.ooboffs = 0; 3124 ops.ooboffs = 0;
2682 ret = onenand_write_oob_nolock(mtd, from, &ops); 3125 ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
2683 *retlen = ops.oobretlen; 3126 *retlen = ops.oobretlen;
2684 } 3127 }
2685 3128
2686 /* Exit OTP access mode */
2687 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
2688 this->wait(mtd, FL_RESETING);
2689
2690 return ret; 3129 return ret;
2691} 3130}
2692 3131
@@ -2717,16 +3156,21 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2717 if (density < ONENAND_DEVICE_DENSITY_512Mb) 3156 if (density < ONENAND_DEVICE_DENSITY_512Mb)
2718 otp_pages = 20; 3157 otp_pages = 20;
2719 else 3158 else
2720 otp_pages = 10; 3159 otp_pages = 50;
2721 3160
2722 if (mode == MTD_OTP_FACTORY) { 3161 if (mode == MTD_OTP_FACTORY) {
2723 from += mtd->writesize * otp_pages; 3162 from += mtd->writesize * otp_pages;
2724 otp_pages = 64 - otp_pages; 3163 otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
2725 } 3164 }
2726 3165
2727 /* Check User/Factory boundary */ 3166 /* Check User/Factory boundary */
2728 if (((mtd->writesize * otp_pages) - (from + len)) < 0) 3167 if (mode == MTD_OTP_USER) {
2729 return 0; 3168 if (mtd->writesize * otp_pages < from + len)
3169 return 0;
3170 } else {
3171 if (mtd->writesize * otp_pages < len)
3172 return 0;
3173 }
2730 3174
2731 onenand_get_device(mtd, FL_OTPING); 3175 onenand_get_device(mtd, FL_OTPING);
2732 while (len > 0 && otp_pages > 0) { 3176 while (len > 0 && otp_pages > 0) {
@@ -2749,13 +3193,12 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2749 *retlen += sizeof(struct otp_info); 3193 *retlen += sizeof(struct otp_info);
2750 } else { 3194 } else {
2751 size_t tmp_retlen; 3195 size_t tmp_retlen;
2752 int size = len;
2753 3196
2754 ret = action(mtd, from, len, &tmp_retlen, buf); 3197 ret = action(mtd, from, len, &tmp_retlen, buf);
2755 3198
2756 buf += size; 3199 buf += tmp_retlen;
2757 len -= size; 3200 len -= tmp_retlen;
2758 *retlen += size; 3201 *retlen += tmp_retlen;
2759 3202
2760 if (ret) 3203 if (ret)
2761 break; 3204 break;
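
The last hunk above fixes the accounting in onenand_otp_walk(): instead of assuming the full requested length was handled, the walk now advances the buffer, the remaining length and *retlen by what the OTP action actually reported. A tiny user-space sketch of that pattern; the names are illustrative, not the driver's.

#include <stddef.h>

/* process() handles up to len bytes and reports how many it consumed */
static size_t walk(char *buf, size_t len,
		   size_t (*process)(char *buf, size_t len))
{
	size_t total = 0;

	while (len > 0) {
		size_t done = process(buf, len);

		if (done == 0)
			break;		/* nothing more could be handled */
		buf   += done;		/* advance by the reported amount ... */
		len   -= done;
		total += done;		/* ... and account for exactly that much */
	}
	return total;
}
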
@@ -2868,21 +3311,11 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2868 u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf; 3311 u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
2869 size_t retlen; 3312 size_t retlen;
2870 int ret; 3313 int ret;
3314 unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
2871 3315
2872 memset(buf, 0xff, FLEXONENAND(this) ? this->writesize 3316 memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
2873 : mtd->oobsize); 3317 : mtd->oobsize);
2874 /* 3318 /*
2875 * Note: OTP lock operation
2876 * OTP block : 0xXXFC
2877 * 1st block : 0xXXF3 (If chip support)
2878 * Both : 0xXXF0 (If chip support)
2879 */
2880 if (FLEXONENAND(this))
2881 buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
2882 else
2883 buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
2884
2885 /*
2886 * Write lock mark to 8th word of sector0 of page0 of the spare0. 3319 * Write lock mark to 8th word of sector0 of page0 of the spare0.
2887 * We write 16 bytes spare area instead of 2 bytes. 3320 * We write 16 bytes spare area instead of 2 bytes.
2888 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of 3321 * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
@@ -2892,10 +3325,30 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
2892 from = 0; 3325 from = 0;
2893 len = FLEXONENAND(this) ? mtd->writesize : 16; 3326 len = FLEXONENAND(this) ? mtd->writesize : 16;
2894 3327
3328 /*
3329 * Note: OTP lock operation
3330 * OTP block : 0xXXFC XX 1111 1100
3331 * 1st block : 0xXXF3 (If chip support) XX 1111 0011
3332 * Both : 0xXXF0 (If chip support) XX 1111 0000
3333 */
3334 if (FLEXONENAND(this))
3335 otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
3336
3337 /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
3338 if (otp == 1)
3339 buf[otp_lock_offset] = 0xFC;
3340 else if (otp == 2)
3341 buf[otp_lock_offset] = 0xF3;
3342 else if (otp == 3)
3343 buf[otp_lock_offset] = 0xF0;
3344 else if (otp != 0)
3345 printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
3346
2895 ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER); 3347 ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
2896 3348
2897 return ret ? : retlen; 3349 return ret ? : retlen;
2898} 3350}
3351
2899#endif /* CONFIG_MTD_ONENAND_OTP */ 3352#endif /* CONFIG_MTD_ONENAND_OTP */
2900 3353
2901/** 3354/**
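
The rework of onenand_lock_user_prot_reg() above replaces the single hard-coded 0xFC mark with a selection driven by an otp value: going by the comment block, 1 locks the OTP block, 2 the 1st block, 3 both, and anything else is rejected. A compact restatement of that mapping, assuming otp is a driver-level option that this hunk does not show being set:

/* Sketch only; "otp" is assumed to be a module/driver option. */
static unsigned char otp_lock_mark(int otp)
{
	switch (otp) {
	case 1:  return 0xFC;	/* OTP block only:  xx 1111 1100 */
	case 2:  return 0xF3;	/* 1st block only:  xx 1111 0011 */
	case 3:  return 0xF0;	/* both blocks:     xx 1111 0000 */
	default: return 0xFF;	/* nothing selected, leave the mark untouched */
	}
}
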
@@ -3172,7 +3625,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
3172 break; 3625 break;
3173 3626
3174 if (i != mtd->oobsize) { 3627 if (i != mtd->oobsize) {
3175 printk(KERN_WARNING "Block %d not erased.\n", block); 3628 printk(KERN_WARNING "%s: Block %d not erased.\n",
3629 __func__, block);
3176 return 1; 3630 return 1;
3177 } 3631 }
3178 } 3632 }
@@ -3204,8 +3658,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3204 blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0; 3658 blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
3205 3659
3206 if (boundary >= blksperdie) { 3660 if (boundary >= blksperdie) {
3207 printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. " 3661 printk(KERN_ERR "%s: Invalid boundary value. "
3208 "Boundary not changed.\n"); 3662 "Boundary not changed.\n", __func__);
3209 return -EINVAL; 3663 return -EINVAL;
3210 } 3664 }
3211 3665
@@ -3214,7 +3668,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3214 new = boundary + (die * this->density_mask); 3668 new = boundary + (die * this->density_mask);
3215 ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new)); 3669 ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
3216 if (ret) { 3670 if (ret) {
3217 printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n"); 3671 printk(KERN_ERR "%s: Please erase blocks "
3672 "before boundary change\n", __func__);
3218 return ret; 3673 return ret;
3219 } 3674 }
3220 3675
@@ -3227,12 +3682,12 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3227 3682
3228 thisboundary = this->read_word(this->base + ONENAND_DATARAM); 3683 thisboundary = this->read_word(this->base + ONENAND_DATARAM);
3229 if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) { 3684 if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
3230 printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n"); 3685 printk(KERN_ERR "%s: boundary locked\n", __func__);
3231 ret = 1; 3686 ret = 1;
3232 goto out; 3687 goto out;
3233 } 3688 }
3234 3689
3235 printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n", 3690 printk(KERN_INFO "Changing die %d boundary: %d%s\n",
3236 die, boundary, lock ? "(Locked)" : "(Unlocked)"); 3691 die, boundary, lock ? "(Locked)" : "(Unlocked)");
3237 3692
3238 addr = die ? this->diesize[0] : 0; 3693 addr = die ? this->diesize[0] : 0;
@@ -3243,7 +3698,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3243 this->command(mtd, ONENAND_CMD_ERASE, addr, 0); 3698 this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
3244 ret = this->wait(mtd, FL_ERASING); 3699 ret = this->wait(mtd, FL_ERASING);
3245 if (ret) { 3700 if (ret) {
3246 printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die); 3701 printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
3702 __func__, die);
3247 goto out; 3703 goto out;
3248 } 3704 }
3249 3705
@@ -3251,7 +3707,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
3251 this->command(mtd, ONENAND_CMD_PROG, addr, 0); 3707 this->command(mtd, ONENAND_CMD_PROG, addr, 0);
3252 ret = this->wait(mtd, FL_WRITING); 3708 ret = this->wait(mtd, FL_WRITING);
3253 if (ret) { 3709 if (ret) {
3254 printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die); 3710 printk(KERN_ERR "%s: Failed PI write for Die %d\n",
3711 __func__, die);
3255 goto out; 3712 goto out;
3256 } 3713 }
3257 3714
@@ -3408,8 +3865,8 @@ static void onenand_resume(struct mtd_info *mtd)
3408 if (this->state == FL_PM_SUSPENDED) 3865 if (this->state == FL_PM_SUSPENDED)
3409 onenand_release_device(mtd); 3866 onenand_release_device(mtd);
3410 else 3867 else
3411 printk(KERN_ERR "resume() called for the chip which is not" 3868 printk(KERN_ERR "%s: resume() called for the chip which is not "
3412 "in suspended state\n"); 3869 "in suspended state\n", __func__);
3413} 3870}
3414 3871
3415/** 3872/**
@@ -3464,7 +3921,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
3464 if (!this->page_buf) { 3921 if (!this->page_buf) {
3465 this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL); 3922 this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
3466 if (!this->page_buf) { 3923 if (!this->page_buf) {
3467 printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n"); 3924 printk(KERN_ERR "%s: Can't allocate page_buf\n",
3925 __func__);
3468 return -ENOMEM; 3926 return -ENOMEM;
3469 } 3927 }
3470 this->options |= ONENAND_PAGEBUF_ALLOC; 3928 this->options |= ONENAND_PAGEBUF_ALLOC;
@@ -3472,7 +3930,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
3472 if (!this->oob_buf) { 3930 if (!this->oob_buf) {
3473 this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL); 3931 this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
3474 if (!this->oob_buf) { 3932 if (!this->oob_buf) {
3475 printk(KERN_ERR "onenand_scan(): Can't allocate oob_buf\n"); 3933 printk(KERN_ERR "%s: Can't allocate oob_buf\n",
3934 __func__);
3476 if (this->options & ONENAND_PAGEBUF_ALLOC) { 3935 if (this->options & ONENAND_PAGEBUF_ALLOC) {
3477 this->options &= ~ONENAND_PAGEBUF_ALLOC; 3936 this->options &= ~ONENAND_PAGEBUF_ALLOC;
3478 kfree(this->page_buf); 3937 kfree(this->page_buf);
@@ -3505,8 +3964,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
3505 break; 3964 break;
3506 3965
3507 default: 3966 default:
3508 printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n", 3967 printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
3509 mtd->oobsize); 3968 __func__, mtd->oobsize);
3510 mtd->subpage_sft = 0; 3969 mtd->subpage_sft = 0;
3511 /* To prevent kernel oops */ 3970 /* To prevent kernel oops */
3512 this->ecclayout = &onenand_oob_32; 3971 this->ecclayout = &onenand_oob_32;
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index c1d501335006..b44dcab940d8 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
5obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o 5obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o 6obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o 7obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
8obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
new file mode 100644
index 000000000000..c1f31051784c
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -0,0 +1,87 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/list.h>
4#include <linux/slab.h>
5#include <linux/random.h>
6#include <linux/string.h>
7#include <linux/bitops.h>
8#include <linux/jiffies.h>
9#include <linux/mtd/nand_ecc.h>
10
11#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
12
13static void inject_single_bit_error(void *data, size_t size)
14{
15 unsigned long offset = random32() % (size * BITS_PER_BYTE);
16
17 __change_bit(offset, data);
18}
19
20static unsigned char data[512];
21static unsigned char error_data[512];
22
23static int nand_ecc_test(const size_t size)
24{
25 unsigned char code[3];
26 unsigned char error_code[3];
27 char testname[30];
28
29 BUG_ON(sizeof(data) < size);
30
31 sprintf(testname, "nand-ecc-%zu", size);
32
33 get_random_bytes(data, size);
34
35 memcpy(error_data, data, size);
36 inject_single_bit_error(error_data, size);
37
38 __nand_calculate_ecc(data, size, code);
39 __nand_calculate_ecc(error_data, size, error_code);
40 __nand_correct_data(error_data, code, error_code, size);
41
42 if (!memcmp(data, error_data, size)) {
43 printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
44 return 0;
45 }
46
47 printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
48
49 printk(KERN_DEBUG "hexdump of data:\n");
50 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
51 data, size, false);
52 printk(KERN_DEBUG "hexdump of error data:\n");
53 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
54 error_data, size, false);
55
56 return -1;
57}
58
59#else
60
61static int nand_ecc_test(const size_t size)
62{
63 return 0;
64}
65
66#endif
67
68static int __init ecc_test_init(void)
69{
70 srandom32(jiffies);
71
72 nand_ecc_test(256);
73 nand_ecc_test(512);
74
75 return 0;
76}
77
78static void __exit ecc_test_exit(void)
79{
80}
81
82module_init(ecc_test_init);
83module_exit(ecc_test_exit);
84
85MODULE_DESCRIPTION("NAND ECC function test module");
86MODULE_AUTHOR("Akinobu Mita");
87MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index 5553cd4eab20..5813920e79a5 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -343,7 +343,6 @@ static int scan_for_bad_eraseblocks(void)
343 printk(PRINT_PREF "error: cannot allocate memory\n"); 343 printk(PRINT_PREF "error: cannot allocate memory\n");
344 return -ENOMEM; 344 return -ENOMEM;
345 } 345 }
346 memset(bbt, 0 , ebcnt);
347 346
348 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 347 printk(PRINT_PREF "scanning for bad eraseblocks\n");
349 for (i = 0; i < ebcnt; ++i) { 348 for (i = 0; i < ebcnt; ++i) {
@@ -392,7 +391,6 @@ static int __init mtd_oobtest_init(void)
392 mtd->writesize, ebcnt, pgcnt, mtd->oobsize); 391 mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
393 392
394 err = -ENOMEM; 393 err = -ENOMEM;
395 mtd->erasesize = mtd->erasesize;
396 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL); 394 readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
397 if (!readbuf) { 395 if (!readbuf) {
398 printk(PRINT_PREF "error: cannot allocate memory\n"); 396 printk(PRINT_PREF "error: cannot allocate memory\n");
@@ -476,18 +474,10 @@ static int __init mtd_oobtest_init(void)
476 use_len_max = mtd->ecclayout->oobavail; 474 use_len_max = mtd->ecclayout->oobavail;
477 vary_offset = 1; 475 vary_offset = 1;
478 simple_srand(5); 476 simple_srand(5);
479 printk(PRINT_PREF "writing OOBs of whole device\n"); 477
480 for (i = 0; i < ebcnt; ++i) { 478 err = write_whole_device();
481 if (bbt[i]) 479 if (err)
482 continue; 480 goto out;
483 err = write_eraseblock(i);
484 if (err)
485 goto out;
486 if (i % 256 == 0)
487 printk(PRINT_PREF "written up to eraseblock %u\n", i);
488 cond_resched();
489 }
490 printk(PRINT_PREF "written %u eraseblocks\n", i);
491 481
492 /* Check all eraseblocks */ 482 /* Check all eraseblocks */
493 use_offset = 0; 483 use_offset = 0;
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 103cac480fee..ce17cbe918c5 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -523,6 +523,7 @@ static int __init mtd_pagetest_init(void)
523 do_div(tmp, mtd->erasesize); 523 do_div(tmp, mtd->erasesize);
524 ebcnt = tmp; 524 ebcnt = tmp;
525 pgcnt = mtd->erasesize / mtd->writesize; 525 pgcnt = mtd->erasesize / mtd->writesize;
526 pgsize = mtd->writesize;
526 527
527 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " 528 printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
528 "page size %u, count of eraseblocks %u, pages per " 529 "page size %u, count of eraseblocks %u, pages per "
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 090c556ffed2..3b6f2fa12cff 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -700,7 +700,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
700 struct jffs2_raw_inode ri; 700 struct jffs2_raw_inode ri;
701 struct jffs2_node_frag *last_frag; 701 struct jffs2_node_frag *last_frag;
702 union jffs2_device_node dev; 702 union jffs2_device_node dev;
703 char *mdata = NULL, mdatalen = 0; 703 char *mdata = NULL;
704 int mdatalen = 0;
704 uint32_t alloclen, ilen; 705 uint32_t alloclen, ilen;
705 int ret; 706 int ret;
706 707
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 378991cfe40f..e22de8397b74 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1284,7 +1284,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
1284 f->target = NULL; 1284 f->target = NULL;
1285 mutex_unlock(&f->sem); 1285 mutex_unlock(&f->sem);
1286 jffs2_do_clear_inode(c, f); 1286 jffs2_do_clear_inode(c, f);
1287 return -ret; 1287 return ret;
1288 } 1288 }
1289 1289
1290 f->target[je32_to_cpu(latest_node->csize)] = '\0'; 1290 f->target[je32_to_cpu(latest_node->csize)] = '\0';
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 6caf1e1ee26d..800171dca53b 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -23,7 +23,7 @@
23 23
24int jffs2_sum_init(struct jffs2_sb_info *c) 24int jffs2_sum_init(struct jffs2_sb_info *c)
25{ 25{
26 uint32_t sum_size = max_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); 26 uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
27 27
28 c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); 28 c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
29 29
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 000000000000..e32aa268efac
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,60 @@
1/*
2 * linux/include/kmsg_dump.h
3 *
4 * Copyright (C) 2009 Net Insight AB
5 *
6 * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of this archive
10 * for more details.
11 */
12#ifndef _LINUX_KMSG_DUMP_H
13#define _LINUX_KMSG_DUMP_H
14
15#include <linux/list.h>
16
17enum kmsg_dump_reason {
18 KMSG_DUMP_OOPS,
19 KMSG_DUMP_PANIC,
20};
21
22/**
23 * struct kmsg_dumper - kernel crash message dumper structure
24 * @dump: The callback which gets called on crashes. The buffer is passed
25 * as two sections, where s1 (length l1) contains the older
26 * messages and s2 (length l2) contains the newer.
27 * @list: Entry in the dumper list (private)
28 * @registered: Flag that specifies if this is already registered
29 */
30struct kmsg_dumper {
31 void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
32 const char *s1, unsigned long l1,
33 const char *s2, unsigned long l2);
34 struct list_head list;
35 int registered;
36};
37
38#ifdef CONFIG_PRINTK
39void kmsg_dump(enum kmsg_dump_reason reason);
40
41int kmsg_dump_register(struct kmsg_dumper *dumper);
42
43int kmsg_dump_unregister(struct kmsg_dumper *dumper);
44#else
45static inline void kmsg_dump(enum kmsg_dump_reason reason)
46{
47}
48
49static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
50{
51 return -EINVAL;
52}
53
54static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
55{
56 return -EINVAL;
57}
58#endif
59
60#endif /* _LINUX_KMSG_DUMP_H */
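
This header is the whole public surface of the new kmsg dumper facility that the mtdoops refactor in this merge builds on: a client fills in a struct kmsg_dumper, registers it, and on an oops or panic its dump() callback receives the log buffer as an older segment (s1/l1) and a newer one (s2/l2). A minimal sketch of such a client, using only what the header above declares; the module name and the summary line it prints are illustrative.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* s1/l1 hold the older part of the log, s2/l2 the newer part */
	printk(KERN_INFO "example_dump: reason %d, %lu + %lu bytes of log\n",
	       reason, l1, l2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);

MODULE_DESCRIPTION("Illustrative kmsg_dump client (sketch)");
MODULE_LICENSE("GPL");
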
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index fff8c53e5434..9c3757c5759d 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,22 +19,21 @@
 
 /**
  * struct nand_bbt_descr - bad block table descriptor
  * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with
- *	option BBT_ABSPAGE when bbt is searched,
- *	then we store the found bbts pages here.
- *	Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob area of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This
- *	number of blocks is reserved at the end of the device
- *	where the tables are written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved
- *	(rather than bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked
- *	good / bad blocks, can be NULL, if len = 0
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ *	when bbt is searched, then we store the found bbts pages here.
+ *	Its an array and supports up to 8 chips now
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs: offset of the bbt version counter in the oob are of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ *	blocks is reserved at the end of the device where the tables are
+ *	written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ *	bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ *	bad blocks, can be NULL, if len = 0
  *
  * Descriptor for the bad block table marker and the descriptor for the
  * pattern which identifies good and bad blocks. The assumption is made
@@ -90,7 +89,9 @@ struct nand_bbt_descr {
 /*
  * Constants for oob configuration
  */
-#define ONENAND_BADBLOCK_POS 0
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+#define ONENAND_BADBLOCK_POS 0
 
 /*
  * Bad block scanning errors
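
For illustration, a descriptor matching the fields documented above could describe a table kept in the last good blocks of the device. The pattern bytes and OOB offsets below are assumptions loosely modelled on common defaults, and the sketch assumes the NAND_BBT_* option flags live in bbm.h alongside the descriptor; none of it is code added by this merge:

#include <linux/types.h>
#include <linux/mtd/bbm.h>

/* Illustrative marker bytes; any unique OOB pattern would do. */
static uint8_t example_bbt_pattern[] = { 'B', 'b', 't', '0' };

static struct nand_bbt_descr example_bbt_main = {
	.options	= NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
			  NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs		= 8,	/* pattern starts at byte 8 of the OOB area */
	.len		= 4,	/* non-zero, so pattern checking is enabled */
	.veroffs	= 12,	/* version counter follows the pattern */
	.maxblocks	= NAND_BBT_SCAN_MAXBLOCKS,	/* blocks reserved at the end */
	.pattern	= example_bbt_pattern,
};
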
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 88d3d8fbf9f2..df89f4275232 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -518,10 +518,11 @@ struct cfi_fixup {
 #define CFI_MFR_ANY 0xffff
 #define CFI_ID_ANY 0xffff
 
 #define CFI_MFR_AMD 0x0001
-#define CFI_MFR_ATMEL 0x001F
-#define CFI_MFR_SAMSUNG 0x00EC
-#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_INTEL 0x0089
+#define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_SAMSUNG 0x00EC
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
 
 void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
 
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d4f38c5fd44e..d0bf422ae374 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -38,6 +38,15 @@ typedef enum {
 	FL_XIP_WHILE_ERASING,
 	FL_XIP_WHILE_WRITING,
 	FL_SHUTDOWN,
+	/* These 2 come from nand_state_t, which has been unified here */
+	FL_READING,
+	FL_CACHEDPRG,
+	/* These 4 come from onenand_state_t, which has been unified here */
+	FL_RESETING,
+	FL_OTPING,
+	FL_PREPARING_ERASE,
+	FL_VERIFYING_ERASE,
+
 	FL_UNKNOWN
 } flstate_t;
 
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7a232a9bdd62..ccab9dfc5217 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,6 +21,8 @@
 #include <linux/wait.h>
 #include <linux/spinlock.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
 
 struct mtd_info;
 /* Scan and identify a NAND device */
@@ -168,7 +170,6 @@ typedef enum {
 /* Chip does not allow subpage writes */
 #define NAND_NO_SUBPAGE_WRITE 0x00000200
 
-
 /* Options valid for Samsung large page devices */
 #define NAND_SAMSUNG_LP_OPTIONS \
 	(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -194,6 +195,9 @@ typedef enum {
 /* This option is defined if the board driver allocates its own buffers
    (e.g. because it needs them DMA-coherent */
 #define NAND_OWN_BUFFERS 0x00040000
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV 0x00080000
+
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
 #define NAND_CONTROLLER_ALLOC 0x80000000
@@ -202,20 +206,6 @@ typedef enum {
 #define NAND_CI_CHIPNR_MSK 0x03
 #define NAND_CI_CELLTYPE_MSK 0x0C
 
-/*
- * nand_state_t - chip states
- * Enumeration for NAND flash chip state
- */
-typedef enum {
-	FL_READY,
-	FL_READING,
-	FL_WRITING,
-	FL_ERASING,
-	FL_SYNCING,
-	FL_CACHEDPRG,
-	FL_PM_SUSPENDED,
-} nand_state_t;
-
 /* Keep gcc happy */
 struct nand_chip;
 
@@ -402,7 +392,7 @@ struct nand_chip {
 	uint8_t cellinfo;
 	int badblockpos;
 
-	nand_state_t state;
+	flstate_t state;
 
 	uint8_t *oob_poi;
 	struct nand_hw_control *controller;
@@ -470,75 +460,6 @@ struct nand_manufacturers {
 extern struct nand_flash_dev nand_flash_ids[];
 extern struct nand_manufacturers nand_manuf_ids[];
 
-/**
- * struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
- *	when bbt is searched, then we store the found bbts pages here.
- *	Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob are of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This number of
- *	blocks is reserved at the end of the device where the tables are
- *	written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
- *	bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked good /
- *	bad blocks, can be NULL, if len = 0
- *
- * Descriptor for the bad block table marker and the descriptor for the
- * pattern which identifies good and bad blocks. The assumption is made
- * that the pattern and the version count are always located in the oob area
- * of the first block.
- */
-struct nand_bbt_descr {
-	int options;
-	int pages[NAND_MAX_CHIPS];
-	int offs;
-	int veroffs;
-	uint8_t version[NAND_MAX_CHIPS];
-	int len;
-	int maxblocks;
-	int reserved_block_code;
-	uint8_t *pattern;
-};
-
-/* Options for the bad block table descriptors */
-
-/* The number of bits used per block in the bbt on the device */
-#define NAND_BBT_NRBITS_MSK 0x0000000F
-#define NAND_BBT_1BIT 0x00000001
-#define NAND_BBT_2BIT 0x00000002
-#define NAND_BBT_4BIT 0x00000004
-#define NAND_BBT_8BIT 0x00000008
-/* The bad block table is in the last good block of the device */
-#define NAND_BBT_LASTBLOCK 0x00000010
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_ABSPAGE 0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH 0x00000040
-/* bbt is stored per chip on multichip devices */
-#define NAND_BBT_PERCHIP 0x00000080
-/* bbt has a version counter at offset veroffs */
-#define NAND_BBT_VERSION 0x00000100
-/* Create a bbt if none axists */
-#define NAND_BBT_CREATE 0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000400
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00000800
-/* Write bbt if neccecary */
-#define NAND_BBT_WRITE 0x00001000
-/* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT 0x00002000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00004000
-
-/* The maximum number of blocks to scan for a bbt */
-#define NAND_BBT_SCAN_MAXBLOCKS 4
-
 extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
 extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
 extern int nand_default_bbt(struct mtd_info *mtd);
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
 			size_t * retlen, uint8_t * buf);
 
-/*
-* Constants for oob configuration
-*/
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-
 /**
  * struct platform_nand_chip - chip level device structure
  * @nr_chips: max. number of chips to scan for
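
The new NAND_SCAN_SILENT_NODEV option is intended for boards where the chip may legitimately be absent. A hedged sketch of how a board driver might use it follows; the example_attach_nand() helper and its error handling are hypothetical, not taken from this merge:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical probe helper for a socket that may be unpopulated. */
static int example_attach_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
	/* don't log scan errors if no chip answers the ID read */
	chip->options |= NAND_SCAN_SILENT_NODEV;

	if (nand_scan(mtd, 1))
		return -ENODEV;	/* empty socket, back out quietly */

	return 0;
}
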
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 052ea8ca2434..41bc013571d0 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -16,7 +16,13 @@
 struct mtd_info;
 
 /*
- * Calculate 3 byte ECC code for 256 byte block
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+			  u_char *ecc_code);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
  */
 int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
 
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
 		unsigned int eccsize);
 
 /*
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
  */
 int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
 
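
A hedged sketch of a round trip through the two exported helpers declared above, over a 512-byte block; the example_fix_block() name and the buffer arguments are assumptions for illustration:

#include <linux/types.h>
#include <linux/mtd/nand_ecc.h>

/*
 * Recompute the 3-byte ECC for a 512-byte buffer and try to repair the
 * data against the ECC that was stored on flash.
 */
static int example_fix_block(u_char *data, u_char *stored_ecc)
{
	u_char calc_ecc[3];

	__nand_calculate_ecc(data, 512, calc_ecc);

	/* 0 = clean, 1 = single-bit error corrected, negative = uncorrectable */
	return __nand_correct_data(data, stored_ecc, calc_ecc, 512);
}
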
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4e49f3350678..5509eb06b326 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,7 +1,7 @@
 /*
  * linux/include/linux/mtd/onenand.h
  *
- * Copyright (C) 2005-2007 Samsung Electronics
+ * Copyright © 2005-2009 Samsung Electronics
  * Kyungmin Park <kyungmin.park@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -14,6 +14,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/completion.h>
+#include <linux/mtd/flashchip.h>
 #include <linux/mtd/onenand_regs.h>
 #include <linux/mtd/bbm.h>
 
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
 /* Free resources held by the OneNAND device */
 extern void onenand_release(struct mtd_info *mtd);
 
-/*
- * onenand_state_t - chip states
- * Enumeration for OneNAND flash chip state
- */
-typedef enum {
-	FL_READY,
-	FL_READING,
-	FL_WRITING,
-	FL_ERASING,
-	FL_SYNCING,
-	FL_LOCKING,
-	FL_RESETING,
-	FL_OTPING,
-	FL_PM_SUSPENDED,
-} onenand_state_t;
-
 /**
  * struct onenand_bufferram - OneNAND BufferRAM Data
  * @blockpage: block & page address in BufferRAM
@@ -137,7 +122,7 @@ struct onenand_chip {
 
 	spinlock_t chip_lock;
 	wait_queue_head_t wq;
-	onenand_state_t state;
+	flstate_t state;
 	unsigned char *page_buf;
 	unsigned char *oob_buf;
 
@@ -152,6 +137,8 @@ struct onenand_chip {
 /*
  * Helper macros
  */
+#define ONENAND_PAGES_PER_BLOCK (1<<6)
+
 #define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
 #define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
 #define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index acadbf53a69f..cd6f3b431195 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -131,6 +131,8 @@
 #define ONENAND_CMD_LOCK_TIGHT (0x2C)
 #define ONENAND_CMD_UNLOCK_ALL (0x27)
 #define ONENAND_CMD_ERASE (0x94)
+#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
+#define ONENAND_CMD_ERASE_VERIFY (0x71)
 #define ONENAND_CMD_RESET (0xF0)
 #define ONENAND_CMD_OTP_ACCESS (0x65)
 #define ONENAND_CMD_READID (0x90)
diff --git a/kernel/panic.c b/kernel/panic.c
index 96b45d0b4ba5..5827f7b97254 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -10,6 +10,7 @@
  */
 #include <linux/debug_locks.h>
 #include <linux/interrupt.h>
+#include <linux/kmsg_dump.h>
 #include <linux/kallsyms.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
@@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 	dump_stack();
 #endif
 
+	kmsg_dump(KMSG_DUMP_PANIC);
 	/*
 	 * If we have crashed and we have a crash kernel loaded let it handle
 	 * everything else.
@@ -339,6 +341,7 @@ void oops_exit(void)
 {
 	do_oops_enter_exit();
 	print_oops_end_marker();
+	kmsg_dump(KMSG_DUMP_OOPS);
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
diff --git a/kernel/printk.c b/kernel/printk.c
index b5ac4d99c667..1ded8e7dd19b 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
 #include <linux/ratelimit.h>
+#include <linux/kmsg_dump.h>
 
 #include <asm/uaccess.h>
 
@@ -1405,4 +1406,122 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 	return false;
 }
 EXPORT_SYMBOL(printk_timed_ratelimit);
+
+static DEFINE_SPINLOCK(dump_list_lock);
+static LIST_HEAD(dump_list);
+
+/**
+ * kmsg_dump_register - register a kernel log dumper.
+ * @dump: pointer to the kmsg_dumper structure
+ *
+ * Adds a kernel log dumper to the system. The dump callback in the
+ * structure will be called when the kernel oopses or panics and must be
+ * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
+ */
+int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+	int err = -EBUSY;
+
+	/* The dump callback needs to be set */
+	if (!dumper->dump)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dump_list_lock, flags);
+	/* Don't allow registering multiple times */
+	if (!dumper->registered) {
+		dumper->registered = 1;
+		list_add_tail(&dumper->list, &dump_list);
+		err = 0;
+	}
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_register);
+
+/**
+ * kmsg_dump_unregister - unregister a kmsg dumper.
+ * @dump: pointer to the kmsg_dumper structure
+ *
+ * Removes a dump device from the system. Returns zero on success and
+ * %-EINVAL otherwise.
+ */
+int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+	unsigned long flags;
+	int err = -EINVAL;
+
+	spin_lock_irqsave(&dump_list_lock, flags);
+	if (dumper->registered) {
+		dumper->registered = 0;
+		list_del(&dumper->list);
+		err = 0;
+	}
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
+
+static const char const *kmsg_reasons[] = {
+	[KMSG_DUMP_OOPS] = "oops",
+	[KMSG_DUMP_PANIC] = "panic",
+};
+
+static const char *kmsg_to_str(enum kmsg_dump_reason reason)
+{
+	if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
+		return "unknown";
+
+	return kmsg_reasons[reason];
+}
+
+/**
+ * kmsg_dump - dump kernel log to kernel message dumpers.
+ * @reason: the reason (oops, panic etc) for dumping
+ *
+ * Iterate through each of the dump devices and call the oops/panic
+ * callbacks with the log buffer.
+ */
+void kmsg_dump(enum kmsg_dump_reason reason)
+{
+	unsigned long end;
+	unsigned chars;
+	struct kmsg_dumper *dumper;
+	const char *s1, *s2;
+	unsigned long l1, l2;
+	unsigned long flags;
+
+	/* Theoretically, the log could move on after we do this, but
+	   there's not a lot we can do about that. The new messages
+	   will overwrite the start of what we dump. */
+	spin_lock_irqsave(&logbuf_lock, flags);
+	end = log_end & LOG_BUF_MASK;
+	chars = logged_chars;
+	spin_unlock_irqrestore(&logbuf_lock, flags);
+
+	if (logged_chars > end) {
+		s1 = log_buf + log_buf_len - logged_chars + end;
+		l1 = logged_chars - end;
+
+		s2 = log_buf;
+		l2 = end;
+	} else {
+		s1 = "";
+		l1 = 0;
+
+		s2 = log_buf + end - logged_chars;
+		l2 = logged_chars;
+	}
+
+	if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
+		printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n",
+				kmsg_to_str(reason));
+		return;
+	}
+	list_for_each_entry(dumper, &dump_list, list)
+		dumper->dump(dumper, reason, s1, l1, s2, l2);
+	spin_unlock_irqrestore(&dump_list_lock, flags);
+}
 #endif
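
Since the log buffer is circular, kmsg_dump() above hands each dumper the text as two segments (s1 holds the older part, s2 the newer). A dumper that only wants the most recent few kilobytes can take the tail of each segment, as in this hedged sketch; the 4 KiB record size, the static buffer and the example_* names are assumptions, not code from this merge:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/kmsg_dump.h>

#define EXAMPLE_RECORD_SIZE 4096

static char example_record[EXAMPLE_RECORD_SIZE];

/* Callback registered via kmsg_dump_register(), as shown earlier. */
static void example_record_dump(struct kmsg_dumper *dumper,
				enum kmsg_dump_reason reason,
				const char *s1, unsigned long l1,
				const char *s2, unsigned long l2)
{
	unsigned long want = EXAMPLE_RECORD_SIZE;
	unsigned long take2 = min(l2, want);		/* newest text lives in s2 */
	unsigned long take1 = min(l1, want - take2);	/* top up from the end of s1 */

	/* copy older bytes first so the record stays in chronological order */
	memcpy(example_record, s1 + l1 - take1, take1);
	memcpy(example_record + take1, s2 + l2 - take2, take2);
	/* example_record now holds the last take1 + take2 bytes of the log */
}
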