Diffstat (limited to 'drivers/mtd/nand')
 -rw-r--r--  drivers/mtd/nand/ams-delta.c     |  10
 -rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c  |  12
 -rw-r--r--  drivers/mtd/nand/davinci_nand.c  |   4
 -rw-r--r--  drivers/mtd/nand/mxc_nand.c      |   2
 -rw-r--r--  drivers/mtd/nand/nand_bcm_umi.h  |  73
 -rw-r--r--  drivers/mtd/nand/nomadik_nand.c  |   2
 -rw-r--r--  drivers/mtd/nand/omap2.c         | 303
 -rw-r--r--  drivers/mtd/nand/orion_nand.c    |   2
 -rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c   |  89
 -rw-r--r--  drivers/mtd/nand/s3c2410.c       |   2
10 files changed, 354 insertions, 145 deletions
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 861ca8f7e47d..a7040af08536 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -23,11 +23,15 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-omap.h>
+
 #include <asm/io.h>
-#include <mach/hardware.h>
 #include <asm/sizes.h>
-#include <linux/gpio.h>
-#include <plat/board-ams-delta.h>
+
+#include <mach/board-ams-delta.h>
+
+#include <mach/hardware.h>
 
 /*
  * MTD structure for E3 (Delta)
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c855e7cd337b..d0d1bd4d0e7d 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -249,20 +249,20 @@ static int nand_dev_ready(struct mtd_info *mtd)
 int bcm_umi_nand_inithw(void)
 {
 	/* Configure nand timing parameters */
-	REG_UMI_NAND_TCR &= ~0x7ffff;
-	REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
+	writel(readl(&REG_UMI_NAND_TCR) & ~0x7ffff, &REG_UMI_NAND_TCR);
+	writel(readl(&REG_UMI_NAND_TCR) | HW_CFG_NAND_TCR, &REG_UMI_NAND_TCR);
 
 #if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
 	/* enable software control of CS */
-	REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
+	writel(readl(&REG_UMI_NAND_TCR) | REG_UMI_NAND_TCR_CS_SWCTRL, &REG_UMI_NAND_TCR);
 #endif
 
 	/* keep NAND chip select asserted */
-	REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
+	writel(readl(&REG_UMI_NAND_RCSR) | REG_UMI_NAND_RCSR_CS_ASSERTED, &REG_UMI_NAND_RCSR);
 
-	REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
+	writel(readl(&REG_UMI_NAND_TCR) & ~REG_UMI_NAND_TCR_WORD16, &REG_UMI_NAND_TCR);
 	/* enable writes to flash */
-	REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
+	writel(readl(&REG_UMI_MMD_ICR) | REG_UMI_MMD_ICR_FLASH_WP, &REG_UMI_MMD_ICR);
 
 	writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
 	nand_bcm_umi_wait_till_ready();
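The bcm_umi_nand.c hunk above applies one mechanical rule: a register that used to be modified by dereferencing a REG_UMI_* macro directly is now accessed through readl()/writel() on the address of that register. A minimal sketch of the read-modify-write helpers this amounts to (the helper names are illustrative, not part of the patch):

#include <linux/io.h>

/* Set or clear bits in a memory-mapped 32-bit register through the MMIO
 * accessors instead of dereferencing the mapping directly.
 */
static inline void mmio_set_bits(void __iomem *reg, u32 bits)
{
	writel(readl(reg) | bits, reg);
}

static inline void mmio_clear_bits(void __iomem *reg, u32 bits)
{
	writel(readl(reg) & ~bits, reg);
}

With helpers like these, the first two converted lines above would read mmio_clear_bits(&REG_UMI_NAND_TCR, 0x7ffff) and mmio_set_bits(&REG_UMI_NAND_TCR, HW_CFG_NAND_TCR); the patch keeps the equivalent calls open-coded instead.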
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index d94b03c207af..f1deb1ee2c95 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -34,8 +34,8 @@
 #include <linux/mtd/partitions.h>
 #include <linux/slab.h>
 
-#include <mach/nand.h>
-#include <mach/aemif.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
 
 /*
  * This is a device driver for the NAND flash controller found on the
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 6acc790c2fbb..5683604967d7 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -36,7 +36,7 @@
 #include <linux/of_mtd.h>
 
 #include <asm/mach/flash.h>
-#include <mach/mxc_nand.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
 #include <mach/hardware.h>
 
 #define DRIVER_NAME "mxc_nand"
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 198b304d6f72..d90186684db8 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -17,7 +17,7 @@
 /* ---- Include Files ---------------------------------------------------- */
 #include <mach/reg_umi.h>
 #include <mach/reg_nand.h>
-#include <cfg_global.h>
+#include <mach/cfg_global.h>
 
 /* ---- Constants and Types ---------------------------------------------- */
 #if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
@@ -48,7 +48,7 @@ int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
 /* Check in device is ready */
 static inline int nand_bcm_umi_dev_ready(void)
 {
-	return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
+	return readl(&REG_UMI_NAND_RCSR) & REG_UMI_NAND_RCSR_RDY;
 }
 
 /* Wait until device is ready */
@@ -62,10 +62,11 @@ static inline void nand_bcm_umi_wait_till_ready(void)
 static inline void nand_bcm_umi_hamming_enable_hwecc(void)
 {
 	/* disable and reset ECC, 512 byte page */
-	REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
-		REG_UMI_NAND_ECC_CSR_256BYTE);
+	writel(readl(&REG_UMI_NAND_ECC_CSR) & ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
+		REG_UMI_NAND_ECC_CSR_256BYTE), &REG_UMI_NAND_ECC_CSR);
 	/* enable ECC */
-	REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
+	writel(readl(&REG_UMI_NAND_ECC_CSR) | REG_UMI_NAND_ECC_CSR_ECC_ENABLE,
+	       &REG_UMI_NAND_ECC_CSR);
 }
 
 #if NAND_ECC_BCH
@@ -76,18 +77,18 @@ static inline void nand_bcm_umi_hamming_enable_hwecc(void)
 static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
 {
 	/* disable and reset ECC */
-	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+	writel(REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
 	/* Turn on ECC */
-	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+	writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
 }
 
 /* Enable BCH Write ECC */
 static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
 {
 	/* disable and reset ECC */
-	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
+	writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
 	/* Turn on ECC */
-	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
+	writel(REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN, &REG_UMI_BCH_CTRL_STATUS);
 }
 
 /* Config number of BCH ECC bytes */
@@ -99,9 +100,9 @@ static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
 	uint32_t numBits = numEccBytes * 8;
 
 	/* disable and reset ECC */
-	REG_UMI_BCH_CTRL_STATUS =
-	    REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
-	    REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+	writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
+	       REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID,
+	       &REG_UMI_BCH_CTRL_STATUS);
 
 	/* Every correctible bit requires 13 ECC bits */
 	tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
@@ -113,23 +114,21 @@ static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
 	kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
 
 	/* Write the settings */
-	REG_UMI_BCH_N = nValue;
-	REG_UMI_BCH_T = tValue;
-	REG_UMI_BCH_K = kValue;
+	writel(nValue, &REG_UMI_BCH_N);
+	writel(tValue, &REG_UMI_BCH_T);
+	writel(kValue, &REG_UMI_BCH_K);
 }
 
 /* Pause during ECC read calculation to skip bytes in OOB */
 static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
 {
-	REG_UMI_BCH_CTRL_STATUS =
-	    REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
-	    REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
+	writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN | REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC, &REG_UMI_BCH_CTRL_STATUS);
 }
 
 /* Resume during ECC read calculation after skipping bytes in OOB */
 static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
 {
-	REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+	writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
 }
 
 /* Poll read ECC calc to check when hardware completes */
@@ -139,7 +138,7 @@ static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
 
 	do {
 		/* wait for ECC to be valid */
-		regVal = REG_UMI_BCH_CTRL_STATUS;
+		regVal = readl(&REG_UMI_BCH_CTRL_STATUS);
 	} while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
 
 	return regVal;
@@ -149,7 +148,7 @@ static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
 static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
 {
 	/* wait for ECC to be valid */
-	while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
+	while ((readl(&REG_UMI_BCH_CTRL_STATUS) & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
 	    == 0)
 		;
 }
@@ -170,9 +169,9 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
 	if (pageSize != NAND_DATA_ACCESS_SIZE) {
 		/* skip BI */
 #if defined(__KERNEL__) && !defined(STANDALONE)
-		*oobp++ = REG_NAND_DATA8;
+		*oobp++ = readb(&REG_NAND_DATA8);
 #else
-		REG_NAND_DATA8;
+		readb(&REG_NAND_DATA8);
 #endif
 		numToRead--;
 	}
@@ -180,9 +179,9 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
 	while (numToRead > numEccBytes) {
 		/* skip free oob region */
 #if defined(__KERNEL__) && !defined(STANDALONE)
-		*oobp++ = REG_NAND_DATA8;
+		*oobp++ = readb(&REG_NAND_DATA8);
 #else
-		REG_NAND_DATA8;
+		readb(&REG_NAND_DATA8);
 #endif
 		numToRead--;
 	}
@@ -193,11 +192,11 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
 
 		while (numToRead > 11) {
 #if defined(__KERNEL__) && !defined(STANDALONE)
-			*oobp = REG_NAND_DATA8;
+			*oobp = readb(&REG_NAND_DATA8);
 			eccCalc[eccPos++] = *oobp;
 			oobp++;
 #else
-			eccCalc[eccPos++] = REG_NAND_DATA8;
+			eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
 #endif
 			numToRead--;
 		}
@@ -207,9 +206,9 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
 		if (numToRead == 11) {
 			/* read BI */
 #if defined(__KERNEL__) && !defined(STANDALONE)
-			*oobp++ = REG_NAND_DATA8;
+			*oobp++ = readb(&REG_NAND_DATA8);
 #else
-			REG_NAND_DATA8;
+			readb(&REG_NAND_DATA8);
 #endif
 			numToRead--;
 		}
@@ -219,11 +218,11 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
 		nand_bcm_umi_bch_resume_read_ecc_calc();
 		while (numToRead) {
 #if defined(__KERNEL__) && !defined(STANDALONE)
-			*oobp = REG_NAND_DATA8;
+			*oobp = readb(&REG_NAND_DATA8);
 			eccCalc[eccPos++] = *oobp;
 			oobp++;
 #else
-			eccCalc[eccPos++] = REG_NAND_DATA8;
+			eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
 #endif
 			numToRead--;
 		}
@@ -255,7 +254,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
 	if (pageSize == NAND_DATA_ACCESS_SIZE) {
 		/* Now fill in the ECC bytes */
 		if (numEccBytes >= 13)
-			eccVal = REG_UMI_BCH_WR_ECC_3;
+			eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
 
 		/* Usually we skip CM in oob[0,1] */
 		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
@@ -268,7 +267,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
 				       eccVal & 0xff);	/* ECC 12 */
 
 		if (numEccBytes >= 9)
-			eccVal = REG_UMI_BCH_WR_ECC_2;
+			eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
 
 		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
 				       (eccVal >> 24) & 0xff);	/* ECC11 */
@@ -281,7 +280,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
 
 		/* Now fill in the ECC bytes */
 		if (numEccBytes >= 13)
-			eccVal = REG_UMI_BCH_WR_ECC_3;
+			eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
 
 		/* Usually skip CM in oob[1,2] */
 		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
@@ -294,7 +293,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
 				       eccVal & 0xff);	/* ECC12 */
 
 		if (numEccBytes >= 9)
-			eccVal = REG_UMI_BCH_WR_ECC_2;
+			eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
 
 		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
 				       (eccVal >> 24) & 0xff);	/* ECC11 */
@@ -309,7 +308,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
 				       eccVal & 0xff);	/* ECC8 */
 
 		if (numEccBytes >= 5)
-			eccVal = REG_UMI_BCH_WR_ECC_1;
+			eccVal = readl(&REG_UMI_BCH_WR_ECC_1);
 
 		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
 				       (eccVal >> 24) & 0xff);	/* ECC7 */
@@ -321,7 +320,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
 				       eccVal & 0xff);	/* ECC4 */
 
 		if (numEccBytes >= 1)
-			eccVal = REG_UMI_BCH_WR_ECC_0;
+			eccVal = readl(&REG_UMI_BCH_WR_ECC_0);
 
 		NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
 				       (eccVal >> 24) & 0xff);	/* ECC3 */
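Several of the inline helpers above spin on a status bit by re-reading a register with readl() (nand_bcm_umi_dev_ready(), nand_bcm_umi_bch_poll_read_ecc_calc(), nand_bcm_umi_bch_poll_write_ecc_calc()). A standalone sketch of that polling idiom follows; the iteration bound is added here for illustration only, the header itself spins without a timeout:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative only: poll until a status bit reads back as set,
 * giving up after max_iters attempts instead of looping forever.
 */
static int poll_status_bit(void __iomem *reg, u32 mask, unsigned int max_iters)
{
	unsigned int i;

	for (i = 0; i < max_iters; i++) {
		if (readl(reg) & mask)
			return 0;	/* hardware reports ready */
		udelay(1);
	}
	return -ETIMEDOUT;
}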
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a86aa812ca13..9ee0c4edfacf 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -31,7 +31,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
 #include <linux/slab.h>
-#include <mach/nand.h>
+#include <linux/platform_data/mtd-nomadik-nand.h>
 #include <mach/fsmc.h>
 
 #include <mtd/mtd-abi.h>
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ac4fd756eda3..fc8111278d12 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -29,7 +29,7 @@
 
 #include <plat/dma.h>
 #include <plat/gpmc.h>
-#include <plat/nand.h>
+#include <linux/platform_data/mtd-nand-omap2.h>
 
 #define	DRIVER_NAME	"omap2-nand"
 #define	OMAP_NAND_TIMEOUT_MS	5000
@@ -101,6 +101,16 @@
 #define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
 #define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
 
+#define	PREFETCH_CONFIG1_CS_SHIFT	24
+#define	ECC_CONFIG_CS_SHIFT		1
+#define	CS_MASK				0x7
+#define	ENABLE_PREFETCH			(0x1 << 7)
+#define	DMA_MPU_MODE_SHIFT		2
+#define	ECCSIZE1_SHIFT			22
+#define	ECC1RESULTSIZE			0x1
+#define	ECCCLEAR			0x100
+#define	ECC1				0x1
+
 /* oob info generated runtime depending on ecc algorithm and layout selected */
 static struct nand_ecclayout omap_oobinfo;
 /* Define some generic bad / good block scan pattern which are used
@@ -124,15 +134,18 @@ struct omap_nand_info {
 
 	int				gpmc_cs;
 	unsigned long			phys_base;
+	unsigned long			mem_size;
 	struct completion		comp;
 	struct dma_chan			*dma;
-	int				gpmc_irq;
+	int				gpmc_irq_fifo;
+	int				gpmc_irq_count;
 	enum {
 		OMAP_NAND_IO_READ = 0,	/* read */
 		OMAP_NAND_IO_WRITE,	/* write */
 	} iomode;
 	u_char				*buf;
 	int				buf_len;
+	struct gpmc_nand_regs		reg;
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
 	struct bch_control		*bch;
@@ -141,6 +154,63 @@ struct omap_nand_info {
 };
 
 /**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+	unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+	u32 val;
+
+	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+		return -1;
+
+	if (readl(info->reg.gpmc_prefetch_control))
+		return -EBUSY;
+
+	/* Set the amount of bytes to be prefetched */
+	writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+	/* Set dma/mpu mode, the prefetch read / post write and
+	 * enable the engine. Set which cs is has requested for.
+	 */
+	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+		(dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+	writel(val, info->reg.gpmc_prefetch_config1);
+
+	/* Start the prefetch engine */
+	writel(0x1, info->reg.gpmc_prefetch_control);
+
+	return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+	u32 config1;
+
+	/* check if the same module/cs is trying to reset */
+	config1 = readl(info->reg.gpmc_prefetch_config1);
+	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+		return -EINVAL;
+
+	/* Stop the PFPW engine */
+	writel(0x0, info->reg.gpmc_prefetch_control);
+
+	/* Reset/disable the PFPW engine */
+	writel(0x0, info->reg.gpmc_prefetch_config1);
+
+	return 0;
+}
+
+/**
  * omap_hwcontrol - hardware specific access to control-lines
  * @mtd: MTD device structure
  * @cmd: command to device
@@ -158,13 +228,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 
 	if (cmd != NAND_CMD_NONE) {
 		if (ctrl & NAND_CLE)
-			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+			writeb(cmd, info->reg.gpmc_nand_command);
 
 		else if (ctrl & NAND_ALE)
-			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+			writeb(cmd, info->reg.gpmc_nand_address);
 
 		else /* NAND_NCE */
-			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+			writeb(cmd, info->reg.gpmc_nand_data);
 	}
 }
 
@@ -198,7 +268,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
 		iowrite8(*p++, info->nand.IO_ADDR_W);
 		/* wait until buffer is available for write */
 		do {
-			status = gpmc_read_status(GPMC_STATUS_BUFFER);
+			status = readl(info->reg.gpmc_status) &
+					GPMC_STATUS_BUFF_EMPTY;
 		} while (!status);
 	}
 }
@@ -235,7 +306,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
 		iowrite16(*p++, info->nand.IO_ADDR_W);
 		/* wait until buffer is available for write */
 		do {
-			status = gpmc_read_status(GPMC_STATUS_BUFFER);
+			status = readl(info->reg.gpmc_status) &
+					GPMC_STATUS_BUFF_EMPTY;
 		} while (!status);
 	}
 }
@@ -265,8 +337,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -275,14 +347,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 			omap_read_buf8(mtd, (u_char *)p, len);
 	} else {
 		do {
-			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+			r_count = readl(info->reg.gpmc_prefetch_status);
+			r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
 			r_count = r_count >> 2;
 			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
 			p += r_count;
 			len -= r_count << 2;
 		} while (len);
 		/* disable and stop the PFPW engine */
-		gpmc_prefetch_reset(info->gpmc_cs);
+		omap_prefetch_reset(info->gpmc_cs, info);
 	}
 }
 
@@ -301,6 +374,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	int i = 0, ret = 0;
 	u16 *p = (u16 *)buf;
 	unsigned long tim, limit;
+	u32 val;
 
 	/* take care of subpage writes */
 	if (len % 2 != 0) {
@@ -310,8 +384,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -320,7 +394,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 			omap_write_buf8(mtd, (u_char *)p, len);
 	} else {
 		while (len) {
-			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+			w_count = readl(info->reg.gpmc_prefetch_status);
+			w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
 			w_count = w_count >> 1;
 			for (i = 0; (i < w_count) && len; i++, len -= 2)
 				iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -329,11 +404,14 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 		tim = 0;
 		limit = (loops_per_jiffy *
 					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		do {
 			cpu_relax();
+			val = readl(info->reg.gpmc_prefetch_status);
+			val = GPMC_PREFETCH_STATUS_COUNT(val);
+		} while (val && (tim++ < limit));
 
 		/* disable and stop the PFPW engine */
-		gpmc_prefetch_reset(info->gpmc_cs);
+		omap_prefetch_reset(info->gpmc_cs, info);
 	}
 }
 
@@ -365,6 +443,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	unsigned long tim, limit;
 	unsigned n;
 	int ret;
+	u32 val;
 
 	if (addr >= high_memory) {
 		struct page *p1;
@@ -396,9 +475,9 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	tx->callback_param = &info->comp;
 	dmaengine_submit(tx);
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy_unmap;
@@ -410,11 +489,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	wait_for_completion(&info->comp);
 	tim = 0;
 	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+	do {
 		cpu_relax();
+		val = readl(info->reg.gpmc_prefetch_status);
+		val = GPMC_PREFETCH_STATUS_COUNT(val);
+	} while (val && (tim++ < limit));
 
 	/* disable and stop the PFPW engine */
-	gpmc_prefetch_reset(info->gpmc_cs);
+	omap_prefetch_reset(info->gpmc_cs, info);
 
 	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 	return 0;
@@ -471,13 +554,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
 {
 	struct omap_nand_info *info = (struct omap_nand_info *) dev;
 	u32 bytes;
-	u32 irq_stat;
 
-	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+	bytes = readl(info->reg.gpmc_prefetch_status);
+	bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
 	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
 	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
-		if (irq_stat & 0x2)
+		if (this_irq == info->gpmc_irq_count)
 			goto done;
 
 		if (info->buf_len && (info->buf_len < bytes))
@@ -494,20 +576,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
 					(u32 *)info->buf, bytes >> 2);
 		info->buf = info->buf + bytes;
 
-		if (irq_stat & 0x2)
+		if (this_irq == info->gpmc_irq_count)
 			goto done;
 	}
-	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
 
 	return IRQ_HANDLED;
 
 done:
 	complete(&info->comp);
-	/* disable irq */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
 
-	/* clear status */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+	disable_irq_nosync(info->gpmc_irq_fifo);
+	disable_irq_nosync(info->gpmc_irq_count);
 
 	return IRQ_HANDLED;
 }
@@ -534,22 +613,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
 	init_completion(&info->comp);
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	info->buf_len = len;
-	/* enable irq */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
-		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	enable_irq(info->gpmc_irq_count);
+	enable_irq(info->gpmc_irq_fifo);
 
 	/* waiting for read to complete */
 	wait_for_completion(&info->comp);
 
 	/* disable and stop the PFPW engine */
-	gpmc_prefetch_reset(info->gpmc_cs);
+	omap_prefetch_reset(info->gpmc_cs, info);
 	return;
 
 out_copy:
@@ -572,6 +651,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
 						 struct omap_nand_info, mtd);
 	int ret = 0;
 	unsigned long tim, limit;
+	u32 val;
 
 	if (len <= mtd->oobsize) {
 		omap_write_buf_pref(mtd, buf, len);
@@ -583,27 +663,31 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
 	init_completion(&info->comp);
 
 	/* configure and start prefetch transfer : size=24 */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	info->buf_len = len;
-	/* enable irq */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
-		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	enable_irq(info->gpmc_irq_count);
+	enable_irq(info->gpmc_irq_fifo);
 
 	/* waiting for write to complete */
 	wait_for_completion(&info->comp);
+
 	/* wait for data to flushed-out before reset the prefetch */
 	tim = 0;
 	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+	do {
+		val = readl(info->reg.gpmc_prefetch_status);
+		val = GPMC_PREFETCH_STATUS_COUNT(val);
 		cpu_relax();
+	} while (val && (tim++ < limit));
 
 	/* disable and stop the PFPW engine */
-	gpmc_prefetch_reset(info->gpmc_cs);
+	omap_prefetch_reset(info->gpmc_cs, info);
 	return;
 
 out_copy:
@@ -843,7 +927,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
 {
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
-	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+	u32 val;
+
+	val = readl(info->reg.gpmc_ecc_config);
+	if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
+		return -EINVAL;
+
+	/* read ecc result */
+	val = readl(info->reg.gpmc_ecc1_result);
+	*ecc_code++ = val;          /* P128e, ..., P1e */
+	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
+	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+	return 0;
 }
 
 /**
@@ -857,8 +954,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
 							mtd);
 	struct nand_chip *chip = mtd->priv;
 	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+	u32 val;
+
+	/* clear ecc and enable bits */
+	val = ECCCLEAR | ECC1;
+	writel(val, info->reg.gpmc_ecc_control);
+
+	/* program ecc and result sizes */
+	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+			ECC1RESULTSIZE);
+	writel(val, info->reg.gpmc_ecc_size_config);
 
-	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+	switch (mode) {
+	case NAND_ECC_READ:
+	case NAND_ECC_WRITE:
+		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+		break;
+	case NAND_ECC_READSYN:
+		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+		break;
+	default:
+		dev_info(&info->pdev->dev,
+			"error: unrecognized Mode[%d]!\n", mode);
+		break;
+	}
+
+	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+	writel(val, info->reg.gpmc_ecc_config);
 }
 
 /**
@@ -886,10 +1009,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
 	else
 		timeo += (HZ * 20) / 1000;
 
-	gpmc_nand_write(info->gpmc_cs,
-			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
 	while (time_before(jiffies, timeo)) {
-		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+		status = readb(info->reg.gpmc_nand_data);
 		if (status & NAND_STATUS_READY)
 			break;
 		cond_resched();
@@ -909,22 +1031,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
 
-	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+	val = readl(info->reg.gpmc_status);
+
 	if ((val & 0x100) == 0x100) {
-		/* Clear IRQ Interrupt */
-		val |= 0x100;
-		val &= ~(0x0);
-		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+		return 1;
 	} else {
-		unsigned int cnt = 0;
-		while (cnt++ < 0x1FF) {
-			if ((val & 0x100) == 0x100)
-				return 0;
-			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-		}
+		return 0;
 	}
-
-	return 1;
 }
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
@@ -1155,6 +1268,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	int				i, offset;
 	dma_cap_mask_t			mask;
 	unsigned			sig;
+	struct resource			*res;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -1174,7 +1288,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->pdev = pdev;
 
 	info->gpmc_cs		= pdata->cs;
-	info->phys_base		= pdata->phys_base;
+	info->reg		= pdata->reg;
 
 	info->mtd.priv		= &info->nand;
 	info->mtd.name		= dev_name(&pdev->dev);
@@ -1183,16 +1297,23 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->nand.options	= pdata->devsize;
 	info->nand.options	|= NAND_SKIP_BBTSCAN;
 
-	/* NAND write protect off */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		err = -EINVAL;
+		dev_err(&pdev->dev, "error getting memory resource\n");
+		goto out_free_info;
+	}
+
+	info->phys_base = res->start;
+	info->mem_size = resource_size(res);
 
-	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+	if (!request_mem_region(info->phys_base, info->mem_size,
 				pdev->dev.driver->name)) {
 		err = -EBUSY;
 		goto out_free_info;
 	}
 
-	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+	info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
 	if (!info->nand.IO_ADDR_R) {
 		err = -ENOMEM;
 		goto out_release_mem_region;
@@ -1265,17 +1386,39 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		break;
 
 	case NAND_OMAP_PREFETCH_IRQ:
-		err = request_irq(pdata->gpmc_irq,
-				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+		if (info->gpmc_irq_fifo <= 0) {
+			dev_err(&pdev->dev, "error getting fifo irq\n");
+			err = -ENODEV;
+			goto out_release_mem_region;
+		}
+		err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
+					IRQF_SHARED, "gpmc-nand-fifo", info);
 		if (err) {
 			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
-							pdata->gpmc_irq, err);
+						info->gpmc_irq_fifo, err);
+			info->gpmc_irq_fifo = 0;
+			goto out_release_mem_region;
+		}
+
+		info->gpmc_irq_count = platform_get_irq(pdev, 1);
+		if (info->gpmc_irq_count <= 0) {
+			dev_err(&pdev->dev, "error getting count irq\n");
+			err = -ENODEV;
+			goto out_release_mem_region;
+		}
+		err = request_irq(info->gpmc_irq_count, omap_nand_irq,
+					IRQF_SHARED, "gpmc-nand-count", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+						info->gpmc_irq_count, err);
+			info->gpmc_irq_count = 0;
 			goto out_release_mem_region;
-		} else {
-			info->gpmc_irq	     = pdata->gpmc_irq;
-			info->nand.read_buf  = omap_read_buf_irq_pref;
-			info->nand.write_buf = omap_write_buf_irq_pref;
 		}
+
+		info->nand.read_buf  = omap_read_buf_irq_pref;
+		info->nand.write_buf = omap_write_buf_irq_pref;
+
 		break;
 
 	default:
@@ -1363,7 +1506,11 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 out_release_mem_region:
 	if (info->dma)
 		dma_release_channel(info->dma);
-	release_mem_region(info->phys_base, NAND_IO_SIZE);
+	if (info->gpmc_irq_count > 0)
+		free_irq(info->gpmc_irq_count, info);
+	if (info->gpmc_irq_fifo > 0)
+		free_irq(info->gpmc_irq_fifo, info);
+	release_mem_region(info->phys_base, info->mem_size);
 out_free_info:
 	kfree(info);
 
@@ -1381,8 +1528,10 @@ static int omap_nand_remove(struct platform_device *pdev)
 	if (info->dma)
 		dma_release_channel(info->dma);
 
-	if (info->gpmc_irq)
-		free_irq(info->gpmc_irq, info);
+	if (info->gpmc_irq_count > 0)
+		free_irq(info->gpmc_irq_count, info);
+	if (info->gpmc_irq_fifo > 0)
+		free_irq(info->gpmc_irq_fifo, info);
 
 	/* Release NAND device, its internal structures and partitions */
 	nand_release(&info->mtd);
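Taken together, the omap2.c changes drop the gpmc_* wrapper calls and program the GPMC directly through the register addresses handed over in pdata->reg, with the new omap_prefetch_enable()/omap_prefetch_reset() pair bracketing every prefetch transfer. A condensed, illustrative sketch of the resulting read path (the real code is omap_read_buf_pref() above; the function name here is hypothetical):

/* Sketch: start the prefetch engine, drain the FIFO by polling the
 * prefetch status register, then stop the engine again.  Fallback and
 * error handling are reduced to the essentials.
 */
static int sketch_prefetch_read(struct omap_nand_info *info, u32 *buf, int len)
{
	int ret;

	ret = omap_prefetch_enable(info->gpmc_cs, PREFETCH_FIFOTHRESHOLD_MAX,
				   0x0, len, 0x0, info);
	if (ret)
		return ret;	/* engine busy: caller falls back to PIO copy */

	do {
		u32 cnt = readl(info->reg.gpmc_prefetch_status);

		cnt = GPMC_PREFETCH_STATUS_FIFO_CNT(cnt) >> 2;
		ioread32_rep(info->nand.IO_ADDR_R, buf, cnt);
		buf += cnt;
		len -= cnt << 2;
	} while (len > 0);

	return omap_prefetch_reset(info->gpmc_cs, info);
}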
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index fc5a868c436e..131b58a133f1 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -22,7 +22,7 @@
 #include <asm/io.h>
 #include <asm/sizes.h>
 #include <mach/hardware.h>
-#include <plat/orion_nand.h>
+#include <linux/platform_data/mtd-orion_nand.h>
 
 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 252aaefcacfa..c45227173efd 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -22,9 +22,11 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <mach/dma.h>
-#include <plat/pxa3xx_nand.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
 
 #define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
 #define NAND_STOP_DELAY		(2 * HZ/50)
@@ -1032,7 +1034,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
 	struct pxa3xx_nand_platform_data *pdata;
 	struct pxa3xx_nand_info *info;
 	struct pxa3xx_nand_host *host;
-	struct nand_chip *chip;
+	struct nand_chip *chip = NULL;
 	struct mtd_info *mtd;
 	struct resource *r;
 	int ret, irq, cs;
@@ -1081,21 +1083,31 @@ static int alloc_nand_resource(struct platform_device *pdev)
 	}
 	clk_enable(info->clk);
 
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (r == NULL) {
-		dev_err(&pdev->dev, "no resource defined for data DMA\n");
-		ret = -ENXIO;
-		goto fail_put_clk;
-	}
-	info->drcmr_dat = r->start;
+	/*
+	 * This is a dirty hack to make this driver work from devicetree
+	 * bindings. It can be removed once we have a prober DMA controller
+	 * framework for DT.
+	 */
+	if (pdev->dev.of_node && cpu_is_pxa3xx()) {
+		info->drcmr_dat = 97;
+		info->drcmr_cmd = 99;
+	} else {
+		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (r == NULL) {
+			dev_err(&pdev->dev, "no resource defined for data DMA\n");
+			ret = -ENXIO;
+			goto fail_put_clk;
+		}
+		info->drcmr_dat = r->start;
 
 		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
 		if (r == NULL) {
 			dev_err(&pdev->dev, "no resource defined for command DMA\n");
 			ret = -ENXIO;
 			goto fail_put_clk;
+		}
+		info->drcmr_cmd = r->start;
 	}
-	info->drcmr_cmd = r->start;
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
@@ -1200,12 +1212,55 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+	{ .compatible = "marvell,pxa3xx-nand" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
+
+static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+	struct pxa3xx_nand_platform_data *pdata;
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *of_id =
+			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+	if (!of_id)
+		return 0;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+		pdata->enable_arbiter = 1;
+	if (of_get_property(np, "marvell,nand-keep-config", NULL))
+		pdata->keep_config = 1;
+	of_property_read_u32(np, "num-cs", &pdata->num_cs);
+
+	pdev->dev.platform_data = pdata;
+
+	return 0;
+}
+#else
+static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
 static int pxa3xx_nand_probe(struct platform_device *pdev)
 {
 	struct pxa3xx_nand_platform_data *pdata;
+	struct mtd_part_parser_data ppdata = {};
 	struct pxa3xx_nand_info *info;
 	int ret, cs, probe_success;
 
+	ret = pxa3xx_nand_probe_dt(pdev);
+	if (ret)
+		return ret;
+
 	pdata = pdev->dev.platform_data;
 	if (!pdata) {
 		dev_err(&pdev->dev, "no platform data defined\n");
@@ -1229,8 +1284,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 			continue;
 		}
 
+		ppdata.of_node = pdev->dev.of_node;
 		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
-						NULL, pdata->parts[cs],
+						&ppdata, pdata->parts[cs],
 						pdata->nr_parts[cs]);
 		if (!ret)
 			probe_success = 1;
@@ -1306,6 +1362,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
 static struct platform_driver pxa3xx_nand_driver = {
 	.driver = {
 		.name	= "pxa3xx-nand",
+		.of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
 	},
 	.probe		= pxa3xx_nand_probe,
 	.remove		= pxa3xx_nand_remove,
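The pxa3xx_nand.c changes put an optional devicetree step in front of the existing platform-data path: pxa3xx_nand_probe_dt() builds a pdata structure from DT properties and stores it in pdev->dev.platform_data before the normal probe code runs unchanged. A minimal sketch of the same pattern for a hypothetical driver, using the of_* calls the patch relies on (the driver name, struct, and property names here are placeholders, not from the patch):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical platform data mirroring the shape of the pxa3xx change:
 * boolean DT properties become flags, "num-cs" becomes a count.
 */
struct example_nand_pdata {
	int	enable_arbiter;
	int	keep_config;
	u32	num_cs;
};

static const struct of_device_id example_nand_dt_ids[] = {
	{ .compatible = "vendor,example-nand" },	/* placeholder */
	{ }
};

static int example_nand_probe_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct example_nand_pdata *pdata;

	if (!of_match_device(example_nand_dt_ids, &pdev->dev))
		return 0;	/* not probed from DT: keep board-file pdata */

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->enable_arbiter = of_get_property(np, "vendor,enable-arbiter", NULL) != NULL;
	pdata->keep_config = of_get_property(np, "vendor,keep-config", NULL) != NULL;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);

	pdev->dev.platform_data = pdata;
	return 0;
}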
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 91121f33f743..d8040619ad8d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -46,7 +46,7 @@
 #include <asm/io.h>
 
 #include <plat/regs-nand.h>
-#include <plat/nand.h>
+#include <linux/platform_data/mtd-nand-s3c2410.h>
 
 #ifdef CONFIG_MTD_NAND_S3C2410_HWECC
 static int hardware_ecc = 1;