Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r-- | drivers/mtd/nand/Kconfig          |  19
-rw-r--r-- | drivers/mtd/nand/fsl_upm.c        |   9
-rw-r--r-- | drivers/mtd/nand/mpc5121_nfc.c    |   9
-rw-r--r-- | drivers/mtd/nand/mxc_nand.c       |   5
-rw-r--r-- | drivers/mtd/nand/ndfc.c           |   9
-rw-r--r-- | drivers/mtd/nand/omap2.c          | 367
-rw-r--r-- | drivers/mtd/nand/pasemi_nand.c    |   9
-rw-r--r-- | drivers/mtd/nand/r852.c           |   2
-rw-r--r-- | drivers/mtd/nand/socrates_nand.c  |   9
-rw-r--r-- | drivers/mtd/nand/tmio_nand.c      |  11
10 files changed, 308 insertions, 141 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 78205ac2b10f..a92054e945e1 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -121,23 +121,6 @@ config MTD_NAND_OMAP2 | |||
121 | help | 121 | help |
122 | Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. | 122 | Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. |
123 | 123 | ||
124 | config MTD_NAND_OMAP_PREFETCH | ||
125 | bool "GPMC prefetch support for NAND Flash device" | ||
126 | depends on MTD_NAND_OMAP2 | ||
127 | default y | ||
128 | help | ||
129 | The NAND device can be accessed for Read/Write using GPMC PREFETCH engine | ||
130 | to improve the performance. | ||
131 | |||
132 | config MTD_NAND_OMAP_PREFETCH_DMA | ||
133 | depends on MTD_NAND_OMAP_PREFETCH | ||
134 | bool "DMA mode" | ||
135 | default n | ||
136 | help | ||
137 | The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode | ||
138 | or in DMA interrupt mode. | ||
139 | Say y for DMA mode or MPU mode will be used | ||
140 | |||
141 | config MTD_NAND_IDS | 124 | config MTD_NAND_IDS |
142 | tristate | 125 | tristate |
143 | 126 | ||
@@ -491,7 +474,7 @@ config MTD_NAND_MPC5121_NFC | |||
491 | 474 | ||
492 | config MTD_NAND_MXC | 475 | config MTD_NAND_MXC |
493 | tristate "MXC NAND support" | 476 | tristate "MXC NAND support" |
494 | depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51 | 477 | depends on IMX_HAVE_PLATFORM_MXC_NAND |
495 | help | 478 | help |
496 | This enables the driver for the NAND flash controller on the | 479 | This enables the driver for the NAND flash controller on the |
497 | MXC processors. | 480 | MXC processors. |
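Note on the removed options: with MTD_NAND_OMAP_PREFETCH and MTD_NAND_OMAP_PREFETCH_DMA gone, the transfer mode is no longer a build-time choice; the omap2.c hunks further down read it per board from pdata->xfer_type and pdata->ecc_opt. A rough, hypothetical board-file sketch of that selection follows; the .cs field name and the header set are assumptions, while the xfer_type, devsize and ecc_opt values are taken from the hunks below.

#include <linux/mtd/nand.h>	/* NAND_BUSWIDTH_16 */
#include <plat/nand.h>		/* struct omap_nand_platform_data, as included by omap2.c */

/* Hypothetical board data: selects the prefetch-DMA path at run time
 * instead of CONFIG_MTD_NAND_OMAP_PREFETCH / _PREFETCH_DMA. */
static struct omap_nand_platform_data board_nand_data = {
	.cs		= 0,				/* GPMC chip-select (assumed field) */
	.devsize	= NAND_BUSWIDTH_16,		/* stored straight into nand.options by the new probe */
	.xfer_type	= NAND_OMAP_PREFETCH_DMA,	/* or NAND_OMAP_PREFETCH_POLLED / _IRQ / NAND_OMAP_POLLED */
	.ecc_opt	= OMAP_ECC_HAMMING_CODE_HW,
};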
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index efdcca94ce55..073ee026a17c 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -217,8 +217,7 @@ err: | |||
217 | return ret; | 217 | return ret; |
218 | } | 218 | } |
219 | 219 | ||
220 | static int __devinit fun_probe(struct platform_device *ofdev, | 220 | static int __devinit fun_probe(struct platform_device *ofdev) |
221 | const struct of_device_id *ofid) | ||
222 | { | 221 | { |
223 | struct fsl_upm_nand *fun; | 222 | struct fsl_upm_nand *fun; |
224 | struct resource io_res; | 223 | struct resource io_res; |
@@ -360,7 +359,7 @@ static const struct of_device_id of_fun_match[] = { | |||
360 | }; | 359 | }; |
361 | MODULE_DEVICE_TABLE(of, of_fun_match); | 360 | MODULE_DEVICE_TABLE(of, of_fun_match); |
362 | 361 | ||
363 | static struct of_platform_driver of_fun_driver = { | 362 | static struct platform_driver of_fun_driver = { |
364 | .driver = { | 363 | .driver = { |
365 | .name = "fsl,upm-nand", | 364 | .name = "fsl,upm-nand", |
366 | .owner = THIS_MODULE, | 365 | .owner = THIS_MODULE, |
@@ -372,13 +371,13 @@ static struct of_platform_driver of_fun_driver = { | |||
372 | 371 | ||
373 | static int __init fun_module_init(void) | 372 | static int __init fun_module_init(void) |
374 | { | 373 | { |
375 | return of_register_platform_driver(&of_fun_driver); | 374 | return platform_driver_register(&of_fun_driver); |
376 | } | 375 | } |
377 | module_init(fun_module_init); | 376 | module_init(fun_module_init); |
378 | 377 | ||
379 | static void __exit fun_module_exit(void) | 378 | static void __exit fun_module_exit(void) |
380 | { | 379 | { |
381 | of_unregister_platform_driver(&of_fun_driver); | 380 | platform_driver_unregister(&of_fun_driver); |
382 | } | 381 | } |
383 | module_exit(fun_module_exit); | 382 | module_exit(fun_module_exit); |
384 | 383 | ||
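The fsl_upm hunks above are one instance of the tree-wide of_platform_driver removal that also covers mpc5121_nfc.c, ndfc.c, pasemi_nand.c and socrates_nand.c below: the probe callback drops its of_device_id argument and the driver registers as an ordinary platform driver. A minimal sketch of the resulting pattern, with illustrative names only; the of_match_table wiring is an assumption, since those lines fall outside the hunks shown.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_nand_match[] = {
	{ .compatible = "example,nand-controller" },	/* hypothetical compatible string */
	{},
};
MODULE_DEVICE_TABLE(of, example_nand_match);

/* probe no longer receives a struct of_device_id *; the matched node is
 * still reachable through pdev->dev.of_node. */
static int __devinit example_nand_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	return np ? 0 : -ENODEV;
}

static struct platform_driver example_nand_driver = {
	.probe	= example_nand_probe,
	.driver	= {
		.name		= "example-nand",
		.owner		= THIS_MODULE,
		.of_match_table	= example_nand_match,
	},
};

static int __init example_nand_init(void)
{
	/* was of_register_platform_driver() */
	return platform_driver_register(&example_nand_driver);
}
module_init(example_nand_init);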
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index ddaf0011aa88..0b81b5b499d1 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -651,8 +651,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) | |||
651 | iounmap(prv->csreg); | 651 | iounmap(prv->csreg); |
652 | } | 652 | } |
653 | 653 | ||
654 | static int __devinit mpc5121_nfc_probe(struct platform_device *op, | 654 | static int __devinit mpc5121_nfc_probe(struct platform_device *op) |
655 | const struct of_device_id *match) | ||
656 | { | 655 | { |
657 | struct device_node *rootnode, *dn = op->dev.of_node; | 656 | struct device_node *rootnode, *dn = op->dev.of_node; |
658 | struct device *dev = &op->dev; | 657 | struct device *dev = &op->dev; |
@@ -892,7 +891,7 @@ static struct of_device_id mpc5121_nfc_match[] __devinitdata = { | |||
892 | {}, | 891 | {}, |
893 | }; | 892 | }; |
894 | 893 | ||
895 | static struct of_platform_driver mpc5121_nfc_driver = { | 894 | static struct platform_driver mpc5121_nfc_driver = { |
896 | .probe = mpc5121_nfc_probe, | 895 | .probe = mpc5121_nfc_probe, |
897 | .remove = __devexit_p(mpc5121_nfc_remove), | 896 | .remove = __devexit_p(mpc5121_nfc_remove), |
898 | .driver = { | 897 | .driver = { |
@@ -904,14 +903,14 @@ static struct of_platform_driver mpc5121_nfc_driver = { | |||
904 | 903 | ||
905 | static int __init mpc5121_nfc_init(void) | 904 | static int __init mpc5121_nfc_init(void) |
906 | { | 905 | { |
907 | return of_register_platform_driver(&mpc5121_nfc_driver); | 906 | return platform_driver_register(&mpc5121_nfc_driver); |
908 | } | 907 | } |
909 | 908 | ||
910 | module_init(mpc5121_nfc_init); | 909 | module_init(mpc5121_nfc_init); |
911 | 910 | ||
912 | static void __exit mpc5121_nfc_cleanup(void) | 911 | static void __exit mpc5121_nfc_cleanup(void) |
913 | { | 912 | { |
914 | of_unregister_platform_driver(&mpc5121_nfc_driver); | 913 | platform_driver_unregister(&mpc5121_nfc_driver); |
915 | } | 914 | } |
916 | 915 | ||
917 | module_exit(mpc5121_nfc_cleanup); | 916 | module_exit(mpc5121_nfc_cleanup); |
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b7d5a5b9a543..42a95fb41504 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -747,9 +747,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) | |||
747 | /* | 747 | /* |
748 | * MXC NANDFC can only perform full page+spare or | 748 | * MXC NANDFC can only perform full page+spare or |
749 | * spare-only read/write. When the upper layers | 749 | * spare-only read/write. When the upper layers |
750 | * layers perform a read/write buf operation, | 750 | * perform a read/write buf operation, the saved column |
751 | * we will used the saved column address to index into | 751 | * address is used to index into the full page. |
752 | * the full page. | ||
753 | */ | 752 | */ |
754 | host->send_addr(host, 0, page_addr == -1); | 753 | host->send_addr(host, 0, page_addr == -1); |
755 | if (mtd->writesize > 512) | 754 | if (mtd->writesize > 512) |
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index c9ae0a5023b6..bbe6d451290d 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -225,8 +225,7 @@ err: | |||
225 | return ret; | 225 | return ret; |
226 | } | 226 | } |
227 | 227 | ||
228 | static int __devinit ndfc_probe(struct platform_device *ofdev, | 228 | static int __devinit ndfc_probe(struct platform_device *ofdev) |
229 | const struct of_device_id *match) | ||
230 | { | 229 | { |
231 | struct ndfc_controller *ndfc = &ndfc_ctrl; | 230 | struct ndfc_controller *ndfc = &ndfc_ctrl; |
232 | const __be32 *reg; | 231 | const __be32 *reg; |
@@ -292,7 +291,7 @@ static const struct of_device_id ndfc_match[] = { | |||
292 | }; | 291 | }; |
293 | MODULE_DEVICE_TABLE(of, ndfc_match); | 292 | MODULE_DEVICE_TABLE(of, ndfc_match); |
294 | 293 | ||
295 | static struct of_platform_driver ndfc_driver = { | 294 | static struct platform_driver ndfc_driver = { |
296 | .driver = { | 295 | .driver = { |
297 | .name = "ndfc", | 296 | .name = "ndfc", |
298 | .owner = THIS_MODULE, | 297 | .owner = THIS_MODULE, |
@@ -304,12 +303,12 @@ static struct of_platform_driver ndfc_driver = { | |||
304 | 303 | ||
305 | static int __init ndfc_nand_init(void) | 304 | static int __init ndfc_nand_init(void) |
306 | { | 305 | { |
307 | return of_register_platform_driver(&ndfc_driver); | 306 | return platform_driver_register(&ndfc_driver); |
308 | } | 307 | } |
309 | 308 | ||
310 | static void __exit ndfc_nand_exit(void) | 309 | static void __exit ndfc_nand_exit(void) |
311 | { | 310 | { |
312 | of_unregister_platform_driver(&ndfc_driver); | 311 | platform_driver_unregister(&ndfc_driver); |
313 | } | 312 | } |
314 | 313 | ||
315 | module_init(ndfc_nand_init); | 314 | module_init(ndfc_nand_init); |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7a4e2550b13..da9a351c9d79 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/interrupt.h> | ||
14 | #include <linux/jiffies.h> | 15 | #include <linux/jiffies.h> |
15 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
16 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
@@ -24,6 +25,7 @@ | |||
24 | #include <plat/nand.h> | 25 | #include <plat/nand.h> |
25 | 26 | ||
26 | #define DRIVER_NAME "omap2-nand" | 27 | #define DRIVER_NAME "omap2-nand" |
28 | #define OMAP_NAND_TIMEOUT_MS 5000 | ||
27 | 29 | ||
28 | #define NAND_Ecc_P1e (1 << 0) | 30 | #define NAND_Ecc_P1e (1 << 0) |
29 | #define NAND_Ecc_P2e (1 << 1) | 31 | #define NAND_Ecc_P2e (1 << 1) |
@@ -96,26 +98,19 @@ | |||
96 | static const char *part_probes[] = { "cmdlinepart", NULL }; | 98 | static const char *part_probes[] = { "cmdlinepart", NULL }; |
97 | #endif | 99 | #endif |
98 | 100 | ||
99 | #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH | 101 | /* oob info generated runtime depending on ecc algorithm and layout selected */ |
100 | static int use_prefetch = 1; | 102 | static struct nand_ecclayout omap_oobinfo; |
101 | 103 | /* Define some generic bad / good block scan pattern which are used | |
102 | /* "modprobe ... use_prefetch=0" etc */ | 104 | * while scanning a device for factory marked good / bad blocks |
103 | module_param(use_prefetch, bool, 0); | 105 | */ |
104 | MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH"); | 106 | static uint8_t scan_ff_pattern[] = { 0xff }; |
105 | 107 | static struct nand_bbt_descr bb_descrip_flashbased = { | |
106 | #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA | 108 | .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, |
107 | static int use_dma = 1; | 109 | .offs = 0, |
110 | .len = 1, | ||
111 | .pattern = scan_ff_pattern, | ||
112 | }; | ||
108 | 113 | ||
109 | /* "modprobe ... use_dma=0" etc */ | ||
110 | module_param(use_dma, bool, 0); | ||
111 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); | ||
112 | #else | ||
113 | static const int use_dma; | ||
114 | #endif | ||
115 | #else | ||
116 | const int use_prefetch; | ||
117 | static const int use_dma; | ||
118 | #endif | ||
119 | 114 | ||
120 | struct omap_nand_info { | 115 | struct omap_nand_info { |
121 | struct nand_hw_control controller; | 116 | struct nand_hw_control controller; |
@@ -129,6 +124,13 @@ struct omap_nand_info { | |||
129 | unsigned long phys_base; | 124 | unsigned long phys_base; |
130 | struct completion comp; | 125 | struct completion comp; |
131 | int dma_ch; | 126 | int dma_ch; |
127 | int gpmc_irq; | ||
128 | enum { | ||
129 | OMAP_NAND_IO_READ = 0, /* read */ | ||
130 | OMAP_NAND_IO_WRITE, /* write */ | ||
131 | } iomode; | ||
132 | u_char *buf; | ||
133 | int buf_len; | ||
132 | }; | 134 | }; |
133 | 135 | ||
134 | /** | 136 | /** |
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) | |||
256 | } | 258 | } |
257 | 259 | ||
258 | /* configure and start prefetch transfer */ | 260 | /* configure and start prefetch transfer */ |
259 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); | 261 | ret = gpmc_prefetch_enable(info->gpmc_cs, |
262 | PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); | ||
260 | if (ret) { | 263 | if (ret) { |
261 | /* PFPW engine is busy, use cpu copy method */ | 264 | /* PFPW engine is busy, use cpu copy method */ |
262 | if (info->nand.options & NAND_BUSWIDTH_16) | 265 | if (info->nand.options & NAND_BUSWIDTH_16) |
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, | |||
288 | { | 291 | { |
289 | struct omap_nand_info *info = container_of(mtd, | 292 | struct omap_nand_info *info = container_of(mtd, |
290 | struct omap_nand_info, mtd); | 293 | struct omap_nand_info, mtd); |
291 | uint32_t pref_count = 0, w_count = 0; | 294 | uint32_t w_count = 0; |
292 | int i = 0, ret = 0; | 295 | int i = 0, ret = 0; |
293 | u16 *p; | 296 | u16 *p; |
297 | unsigned long tim, limit; | ||
294 | 298 | ||
295 | /* take care of subpage writes */ | 299 | /* take care of subpage writes */ |
296 | if (len % 2 != 0) { | 300 | if (len % 2 != 0) { |
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd, | |||
300 | } | 304 | } |
301 | 305 | ||
302 | /* configure and start prefetch transfer */ | 306 | /* configure and start prefetch transfer */ |
303 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1); | 307 | ret = gpmc_prefetch_enable(info->gpmc_cs, |
308 | PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); | ||
304 | if (ret) { | 309 | if (ret) { |
305 | /* PFPW engine is busy, use cpu copy method */ | 310 | /* PFPW engine is busy, use cpu copy method */ |
306 | if (info->nand.options & NAND_BUSWIDTH_16) | 311 | if (info->nand.options & NAND_BUSWIDTH_16) |
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd, | |||
316 | iowrite16(*p++, info->nand.IO_ADDR_W); | 321 | iowrite16(*p++, info->nand.IO_ADDR_W); |
317 | } | 322 | } |
318 | /* wait for data to flushed-out before reset the prefetch */ | 323 | /* wait for data to flushed-out before reset the prefetch */ |
319 | do { | 324 | tim = 0; |
320 | pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT); | 325 | limit = (loops_per_jiffy * |
321 | } while (pref_count); | 326 | msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); |
327 | while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) | ||
328 | cpu_relax(); | ||
329 | |||
322 | /* disable and stop the PFPW engine */ | 330 | /* disable and stop the PFPW engine */ |
323 | gpmc_prefetch_reset(info->gpmc_cs); | 331 | gpmc_prefetch_reset(info->gpmc_cs); |
324 | } | 332 | } |
325 | } | 333 | } |
326 | 334 | ||
327 | #ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA | ||
328 | /* | 335 | /* |
329 | * omap_nand_dma_cb: callback on the completion of dma transfer | 336 | * omap_nand_dma_cb: callback on the completion of dma transfer |
330 | * @lch: logical channel | 337 | * @lch: logical channel |
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
348 | { | 355 | { |
349 | struct omap_nand_info *info = container_of(mtd, | 356 | struct omap_nand_info *info = container_of(mtd, |
350 | struct omap_nand_info, mtd); | 357 | struct omap_nand_info, mtd); |
351 | uint32_t prefetch_status = 0; | ||
352 | enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : | 358 | enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : |
353 | DMA_FROM_DEVICE; | 359 | DMA_FROM_DEVICE; |
354 | dma_addr_t dma_addr; | 360 | dma_addr_t dma_addr; |
355 | int ret; | 361 | int ret; |
362 | unsigned long tim, limit; | ||
356 | 363 | ||
357 | /* The fifo depth is 64 bytes. We have a sync at each frame and frame | 364 | /* The fifo depth is 64 bytes max. |
358 | * length is 64 bytes. | 365 | * But configure the FIFO-threshold to 32 to get a sync at each frame |
366 | * and frame length is 32 bytes. | ||
359 | */ | 367 | */ |
360 | int buf_len = len >> 6; | 368 | int buf_len = len >> 6; |
361 | 369 | ||
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
396 | OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); | 404 | OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); |
397 | } | 405 | } |
398 | /* configure and start prefetch transfer */ | 406 | /* configure and start prefetch transfer */ |
399 | ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write); | 407 | ret = gpmc_prefetch_enable(info->gpmc_cs, |
408 | PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); | ||
400 | if (ret) | 409 | if (ret) |
401 | /* PFPW engine is busy, use cpu copy methode */ | 410 | /* PFPW engine is busy, use cpu copy method */ |
402 | goto out_copy; | 411 | goto out_copy; |
403 | 412 | ||
404 | init_completion(&info->comp); | 413 | init_completion(&info->comp); |
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | |||
407 | 416 | ||
408 | /* setup and start DMA using dma_addr */ | 417 | /* setup and start DMA using dma_addr */ |
409 | wait_for_completion(&info->comp); | 418 | wait_for_completion(&info->comp); |
419 | tim = 0; | ||
420 | limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); | ||
421 | while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) | ||
422 | cpu_relax(); | ||
410 | 423 | ||
411 | do { | ||
412 | prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); | ||
413 | } while (prefetch_status); | ||
414 | /* disable and stop the PFPW engine */ | 424 | /* disable and stop the PFPW engine */ |
415 | gpmc_prefetch_reset(info->gpmc_cs); | 425 | gpmc_prefetch_reset(info->gpmc_cs); |
416 | 426 | ||
@@ -426,14 +436,6 @@ out_copy: | |||
426 | : omap_write_buf8(mtd, (u_char *) addr, len); | 436 | : omap_write_buf8(mtd, (u_char *) addr, len); |
427 | return 0; | 437 | return 0; |
428 | } | 438 | } |
429 | #else | ||
430 | static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {} | ||
431 | static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, | ||
432 | unsigned int len, int is_write) | ||
433 | { | ||
434 | return 0; | ||
435 | } | ||
436 | #endif | ||
437 | 439 | ||
438 | /** | 440 | /** |
439 | * omap_read_buf_dma_pref - read data from NAND controller into buffer | 441 | * omap_read_buf_dma_pref - read data from NAND controller into buffer |
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, | |||
466 | omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); | 468 | omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); |
467 | } | 469 | } |
468 | 470 | ||
471 | /* | ||
472 | * omap_nand_irq - GMPC irq handler | ||
473 | * @this_irq: gpmc irq number | ||
474 | * @dev: omap_nand_info structure pointer is passed here | ||
475 | */ | ||
476 | static irqreturn_t omap_nand_irq(int this_irq, void *dev) | ||
477 | { | ||
478 | struct omap_nand_info *info = (struct omap_nand_info *) dev; | ||
479 | u32 bytes; | ||
480 | u32 irq_stat; | ||
481 | |||
482 | irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS); | ||
483 | bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); | ||
484 | bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */ | ||
485 | if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */ | ||
486 | if (irq_stat & 0x2) | ||
487 | goto done; | ||
488 | |||
489 | if (info->buf_len && (info->buf_len < bytes)) | ||
490 | bytes = info->buf_len; | ||
491 | else if (!info->buf_len) | ||
492 | bytes = 0; | ||
493 | iowrite32_rep(info->nand.IO_ADDR_W, | ||
494 | (u32 *)info->buf, bytes >> 2); | ||
495 | info->buf = info->buf + bytes; | ||
496 | info->buf_len -= bytes; | ||
497 | |||
498 | } else { | ||
499 | ioread32_rep(info->nand.IO_ADDR_R, | ||
500 | (u32 *)info->buf, bytes >> 2); | ||
501 | info->buf = info->buf + bytes; | ||
502 | |||
503 | if (irq_stat & 0x2) | ||
504 | goto done; | ||
505 | } | ||
506 | gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); | ||
507 | |||
508 | return IRQ_HANDLED; | ||
509 | |||
510 | done: | ||
511 | complete(&info->comp); | ||
512 | /* disable irq */ | ||
513 | gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0); | ||
514 | |||
515 | /* clear status */ | ||
516 | gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); | ||
517 | |||
518 | return IRQ_HANDLED; | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * omap_read_buf_irq_pref - read data from NAND controller into buffer | ||
523 | * @mtd: MTD device structure | ||
524 | * @buf: buffer to store date | ||
525 | * @len: number of bytes to read | ||
526 | */ | ||
527 | static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len) | ||
528 | { | ||
529 | struct omap_nand_info *info = container_of(mtd, | ||
530 | struct omap_nand_info, mtd); | ||
531 | int ret = 0; | ||
532 | |||
533 | if (len <= mtd->oobsize) { | ||
534 | omap_read_buf_pref(mtd, buf, len); | ||
535 | return; | ||
536 | } | ||
537 | |||
538 | info->iomode = OMAP_NAND_IO_READ; | ||
539 | info->buf = buf; | ||
540 | init_completion(&info->comp); | ||
541 | |||
542 | /* configure and start prefetch transfer */ | ||
543 | ret = gpmc_prefetch_enable(info->gpmc_cs, | ||
544 | PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0); | ||
545 | if (ret) | ||
546 | /* PFPW engine is busy, use cpu copy method */ | ||
547 | goto out_copy; | ||
548 | |||
549 | info->buf_len = len; | ||
550 | /* enable irq */ | ||
551 | gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, | ||
552 | (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); | ||
553 | |||
554 | /* waiting for read to complete */ | ||
555 | wait_for_completion(&info->comp); | ||
556 | |||
557 | /* disable and stop the PFPW engine */ | ||
558 | gpmc_prefetch_reset(info->gpmc_cs); | ||
559 | return; | ||
560 | |||
561 | out_copy: | ||
562 | if (info->nand.options & NAND_BUSWIDTH_16) | ||
563 | omap_read_buf16(mtd, buf, len); | ||
564 | else | ||
565 | omap_read_buf8(mtd, buf, len); | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * omap_write_buf_irq_pref - write buffer to NAND controller | ||
570 | * @mtd: MTD device structure | ||
571 | * @buf: data buffer | ||
572 | * @len: number of bytes to write | ||
573 | */ | ||
574 | static void omap_write_buf_irq_pref(struct mtd_info *mtd, | ||
575 | const u_char *buf, int len) | ||
576 | { | ||
577 | struct omap_nand_info *info = container_of(mtd, | ||
578 | struct omap_nand_info, mtd); | ||
579 | int ret = 0; | ||
580 | unsigned long tim, limit; | ||
581 | |||
582 | if (len <= mtd->oobsize) { | ||
583 | omap_write_buf_pref(mtd, buf, len); | ||
584 | return; | ||
585 | } | ||
586 | |||
587 | info->iomode = OMAP_NAND_IO_WRITE; | ||
588 | info->buf = (u_char *) buf; | ||
589 | init_completion(&info->comp); | ||
590 | |||
591 | /* configure and start prefetch transfer : size=24 */ | ||
592 | ret = gpmc_prefetch_enable(info->gpmc_cs, | ||
593 | (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1); | ||
594 | if (ret) | ||
595 | /* PFPW engine is busy, use cpu copy method */ | ||
596 | goto out_copy; | ||
597 | |||
598 | info->buf_len = len; | ||
599 | /* enable irq */ | ||
600 | gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, | ||
601 | (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); | ||
602 | |||
603 | /* waiting for write to complete */ | ||
604 | wait_for_completion(&info->comp); | ||
605 | /* wait for data to flushed-out before reset the prefetch */ | ||
606 | tim = 0; | ||
607 | limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); | ||
608 | while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) | ||
609 | cpu_relax(); | ||
610 | |||
611 | /* disable and stop the PFPW engine */ | ||
612 | gpmc_prefetch_reset(info->gpmc_cs); | ||
613 | return; | ||
614 | |||
615 | out_copy: | ||
616 | if (info->nand.options & NAND_BUSWIDTH_16) | ||
617 | omap_write_buf16(mtd, buf, len); | ||
618 | else | ||
619 | omap_write_buf8(mtd, buf, len); | ||
620 | } | ||
621 | |||
469 | /** | 622 | /** |
470 | * omap_verify_buf - Verify chip data against buffer | 623 | * omap_verify_buf - Verify chip data against buffer |
471 | * @mtd: MTD device structure | 624 | * @mtd: MTD device structure |
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len) | |||
487 | return 0; | 640 | return 0; |
488 | } | 641 | } |
489 | 642 | ||
490 | #ifdef CONFIG_MTD_NAND_OMAP_HWECC | ||
491 | |||
492 | /** | 643 | /** |
493 | * gen_true_ecc - This function will generate true ECC value | 644 | * gen_true_ecc - This function will generate true ECC value |
494 | * @ecc_buf: buffer to store ecc code | 645 | * @ecc_buf: buffer to store ecc code |
@@ -716,8 +867,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode) | |||
716 | gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); | 867 | gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); |
717 | } | 868 | } |
718 | 869 | ||
719 | #endif | ||
720 | |||
721 | /** | 870 | /** |
722 | * omap_wait - wait until the command is done | 871 | * omap_wait - wait until the command is done |
723 | * @mtd: MTD device structure | 872 | * @mtd: MTD device structure |
@@ -787,6 +936,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
787 | struct omap_nand_info *info; | 936 | struct omap_nand_info *info; |
788 | struct omap_nand_platform_data *pdata; | 937 | struct omap_nand_platform_data *pdata; |
789 | int err; | 938 | int err; |
939 | int i, offset; | ||
790 | 940 | ||
791 | pdata = pdev->dev.platform_data; | 941 | pdata = pdev->dev.platform_data; |
792 | if (pdata == NULL) { | 942 | if (pdata == NULL) { |
@@ -812,7 +962,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
812 | info->mtd.name = dev_name(&pdev->dev); | 962 | info->mtd.name = dev_name(&pdev->dev); |
813 | info->mtd.owner = THIS_MODULE; | 963 | info->mtd.owner = THIS_MODULE; |
814 | 964 | ||
815 | info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0; | 965 | info->nand.options = pdata->devsize; |
816 | info->nand.options |= NAND_SKIP_BBTSCAN; | 966 | info->nand.options |= NAND_SKIP_BBTSCAN; |
817 | 967 | ||
818 | /* NAND write protect off */ | 968 | /* NAND write protect off */ |
@@ -850,28 +1000,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
850 | info->nand.chip_delay = 50; | 1000 | info->nand.chip_delay = 50; |
851 | } | 1001 | } |
852 | 1002 | ||
853 | if (use_prefetch) { | 1003 | switch (pdata->xfer_type) { |
854 | 1004 | case NAND_OMAP_PREFETCH_POLLED: | |
855 | info->nand.read_buf = omap_read_buf_pref; | 1005 | info->nand.read_buf = omap_read_buf_pref; |
856 | info->nand.write_buf = omap_write_buf_pref; | 1006 | info->nand.write_buf = omap_write_buf_pref; |
857 | if (use_dma) { | 1007 | break; |
858 | err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", | 1008 | |
859 | omap_nand_dma_cb, &info->comp, &info->dma_ch); | 1009 | case NAND_OMAP_POLLED: |
860 | if (err < 0) { | ||
861 | info->dma_ch = -1; | ||
862 | printk(KERN_WARNING "DMA request failed." | ||
863 | " Non-dma data transfer mode\n"); | ||
864 | } else { | ||
865 | omap_set_dma_dest_burst_mode(info->dma_ch, | ||
866 | OMAP_DMA_DATA_BURST_16); | ||
867 | omap_set_dma_src_burst_mode(info->dma_ch, | ||
868 | OMAP_DMA_DATA_BURST_16); | ||
869 | |||
870 | info->nand.read_buf = omap_read_buf_dma_pref; | ||
871 | info->nand.write_buf = omap_write_buf_dma_pref; | ||
872 | } | ||
873 | } | ||
874 | } else { | ||
875 | if (info->nand.options & NAND_BUSWIDTH_16) { | 1010 | if (info->nand.options & NAND_BUSWIDTH_16) { |
876 | info->nand.read_buf = omap_read_buf16; | 1011 | info->nand.read_buf = omap_read_buf16; |
877 | info->nand.write_buf = omap_write_buf16; | 1012 | info->nand.write_buf = omap_write_buf16; |
@@ -879,20 +1014,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
879 | info->nand.read_buf = omap_read_buf8; | 1014 | info->nand.read_buf = omap_read_buf8; |
880 | info->nand.write_buf = omap_write_buf8; | 1015 | info->nand.write_buf = omap_write_buf8; |
881 | } | 1016 | } |
1017 | break; | ||
1018 | |||
1019 | case NAND_OMAP_PREFETCH_DMA: | ||
1020 | err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", | ||
1021 | omap_nand_dma_cb, &info->comp, &info->dma_ch); | ||
1022 | if (err < 0) { | ||
1023 | info->dma_ch = -1; | ||
1024 | dev_err(&pdev->dev, "DMA request failed!\n"); | ||
1025 | goto out_release_mem_region; | ||
1026 | } else { | ||
1027 | omap_set_dma_dest_burst_mode(info->dma_ch, | ||
1028 | OMAP_DMA_DATA_BURST_16); | ||
1029 | omap_set_dma_src_burst_mode(info->dma_ch, | ||
1030 | OMAP_DMA_DATA_BURST_16); | ||
1031 | |||
1032 | info->nand.read_buf = omap_read_buf_dma_pref; | ||
1033 | info->nand.write_buf = omap_write_buf_dma_pref; | ||
1034 | } | ||
1035 | break; | ||
1036 | |||
1037 | case NAND_OMAP_PREFETCH_IRQ: | ||
1038 | err = request_irq(pdata->gpmc_irq, | ||
1039 | omap_nand_irq, IRQF_SHARED, "gpmc-nand", info); | ||
1040 | if (err) { | ||
1041 | dev_err(&pdev->dev, "requesting irq(%d) error:%d", | ||
1042 | pdata->gpmc_irq, err); | ||
1043 | goto out_release_mem_region; | ||
1044 | } else { | ||
1045 | info->gpmc_irq = pdata->gpmc_irq; | ||
1046 | info->nand.read_buf = omap_read_buf_irq_pref; | ||
1047 | info->nand.write_buf = omap_write_buf_irq_pref; | ||
1048 | } | ||
1049 | break; | ||
1050 | |||
1051 | default: | ||
1052 | dev_err(&pdev->dev, | ||
1053 | "xfer_type(%d) not supported!\n", pdata->xfer_type); | ||
1054 | err = -EINVAL; | ||
1055 | goto out_release_mem_region; | ||
882 | } | 1056 | } |
883 | info->nand.verify_buf = omap_verify_buf; | ||
884 | 1057 | ||
885 | #ifdef CONFIG_MTD_NAND_OMAP_HWECC | 1058 | info->nand.verify_buf = omap_verify_buf; |
886 | info->nand.ecc.bytes = 3; | ||
887 | info->nand.ecc.size = 512; | ||
888 | info->nand.ecc.calculate = omap_calculate_ecc; | ||
889 | info->nand.ecc.hwctl = omap_enable_hwecc; | ||
890 | info->nand.ecc.correct = omap_correct_data; | ||
891 | info->nand.ecc.mode = NAND_ECC_HW; | ||
892 | 1059 | ||
893 | #else | 1060 | /* select the ecc type */ |
894 | info->nand.ecc.mode = NAND_ECC_SOFT; | 1061 | if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) |
895 | #endif | 1062 | info->nand.ecc.mode = NAND_ECC_SOFT; |
1063 | else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || | ||
1064 | (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { | ||
1065 | info->nand.ecc.bytes = 3; | ||
1066 | info->nand.ecc.size = 512; | ||
1067 | info->nand.ecc.calculate = omap_calculate_ecc; | ||
1068 | info->nand.ecc.hwctl = omap_enable_hwecc; | ||
1069 | info->nand.ecc.correct = omap_correct_data; | ||
1070 | info->nand.ecc.mode = NAND_ECC_HW; | ||
1071 | } | ||
896 | 1072 | ||
897 | /* DIP switches on some boards change between 8 and 16 bit | 1073 | /* DIP switches on some boards change between 8 and 16 bit |
898 | * bus widths for flash. Try the other width if the first try fails. | 1074 | * bus widths for flash. Try the other width if the first try fails. |
@@ -905,6 +1081,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
905 | } | 1081 | } |
906 | } | 1082 | } |
907 | 1083 | ||
1084 | /* rom code layout */ | ||
1085 | if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) { | ||
1086 | |||
1087 | if (info->nand.options & NAND_BUSWIDTH_16) | ||
1088 | offset = 2; | ||
1089 | else { | ||
1090 | offset = 1; | ||
1091 | info->nand.badblock_pattern = &bb_descrip_flashbased; | ||
1092 | } | ||
1093 | omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16); | ||
1094 | for (i = 0; i < omap_oobinfo.eccbytes; i++) | ||
1095 | omap_oobinfo.eccpos[i] = i+offset; | ||
1096 | |||
1097 | omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes; | ||
1098 | omap_oobinfo.oobfree->length = info->mtd.oobsize - | ||
1099 | (offset + omap_oobinfo.eccbytes); | ||
1100 | |||
1101 | info->nand.ecc.layout = &omap_oobinfo; | ||
1102 | } | ||
1103 | |||
908 | #ifdef CONFIG_MTD_PARTITIONS | 1104 | #ifdef CONFIG_MTD_PARTITIONS |
909 | err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); | 1105 | err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); |
910 | if (err > 0) | 1106 | if (err > 0) |
@@ -934,9 +1130,12 @@ static int omap_nand_remove(struct platform_device *pdev) | |||
934 | mtd); | 1130 | mtd); |
935 | 1131 | ||
936 | platform_set_drvdata(pdev, NULL); | 1132 | platform_set_drvdata(pdev, NULL); |
937 | if (use_dma) | 1133 | if (info->dma_ch != -1) |
938 | omap_free_dma(info->dma_ch); | 1134 | omap_free_dma(info->dma_ch); |
939 | 1135 | ||
1136 | if (info->gpmc_irq) | ||
1137 | free_irq(info->gpmc_irq, info); | ||
1138 | |||
940 | /* Release NAND device, its internal structures and partitions */ | 1139 | /* Release NAND device, its internal structures and partitions */ |
941 | nand_release(&info->mtd); | 1140 | nand_release(&info->mtd); |
942 | iounmap(info->nand.IO_ADDR_R); | 1141 | iounmap(info->nand.IO_ADDR_R); |
@@ -955,16 +1154,8 @@ static struct platform_driver omap_nand_driver = { | |||
955 | 1154 | ||
956 | static int __init omap_nand_init(void) | 1155 | static int __init omap_nand_init(void) |
957 | { | 1156 | { |
958 | printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME); | 1157 | pr_info("%s driver initializing\n", DRIVER_NAME); |
959 | 1158 | ||
960 | /* This check is required if driver is being | ||
961 | * loaded run time as a module | ||
962 | */ | ||
963 | if ((1 == use_dma) && (0 == use_prefetch)) { | ||
964 | printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 " | ||
965 | "without use_prefetch'. Prefetch will not be" | ||
966 | " used in either mode (mpu or dma)\n"); | ||
967 | } | ||
968 | return platform_driver_register(&omap_nand_driver); | 1159 | return platform_driver_register(&omap_nand_driver); |
969 | } | 1160 | } |
970 | 1161 | ||
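The new NAND_OMAP_PREFETCH_IRQ path above follows a common kernel idiom: the read/write buf function programs the prefetch engine, enables the GPMC FIFO and count-event interrupts, and sleeps on a completion, while omap_nand_irq() drains or fills the FIFO and signals the completion once the count event fires. A stripped-down, hypothetical sketch of that idiom (not the driver's actual code) is shown here.

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/types.h>

struct xfer_ctx {
	struct completion	done;
	u8			*buf;
	int			remaining;
};

static irqreturn_t xfer_irq(int irq, void *dev_id)
{
	struct xfer_ctx *ctx = dev_id;

	/* ...read the FIFO level, copy that many bytes, advance ctx->buf
	 * and decrement ctx->remaining (elided)... */
	if (ctx->remaining <= 0)
		complete(&ctx->done);	/* wakes the sleeping buf routine */
	return IRQ_HANDLED;
}

static void xfer_buf(struct xfer_ctx *ctx, u8 *buf, int len)
{
	ctx->buf = buf;
	ctx->remaining = len;
	init_completion(&ctx->done);

	/* ...start the engine and enable the controller interrupt here... */
	wait_for_completion(&ctx->done);
	/* ...disable the interrupt and stop the engine here... */
}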
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index bb277a54986f..59efa829ef24 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,8 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd) | |||
89 | return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); | 89 | return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); |
90 | } | 90 | } |
91 | 91 | ||
92 | static int __devinit pasemi_nand_probe(struct platform_device *ofdev, | 92 | static int __devinit pasemi_nand_probe(struct platform_device *ofdev) |
93 | const struct of_device_id *match) | ||
94 | { | 93 | { |
95 | struct pci_dev *pdev; | 94 | struct pci_dev *pdev; |
96 | struct device_node *np = ofdev->dev.of_node; | 95 | struct device_node *np = ofdev->dev.of_node; |
@@ -219,7 +218,7 @@ static const struct of_device_id pasemi_nand_match[] = | |||
219 | 218 | ||
220 | MODULE_DEVICE_TABLE(of, pasemi_nand_match); | 219 | MODULE_DEVICE_TABLE(of, pasemi_nand_match); |
221 | 220 | ||
222 | static struct of_platform_driver pasemi_nand_driver = | 221 | static struct platform_driver pasemi_nand_driver = |
223 | { | 222 | { |
224 | .driver = { | 223 | .driver = { |
225 | .name = (char*)driver_name, | 224 | .name = (char*)driver_name, |
@@ -232,13 +231,13 @@ static struct of_platform_driver pasemi_nand_driver = | |||
232 | 231 | ||
233 | static int __init pasemi_nand_init(void) | 232 | static int __init pasemi_nand_init(void) |
234 | { | 233 | { |
235 | return of_register_platform_driver(&pasemi_nand_driver); | 234 | return platform_driver_register(&pasemi_nand_driver); |
236 | } | 235 | } |
237 | module_init(pasemi_nand_init); | 236 | module_init(pasemi_nand_init); |
238 | 237 | ||
239 | static void __exit pasemi_nand_exit(void) | 238 | static void __exit pasemi_nand_exit(void) |
240 | { | 239 | { |
241 | of_unregister_platform_driver(&pasemi_nand_driver); | 240 | platform_driver_unregister(&pasemi_nand_driver); |
242 | } | 241 | } |
243 | module_exit(pasemi_nand_exit); | 242 | module_exit(pasemi_nand_exit); |
244 | 243 | ||
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
930 | 930 | ||
931 | init_completion(&dev->dma_done); | 931 | init_completion(&dev->dma_done); |
932 | 932 | ||
933 | dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); | 933 | dev->card_workqueue = create_freezable_workqueue(DRV_NAME); |
934 | 934 | ||
935 | if (!dev->card_workqueue) | 935 | if (!dev->card_workqueue) |
936 | goto error9; | 936 | goto error9; |
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a8e403eebedb..a853548986f0 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -162,8 +162,7 @@ static const char *part_probes[] = { "cmdlinepart", NULL }; | |||
162 | /* | 162 | /* |
163 | * Probe for the NAND device. | 163 | * Probe for the NAND device. |
164 | */ | 164 | */ |
165 | static int __devinit socrates_nand_probe(struct platform_device *ofdev, | 165 | static int __devinit socrates_nand_probe(struct platform_device *ofdev) |
166 | const struct of_device_id *ofid) | ||
167 | { | 166 | { |
168 | struct socrates_nand_host *host; | 167 | struct socrates_nand_host *host; |
169 | struct mtd_info *mtd; | 168 | struct mtd_info *mtd; |
@@ -300,7 +299,7 @@ static const struct of_device_id socrates_nand_match[] = | |||
300 | 299 | ||
301 | MODULE_DEVICE_TABLE(of, socrates_nand_match); | 300 | MODULE_DEVICE_TABLE(of, socrates_nand_match); |
302 | 301 | ||
303 | static struct of_platform_driver socrates_nand_driver = { | 302 | static struct platform_driver socrates_nand_driver = { |
304 | .driver = { | 303 | .driver = { |
305 | .name = "socrates_nand", | 304 | .name = "socrates_nand", |
306 | .owner = THIS_MODULE, | 305 | .owner = THIS_MODULE, |
@@ -312,12 +311,12 @@ static struct of_platform_driver socrates_nand_driver = { | |||
312 | 311 | ||
313 | static int __init socrates_nand_init(void) | 312 | static int __init socrates_nand_init(void) |
314 | { | 313 | { |
315 | return of_register_platform_driver(&socrates_nand_driver); | 314 | return platform_driver_register(&socrates_nand_driver); |
316 | } | 315 | } |
317 | 316 | ||
318 | static void __exit socrates_nand_exit(void) | 317 | static void __exit socrates_nand_exit(void) |
319 | { | 318 | { |
320 | of_unregister_platform_driver(&socrates_nand_driver); | 319 | platform_driver_unregister(&socrates_nand_driver); |
321 | } | 320 | } |
322 | 321 | ||
323 | module_init(socrates_nand_init); | 322 | module_init(socrates_nand_init); |
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 3041d1f7ae3f..38fb16771f85 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf, | |||
319 | 319 | ||
320 | static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) | 320 | static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) |
321 | { | 321 | { |
322 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); | 322 | const struct mfd_cell *cell = mfd_get_cell(dev); |
323 | int ret; | 323 | int ret; |
324 | 324 | ||
325 | if (cell->enable) { | 325 | if (cell->enable) { |
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) | |||
363 | 363 | ||
364 | static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) | 364 | static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) |
365 | { | 365 | { |
366 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); | 366 | const struct mfd_cell *cell = mfd_get_cell(dev); |
367 | 367 | ||
368 | tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); | 368 | tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); |
369 | if (cell->disable) | 369 | if (cell->disable) |
@@ -372,8 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) | |||
372 | 372 | ||
373 | static int tmio_probe(struct platform_device *dev) | 373 | static int tmio_probe(struct platform_device *dev) |
374 | { | 374 | { |
375 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); | 375 | struct tmio_nand_data *data = mfd_get_data(dev); |
376 | struct tmio_nand_data *data = cell->driver_data; | ||
377 | struct resource *fcr = platform_get_resource(dev, | 376 | struct resource *fcr = platform_get_resource(dev, |
378 | IORESOURCE_MEM, 0); | 377 | IORESOURCE_MEM, 0); |
379 | struct resource *ccr = platform_get_resource(dev, | 378 | struct resource *ccr = platform_get_resource(dev, |
@@ -516,7 +515,7 @@ static int tmio_remove(struct platform_device *dev) | |||
516 | #ifdef CONFIG_PM | 515 | #ifdef CONFIG_PM |
517 | static int tmio_suspend(struct platform_device *dev, pm_message_t state) | 516 | static int tmio_suspend(struct platform_device *dev, pm_message_t state) |
518 | { | 517 | { |
519 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); | 518 | const struct mfd_cell *cell = mfd_get_cell(dev); |
520 | 519 | ||
521 | if (cell->suspend) | 520 | if (cell->suspend) |
522 | cell->suspend(dev); | 521 | cell->suspend(dev); |
@@ -527,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state) | |||
527 | 526 | ||
528 | static int tmio_resume(struct platform_device *dev) | 527 | static int tmio_resume(struct platform_device *dev) |
529 | { | 528 | { |
530 | struct mfd_cell *cell = dev_get_platdata(&dev->dev); | 529 | const struct mfd_cell *cell = mfd_get_cell(dev); |
531 | 530 | ||
532 | /* FIXME - is this required or merely another attack of the broken | 531 | /* FIXME - is this required or merely another attack of the broken |
533 | * SHARP platform? Looks suspicious. | 532 | * SHARP platform? Looks suspicious. |
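The tmio_nand hunks above stop reaching the MFD cell through dev_get_platdata() and use the mfd_get_cell()/mfd_get_data() helpers instead, with the cell pointer becoming const. A short sketch of that access pattern, assuming the helpers are declared in <linux/mfd/core.h> and struct tmio_nand_data in <linux/mfd/tmio.h> at this point in the tree; the probe body here is illustrative only.

#include <linux/errno.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/platform_device.h>

static int example_tmio_probe(struct platform_device *pdev)
{
	/* was: struct mfd_cell *cell = dev_get_platdata(&pdev->dev); */
	const struct mfd_cell *cell = mfd_get_cell(pdev);
	/* was: cell->driver_data */
	struct tmio_nand_data *data = mfd_get_data(pdev);

	if (cell->enable)
		cell->enable(pdev);

	return data ? 0 : -EINVAL;
}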