Diffstat (limited to 'drivers/mtd/nand/omap2.c')
-rw-r--r--   drivers/mtd/nand/omap2.c | 367
1 file changed, 279 insertions(+), 88 deletions(-)
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7a4e2550b13..da9a351c9d79 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
 #include <plat/nand.h>
 
 #define DRIVER_NAME     "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS    5000
 
 #define NAND_Ecc_P1e            (1 << 0)
 #define NAND_Ecc_P2e            (1 << 1)
@@ -96,26 +98,19 @@
 static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info generated runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+        .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+        .offs = 0,
+        .len = 1,
+        .pattern = scan_ff_pattern,
+};
 
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
 
 struct omap_nand_info {
         struct nand_hw_control          controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
         unsigned long           phys_base;
         struct completion       comp;
         int                     dma_ch;
+        int                     gpmc_irq;
+        enum {
+                OMAP_NAND_IO_READ = 0,  /* read */
+                OMAP_NAND_IO_WRITE,     /* write */
+        } iomode;
+        u_char                  *buf;
+        int                     buf_len;
 };
 
 /**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
         }
 
         /* configure and start prefetch transfer */
-        ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+        ret = gpmc_prefetch_enable(info->gpmc_cs,
+                        PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
         if (ret) {
                 /* PFPW engine is busy, use cpu copy method */
                 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 {
         struct omap_nand_info *info = container_of(mtd,
                                                 struct omap_nand_info, mtd);
-        uint32_t pref_count = 0, w_count = 0;
+        uint32_t w_count = 0;
         int i = 0, ret = 0;
         u16 *p;
+        unsigned long tim, limit;
 
         /* take care of subpage writes */
         if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
         }
 
         /* configure and start prefetch transfer */
-        ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+        ret = gpmc_prefetch_enable(info->gpmc_cs,
+                        PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
         if (ret) {
                 /* PFPW engine is busy, use cpu copy method */
                 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
                         iowrite16(*p++, info->nand.IO_ADDR_W);
                 }
                 /* wait for data to flushed-out before reset the prefetch */
-                do {
-                        pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
-                } while (pref_count);
+                tim = 0;
+                limit = (loops_per_jiffy *
+                                msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+                while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+                        cpu_relax();
+
                 /* disable and stop the PFPW engine */
                 gpmc_prefetch_reset(info->gpmc_cs);
         }
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
 /*
  * omap_nand_dma_cb: callback on the completion of dma transfer
  * @lch: logical channel
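
Editor's note: the unbounded do/while spin on GPMC_PREFETCH_COUNT removed by this hunk could hang forever if the prefetch engine never drains; the replacement gives up after roughly OMAP_NAND_TIMEOUT_MS, using loops_per_jiffy as a rough per-iteration time estimate. The same pattern, pulled out as a hypothetical stand-alone helper (not part of the patch, but built only from kernel symbols the patch itself uses), would look like this:

/* Hypothetical helper, not in the patch: bounded busy-wait for the
 * GPMC prefetch engine to drain, mirroring the open-coded loops above.
 */
static int omap_nand_wait_prefetch_drained(void)
{
        unsigned long tim = 0;
        unsigned long limit = loops_per_jiffy *
                                msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS);

        /* spin until the FIFO count hits zero or the time budget expires */
        while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
                cpu_relax();

        /* a non-zero count here means the engine never drained in time */
        return gpmc_read_status(GPMC_PREFETCH_COUNT) ? -ETIMEDOUT : 0;
}
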
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
         struct omap_nand_info *info = container_of(mtd,
                                         struct omap_nand_info, mtd);
-        uint32_t prefetch_status = 0;
         enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
                                                         DMA_FROM_DEVICE;
         dma_addr_t dma_addr;
         int ret;
+        unsigned long tim, limit;
 
-        /* The fifo depth is 64 bytes. We have a sync at each frame and frame
-         * length is 64 bytes.
+        /* The fifo depth is 64 bytes max.
+         * But configure the FIFO-threahold to 32 to get a sync at each frame
+         * and frame length is 32 bytes.
          */
         int buf_len = len >> 6;
 
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
                                         OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
         }
         /* configure and start prefetch transfer */
-        ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+        ret = gpmc_prefetch_enable(info->gpmc_cs,
+                        PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
         if (ret)
-                /* PFPW engine is busy, use cpu copy methode */
+                /* PFPW engine is busy, use cpu copy method */
                 goto out_copy;
 
         init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 
         /* setup and start DMA using dma_addr */
         wait_for_completion(&info->comp);
+        tim = 0;
+        limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+        while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+                cpu_relax();
 
-        do {
-                prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
-        } while (prefetch_status);
         /* disable and stop the PFPW engine */
         gpmc_prefetch_reset(info->gpmc_cs);
 
@@ -426,14 +436,6 @@ out_copy:
                         : omap_write_buf8(mtd, (u_char *) addr, len);
         return 0;
 }
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
-                                unsigned int len, int is_write)
-{
-        return 0;
-}
-#endif
 
 /**
  * omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
         omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
 }
 
+/*
+ * omap_nand_irq - GMPC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+        struct omap_nand_info *info = (struct omap_nand_info *) dev;
+        u32 bytes;
+        u32 irq_stat;
+
+        irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+        bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+        bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+        if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+                if (irq_stat & 0x2)
+                        goto done;
+
+                if (info->buf_len && (info->buf_len < bytes))
+                        bytes = info->buf_len;
+                else if (!info->buf_len)
+                        bytes = 0;
+                iowrite32_rep(info->nand.IO_ADDR_W,
+                                        (u32 *)info->buf, bytes >> 2);
+                info->buf = info->buf + bytes;
+                info->buf_len -= bytes;
+
+        } else {
+                ioread32_rep(info->nand.IO_ADDR_R,
+                                        (u32 *)info->buf, bytes >> 2);
+                info->buf = info->buf + bytes;
+
+                if (irq_stat & 0x2)
+                        goto done;
+        }
+        gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+        return IRQ_HANDLED;
+
+done:
+        complete(&info->comp);
+        /* disable irq */
+        gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+        /* clear status */
+        gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+        return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+        struct omap_nand_info *info = container_of(mtd,
+                                                struct omap_nand_info, mtd);
+        int ret = 0;
+
+        if (len <= mtd->oobsize) {
+                omap_read_buf_pref(mtd, buf, len);
+                return;
+        }
+
+        info->iomode = OMAP_NAND_IO_READ;
+        info->buf = buf;
+        init_completion(&info->comp);
+
+        /* configure and start prefetch transfer */
+        ret = gpmc_prefetch_enable(info->gpmc_cs,
+                        PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+        if (ret)
+                /* PFPW engine is busy, use cpu copy method */
+                goto out_copy;
+
+        info->buf_len = len;
+        /* enable irq */
+        gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+                (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+        /* waiting for read to complete */
+        wait_for_completion(&info->comp);
+
+        /* disable and stop the PFPW engine */
+        gpmc_prefetch_reset(info->gpmc_cs);
+        return;
+
+out_copy:
+        if (info->nand.options & NAND_BUSWIDTH_16)
+                omap_read_buf16(mtd, buf, len);
+        else
+                omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+                                        const u_char *buf, int len)
+{
+        struct omap_nand_info *info = container_of(mtd,
+                                                struct omap_nand_info, mtd);
+        int ret = 0;
+        unsigned long tim, limit;
+
+        if (len <= mtd->oobsize) {
+                omap_write_buf_pref(mtd, buf, len);
+                return;
+        }
+
+        info->iomode = OMAP_NAND_IO_WRITE;
+        info->buf = (u_char *) buf;
+        init_completion(&info->comp);
+
+        /* configure and start prefetch transfer : size=24 */
+        ret = gpmc_prefetch_enable(info->gpmc_cs,
+                (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+        if (ret)
+                /* PFPW engine is busy, use cpu copy method */
+                goto out_copy;
+
+        info->buf_len = len;
+        /* enable irq */
+        gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+                        (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+        /* waiting for write to complete */
+        wait_for_completion(&info->comp);
+        /* wait for data to flushed-out before reset the prefetch */
+        tim = 0;
+        limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+        while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+                cpu_relax();
+
+        /* disable and stop the PFPW engine */
+        gpmc_prefetch_reset(info->gpmc_cs);
+        return;
+
+out_copy:
+        if (info->nand.options & NAND_BUSWIDTH_16)
+                omap_write_buf16(mtd, buf, len);
+        else
+                omap_write_buf8(mtd, buf, len);
+}
+
 /**
  * omap_verify_buf - Verify chip data against buffer
  * @mtd: MTD device structure
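
Editor's note on the FIFO thresholds chosen above, assuming PREFETCH_FIFOTHRESHOLD_MAX is the 64-byte GPMC FIFO depth quoted in the DMA-path comment earlier in this patch:

/*
 * read path:  PREFETCH_FIFOTHRESHOLD_MAX / 2       = 64 / 2       = 32 bytes
 * write path: (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8 = (64 * 3) / 8 = 24 bytes
 *
 * i.e. one FIFOEVENT interrupt per 32-byte read burst or 24-byte write
 * burst (hence the "size=24" comment), and the handler rounds the reported
 * count down to whole words (bytes & 0xFFFC) before ioread32_rep() /
 * iowrite32_rep().
 */
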
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
         return 0;
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
 /**
  * gen_true_ecc - This function will generate true ECC value
  * @ecc_buf: buffer to store ecc code
@@ -716,8 +867,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
         gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
 }
 
-#endif
-
 /**
  * omap_wait - wait until the command is done
  * @mtd: MTD device structure
@@ -787,6 +936,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
         struct omap_nand_info           *info;
         struct omap_nand_platform_data  *pdata;
         int                             err;
+        int                             i, offset;
 
         pdata = pdev->dev.platform_data;
         if (pdata == NULL) {
@@ -812,7 +962,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
         info->mtd.name          = dev_name(&pdev->dev);
         info->mtd.owner = THIS_MODULE;
 
-        info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+        info->nand.options = pdata->devsize;
         info->nand.options |= NAND_SKIP_BBTSCAN;
 
         /* NAND write protect off */
@@ -850,28 +1000,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                 info->nand.chip_delay = 50;
         }
 
-        if (use_prefetch) {
-
+        switch (pdata->xfer_type) {
+        case NAND_OMAP_PREFETCH_POLLED:
                 info->nand.read_buf   = omap_read_buf_pref;
                 info->nand.write_buf  = omap_write_buf_pref;
-                if (use_dma) {
-                        err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-                                omap_nand_dma_cb, &info->comp, &info->dma_ch);
-                        if (err < 0) {
-                                info->dma_ch = -1;
-                                printk(KERN_WARNING "DMA request failed."
-                                        " Non-dma data transfer mode\n");
-                        } else {
-                                omap_set_dma_dest_burst_mode(info->dma_ch,
-                                                OMAP_DMA_DATA_BURST_16);
-                                omap_set_dma_src_burst_mode(info->dma_ch,
-                                                OMAP_DMA_DATA_BURST_16);
-
-                                info->nand.read_buf   = omap_read_buf_dma_pref;
-                                info->nand.write_buf  = omap_write_buf_dma_pref;
-                        }
-                }
-        } else {
+                break;
+
+        case NAND_OMAP_POLLED:
                 if (info->nand.options & NAND_BUSWIDTH_16) {
                         info->nand.read_buf   = omap_read_buf16;
                         info->nand.write_buf  = omap_write_buf16;
@@ -879,20 +1014,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                         info->nand.read_buf   = omap_read_buf8;
                         info->nand.write_buf  = omap_write_buf8;
                 }
+                break;
+
+        case NAND_OMAP_PREFETCH_DMA:
+                err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+                                omap_nand_dma_cb, &info->comp, &info->dma_ch);
+                if (err < 0) {
+                        info->dma_ch = -1;
+                        dev_err(&pdev->dev, "DMA request failed!\n");
+                        goto out_release_mem_region;
+                } else {
+                        omap_set_dma_dest_burst_mode(info->dma_ch,
+                                        OMAP_DMA_DATA_BURST_16);
+                        omap_set_dma_src_burst_mode(info->dma_ch,
+                                        OMAP_DMA_DATA_BURST_16);
+
+                        info->nand.read_buf   = omap_read_buf_dma_pref;
+                        info->nand.write_buf  = omap_write_buf_dma_pref;
+                }
+                break;
+
+        case NAND_OMAP_PREFETCH_IRQ:
+                err = request_irq(pdata->gpmc_irq,
+                                omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+                if (err) {
+                        dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+                                                        pdata->gpmc_irq, err);
+                        goto out_release_mem_region;
+                } else {
+                        info->gpmc_irq        = pdata->gpmc_irq;
+                        info->nand.read_buf   = omap_read_buf_irq_pref;
+                        info->nand.write_buf  = omap_write_buf_irq_pref;
+                }
+                break;
+
+        default:
+                dev_err(&pdev->dev,
+                        "xfer_type(%d) not supported!\n", pdata->xfer_type);
+                err = -EINVAL;
+                goto out_release_mem_region;
         }
-        info->nand.verify_buf = omap_verify_buf;
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-        info->nand.ecc.bytes            = 3;
-        info->nand.ecc.size             = 512;
-        info->nand.ecc.calculate        = omap_calculate_ecc;
-        info->nand.ecc.hwctl            = omap_enable_hwecc;
-        info->nand.ecc.correct          = omap_correct_data;
-        info->nand.ecc.mode             = NAND_ECC_HW;
+        info->nand.verify_buf = omap_verify_buf;
 
-#else
-        info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+        /* selsect the ecc type */
+        if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+                info->nand.ecc.mode = NAND_ECC_SOFT;
+        else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+                (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+                info->nand.ecc.bytes            = 3;
+                info->nand.ecc.size             = 512;
+                info->nand.ecc.calculate        = omap_calculate_ecc;
+                info->nand.ecc.hwctl            = omap_enable_hwecc;
+                info->nand.ecc.correct          = omap_correct_data;
+                info->nand.ecc.mode             = NAND_ECC_HW;
+        }
 
         /* DIP switches on some boards change between 8 and 16 bit
          * bus widths for flash. Try the other width if the first try fails.
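
Editor's note: the new probe logic is driven entirely by the omap_nand_platform_data fields this hunk dereferences (devsize, xfer_type, gpmc_irq, ecc_opt). As a rough illustration only, a board file selecting the IRQ transfer mode and the ROM-code ECC layout might populate it like the hypothetical fragment below; the .cs, .parts and .nr_parts fields and the IRQ number are assumptions about the platform header of this era, not taken from the patch:

/* Hypothetical board-file sketch, not part of this patch. */
static struct mtd_partition board_nand_parts[] = {
        {
                .name   = "bootloader",
                .offset = 0,
                .size   = 4 * SZ_128K,
        },
        {
                .name   = "filesystem",
                .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL,
        },
};

static struct omap_nand_platform_data board_nand_data = {
        .cs        = 0,                          /* assumed GPMC chip-select */
        .parts     = board_nand_parts,
        .nr_parts  = ARRAY_SIZE(board_nand_parts),
        .devsize   = NAND_BUSWIDTH_16,           /* copied straight into nand.options by probe */
        .xfer_type = NAND_OMAP_PREFETCH_IRQ,
        .gpmc_irq  = 20,                         /* assumed GPMC interrupt number */
        .ecc_opt   = OMAP_ECC_HAMMING_CODE_HW_ROMCODE,
};
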
@@ -905,6 +1081,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
                 }
         }
 
+        /* rom code layout */
+        if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+                if (info->nand.options & NAND_BUSWIDTH_16)
+                        offset = 2;
+                else {
+                        offset = 1;
+                        info->nand.badblock_pattern = &bb_descrip_flashbased;
+                }
+                omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+                for (i = 0; i < omap_oobinfo.eccbytes; i++)
+                        omap_oobinfo.eccpos[i] = i+offset;
+
+                omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+                omap_oobinfo.oobfree->length = info->mtd.oobsize -
+                                        (offset + omap_oobinfo.eccbytes);
+
+                info->nand.ecc.layout = &omap_oobinfo;
+        }
+
 #ifdef CONFIG_MTD_PARTITIONS
         err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
         if (err > 0)
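
Editor's note: for a typical 64-byte OOB area the layout computed above works out as follows (a worked example, not extra driver code):

/*
 * oobsize = 64, 16-bit device  =>  offset = 2
 *   eccbytes        = 3 * (64 / 16) = 12
 *   eccpos[]        = { 2, 3, ..., 13 }
 *   oobfree->offset = 2 + 12         = 14
 *   oobfree->length = 64 - (2 + 12)  = 50
 *
 * On an 8-bit device offset is 1 and the flash-based bad-block descriptor
 * (bb_descrip_flashbased) is used instead of the default OOB bad-block marker.
 */
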
@@ -934,9 +1130,12 @@ static int omap_nand_remove(struct platform_device *pdev)
                                                                 mtd);
 
         platform_set_drvdata(pdev, NULL);
-        if (use_dma)
+        if (info->dma_ch != -1)
                 omap_free_dma(info->dma_ch);
 
+        if (info->gpmc_irq)
+                free_irq(info->gpmc_irq, info);
+
         /* Release NAND device, its internal structures and partitions */
         nand_release(&info->mtd);
         iounmap(info->nand.IO_ADDR_R);
@@ -955,16 +1154,8 @@ static struct platform_driver omap_nand_driver = {
 
 static int __init omap_nand_init(void)
 {
-        printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+        pr_info("%s driver initializing\n", DRIVER_NAME);
 
-        /* This check is required if driver is being
-         * loaded run time as a module
-         */
-        if ((1 == use_dma) && (0 == use_prefetch)) {
-                printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
-                        "without use_prefetch'. Prefetch will not be"
-                        " used in either mode (mpu or dma)\n");
-        }
         return platform_driver_register(&omap_nand_driver);
 }
 