author		Sukumar Ghorai <s-ghorai@ti.com>	2011-01-28 05:12:06 -0500
committer	Tony Lindgren <tony@atomide.com>	2011-02-17 18:32:53 -0500
commit		4e070376165a9b7f245fada77645b81352c6ec78
tree		76a854fc37f50633f10a9eb25382d43329a94083
parent		db97eb7dfe13f6c04f0a0e77c32e2691f563ab8b
omap3: nand: prefetch in irq mode support
This patch enables prefetch-irq mode for NAND transfers (read and write).
Signed-off-by: Vimal Singh <vimalsingh@ti.com>
Signed-off-by: Sukumar Ghorai <s-ghorai@ti.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
-rw-r--r--	arch/arm/mach-omap2/board-flash.c	|   2
-rw-r--r--	arch/arm/plat-omap/include/plat/nand.h	|   4
-rw-r--r--	drivers/mtd/nand/omap2.c		| 198
3 files changed, 194 insertions, 10 deletions
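
The new transfer mode is selected purely through platform data: a board file picks NAND_OMAP_PREFETCH_IRQ as the xfer_type and supplies the GPMC interrupt for its chip-select before calling gpmc_nand_init(), which is what the board-flash.c hunk below does. A minimal board-side sketch for illustration only (not part of this patch; the .cs value, the IRQ choice and the board_nand_setup() wrapper are assumptions):

#include <linux/init.h>
#include <plat/gpmc.h>
#include <plat/irqs.h>
#include <plat/nand.h>

/* Hypothetical board data: NAND on chip-select 0, IRQ-driven prefetch. */
static struct omap_nand_platform_data board_nand_data = {
	.cs		= 0,				/* GPMC chip-select (assumed) */
	.xfer_type	= NAND_OMAP_PREFETCH_IRQ,	/* mode added by this patch */
	.gpmc_irq	= OMAP_GPMC_IRQ_BASE + 0,	/* one GPMC irq per chip-select */
};

static void __init board_nand_setup(void)
{
	gpmc_nand_init(&board_nand_data);
}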
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index f6b72533c089..19645095d597 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
 #include <linux/io.h>
+#include <plat/irqs.h>
 
 #include <plat/gpmc.h>
 #include <plat/nand.h>
@@ -147,6 +148,7 @@ __init board_nand_init(struct mtd_partition *nand_parts,
 	board_nand_data.nr_parts = nr_parts;
 	board_nand_data.devsize = nand_type;
 
+	board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs;
 	gpmc_nand_init(&board_nand_data);
 }
 #else
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index 78c0bdb98c18..ae5e05383031 100644
--- a/arch/arm/plat-omap/include/plat/nand.h
+++ b/arch/arm/plat-omap/include/plat/nand.h
@@ -13,7 +13,8 @@
 enum nand_io {
 	NAND_OMAP_PREFETCH_POLLED = 0,	/* prefetch polled mode, default */
 	NAND_OMAP_POLLED,		/* polled mode, without prefetch */
-	NAND_OMAP_PREFETCH_DMA		/* prefetch enabled sDMA mode */
+	NAND_OMAP_PREFETCH_DMA,		/* prefetch enabled sDMA mode */
+	NAND_OMAP_PREFETCH_IRQ		/* prefetch enabled irq mode */
 };
 
 struct omap_nand_platform_data {
@@ -26,6 +27,7 @@ struct omap_nand_platform_data {
 	int (*nand_setup)(void);
 	int (*dev_ready)(struct omap_nand_platform_data *);
 	int dma_channel;
+	int gpmc_irq;
 	enum nand_io xfer_type;
 	unsigned long phys_base;
 	int devsize;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 60bac8e6e9fa..fbe841467175 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
 #include <plat/nand.h>
 
 #define DRIVER_NAME "omap2-nand"
+#define OMAP_NAND_TIMEOUT_MS 5000
 
 #define NAND_Ecc_P1e		(1 << 0)
 #define NAND_Ecc_P2e		(1 << 1)
@@ -108,6 +110,13 @@ struct omap_nand_info {
 	unsigned long phys_base;
 	struct completion comp;
 	int dma_ch;
+	int gpmc_irq;
+	enum {
+		OMAP_NAND_IO_READ = 0,	/* read */
+		OMAP_NAND_IO_WRITE,	/* write */
+	} iomode;
+	u_char *buf;
+	int buf_len;
 };
 
 /**
@@ -267,9 +276,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 {
 	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
-	uint32_t pref_count = 0, w_count = 0;
+	uint32_t w_count = 0;
 	int i = 0, ret = 0;
 	u16 *p;
+	unsigned long tim, limit;
 
 	/* take care of subpage writes */
 	if (len % 2 != 0) {
@@ -295,9 +305,12 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 			iowrite16(*p++, info->nand.IO_ADDR_W);
 	}
 	/* wait for data to flushed-out before reset the prefetch */
-	do {
-		pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
-	} while (pref_count);
+	tim = 0;
+	limit = (loops_per_jiffy *
			msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
+
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 }
@@ -326,11 +339,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
 	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
-	uint32_t prefetch_status = 0;
 	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
 	dma_addr_t dma_addr;
 	int ret;
+	unsigned long tim, limit;
 
 	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
 	 * length is 64 bytes.
@@ -376,7 +389,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	/* configure and start prefetch transfer */
 	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
 	if (ret)
-		/* PFPW engine is busy, use cpu copy methode */
+		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	init_completion(&info->comp);
@@ -385,10 +398,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 
 	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
 
-	do {
-		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
-	} while (prefetch_status);
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 
@@ -436,6 +450,155 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
 	omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
 }
 
+/*
+ * omap_nand_irq - GMPC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+	struct omap_nand_info *info = (struct omap_nand_info *) dev;
+	u32 bytes;
+	u32 irq_stat;
+
+	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+		if (irq_stat & 0x2)
+			goto done;
+
+		if (info->buf_len && (info->buf_len < bytes))
+			bytes = info->buf_len;
+		else if (!info->buf_len)
+			bytes = 0;
+		iowrite32_rep(info->nand.IO_ADDR_W,
+				(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+		info->buf_len -= bytes;
+
+	} else {
+		ioread32_rep(info->nand.IO_ADDR_R,
+				(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+
+		if (irq_stat & 0x2)
+			goto done;
+	}
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+
+done:
+	complete(&info->comp);
+	/* disable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+	/* clear status */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+
+	if (len <= mtd->oobsize) {
+		omap_read_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_READ;
+	info->buf = buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer */
+	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for read to complete */
+	wait_for_completion(&info->comp);
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_read_buf16(mtd, buf, len);
+	else
+		omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+	unsigned long tim, limit;
+
+	if (len <= mtd->oobsize) {
+		omap_write_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_WRITE;
+	info->buf = (u_char *) buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer */
+	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for write to complete */
+	wait_for_completion(&info->comp);
+	/* wait for data to flushed-out before reset the prefetch */
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_write_buf16(mtd, buf, len);
+	else
+		omap_write_buf8(mtd, buf, len);
+}
+
 /**
  * omap_verify_buf - Verify chip data against buffer
  * @mtd: MTD device structure
@@ -846,6 +1009,20 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		}
 		break;
 
+	case NAND_OMAP_PREFETCH_IRQ:
+		err = request_irq(pdata->gpmc_irq,
+				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+						pdata->gpmc_irq, err);
+			goto out_release_mem_region;
+		} else {
+			info->gpmc_irq = pdata->gpmc_irq;
+			info->nand.read_buf = omap_read_buf_irq_pref;
+			info->nand.write_buf = omap_write_buf_irq_pref;
+		}
+		break;
+
 	default:
 		dev_err(&pdev->dev,
 			"xfer_type(%d) not supported!\n", pdata->xfer_type);
@@ -911,6 +1088,9 @@ static int omap_nand_remove(struct platform_device *pdev)
 	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->gpmc_irq)
+		free_irq(info->gpmc_irq, info);
+
 	/* Release NAND device, its internal structures and partitions */
 	nand_release(&info->mtd);
 	iounmap(info->nand.IO_ADDR_R);