author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-17 22:28:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-17 22:28:15 -0400
commit		0df0914d414a504b975f3cc66ace0c16ef55b7f3 (patch)
tree		c97ffa357943a8b226cdec1b9632c4cede813205 /drivers/mtd/nand
parent		6899608533410557e6698cb9d4ff6df553916e98 (diff)
parent		05f689400ea5fa3d71af82f910c8b140f87ad1f3 (diff)
Merge branch 'omap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6
* 'omap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6: (258 commits)
  omap: zoom: host should not pull up wl1271's irq line
  arm: plat-omap: iommu: fix request_mem_region() error path
  OMAP2+: Common CPU DIE ID reading code reads wrong registers for OMAP4430
  omap4: mux: Remove duplicate mux modes
  omap: iovmm: don't check 'da' to set IOVMF_DA_FIXED flag
  omap: iovmm: disallow mapping NULL address when IOVMF_DA_ANON is set
  omap2+: mux: Fix compile when CONFIG_OMAP_MUX is not selected
  omap4: board-omap4panda: Initialise the serial pads
  omap3: board-3430sdp: Initialise the serial pads
  omap4: board-4430sdp: Initialise the serial pads
  omap2+: mux: Add macro for configuring static with omap_hwmod_mux_init
  omap2+: mux: Remove the use of IDLE flag
  omap2+: Add separate list for dynamic pads to mux
  perf: add OMAP support for the new power events
  OMAP4: Add IVA OPP enteries.
  OMAP4: Update Voltage Rail Values for MPU, IVA and CORE
  OMAP4: Enable 800 MHz and 1 GHz MPU-OPP
  OMAP3+: OPP: Replace voltage values with Macros
  OMAP3: wdtimer: Fix CORE idle transition
  Watchdog: omap_wdt: add fine grain runtime-pm
  ...

Fix up various conflicts in
 - arch/arm/mach-omap2/board-omap3evm.c
 - arch/arm/mach-omap2/clock3xxx_data.c
 - arch/arm/mach-omap2/usb-musb.c
 - arch/arm/plat-omap/include/plat/usb.h
 - drivers/usb/musb/musb_core.h
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--	drivers/mtd/nand/Kconfig	17
-rw-r--r--	drivers/mtd/nand/omap2.c	367
2 files changed, 279 insertions, 105 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 450afc5df0bd..4f6c06f16328 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -106,23 +106,6 @@ config MTD_NAND_OMAP2
 	help
 	  Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
 
-config MTD_NAND_OMAP_PREFETCH
-	bool "GPMC prefetch support for NAND Flash device"
-	depends on MTD_NAND_OMAP2
-	default y
-	help
-	  The NAND device can be accessed for Read/Write using GPMC PREFETCH engine
-	  to improve the performance.
-
-config MTD_NAND_OMAP_PREFETCH_DMA
-	depends on MTD_NAND_OMAP_PREFETCH
-	bool "DMA mode"
-	default n
-	help
-	  The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode
-	  or in DMA interrupt mode.
-	  Say y for DMA mode or MPU mode will be used
-
 config MTD_NAND_IDS
 	tristate
 
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 28af71c61834..7b8f1fffc528 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
@@ -24,6 +25,7 @@
 #include <plat/nand.h>
 
 #define	DRIVER_NAME	"omap2-nand"
+#define	OMAP_NAND_TIMEOUT_MS	5000
 
 #define NAND_Ecc_P1e		(1 << 0)
 #define NAND_Ecc_P2e		(1 << 1)
@@ -96,26 +98,19 @@
 static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
-static int use_prefetch = 1;
-
-/* "modprobe ... use_prefetch=0" etc */
-module_param(use_prefetch, bool, 0);
-MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
-
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
-static int use_dma = 1;
+/* oob info generated runtime depending on ecc algorithm and layout selected */
+static struct nand_ecclayout omap_oobinfo;
+/* Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks
+ */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr bb_descrip_flashbased = {
+	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+	.offs = 0,
+	.len = 1,
+	.pattern = scan_ff_pattern,
+};
 
-/* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
-#else
-static const int use_dma;
-#endif
-#else
-const int use_prefetch;
-static const int use_dma;
-#endif
 
 struct omap_nand_info {
 	struct nand_hw_control		controller;
@@ -129,6 +124,13 @@ struct omap_nand_info {
 	unsigned long			phys_base;
 	struct completion		comp;
 	int				dma_ch;
+	int				gpmc_irq;
+	enum {
+		OMAP_NAND_IO_READ = 0,	/* read */
+		OMAP_NAND_IO_WRITE,	/* write */
+	} iomode;
+	u_char				*buf;
+	int				buf_len;
 };
 
 /**
@@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 {
 	struct omap_nand_info *info = container_of(mtd,
 						struct omap_nand_info, mtd);
-	uint32_t pref_count = 0, w_count = 0;
+	uint32_t w_count = 0;
 	int i = 0, ret = 0;
 	u16 *p;
+	unsigned long tim, limit;
 
 	/* take care of subpage writes */
 	if (len % 2 != 0) {
@@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 			iowrite16(*p++, info->nand.IO_ADDR_W);
 		}
 		/* wait for data to flushed-out before reset the prefetch */
-		do {
-			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
-		} while (pref_count);
+		tim = 0;
+		limit = (loops_per_jiffy *
+					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+			cpu_relax();
+
 		/* disable and stop the PFPW engine */
 		gpmc_prefetch_reset(info->gpmc_cs);
 	}
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
 /*
  * omap_nand_dma_cb: callback on the completion of dma transfer
  * @lch: logical channel
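This hunk replaces an unbounded do/while poll on GPMC_PREFETCH_COUNT with a loop bounded to roughly OMAP_NAND_TIMEOUT_MS (5 s), using loops_per_jiffy * msecs_to_jiffies() as a coarse iteration budget and cpu_relax() in the body. A minimal stand-alone sketch of the same pattern, with hypothetical names (poll_until_clear, cond) that are not part of this commit:

/* Sketch of the bounded-poll pattern used above; names are hypothetical.
 * cond() is polled with cpu_relax() until it returns 0 or roughly
 * timeout_ms has elapsed, using loops_per_jiffy as a coarse calibration.
 */
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static bool poll_until_clear(int (*cond)(void), unsigned int timeout_ms)
{
	unsigned long tim = 0;
	unsigned long limit = loops_per_jiffy * msecs_to_jiffies(timeout_ms);

	while (cond() && (tim++ < limit))
		cpu_relax();

	return !cond();	/* true if the condition cleared before the bound */
}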
@@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
 	struct omap_nand_info *info = container_of(mtd,
 					struct omap_nand_info, mtd);
-	uint32_t prefetch_status = 0;
 	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
 							DMA_FROM_DEVICE;
 	dma_addr_t dma_addr;
 	int ret;
+	unsigned long tim, limit;
 
-	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
-	 * length is 64 bytes.
+	/* The fifo depth is 64 bytes max.
+	 * But configure the FIFO-threahold to 32 to get a sync at each frame
+	 * and frame length is 32 bytes.
 	 */
 	int buf_len = len >> 6;
 
@@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
 	}
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
 	if (ret)
-		/* PFPW engine is busy, use cpu copy methode */
+		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	init_completion(&info->comp);
@@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 
 	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
 
-	do {
-		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
-	} while (prefetch_status);
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 
@@ -426,14 +436,6 @@ out_copy:
 				: omap_write_buf8(mtd, (u_char *) addr, len);
 	return 0;
 }
-#else
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
-				unsigned int len, int is_write)
-{
-	return 0;
-}
-#endif
 
 /**
  * omap_read_buf_dma_pref - read data from NAND controller into buffer
@@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
 	omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
 }
 
+/*
+ * omap_nand_irq - GMPC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+	struct omap_nand_info *info = (struct omap_nand_info *) dev;
+	u32 bytes;
+	u32 irq_stat;
+
+	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
+	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+		if (irq_stat & 0x2)
+			goto done;
+
+		if (info->buf_len && (info->buf_len < bytes))
+			bytes = info->buf_len;
+		else if (!info->buf_len)
+			bytes = 0;
+		iowrite32_rep(info->nand.IO_ADDR_W,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+		info->buf_len -= bytes;
+
+	} else {
+		ioread32_rep(info->nand.IO_ADDR_R,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+
+		if (irq_stat & 0x2)
+			goto done;
+	}
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+
+done:
+	complete(&info->comp);
+	/* disable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
+
+	/* clear status */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+
+	if (len <= mtd->oobsize) {
+		omap_read_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_READ;
+	info->buf = buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer */
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for read to complete */
+	wait_for_completion(&info->comp);
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_read_buf16(mtd, buf, len);
+	else
+		omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	struct omap_nand_info *info = container_of(mtd,
+						struct omap_nand_info, mtd);
+	int ret = 0;
+	unsigned long tim, limit;
+
+	if (len <= mtd->oobsize) {
+		omap_write_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_WRITE;
+	info->buf = (u_char *) buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer : size=24 */
+	ret = gpmc_prefetch_enable(info->gpmc_cs,
+		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+	/* enable irq */
+	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
+			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	/* waiting for write to complete */
+	wait_for_completion(&info->comp);
+	/* wait for data to flushed-out before reset the prefetch */
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		cpu_relax();
+
+	/* disable and stop the PFPW engine */
+	gpmc_prefetch_reset(info->gpmc_cs);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_write_buf16(mtd, buf, len);
+	else
+		omap_write_buf8(mtd, buf, len);
+}
+
 /**
  * omap_verify_buf - Verify chip data against buffer
  * @mtd: MTD device structure
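The new IRQ-driven read/write paths above program smaller GPMC FIFO thresholds than the polled and DMA paths: PREFETCH_FIFOTHRESHOLD_MAX/2 for reads and (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8 for writes (the "size=24" comment matches a 64-byte FIFO). A small stand-alone sketch of that arithmetic, assuming PREFETCH_FIFOTHRESHOLD_MAX is 64 as the "fifo depth is 64 bytes max" comment earlier in this diff suggests; the macro value here is an assumption for illustration only:

/* Sketch only: PREFETCH_FIFOTHRESHOLD_MAX really comes from the GPMC headers;
 * 64 is assumed here to mirror the "fifo depth is 64 bytes max" comment.
 */
#include <stdio.h>

#define PREFETCH_FIFOTHRESHOLD_MAX	64	/* assumed value */

int main(void)
{
	int polled_dma = PREFETCH_FIFOTHRESHOLD_MAX;		/* 64 bytes */
	int irq_read   = PREFETCH_FIFOTHRESHOLD_MAX / 2;	/* 32 bytes */
	int irq_write  = (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8;	/* 24 bytes, matching the size=24 comment */

	printf("polled/dma=%d irq-read=%d irq-write=%d\n",
	       polled_dma, irq_read, irq_write);
	return 0;
}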
@@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
 	return 0;
 }
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-
 /**
  * gen_true_ecc - This function will generate true ECC value
  * @ecc_buf: buffer to store ecc code
@@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
 	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
 }
 
-#endif
-
 /**
  * omap_wait - wait until the command is done
  * @mtd: MTD device structure
@@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	struct omap_nand_info		*info;
 	struct omap_nand_platform_data	*pdata;
 	int				err;
+	int				i, offset;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->mtd.name		= dev_name(&pdev->dev);
 	info->mtd.owner		= THIS_MODULE;
 
-	info->nand.options	|= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+	info->nand.options	= pdata->devsize;
 	info->nand.options	|= NAND_SKIP_BBTSCAN;
 
 	/* NAND write protect off */
@@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		info->nand.chip_delay = 50;
 	}
 
-	if (use_prefetch) {
-
+	switch (pdata->xfer_type) {
+	case NAND_OMAP_PREFETCH_POLLED:
 		info->nand.read_buf   = omap_read_buf_pref;
 		info->nand.write_buf  = omap_write_buf_pref;
-		if (use_dma) {
-			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-				omap_nand_dma_cb, &info->comp, &info->dma_ch);
-			if (err < 0) {
-				info->dma_ch = -1;
-				printk(KERN_WARNING "DMA request failed."
-					" Non-dma data transfer mode\n");
-			} else {
-				omap_set_dma_dest_burst_mode(info->dma_ch,
-						OMAP_DMA_DATA_BURST_16);
-				omap_set_dma_src_burst_mode(info->dma_ch,
-						OMAP_DMA_DATA_BURST_16);
-
-				info->nand.read_buf   = omap_read_buf_dma_pref;
-				info->nand.write_buf  = omap_write_buf_dma_pref;
-			}
-		}
-	} else {
+		break;
+
+	case NAND_OMAP_POLLED:
 		if (info->nand.options & NAND_BUSWIDTH_16) {
 			info->nand.read_buf   = omap_read_buf16;
 			info->nand.write_buf  = omap_write_buf16;
@@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 			info->nand.read_buf   = omap_read_buf8;
 			info->nand.write_buf  = omap_write_buf8;
 		}
+		break;
+
+	case NAND_OMAP_PREFETCH_DMA:
+		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+				omap_nand_dma_cb, &info->comp, &info->dma_ch);
+		if (err < 0) {
+			info->dma_ch = -1;
+			dev_err(&pdev->dev, "DMA request failed!\n");
+			goto out_release_mem_region;
+		} else {
+			omap_set_dma_dest_burst_mode(info->dma_ch,
+					OMAP_DMA_DATA_BURST_16);
+			omap_set_dma_src_burst_mode(info->dma_ch,
+					OMAP_DMA_DATA_BURST_16);
+
+			info->nand.read_buf   = omap_read_buf_dma_pref;
+			info->nand.write_buf  = omap_write_buf_dma_pref;
+		}
+		break;
+
+	case NAND_OMAP_PREFETCH_IRQ:
+		err = request_irq(pdata->gpmc_irq,
+				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+							pdata->gpmc_irq, err);
+			goto out_release_mem_region;
+		} else {
+			info->gpmc_irq       = pdata->gpmc_irq;
+			info->nand.read_buf  = omap_read_buf_irq_pref;
+			info->nand.write_buf = omap_write_buf_irq_pref;
+		}
+		break;
+
+	default:
+		dev_err(&pdev->dev,
+			"xfer_type(%d) not supported!\n", pdata->xfer_type);
+		err = -EINVAL;
+		goto out_release_mem_region;
 	}
-	info->nand.verify_buf = omap_verify_buf;
 
-#ifdef CONFIG_MTD_NAND_OMAP_HWECC
-	info->nand.ecc.bytes		= 3;
-	info->nand.ecc.size		= 512;
-	info->nand.ecc.calculate	= omap_calculate_ecc;
-	info->nand.ecc.hwctl		= omap_enable_hwecc;
-	info->nand.ecc.correct		= omap_correct_data;
-	info->nand.ecc.mode		= NAND_ECC_HW;
+	info->nand.verify_buf = omap_verify_buf;
 
-#else
-	info->nand.ecc.mode = NAND_ECC_SOFT;
-#endif
+	/* selsect the ecc type */
+	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
+		info->nand.ecc.mode = NAND_ECC_SOFT;
+	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
+		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
+		info->nand.ecc.bytes		= 3;
+		info->nand.ecc.size		= 512;
+		info->nand.ecc.calculate	= omap_calculate_ecc;
+		info->nand.ecc.hwctl		= omap_enable_hwecc;
+		info->nand.ecc.correct		= omap_correct_data;
+		info->nand.ecc.mode		= NAND_ECC_HW;
+	}
 
 	/* DIP switches on some boards change between 8 and 16 bit
 	 * bus widths for flash. Try the other width if the first try fails.
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		}
 	}
 
+	/* rom code layout */
+	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
+
+		if (info->nand.options & NAND_BUSWIDTH_16)
+			offset = 2;
+		else {
+			offset = 1;
+			info->nand.badblock_pattern = &bb_descrip_flashbased;
+		}
+		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
+		for (i = 0; i < omap_oobinfo.eccbytes; i++)
+			omap_oobinfo.eccpos[i] = i+offset;
+
+		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
+		omap_oobinfo.oobfree->length = info->mtd.oobsize -
+					(offset + omap_oobinfo.eccbytes);
+
+		info->nand.ecc.layout = &omap_oobinfo;
+	}
+
 #ifdef CONFIG_MTD_PARTITIONS
 	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
 	if (err > 0)
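The ROM-code ECC layout just added is plain arithmetic over the OOB size and bus width. As a worked example with assumed values (an x8 device with a 64-byte OOB, not taken from this commit): offset = 1, eccbytes = 3 * (64 / 16) = 12, the ECC bytes occupy OOB positions 1..12, and the free region starts at offset 13 with length 51. A stand-alone sketch of that computation:

/* Stand-alone sketch of the ROM-code OOB layout computed above.
 * oobsize and bus width are assumptions; the driver takes them from
 * info->mtd.oobsize and the bus width detected at probe time.
 */
#include <stdio.h>

int main(void)
{
	int oobsize = 64;	/* assumed: 64-byte OOB (2 KiB page) */
	int buswidth_16 = 0;	/* assumed: x8 part */
	int offset = buswidth_16 ? 2 : 1;
	int eccbytes = 3 * (oobsize / 16);
	int i;

	printf("eccpos:");
	for (i = 0; i < eccbytes; i++)
		printf(" %d", i + offset);
	printf("\noobfree: offset=%d length=%d\n",
	       offset + eccbytes, oobsize - (offset + eccbytes));
	return 0;
}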
@@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev)
 							mtd);
 
 	platform_set_drvdata(pdev, NULL);
-	if (use_dma)
+	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->gpmc_irq)
+		free_irq(info->gpmc_irq, info);
+
 	/* Release NAND device, its internal structures and partitions */
 	nand_release(&info->mtd);
 	iounmap(info->nand.IO_ADDR_R);
@@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = {
 
 static int __init omap_nand_init(void)
 {
-	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+	pr_info("%s driver initializing\n", DRIVER_NAME);
 
-	/* This check is required if driver is being
-	 * loaded run time as a module
-	 */
-	if ((1 == use_dma) && (0 == use_prefetch)) {
-		printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 "
-				"without use_prefetch'. Prefetch will not be"
-				" used in either mode (mpu or dma)\n");
-	}
 	return platform_driver_register(&omap_nand_driver);
 }
 