Diffstat (limited to 'drivers/mtd/nand/omap2.c')

-rw-r--r--   drivers/mtd/nand/omap2.c   301
 1 file changed, 225 insertions(+), 76 deletions(-)

diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ac4fd756eda3..27293e328517 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -101,6 +101,16 @@
 #define P4e_s(a)	(TF(a & NAND_Ecc_P4e)	<< 0)
 #define P4o_s(a)	(TF(a & NAND_Ecc_P4o)	<< 1)
 
+#define	PREFETCH_CONFIG1_CS_SHIFT	24
+#define	ECC_CONFIG_CS_SHIFT		1
+#define	CS_MASK				0x7
+#define	ENABLE_PREFETCH			(0x1 << 7)
+#define	DMA_MPU_MODE_SHIFT		2
+#define	ECCSIZE1_SHIFT			22
+#define	ECC1RESULTSIZE			0x1
+#define	ECCCLEAR			0x100
+#define	ECC1				0x1
+
 /* oob info generated runtime depending on ecc algorithm and layout selected */
 static struct nand_ecclayout omap_oobinfo;
 /* Define some generic bad / good block scan pattern which are used
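Note: the constants added above are local definitions of GPMC register fields that the rest of this patch uses in place of the removed gpmc_* helper calls: PREFETCH_CONFIG1_CS_SHIFT, ENABLE_PREFETCH and DMA_MPU_MODE_SHIFT describe PREFETCH_CONFIG1, ECC_CONFIG_CS_SHIFT/CS_MASK the chip-select field of ECC_CONFIG, ECCCLEAR/ECC1 target ECC_CONTROL, and ECCSIZE1_SHIFT/ECC1RESULTSIZE target ECC_SIZE_CONFIG. As an illustration only (not part of the patch), this is how omap_prefetch_enable() further down composes PREFETCH_CONFIG1 from them; PREFETCH_FIFOTHRESHOLD() is assumed to come from the existing GPMC header:

	u32 val;

	val = (cs << PREFETCH_CONFIG1_CS_SHIFT)		/* chip-select field (3 bits, cf. CS_MASK) */
		| PREFETCH_FIFOTHRESHOLD(fifo_th)	/* FIFO threshold (existing macro) */
		| ENABLE_PREFETCH			/* engine enable, bit 7 */
		| (dma_mode << DMA_MPU_MODE_SHIFT)	/* DMA request vs. MPU draining */
		| (0x1 & is_write);			/* 0 = prefetch read, 1 = write posting */
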
@@ -124,15 +134,18 @@ struct omap_nand_info {
 
 	int				gpmc_cs;
 	unsigned long			phys_base;
+	unsigned long			mem_size;
 	struct completion		comp;
 	struct dma_chan			*dma;
-	int				gpmc_irq;
+	int				gpmc_irq_fifo;
+	int				gpmc_irq_count;
 	enum {
 		OMAP_NAND_IO_READ = 0,	/* read */
 		OMAP_NAND_IO_WRITE,	/* write */
 	} iomode;
 	u_char				*buf;
 	int				buf_len;
+	struct gpmc_nand_regs		reg;
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
 	struct bch_control	*bch;
@@ -141,6 +154,63 @@ struct omap_nand_info {
 };
 
 /**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+	unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+	u32 val;
+
+	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+		return -1;
+
+	if (readl(info->reg.gpmc_prefetch_control))
+		return -EBUSY;
+
+	/* Set the amount of bytes to be prefetched */
+	writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+	/* Set dma/mpu mode, the prefetch read / post write and
+	 * enable the engine. Set which cs is has requested for.
+	 */
+	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+		(dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+	writel(val, info->reg.gpmc_prefetch_config1);
+
+	/*  Start the prefetch engine */
+	writel(0x1, info->reg.gpmc_prefetch_control);
+
+	return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+	u32 config1;
+
+	/* check if the same module/cs is trying to reset */
+	config1 = readl(info->reg.gpmc_prefetch_config1);
+	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+		return -EINVAL;
+
+	/* Stop the PFPW engine */
+	writel(0x0, info->reg.gpmc_prefetch_control);
+
+	/* Reset/disable the PFPW engine */
+	writel(0x0, info->reg.gpmc_prefetch_config1);
+
+	return 0;
+}
+
+/**
  * omap_hwcontrol - hardware specific access to control-lines
  * @mtd: MTD device structure
  * @cmd: command to device
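Note: the two helpers above replicate what gpmc_prefetch_enable()/gpmc_prefetch_reset() used to do inside the GPMC layer, but against the register addresses handed over in info->reg. A condensed usage sketch (it simply mirrors the CPU-polled read path in omap_read_buf_pref() further down; not part of the patch):

	/* sketch only: drain 'len' bytes into 'p' via the prefetch engine */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret)
		return;			/* engine busy: caller falls back to PIO copy */

	do {
		r_count = readl(info->reg.gpmc_prefetch_status);
		r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count) >> 2;
		ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
		p += r_count;
		len -= r_count << 2;
	} while (len);

	omap_prefetch_reset(info->gpmc_cs, info);
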
@@ -158,13 +228,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 
 	if (cmd != NAND_CMD_NONE) {
 		if (ctrl & NAND_CLE)
-			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+			writeb(cmd, info->reg.gpmc_nand_command);
 
 		else if (ctrl & NAND_ALE)
-			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+			writeb(cmd, info->reg.gpmc_nand_address);
 
 		else /* NAND_NCE */
-			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+			writeb(cmd, info->reg.gpmc_nand_data);
 	}
 }
 
@@ -198,7 +268,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
 		iowrite8(*p++, info->nand.IO_ADDR_W);
 		/* wait until buffer is available for write */
 		do {
-			status = gpmc_read_status(GPMC_STATUS_BUFFER);
+			status = readl(info->reg.gpmc_status) &
+					GPMC_STATUS_BUFF_EMPTY;
 		} while (!status);
 	}
 }
@@ -235,7 +306,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
 		iowrite16(*p++, info->nand.IO_ADDR_W);
 		/* wait until buffer is available for write */
 		do {
-			status = gpmc_read_status(GPMC_STATUS_BUFFER);
+			status = readl(info->reg.gpmc_status) &
+					GPMC_STATUS_BUFF_EMPTY;
 		} while (!status);
 	}
 }
@@ -265,8 +337,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -275,14 +347,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
 			omap_read_buf8(mtd, (u_char *)p, len);
 	} else {
 		do {
-			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+			r_count = readl(info->reg.gpmc_prefetch_status);
+			r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
 			r_count = r_count >> 2;
 			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
 			p += r_count;
 			len -= r_count << 2;
 		} while (len);
 		/* disable and stop the PFPW engine */
-		gpmc_prefetch_reset(info->gpmc_cs);
+		omap_prefetch_reset(info->gpmc_cs, info);
 	}
 }
 
@@ -301,6 +374,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	int i = 0, ret = 0;
 	u16 *p = (u16 *)buf;
 	unsigned long tim, limit;
+	u32 val;
 
 	/* take care of subpage writes */
 	if (len % 2 != 0) {
@@ -310,8 +384,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 	}
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
 	if (ret) {
 		/* PFPW engine is busy, use cpu copy method */
 		if (info->nand.options & NAND_BUSWIDTH_16)
@@ -320,7 +394,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 			omap_write_buf8(mtd, (u_char *)p, len);
 	} else {
 		while (len) {
-			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+			w_count = readl(info->reg.gpmc_prefetch_status);
+			w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
 			w_count = w_count >> 1;
 			for (i = 0; (i < w_count) && len; i++, len -= 2)
 				iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -329,11 +404,14 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 		tim = 0;
 		limit = (loops_per_jiffy *
 					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+		do {
 			cpu_relax();
+			val = readl(info->reg.gpmc_prefetch_status);
+			val = GPMC_PREFETCH_STATUS_COUNT(val);
+		} while (val && (tim++ < limit));
 
 		/* disable and stop the PFPW engine */
-		gpmc_prefetch_reset(info->gpmc_cs);
+		omap_prefetch_reset(info->gpmc_cs, info);
 	}
 }
 
@@ -365,6 +443,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	unsigned long tim, limit;
 	unsigned n;
 	int ret;
+	u32 val;
 
 	if (addr >= high_memory) {
 		struct page *p1;
@@ -396,9 +475,9 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	tx->callback_param = &info->comp;
 	dmaengine_submit(tx);
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy_unmap;
@@ -410,11 +489,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	wait_for_completion(&info->comp);
 	tim = 0;
 	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+	do {
 		cpu_relax();
+		val = readl(info->reg.gpmc_prefetch_status);
+		val = GPMC_PREFETCH_STATUS_COUNT(val);
+	} while (val && (tim++ < limit));
 
 	/* disable and stop the PFPW engine */
-	gpmc_prefetch_reset(info->gpmc_cs);
+	omap_prefetch_reset(info->gpmc_cs, info);
 
 	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 	return 0;
@@ -471,13 +554,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
 {
 	struct omap_nand_info *info = (struct omap_nand_info *) dev;
 	u32 bytes;
-	u32 irq_stat;
 
-	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+	bytes = readl(info->reg.gpmc_prefetch_status);
+	bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
 	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
 	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
-		if (irq_stat & 0x2)
+		if (this_irq == info->gpmc_irq_count)
 			goto done;
 
 		if (info->buf_len && (info->buf_len < bytes))
@@ -494,20 +576,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
 						(u32 *)info->buf, bytes >> 2);
 		info->buf = info->buf + bytes;
 
-		if (irq_stat & 0x2)
+		if (this_irq == info->gpmc_irq_count)
 			goto done;
 	}
-	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
 
 	return IRQ_HANDLED;
 
 done:
 	complete(&info->comp);
-	/* disable irq */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
 
-	/* clear status */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+	disable_irq_nosync(info->gpmc_irq_fifo);
+	disable_irq_nosync(info->gpmc_irq_count);
 
 	return IRQ_HANDLED;
 }
@@ -534,22 +613,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
 	init_completion(&info->comp);
 
 	/* configure and start prefetch transfer */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	info->buf_len = len;
-	/* enable irq */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
-		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	enable_irq(info->gpmc_irq_count);
+	enable_irq(info->gpmc_irq_fifo);
 
 	/* waiting for read to complete */
 	wait_for_completion(&info->comp);
 
 	/* disable and stop the PFPW engine */
-	gpmc_prefetch_reset(info->gpmc_cs);
+	omap_prefetch_reset(info->gpmc_cs, info);
 	return;
 
 out_copy:
@@ -572,6 +651,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
 						struct omap_nand_info, mtd);
 	int ret = 0;
 	unsigned long tim, limit;
+	u32 val;
 
 	if (len <= mtd->oobsize) {
 		omap_write_buf_pref(mtd, buf, len);
@@ -583,27 +663,31 @@
 	init_completion(&info->comp);
 
 	/* configure and start prefetch transfer : size=24 */
-	ret = gpmc_prefetch_enable(info->gpmc_cs,
-		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+	ret = omap_prefetch_enable(info->gpmc_cs,
+		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy;
 
 	info->buf_len = len;
-	/* enable irq */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
-		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+	enable_irq(info->gpmc_irq_count);
+	enable_irq(info->gpmc_irq_fifo);
 
 	/* waiting for write to complete */
 	wait_for_completion(&info->comp);
+
 	/* wait for data to flushed-out before reset the prefetch */
 	tim = 0;
 	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+	do {
+		val = readl(info->reg.gpmc_prefetch_status);
+		val = GPMC_PREFETCH_STATUS_COUNT(val);
 		cpu_relax();
+	} while (val && (tim++ < limit));
 
 	/* disable and stop the PFPW engine */
-	gpmc_prefetch_reset(info->gpmc_cs);
+	omap_prefetch_reset(info->gpmc_cs, info);
 	return;
 
 out_copy:
@@ -843,7 +927,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
 {
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
-	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+	u32 val;
+
+	val = readl(info->reg.gpmc_ecc_config);
+	if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
+		return -EINVAL;
+
+	/* read ecc result */
+	val = readl(info->reg.gpmc_ecc1_result);
+	*ecc_code++ = val;          /* P128e, ..., P1e */
+	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
+	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+	return 0;
 }
 
 /**
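Note: instead of asking the GPMC layer for the ECC bytes, the driver now reads the 32-bit ECC1 result register itself and splits it into the three 1-bit Hamming ECC bytes using the expressions above. A worked example with an assumed register value (plain user-space C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t val = 0x1235ABCD;	/* assumed GPMC ECC1_RESULT contents */
		uint8_t ecc[3];

		ecc[0] = val;				/* 0xCD: P128e ... P1e    */
		ecc[1] = val >> 16;			/* 0x35: P128o ... P1o    */
		ecc[2] = ((val >> 8) & 0x0f) |		/* 0x0B: P256e ... P2048e */
			 ((val >> 20) & 0xf0);		/* 0x20: P256o ... P2048o */

		printf("%02X %02X %02X\n", ecc[0], ecc[1], ecc[2]);	/* prints: CD 35 2B */
		return 0;
	}
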
@@ -857,8 +954,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
 							mtd);
 	struct nand_chip *chip = mtd->priv;
 	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+	u32 val;
+
+	/* clear ecc and enable bits */
+	val = ECCCLEAR | ECC1;
+	writel(val, info->reg.gpmc_ecc_control);
+
+	/* program ecc and result sizes */
+	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+			ECC1RESULTSIZE);
+	writel(val, info->reg.gpmc_ecc_size_config);
 
-	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+	switch (mode) {
+	case NAND_ECC_READ:
+	case NAND_ECC_WRITE:
+		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+		break;
+	case NAND_ECC_READSYN:
+		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+		break;
+	default:
+		dev_info(&info->pdev->dev,
+			"error: unrecognized Mode[%d]!\n", mode);
+		break;
+	}
+
+	/* (ECC 16 or 8 bit col) | ( CS )  | ECC Enable */
+	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+	writel(val, info->reg.gpmc_ecc_config);
 }
 
 /**
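Note: the enable path now programs ECC_CONTROL, ECC_SIZE_CONFIG and ECC_CONFIG itself rather than calling gpmc_enable_hwecc(). The size field stores (ecc.size / 2) - 1, and the final ECC_CONFIG word packs bus width, chip select and the enable bit. A worked example with assumed values (typical 1-bit Hamming step of 512 bytes, 8-bit device on CS0; plain user-space C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int ecc_size  = 512;	/* assumed info->nand.ecc.size */
		unsigned int dev_width = 0;	/* assumed 8-bit device        */
		unsigned int gpmc_cs   = 0;	/* assumed chip select         */

		/* same expressions as in omap_enable_hwecc() above;
		 * 22 = ECCSIZE1_SHIFT, 0x1 = ECC1RESULTSIZE */
		uint32_t size_cfg = (((ecc_size >> 1) - 1) << 22) | 0x1;
		uint32_t ecc_cfg  = (dev_width << 7) | (gpmc_cs << 1) | 0x1;

		printf("ECC_SIZE_CONFIG = 0x%08X\n", size_cfg);	/* 0x3FC00001 */
		printf("ECC_CONFIG      = 0x%08X\n", ecc_cfg);	/* 0x00000001 */
		return 0;
	}
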
@@ -886,10 +1009,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
 	else
 		timeo += (HZ * 20) / 1000;
 
-	gpmc_nand_write(info->gpmc_cs,
-			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
 	while (time_before(jiffies, timeo)) {
-		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+		status = readb(info->reg.gpmc_nand_data);
 		if (status & NAND_STATUS_READY)
 			break;
 		cond_resched();
@@ -909,22 +1031,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
 	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
 							mtd);
 
-	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+	val = readl(info->reg.gpmc_status);
+
 	if ((val & 0x100) == 0x100) {
-		/* Clear IRQ Interrupt */
-		val |= 0x100;
-		val &= ~(0x0);
-		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+		return 1;
 	} else {
-		unsigned int cnt = 0;
-		while (cnt++ < 0x1FF) {
-			if ((val & 0x100) == 0x100)
-				return 0;
-			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-		}
+		return 0;
 	}
-
-	return 1;
 }
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
@@ -1155,6 +1268,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	int				i, offset;
 	dma_cap_mask_t mask;
 	unsigned sig;
+	struct resource			*res;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -1174,7 +1288,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->pdev = pdev;
 
 	info->gpmc_cs		= pdata->cs;
-	info->phys_base		= pdata->phys_base;
+	info->reg		= pdata->reg;
 
 	info->mtd.priv		= &info->nand;
 	info->mtd.name		= dev_name(&pdev->dev);
@@ -1183,16 +1297,23 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	info->nand.options	= pdata->devsize;
 	info->nand.options	|= NAND_SKIP_BBTSCAN;
 
-	/* NAND write protect off */
-	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		err = -EINVAL;
+		dev_err(&pdev->dev, "error getting memory resource\n");
+		goto out_free_info;
+	}
+
+	info->phys_base = res->start;
+	info->mem_size = resource_size(res);
 
-	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+	if (!request_mem_region(info->phys_base, info->mem_size,
 				pdev->dev.driver->name)) {
 		err = -EBUSY;
 		goto out_free_info;
 	}
 
-	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+	info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
 	if (!info->nand.IO_ADDR_R) {
 		err = -ENOMEM;
 		goto out_release_mem_region;
@@ -1265,17 +1386,39 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		break;
 
 	case NAND_OMAP_PREFETCH_IRQ:
-		err = request_irq(pdata->gpmc_irq,
-				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+		if (info->gpmc_irq_fifo <= 0) {
+			dev_err(&pdev->dev, "error getting fifo irq\n");
+			err = -ENODEV;
+			goto out_release_mem_region;
+		}
+		err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
+					IRQF_SHARED, "gpmc-nand-fifo", info);
 		if (err) {
 			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
-							pdata->gpmc_irq, err);
+						info->gpmc_irq_fifo, err);
+			info->gpmc_irq_fifo = 0;
+			goto out_release_mem_region;
+		}
+
+		info->gpmc_irq_count = platform_get_irq(pdev, 1);
+		if (info->gpmc_irq_count <= 0) {
+			dev_err(&pdev->dev, "error getting count irq\n");
+			err = -ENODEV;
+			goto out_release_mem_region;
+		}
+		err = request_irq(info->gpmc_irq_count, omap_nand_irq,
+					IRQF_SHARED, "gpmc-nand-count", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+						info->gpmc_irq_count, err);
+			info->gpmc_irq_count = 0;
 			goto out_release_mem_region;
-		} else {
-			info->gpmc_irq = pdata->gpmc_irq;
-			info->nand.read_buf   = omap_read_buf_irq_pref;
-			info->nand.write_buf  = omap_write_buf_irq_pref;
 		}
+
+		info->nand.read_buf   = omap_read_buf_irq_pref;
+		info->nand.write_buf  = omap_write_buf_irq_pref;
+
 		break;
 
 	default:
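Note: this branch now assumes that the GPMC layer registers the NAND platform device with its resources in a fixed order: one memory resource (fetched earlier with platform_get_resource(pdev, IORESOURCE_MEM, 0)) plus two interrupts, index 0 for the prefetch FIFO event and index 1 for the terminal-count event. The platform-side table is not part of this file; illustrative only, with hypothetical names, its expected shape is roughly:

	static struct resource gpmc_nand_resource[] = {
		{ .flags = IORESOURCE_MEM },	/* NAND I/O window         */
		{ .flags = IORESOURCE_IRQ },	/* index 0: FIFO event     */
		{ .flags = IORESOURCE_IRQ },	/* index 1: terminal count */
	};
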
@@ -1363,7 +1506,11 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 out_release_mem_region:
 	if (info->dma)
 		dma_release_channel(info->dma);
-	release_mem_region(info->phys_base, NAND_IO_SIZE);
+	if (info->gpmc_irq_count > 0)
+		free_irq(info->gpmc_irq_count, info);
+	if (info->gpmc_irq_fifo > 0)
+		free_irq(info->gpmc_irq_fifo, info);
+	release_mem_region(info->phys_base, info->mem_size);
 out_free_info:
 	kfree(info);
 
@@ -1381,8 +1528,10 @@ static int omap_nand_remove(struct platform_device *pdev)
 	if (info->dma)
 		dma_release_channel(info->dma);
 
-	if (info->gpmc_irq)
-		free_irq(info->gpmc_irq, info);
+	if (info->gpmc_irq_count > 0)
+		free_irq(info->gpmc_irq_count, info);
+	if (info->gpmc_irq_fifo > 0)
+		free_irq(info->gpmc_irq_fifo, info);
 
 	/* Release NAND device, its internal structures and partitions */
 	nand_release(&info->mtd);