about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2012-04-24 19:16:00 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2012-07-31 07:06:24 -0400
commit763e735910922382c2577e820e2a51df0a7cf17c (patch)
treef03b8e03378eccd75f11e276742e56b21f27a4c1 /drivers
parent8c7494a501bd45e1bf1599a51331b1c210cadbae (diff)
mtd: omap2: add DMA engine support
Add DMA engine support to the OMAP2 NAND driver. This supplements the private DMA API implementation contained within this driver, and the driver can be independently switched at build time between using DMA engine and the private DMA API.

Tested-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/mtd/nand/omap2.c92
1 files changed, 91 insertions, 1 deletions
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b9..2912d6c93635 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/dmaengine.h>
12#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
13#include <linux/delay.h> 14#include <linux/delay.h>
14#include <linux/module.h> 15#include <linux/module.h>
@@ -18,6 +19,7 @@
18#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
20#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/omap-dma.h>
21#include <linux/io.h> 23#include <linux/io.h>
22#include <linux/slab.h> 24#include <linux/slab.h>
23 25
@@ -123,6 +125,7 @@ struct omap_nand_info {
123 int gpmc_cs; 125 int gpmc_cs;
124 unsigned long phys_base; 126 unsigned long phys_base;
125 struct completion comp; 127 struct completion comp;
128 struct dma_chan *dma;
126 int dma_ch; 129 int dma_ch;
127 int gpmc_irq; 130 int gpmc_irq;
128 enum { 131 enum {
@@ -345,6 +348,10 @@ static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
345{ 348{
346 complete((struct completion *) data); 349 complete((struct completion *) data);
347} 350}
351static void omap_nand_dma_callback(void *data)
352{
353 complete((struct completion *) data);
354}
348 355
349/* 356/*
350 * omap_nand_dma_transfer: configer and start dma transfer 357 * omap_nand_dma_transfer: configer and start dma transfer
@@ -382,6 +389,56 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
382 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 389 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
383 } 390 }
384 391
392 if (info->dma) {
393 struct dma_async_tx_descriptor *tx;
394 struct scatterlist sg;
395 unsigned n;
396
397 sg_init_one(&sg, addr, len);
398 n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
399 if (n == 0) {
400 dev_err(&info->pdev->dev,
401 "Couldn't DMA map a %d byte buffer\n", len);
402 goto out_copy;
403 }
404
405 tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
406 is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
407 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
408 if (!tx) {
409 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
410 goto out_copy;
411 }
412 tx->callback = omap_nand_dma_callback;
413 tx->callback_param = &info->comp;
414 dmaengine_submit(tx);
415
416 /* configure and start prefetch transfer */
417 ret = gpmc_prefetch_enable(info->gpmc_cs,
418 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
419 if (ret) {
420 /* PFPW engine is busy, use cpu copy method */
421 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
422 goto out_copy;
423 }
424
425 init_completion(&info->comp);
426 dma_async_issue_pending(info->dma);
427
428 /* setup and start DMA using dma_addr */
429 wait_for_completion(&info->comp);
430 tim = 0;
431 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
432 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
433 cpu_relax();
434
435 /* disable and stop the PFPW engine */
436 gpmc_prefetch_reset(info->gpmc_cs);
437
438 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
439 return 0;
440 }
441
385 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); 442 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
386 if (dma_mapping_error(&info->pdev->dev, dma_addr)) { 443 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
387 dev_err(&info->pdev->dev, 444 dev_err(&info->pdev->dev,
@@ -414,7 +471,6 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
414 goto out_copy_unmap; 471 goto out_copy_unmap;
415 472
416 init_completion(&info->comp); 473 init_completion(&info->comp);
417
418 omap_start_dma(info->dma_ch); 474 omap_start_dma(info->dma_ch);
419 475
420 /* setup and start DMA using dma_addr */ 476 /* setup and start DMA using dma_addr */
@@ -1164,6 +1220,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1164 struct omap_nand_platform_data *pdata; 1220 struct omap_nand_platform_data *pdata;
1165 int err; 1221 int err;
1166 int i, offset; 1222 int i, offset;
1223 dma_cap_mask_t mask;
1224 unsigned sig;
1167 1225
1168 pdata = pdev->dev.platform_data; 1226 pdata = pdev->dev.platform_data;
1169 if (pdata == NULL) { 1227 if (pdata == NULL) {
@@ -1244,6 +1302,33 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1244 break; 1302 break;
1245 1303
1246 case NAND_OMAP_PREFETCH_DMA: 1304 case NAND_OMAP_PREFETCH_DMA:
1305 dma_cap_zero(mask);
1306 dma_cap_set(DMA_SLAVE, mask);
1307 sig = OMAP24XX_DMA_GPMC;
1308 info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1309 if (!info->dma) {
1310 dev_warn(&pdev->dev, "DMA engine request failed\n");
1311 } else {
1312 struct dma_slave_config cfg;
1313 int rc;
1314
1315 memset(&cfg, 0, sizeof(cfg));
1316 cfg.src_addr = info->phys_base;
1317 cfg.dst_addr = info->phys_base;
1318 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1319 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1320 cfg.src_maxburst = 16;
1321 cfg.dst_maxburst = 16;
1322 rc = dmaengine_slave_config(info->dma, &cfg);
1323 if (rc) {
1324 dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
1325 rc);
1326 goto out_release_mem_region;
1327 }
1328 info->nand.read_buf = omap_read_buf_dma_pref;
1329 info->nand.write_buf = omap_write_buf_dma_pref;
1330 break;
1331 }
1247 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", 1332 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
1248 omap_nand_dma_cb, &info->comp, &info->dma_ch); 1333 omap_nand_dma_cb, &info->comp, &info->dma_ch);
1249 if (err < 0) { 1334 if (err < 0) {
@@ -1358,6 +1443,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1358 return 0; 1443 return 0;
1359 1444
1360out_release_mem_region: 1445out_release_mem_region:
1446 if (info->dma)
1447 dma_release_channel(info->dma);
1361 release_mem_region(info->phys_base, NAND_IO_SIZE); 1448 release_mem_region(info->phys_base, NAND_IO_SIZE);
1362out_free_info: 1449out_free_info:
1363 kfree(info); 1450 kfree(info);
@@ -1376,6 +1463,9 @@ static int omap_nand_remove(struct platform_device *pdev)
1376 if (info->dma_ch != -1) 1463 if (info->dma_ch != -1)
1377 omap_free_dma(info->dma_ch); 1464 omap_free_dma(info->dma_ch);
1378 1465
1466 if (info->dma)
1467 dma_release_channel(info->dma);
1468
1379 if (info->gpmc_irq) 1469 if (info->gpmc_irq)
1380 free_irq(info->gpmc_irq, info); 1470 free_irq(info->gpmc_irq, info);
1381 1471