author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-04-24 19:19:39 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-07-31 07:06:25 -0400
commit		2df41d0533ad1a77f5914667a1699068bf83ae64 (patch)
tree		27cc156f4a23781f011b7a24dcd809ea4f51cf5e /drivers
parent		763e735910922382c2577e820e2a51df0a7cf17c (diff)
mtd: omap2: remove private DMA API implementation
Remove the private DMA API implementation from nand/omap2.c, making it
use the DMA engine API exclusively.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
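
The conversion moves the driver onto the generic dmaengine slave-transfer sequence: map the buffer, prepare a slave descriptor, attach a completion callback, submit it, kick the engine, and wait. A minimal self-contained sketch of that sequence follows; the helper name do_slave_xfer, its parameters, and the error codes are illustrative only, and the GPMC prefetch setup the driver interleaves with this sequence is omitted.

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Runs from the DMA driver when the transfer finishes. */
static void xfer_done(void *data)
{
	complete(data);
}

/* Illustrative dmaengine slave transfer: map, prep, submit, issue, wait. */
static int do_slave_xfer(struct dma_chan *chan, void *buf, size_t len,
			 bool is_write, struct completion *comp)
{
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(chan->device->dev, &sg, 1, dir) == 0)
		return -ENOMEM;		/* caller can fall back to PIO */

	tx = dmaengine_prep_slave_sg(chan, &sg, 1,
			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_sg(chan->device->dev, &sg, 1, dir);
		return -EIO;
	}

	init_completion(comp);
	tx->callback = xfer_done;	/* completes @comp on DMA done */
	tx->callback_param = comp;
	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start execution */

	wait_for_completion(comp);	/* blocks until xfer_done() runs */
	dma_unmap_sg(chan->device->dev, &sg, 1, dir);
	return 0;
}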
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/nand/omap2.c	| 136
1 file changed, 26 insertions(+), 110 deletions(-)
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 2912d6c93635..e9309b3659e7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -126,7 +126,6 @@ struct omap_nand_info {
 	unsigned long			phys_base;
 	struct completion		comp;
 	struct dma_chan			*dma;
-	int				dma_ch;
 	int				gpmc_irq;
 	enum {
 		OMAP_NAND_IO_READ = 0,	/* read */
@@ -339,15 +338,9 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 }
 
 /*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
  * @data: pointer to completion data structure
  */
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
-{
-	complete((struct completion *) data);
-}
 static void omap_nand_dma_callback(void *data)
 {
 	complete((struct completion *) data);
@@ -365,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
 	struct omap_nand_info *info = container_of(mtd,
 					struct omap_nand_info, mtd);
+	struct dma_async_tx_descriptor *tx;
 	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
 							DMA_FROM_DEVICE;
-	dma_addr_t dma_addr;
-	int ret;
+	struct scatterlist sg;
 	unsigned long tim, limit;
-
-	/* The fifo depth is 64 bytes max.
-	 * But configure the FIFO-threahold to 32 to get a sync at each frame
-	 * and frame length is 32 bytes.
-	 */
-	int buf_len = len >> 6;
+	unsigned n;
+	int ret;
 
 	if (addr >= high_memory) {
 		struct page *p1;
@@ -389,89 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
 	}
 
-	if (info->dma) {
-		struct dma_async_tx_descriptor *tx;
-		struct scatterlist sg;
-		unsigned n;
-
-		sg_init_one(&sg, addr, len);
-		n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
-		if (n == 0) {
-			dev_err(&info->pdev->dev,
-				"Couldn't DMA map a %d byte buffer\n", len);
-			goto out_copy;
-		}
-
-		tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
-			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!tx) {
-			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-			goto out_copy;
-		}
-		tx->callback = omap_nand_dma_callback;
-		tx->callback_param = &info->comp;
-		dmaengine_submit(tx);
-
-		/* configure and start prefetch transfer */
-		ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
-		if (ret) {
-			/* PFPW engine is busy, use cpu copy method */
-			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-			goto out_copy;
-		}
-
-		init_completion(&info->comp);
-		dma_async_issue_pending(info->dma);
-
-		/* setup and start DMA using dma_addr */
-		wait_for_completion(&info->comp);
-		tim = 0;
-		limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
-			cpu_relax();
-
-		/* disable and stop the PFPW engine */
-		gpmc_prefetch_reset(info->gpmc_cs);
-
-		dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-		return 0;
-	}
-
-	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
-	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+	sg_init_one(&sg, addr, len);
+	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+	if (n == 0) {
 		dev_err(&info->pdev->dev,
 			"Couldn't DMA map a %d byte buffer\n", len);
 		goto out_copy;
 	}
 
-	if (is_write) {
-		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-					info->phys_base, 0, 0);
-		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-					dma_addr, 0, 0);
-		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
-	} else {
-		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-					info->phys_base, 0, 0);
-		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-					dma_addr, 0, 0);
-		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
-	}
-	/* configure and start prefetch transfer */
+	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx)
+		goto out_copy_unmap;
+
+	tx->callback = omap_nand_dma_callback;
+	tx->callback_param = &info->comp;
+	dmaengine_submit(tx);
+
+	/* configure and start prefetch transfer */
 	ret = gpmc_prefetch_enable(info->gpmc_cs,
 			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy_unmap;
 
 	init_completion(&info->comp);
-	omap_start_dma(info->dma_ch);
+	dma_async_issue_pending(info->dma);
 
 	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
@@ -483,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 
-	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 	return 0;
 
 out_copy_unmap:
-	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 out_copy:
 	if (info->nand.options & NAND_BUSWIDTH_16)
 		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1307,7 +1240,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		sig = OMAP24XX_DMA_GPMC;
 		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 		if (!info->dma) {
-			dev_warn(&pdev->dev, "DMA engine request failed\n");
+			dev_err(&pdev->dev, "DMA engine request failed\n");
+			err = -ENXIO;
+			goto out_release_mem_region;
 		} else {
 			struct dma_slave_config cfg;
 			int rc;
@@ -1327,22 +1262,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 			}
 			info->nand.read_buf   = omap_read_buf_dma_pref;
 			info->nand.write_buf  = omap_write_buf_dma_pref;
-			break;
-		}
-		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-				omap_nand_dma_cb, &info->comp, &info->dma_ch);
-		if (err < 0) {
-			info->dma_ch = -1;
-			dev_err(&pdev->dev, "DMA request failed!\n");
-			goto out_release_mem_region;
-		} else {
-			omap_set_dma_dest_burst_mode(info->dma_ch,
-					OMAP_DMA_DATA_BURST_16);
-			omap_set_dma_src_burst_mode(info->dma_ch,
-					OMAP_DMA_DATA_BURST_16);
-
-			info->nand.read_buf   = omap_read_buf_dma_pref;
-			info->nand.write_buf  = omap_write_buf_dma_pref;
 		}
 		break;
 
@@ -1460,9 +1379,6 @@ static int omap_nand_remove(struct platform_device *pdev)
 		omap3_free_bch(&info->mtd);
 
 	platform_set_drvdata(pdev, NULL);
-	if (info->dma_ch != -1)
-		omap_free_dma(info->dma_ch);
-
 	if (info->dma)
 		dma_release_channel(info->dma);
 
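
One design note on the probe hunk: the visible context only declares struct dma_slave_config cfg, so the actual channel configuration falls outside this diff. The fragment below is a plausible fill under the assumption that it mirrors what the deleted omap_set_dma_*() calls programmed (a constant GPMC data-register address, 32-bit accesses, bursts of 16); treat the field values as assumptions, not as quoted driver code.

	/* Probe-time channel setup (assumed values, mirroring the old
	 * omap_set_dma_*() programming this patch deletes). */
	struct dma_slave_config cfg;
	int rc;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = info->phys_base;		/* reads: GPMC data register */
	cfg.dst_addr = info->phys_base;		/* writes: same fixed address */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 16;
	cfg.dst_maxburst = 16;
	rc = dmaengine_slave_config(info->dma, &cfg);
	if (rc) {
		dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", rc);
		goto out_release_mem_region;
	}

With the slave parameters fixed on the channel once at probe time, each transfer in omap_nand_dma_transfer() only has to supply a scatterlist and a direction, which is what lets the per-transfer omap_set_dma_{src,dest}_params() blocks disappear.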