author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 12:24:48 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 12:24:48 -0500
commit		5115f3c19d17851aaff5a857f55b4a019c908775 (patch)
tree		0d02cf01e12e86365f4f5e3b234f986daef181a7 /drivers/mtd/nand/fsmc_nand.c
parent		c41b3810c09e60664433548c5218cc6ece6a8903 (diff)
parent		17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This is fairly big pull by my standards as I had missed last merge
window. So we have the support for device tree for slave-dmaengine,
large updates to dw_dmac driver from Andy for reusing on different
architectures. Along with this we have fixes on bunch of the drivers"
Fix up trivial conflicts, usually due to #include lines being moved
next to each other.
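As background for the device-tree work mentioned above: with the generic DMA
binding, a slave driver no longer needs a platform-specific filter function
and can request its channel by the name listed in the node's "dmas"/"dma-names"
properties. The fragment below is only an illustrative sketch of that consumer
side; the device and the "rx" channel name are hypothetical, not taken from
this merge.

#include <linux/device.h>
#include <linux/dmaengine.h>

/* Hypothetical consumer: look up the channel named "rx" from the node's
 * dma-names property via the generic slave-dmaengine DT binding. */
static struct dma_chan *example_request_rx_chan(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		dev_warn(dev, "no DMA channel, falling back to PIO\n");

	return chan;
}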
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
ARM: dts: pl330: Add #dma-cells for generic dma binding support
DMA: PL330: Register the DMA controller with the generic DMA helpers
DMA: PL330: Add xlate function
DMA: PL330: Add new pl330 filter for DT case.
dma: tegra20-apb-dma: remove unnecessary assignment
edma: do not waste memory for dma_mask
dma: coh901318: set residue only if dma is in progress
dma: coh901318: avoid unbalanced locking
dmaengine.h: remove redundant else keyword
dma: of-dma: protect list write operation by spin_lock
dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
dma: of-dma.c: fix memory leakage
dw_dmac: apply default dma_mask if needed
dmaengine: ioat - fix spare sparse complain
dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
dw_dmac: add support for Lynxpoint DMA controllers
dw_dmac: return proper residue value
dw_dmac: fill individual length of descriptor
...
Diffstat (limited to 'drivers/mtd/nand/fsmc_nand.c')
-rw-r--r--	drivers/mtd/nand/fsmc_nand.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 09af555408b7..05ba3f0c2d19 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
 	dma_dev = chan->device;
 	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
 
+	flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+
 	if (direction == DMA_TO_DEVICE) {
 		dma_src = dma_addr;
 		dma_dst = host->data_pa;
-		flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
 	} else {
 		dma_src = host->data_pa;
 		dma_dst = dma_addr;
-		flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
 	}
 
 	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
 			len, flags);
-
 	if (!tx) {
 		dev_err(host->dev, "device_prep_dma_memcpy error\n");
-		dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
-		return -EIO;
+		ret = -EIO;
+		goto unmap_dma;
 	}
 
 	tx->callback = dma_complete;
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
 	ret = dma_submit_error(cookie);
 	if (ret) {
 		dev_err(host->dev, "dma_submit_error %d\n", cookie);
-		return ret;
+		goto unmap_dma;
 	}
 
 	dma_async_issue_pending(chan);
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
 	if (ret <= 0) {
 		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 		dev_err(host->dev, "wait_for_completion_timeout\n");
-		return ret ? ret : -ETIMEDOUT;
+		if (!ret)
+			ret = -ETIMEDOUT;
+		goto unmap_dma;
 	}
 
-	return 0;
+	ret = 0;
+
+unmap_dma:
+	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+	return ret;
 }
 
 /*
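A note on the pattern this hunk adopts: the per-descriptor
DMA_COMPL_*_UNMAP_SINGLE flags are dropped, the driver asks the engine to skip
unmapping entirely (DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP), and
every exit path funnels through a single unmap_dma label so dma_unmap_single()
runs exactly once. The sketch below shows only that error-handling shape; it is
not the fsmc_nand code, and do_transfer() is a hypothetical stand-in for
preparing, submitting and waiting on the descriptor.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical stand-in for the prepare/submit/wait steps of the transfer. */
static int do_transfer(struct device *dev, dma_addr_t addr, size_t len)
{
	return 0;
}

static int example_xfer(struct device *dev, void *buf, size_t len,
			enum dma_data_direction dir)
{
	dma_addr_t addr;
	int ret;

	addr = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	ret = do_transfer(dev, addr, len);
	if (ret)
		goto unmap_dma;	/* single cleanup path, as in the hunk above */

	ret = 0;

unmap_dma:
	dma_unmap_single(dev, addr, len, dir);

	return ret;
}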