diff options
author | Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | 2012-11-05 05:00:14 -0500 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2013-01-08 01:04:59 -0500 |
commit | d1806a5c4d2248d2799f4367dbdb1800be94a26f (patch) | |
tree | 6c9bfb121641de1f4acfaa9612ff6186f2094f3f /drivers/mtd | |
parent | 522d974451743abcf674cbebd7c29d44fbd63586 (diff) |
mtd: fsmc_nand: add missing DMA unmap to dma_xfer()
Make dma_xfer() do DMA unmapping itself and fix handling
of failure cases.
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Vipin Kumar <vipin.kumar@st.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <djbw@fb.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r-- | drivers/mtd/nand/fsmc_nand.c | 22 |
1 file changed, 14 insertions, 8 deletions
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 1d7446434b0e..a66576863e53 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
573 | dma_dev = chan->device; | 573 | dma_dev = chan->device; |
574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); | 574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); |
575 | 575 | ||
576 | flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; | ||
577 | |||
576 | if (direction == DMA_TO_DEVICE) { | 578 | if (direction == DMA_TO_DEVICE) { |
577 | dma_src = dma_addr; | 579 | dma_src = dma_addr; |
578 | dma_dst = host->data_pa; | 580 | dma_dst = host->data_pa; |
579 | flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP; | ||
580 | } else { | 581 | } else { |
581 | dma_src = host->data_pa; | 582 | dma_src = host->data_pa; |
582 | dma_dst = dma_addr; | 583 | dma_dst = dma_addr; |
583 | flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP; | ||
584 | } | 584 | } |
585 | 585 | ||
586 | tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, | 586 | tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, |
587 | len, flags); | 587 | len, flags); |
588 | |||
589 | if (!tx) { | 588 | if (!tx) { |
590 | dev_err(host->dev, "device_prep_dma_memcpy error\n"); | 589 | dev_err(host->dev, "device_prep_dma_memcpy error\n"); |
591 | dma_unmap_single(dma_dev->dev, dma_addr, len, direction); | 590 | ret = -EIO; |
592 | return -EIO; | 591 | goto unmap_dma; |
593 | } | 592 | } |
594 | 593 | ||
595 | tx->callback = dma_complete; | 594 | tx->callback = dma_complete; |
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
599 | ret = dma_submit_error(cookie); | 598 | ret = dma_submit_error(cookie); |
600 | if (ret) { | 599 | if (ret) { |
601 | dev_err(host->dev, "dma_submit_error %d\n", cookie); | 600 | dev_err(host->dev, "dma_submit_error %d\n", cookie); |
602 | return ret; | 601 | goto unmap_dma; |
603 | } | 602 | } |
604 | 603 | ||
605 | dma_async_issue_pending(chan); | 604 | dma_async_issue_pending(chan); |
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
610 | if (ret <= 0) { | 609 | if (ret <= 0) { |
611 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | 610 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
612 | dev_err(host->dev, "wait_for_completion_timeout\n"); | 611 | dev_err(host->dev, "wait_for_completion_timeout\n"); |
613 | return ret ? ret : -ETIMEDOUT; | 612 | if (!ret) |
613 | ret = -ETIMEDOUT; | ||
614 | goto unmap_dma; | ||
614 | } | 615 | } |
615 | 616 | ||
616 | return 0; | 617 | ret = 0; |
618 | |||
619 | unmap_dma: | ||
620 | dma_unmap_single(dma_dev->dev, dma_addr, len, direction); | ||
621 | |||
622 | return ret; | ||
617 | } | 623 | } |
618 | 624 | ||
619 | /* | 625 | /* |