diff options
author | Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 2012-07-30 15:28:27 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2012-08-01 00:48:52 -0400 |
commit | 4f46f8ac80416b0e8fd3aba6a0d842205fb29140 (patch) | |
tree | 1913a661d292e74ea13e2e1af38b21ef6e5fb1bc /drivers/dma/sh | |
parent | ac694dbdbc403c00e2c14d10bc7b8412cc378259 (diff) |
dmaengine: shdma: restore partial transfer calculation
The recent shdma driver split has mistakenly removed support for partial
DMA transfer size calculation on forced termination. This patch restores
it.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Vinod Koul <vinod.koul@linux.intel.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'drivers/dma/sh')
-rw-r--r-- | drivers/dma/sh/shdma-base.c | 9 +++++++++
-rw-r--r-- | drivers/dma/sh/shdma.c | 12 ++++++++++++
2 files changed, 21 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 27f5c781fd73..f4cd946d259d 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, | |||
483 | new->mark = DESC_PREPARED; | 483 | new->mark = DESC_PREPARED; |
484 | new->async_tx.flags = flags; | 484 | new->async_tx.flags = flags; |
485 | new->direction = direction; | 485 | new->direction = direction; |
486 | new->partial = 0; | ||
486 | 487 | ||
487 | *len -= copy_size; | 488 | *len -= copy_size; |
488 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) | 489 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) |
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
644 | case DMA_TERMINATE_ALL: | 645 | case DMA_TERMINATE_ALL: |
645 | spin_lock_irqsave(&schan->chan_lock, flags); | 646 | spin_lock_irqsave(&schan->chan_lock, flags); |
646 | ops->halt_channel(schan); | 647 | ops->halt_channel(schan); |
648 | |||
649 | if (ops->get_partial && !list_empty(&schan->ld_queue)) { | ||
650 | /* Record partial transfer */ | ||
651 | struct shdma_desc *desc = list_first_entry(&schan->ld_queue, | ||
652 | struct shdma_desc, node); | ||
653 | desc->partial = ops->get_partial(schan, desc); | ||
654 | } | ||
655 | |||
647 | spin_unlock_irqrestore(&schan->chan_lock, flags); | 656 | spin_unlock_irqrestore(&schan->chan_lock, flags); |
648 | 657 | ||
649 | shdma_chan_ld_cleanup(schan, true); | 658 | shdma_chan_ld_cleanup(schan, true); |
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index 027c9be97654..f41bcc5267fd 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c | |||
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) | |||
381 | return true; | 381 | return true; |
382 | } | 382 | } |
383 | 383 | ||
384 | static size_t sh_dmae_get_partial(struct shdma_chan *schan, | ||
385 | struct shdma_desc *sdesc) | ||
386 | { | ||
387 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, | ||
388 | shdma_chan); | ||
389 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
390 | struct sh_dmae_desc, shdma_desc); | ||
391 | return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
392 | sh_chan->xmit_shift; | ||
393 | } | ||
394 | |||
384 | /* Called from error IRQ or NMI */ | 395 | /* Called from error IRQ or NMI */ |
385 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) | 396 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) |
386 | { | 397 | { |
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = { | |||
632 | .start_xfer = sh_dmae_start_xfer, | 643 | .start_xfer = sh_dmae_start_xfer, |
633 | .embedded_desc = sh_dmae_embedded_desc, | 644 | .embedded_desc = sh_dmae_embedded_desc, |
634 | .chan_irq = sh_dmae_chan_irq, | 645 | .chan_irq = sh_dmae_chan_irq, |
646 | .get_partial = sh_dmae_get_partial, | ||
635 | }; | 647 | }; |
636 | 648 | ||
637 | static int __devinit sh_dmae_probe(struct platform_device *pdev) | 649 | static int __devinit sh_dmae_probe(struct platform_device *pdev) |