author    Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2012-07-30 15:28:27 -0400
committer Paul Mundt <lethal@linux-sh.org>               2012-08-01 00:48:52 -0400
commit    4f46f8ac80416b0e8fd3aba6a0d842205fb29140
tree      1913a661d292e74ea13e2e1af38b21ef6e5fb1bc
parent    ac694dbdbc403c00e2c14d10bc7b8412cc378259
dmaengine: shdma: restore partial transfer calculation
The recent shdma driver split has mistakenly removed support for partial
DMA transfer size calculation on forced termination. This patch restores
it.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Vinod Koul <vinod.koul@linux.intel.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
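For context, a minimal sketch of the client-side scenario this fixes, using
the slave-DMA API of that era (chan, buf_dma and BUF_LEN are illustrative
names, not from this patch; error handling is omitted):

    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = dmaengine_prep_slave_single(chan, buf_dma, BUF_LEN,
                                     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    cookie = dmaengine_submit(tx);
    dma_async_issue_pending(chan);

    /* ... the peripheral stalls, so the transfer is given up on ... */
    dmaengine_terminate_all(chan);

    /* With this patch, the shdma core's DMA_TERMINATE_ALL handling
     * records how many bytes the hardware actually moved in the
     * in-flight descriptor's new ->partial field before cleanup runs. */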
 drivers/dma/sh/shdma-base.c |  9 +++++++++
 drivers/dma/sh/shdma.c      | 12 ++++++++++++
 include/linux/shdma-base.h  |  2 ++
 3 files changed, 23 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 27f5c781fd73..f4cd946d259d 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
 	new->direction = direction;
+	new->partial = 0;
 
 	*len -= copy_size;
 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&schan->chan_lock, flags);
 		ops->halt_channel(schan);
+
+		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+			/* Record partial transfer */
+			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+						struct shdma_desc, node);
+			desc->partial = ops->get_partial(schan, desc);
+		}
+
 		spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 		shdma_chan_ld_cleanup(schan, true);
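The DMA_TERMINATE_ALL case above is reached through the generic dmaengine
control hook of that era; roughly:

    /* Client call chain (v3.5-era dmaengine, sketched): */
    dmaengine_terminate_all(chan);
      /* -> chan->device->device_control(chan, DMA_TERMINATE_ALL, 0) */
        /* -> shdma_control(chan, DMA_TERMINATE_ALL, 0), shown above  */

Note the ordering: the partial count is captured under chan_lock, after
halt_channel() has stopped the hardware (so the count register is stable)
and before shdma_chan_ld_cleanup() recycles the descriptors.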
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 027c9be97654..f41bcc5267fd 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 	return true;
 }
 
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+						    shdma_chan);
+	struct sh_dmae_desc *sh_desc = container_of(sdesc,
+					struct sh_dmae_desc, shdma_desc);
+	return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+		sh_chan->xmit_shift;
+}
+
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 	.start_xfer = sh_dmae_start_xfer,
 	.embedded_desc = sh_dmae_embedded_desc,
 	.chan_irq = sh_dmae_chan_irq,
+	.get_partial = sh_dmae_get_partial,
 };
 
 static int __devinit sh_dmae_probe(struct platform_device *pdev)
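sh_dmae_get_partial() derives the byte count from the channel's transfer
count register. A worked example, under the assumption that both
sh_desc->hw.tcr and the TCR register hold the count in transfer units of
(1 << xmit_shift) bytes:

    /* Illustrative numbers only, not from the patch:
     *   hw.tcr               = 256  programmed transfer count
     *   TCR register at halt =  96  units still pending
     *   xmit_shift           =   2  i.e. 4-byte transfer units
     *
     * partial = (256 - 96) << 2 = 640 bytes already moved
     */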
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 93f9821554b6..a3728bf66f0e 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -50,6 +50,7 @@ struct shdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
 	enum dma_transfer_direction direction;
+	size_t partial;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
 	bool (*chan_irq)(struct shdma_chan *, int);
+	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 };
 
 struct shdma_dev {
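Since shdma_desc is defined in this public header, code that knows the
shdma descriptor layout can map a generic descriptor back to the recorded
value. A hypothetical sketch (bytes_done() is an assumed helper, not part
of this header; the value must be read before the core's cleanup recycles
the descriptor):

    static size_t bytes_done(struct dma_async_tx_descriptor *tx)
    {
    	struct shdma_desc *sdesc =
    		container_of(tx, struct shdma_desc, async_tx);
    
    	return sdesc->partial;	/* bytes moved before the forced halt */
    }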