path: root/drivers/dma/tegra20-apb-dma.c
author	Laxman Dewangan <ldewangan@nvidia.com>	2012-07-02 04:22:07 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-07-12 23:19:54 -0400
commit	4a46ba36e25dcff1d30eb1681135c3c10af71c16 (patch)
tree	8003546e61cd4337b5f27001bab7854cefcfe8bc /drivers/dma/tegra20-apb-dma.c
parent	46fb3f8ef5bde1325b1e58867a3a98dd746511d7 (diff)
dma: tegra: fix residual calculation for cyclic case
In cyclic mode, the DMA can transfer more bytes than the requested size. In that case, calculate the residual from the current position of the DMA transfer relative to the bytes requested, i.e. the bytes still required to transfer to reach the requested size from the current DMA position.

Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
Acked-by: Stephen Warren <swarren@wwwdotorg.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
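The arithmetic behind the fix can be sketched outside the driver. Below is a minimal, standalone C illustration (calc_residual() is a hypothetical helper, not part of the driver): in cyclic mode bytes_transferred keeps growing as the buffer wraps, so folding it back with a modulo gives the position within the current period, and the residual follows from that.

#include <stdio.h>

/*
 * Hypothetical helper mirroring the calculation this patch introduces:
 * bytes_transferred may exceed bytes_requested in cyclic mode, so fold
 * it back into one buffer period before computing the residual.
 */
static unsigned int calc_residual(unsigned int bytes_requested,
				  unsigned int bytes_transferred)
{
	return bytes_requested -
		(bytes_transferred % bytes_requested);
}

int main(void)
{
	/* One-shot-style case: 1024 of 4096 bytes done, residual is 3072. */
	printf("%u\n", calc_residual(4096, 1024));
	/* Cyclic case: buffer wrapped once plus 1024 bytes, residual is still 3072. */
	printf("%u\n", calc_residual(4096, 4096 + 1024));
	return 0;
}

Without the modulo, the second case would underflow the unsigned subtraction once the transfer count passes the requested size, which is exactly what the patch avoids.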
Diffstat (limited to 'drivers/dma/tegra20-apb-dma.c')
-rw-r--r--	drivers/dma/tegra20-apb-dma.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index c0836a7a8631..8e0ea2438ce1 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -731,6 +731,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	struct tegra_dma_sg_req *sg_req;
 	enum dma_status ret;
 	unsigned long flags;
+	unsigned int residual;
 
 	spin_lock_irqsave(&tdc->lock, flags);
 
@@ -744,9 +745,10 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	/* Check on wait_ack desc status */
 	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
 		if (dma_desc->txd.cookie == cookie) {
-			dma_set_residue(txstate,
-				dma_desc->bytes_requested -
-				dma_desc->bytes_transferred);
+			residual = dma_desc->bytes_requested -
+				(dma_desc->bytes_transferred %
+					dma_desc->bytes_requested);
+			dma_set_residue(txstate, residual);
 			ret = dma_desc->dma_status;
 			spin_unlock_irqrestore(&tdc->lock, flags);
 			return ret;
@@ -757,9 +759,10 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
 		dma_desc = sg_req->dma_desc;
 		if (dma_desc->txd.cookie == cookie) {
-			dma_set_residue(txstate,
-				dma_desc->bytes_requested -
-				dma_desc->bytes_transferred);
+			residual = dma_desc->bytes_requested -
+				(dma_desc->bytes_transferred %
+					dma_desc->bytes_requested);
+			dma_set_residue(txstate, residual);
 			ret = dma_desc->dma_status;
 			spin_unlock_irqrestore(&tdc->lock, flags);
 			return ret;