Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/edma.c | 34 +++++++++-------------------------
1 file changed, 9 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 473155d34d7b..30cbbde52364 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -64,6 +64,7 @@ struct edma_desc {
 	int				absync;
 	int				pset_nr;
 	int				processed;
+	u32				residue;
 	struct edmacc_param		pset[0];
 };
 
@@ -456,6 +457,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	}
 
 	edesc->pset_nr = sg_len;
+	edesc->residue = 0;
 
 	/* Allocate a PaRAM slot, if needed */
 	nslots = min_t(unsigned, MAX_NR_SG, sg_len);
@@ -491,6 +493,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		}
 
 		edesc->absync = ret;
+		edesc->residue += sg_dma_len(sg);
 
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
@@ -606,6 +609,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 
 	edesc->cyclic = 1;
 	edesc->pset_nr = nslots;
+	edesc->residue = buf_len;
 
 	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
 		__func__, echan->ch_num, nslots, period_len, buf_len);
@@ -700,6 +704,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 				vchan_cyclic_callback(&edesc->vdesc);
 			} else if (edesc->processed == edesc->pset_nr) {
 				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+				edesc->residue = 0;
 				edma_stop(echan->ch_num);
 				vchan_cookie_complete(&edesc->vdesc);
 				edma_execute(echan);
@@ -832,25 +837,6 @@ static void edma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
-static size_t edma_desc_size(struct edma_desc *edesc)
-{
-	int i;
-	size_t size;
-
-	if (edesc->absync)
-		for (size = i = 0; i < edesc->pset_nr; i++)
-			size += (edesc->pset[i].a_b_cnt & 0xffff) *
-				(edesc->pset[i].a_b_cnt >> 16) *
-				edesc->pset[i].ccnt;
-	else
-		size = (edesc->pset[0].a_b_cnt & 0xffff) *
-			(edesc->pset[0].a_b_cnt >> 16) +
-			(edesc->pset[0].a_b_cnt & 0xffff) *
-			(SZ_64K - 1) * edesc->pset[0].ccnt;
-
-	return size;
-}
-
 /* Check request completion status */
 static enum dma_status edma_tx_status(struct dma_chan *chan,
 				      dma_cookie_t cookie,
@@ -867,12 +853,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&echan->vchan.lock, flags);
 	vdesc = vchan_find_desc(&echan->vchan, cookie);
-	if (vdesc) {
-		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
-	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
-		struct edma_desc *edesc = echan->edesc;
-		txstate->residue = edma_desc_size(edesc);
-	}
+	if (vdesc)
+		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+	else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+		txstate->residue = echan->edesc->residue;
 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
 	return ret;
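
The residue tracked above is what a dmaengine client sees when it queries transfer status. A minimal client-side sketch (hypothetical helper name; assumes a slave channel and a cookie obtained from an earlier dmaengine_submit(), and a kernel where DMA_COMPLETE is the terminal status):

#include <linux/dmaengine.h>

/*
 * Hypothetical helper: report how many bytes of the transfer identified
 * by @cookie are still outstanding on @chan. The value comes from
 * txstate->residue, which edma_tx_status() now fills from the
 * per-descriptor residue field maintained by this patch.
 */
static size_t edma_client_pending_bytes(struct dma_chan *chan,
					dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	/* In flight or still queued: residue is the byte count left */
	return state.residue;
}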