about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorDaniel Mack <zonque@gmail.com>2014-02-17 06:29:06 -0500
committerVinod Koul <vinod.koul@intel.com>2014-05-07 03:03:40 -0400
commit1b38da264674d6a0fe26a63996b8f88b88c3da48 (patch)
treefaf02404cae3dbd64740fdb1998e17e4ce3518d1 /drivers/dma
parentc906a3ec458742c95850c0c1cde9e8b68df25c01 (diff)
dma: mmp_pdma: add support for residue reporting
A channel can accommodate more than one transaction, each consisting of multiple descriptors, the last of which has the DCMD_ENDIRQEN bit set. In order to report the channel's residue, we hence have to walk the list of running descriptors, look for those which match the cookie, and then try to find the descriptor which defines upper and lower boundaries that embrace the current transport pointer. Once it is found, walk forward until we find the descriptor that tells us about the end of a transaction via a set DCMD_ENDIRQEN bit.
Signed-off-by: Daniel Mack <zonque@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/mmp_pdma.c87
1 file changed, 84 insertions, 3 deletions
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 7affa533d2a8..a7b186d536b3 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -29,8 +29,8 @@
29#define DALGN 0x00a0 29#define DALGN 0x00a0
30#define DINT 0x00f0 30#define DINT 0x00f0
31#define DDADR 0x0200 31#define DDADR 0x0200
32#define DSADR 0x0204 32#define DSADR(n) (0x0204 + ((n) << 4))
33#define DTADR 0x0208 33#define DTADR(n) (0x0208 + ((n) << 4))
34#define DCMD 0x020c 34#define DCMD 0x020c
35 35
36#define DCSR_RUN BIT(31) /* Run Bit (read / write) */ 36#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -748,11 +748,92 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
748 return 0; 748 return 0;
749} 749}
750 750
751static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
752 dma_cookie_t cookie)
753{
754 struct mmp_pdma_desc_sw *sw;
755 u32 curr, residue = 0;
756 bool passed = false;
757 bool cyclic = chan->cyclic_first != NULL;
758
759 /*
760 * If the channel does not have a phy pointer anymore, it has already
761 * been completed. Therefore, its residue is 0.
762 */
763 if (!chan->phy)
764 return 0;
765
766 if (chan->dir == DMA_DEV_TO_MEM)
767 curr = readl(chan->phy->base + DTADR(chan->phy->idx));
768 else
769 curr = readl(chan->phy->base + DSADR(chan->phy->idx));
770
771 list_for_each_entry(sw, &chan->chain_running, node) {
772 u32 start, end, len;
773
774 if (chan->dir == DMA_DEV_TO_MEM)
775 start = sw->desc.dtadr;
776 else
777 start = sw->desc.dsadr;
778
779 len = sw->desc.dcmd & DCMD_LENGTH;
780 end = start + len;
781
782 /*
783 * 'passed' will be latched once we found the descriptor which
784 * lies inside the boundaries of the curr pointer. All
785 * descriptors that occur in the list _after_ we found that
786 * partially handled descriptor are still to be processed and
787 * are hence added to the residual bytes counter.
788 */
789
790 if (passed) {
791 residue += len;
792 } else if (curr >= start && curr <= end) {
793 residue += end - curr;
794 passed = true;
795 }
796
797 /*
798 * Descriptors that have the ENDIRQEN bit set mark the end of a
799 * transaction chain, and the cookie assigned with it has been
800 * returned previously from mmp_pdma_tx_submit().
801 *
802 * In case we have multiple transactions in the running chain,
803 * and the cookie does not match the one the user asked us
804 * about, reset the state variables and start over.
805 *
806 * This logic does not apply to cyclic transactions, where all
807 * descriptors have the ENDIRQEN bit set, and for which we
808 * can't have multiple transactions on one channel anyway.
809 */
810 if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
811 continue;
812
813 if (sw->async_tx.cookie == cookie) {
814 return residue;
815 } else {
816 residue = 0;
817 passed = false;
818 }
819 }
820
821 /* We should only get here in case of cyclic transactions */
822 return residue;
823}
824
751static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, 825static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
752 dma_cookie_t cookie, 826 dma_cookie_t cookie,
753 struct dma_tx_state *txstate) 827 struct dma_tx_state *txstate)
754{ 828{
755 return dma_cookie_status(dchan, cookie, txstate); 829 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
830 enum dma_status ret;
831
832 ret = dma_cookie_status(dchan, cookie, txstate);
833 if (likely(ret != DMA_ERROR))
834 dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
835
836 return ret;
756} 837}
757 838
758/** 839/**