author	Linus Walleij <linus.walleij@linaro.org>	2011-04-18 20:31:32 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2011-05-13 10:10:15 -0400
commit	a7c57cf7d4327c41510f8cbf45b1b970e02c34f8 (patch)
tree	2f1b7ae4940d9540d966f8a287a7337d14cb7c76 /drivers/dma/dw_dmac.c
parent	69cea5a00d3135677939fce1fefe54ed522055a0 (diff)
dmaengine/dw_dmac: implement pause and resume in dwc_control
Some peripherals, like amba-pl011, need pause to be implemented in the DMA controller drivers. This also makes dwc_tx_status() return the correct status when the channel is paused.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
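For illustration only (not part of this patch), a minimal sketch of how a dmaengine client of this kernel generation could drive the new commands through the channel's device_control() callback and observe the DMA_PAUSED state. The helper name example_pause_resume() and the assumption that a slave transfer is already in flight on "chan" are made up for the example:

	#include <linux/printk.h>
	#include <linux/dmaengine.h>

	/* Hypothetical client-side helper, not taken from the patch. */
	static void example_pause_resume(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		/* Suspend the channel; dw_dmac also waits for its FIFO to drain. */
		chan->device->device_control(chan, DMA_PAUSE, 0);

		/* With this patch, dwc_tx_status() reports DMA_PAUSED while suspended. */
		if (chan->device->device_tx_status(chan, cookie, &state) == DMA_PAUSED)
			pr_info("dw_dmac channel is paused\n");

		/* Clear the suspend bit again and let the transfer continue. */
		chan->device->device_control(chan, DMA_RESUME, 0);
	}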
Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--	drivers/dma/dw_dmac.c	59
1 file changed, 39 insertions(+), 20 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 442b98b81e7c..eec675bf4f95 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -862,34 +862,50 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
+	u32			cfglo;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_irqsave(&dwc->lock, flags);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+			cpu_relax();
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
+		dwc->paused = true;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
 
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* active_list entries will end up before queued entries */
-	list_splice_init(&dwc->queue, &list);
-	list_splice_init(&dwc->active_list, &list);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+		dwc->paused = false;
 
-	spin_unlock_irqrestore(&dwc->lock, flags);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc, false);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		dwc->paused = false;
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else
+		return -ENXIO;
 
 	return 0;
 }
@@ -923,6 +939,9 @@ dwc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
+	if (dwc->paused)
+		return DMA_PAUSED;
+
 	return ret;
 }
 