about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/dma/fsldma.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--  drivers/dma/fsldma.c  |  131
1 file changed, 95 insertions(+), 36 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6e9ad6edc4a..526579df603 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -83,6 +83,11 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
+static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
+{
+	return DMA_TO_CPU(chan, desc->hw.count, 32);
+}
+
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -93,6 +98,16 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
+static dma_addr_t get_desc_src(struct fsldma_chan *chan,
+			       struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
+
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -103,6 +118,16 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
+static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
+			       struct fsl_desc_sw *desc)
+{
+	u64 snoop_bits;
+
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
+}
+
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -806,6 +831,57 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 }
 
 /**
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
+ */
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+				      struct fsl_desc_sw *desc)
+{
+	struct dma_async_tx_descriptor *txd = &desc->async_tx;
+	struct device *dev = chan->common.device->dev;
+	dma_addr_t src = get_desc_src(chan, desc);
+	dma_addr_t dst = get_desc_dst(chan, desc);
+	u32 len = get_desc_cnt(chan, desc);
+
+	/* Run the link descriptor callback function */
+	if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+		chan_dbg(chan, "LD %p callback\n", desc);
+#endif
+		txd->callback(txd->callback_param);
+	}
+
+	/* Run any dependencies */
+	dma_run_dependencies(txd);
+
+	/* Unmap the dst buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+	}
+
+	/* Unmap the src buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
+	}
+
+#ifdef FSL_DMA_LD_DEBUG
+	chan_dbg(chan, "LD %p free\n", desc);
+#endif
+	dma_pool_free(chan->desc_pool, desc, txd->phys);
+}
+
+/**
  * fsl_chan_ld_cleanup - Clean up link descriptors
  * @chan : Freescale DMA channel
  *
@@ -818,56 +894,39 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
 static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc, *_desc;
+	LIST_HEAD(ld_cleanup);
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	/* if the ld_running list is empty, there is nothing to do */
-	if (list_empty(&chan->ld_running)) {
-		chan_dbg(chan, "no descriptors to cleanup\n");
-		goto out_unlock;
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->ld_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_fsl_desc(chan->ld_running.prev);
+		cookie = desc->async_tx.cookie;
+
+		chan->completed_cookie = cookie;
+		chan_dbg(chan, "completed cookie=%d\n", cookie);
 	}
 
 	/*
-	 * Get the last descriptor, update the cookie to it
-	 *
-	 * This is done before callbacks run so that clients can check the
-	 * status of their DMA transfer inside the callback.
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
 	 */
-	desc = to_fsl_desc(chan->ld_running.prev);
-	chan->completed_cookie = desc->async_tx.cookie;
-	chan_dbg(chan, "completed_cookie = %d\n", chan->completed_cookie);
+	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
 	/* Run the callback for each descriptor, in order */
-	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
 
-		/* Remove from the list of running transactions */
+		/* Remove from the list of transactions */
 		list_del(&desc->node);
 
-		/* Run the link descriptor callback function */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
-			spin_unlock_irqrestore(&chan->desc_lock, flags);
-#ifdef FSL_DMA_LD_DEBUG
-			chan_dbg(chan, "LD %p callback\n", desc);
-#endif
-			callback(callback_param);
-			spin_lock_irqsave(&chan->desc_lock, flags);
-		}
-
-		/* Run any dependencies, then free the descriptor */
-		dma_run_dependencies(&desc->async_tx);
-#ifdef FSL_DMA_LD_DEBUG
-		chan_dbg(chan, "LD %p free\n", desc);
-#endif
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+		/* Run all cleanup for this descriptor */
+		fsldma_cleanup_descriptor(chan, desc);
 	}
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**