author    Rameshwar Prasad Sahu <rsahu@apm.com>    2015-08-21 05:03:34 -0400
committer Vinod Koul <vinod.koul@intel.com>        2015-08-21 06:18:37 -0400
commit    005ce70b9448ed86c9a12e6504f1f9896a826e3d
tree      300d80bfcbd221132ed858e2a7ed66a4e4ee9bd5 /drivers/dma/xgene-dma.c
parent    64f1d0ffbaaccf2ddaf02d3ebf67bf9044cb4db4
dmaengine: xgene-dma: Fix the lock to allow client for further submission of requests
This patch fixes the cleanup routine so that a client can submit further requests from its completion callback: the channel lock is now released before the client's callback function is called.

Signed-off-by: Rameshwar Prasad Sahu <rsahu@apm.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
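The pattern behind the fix can be shown outside the driver. Below is a minimal user-space C sketch, not the driver code itself: the names (struct chan, submit(), cleanup_descriptors(), client_cb()) are illustrative only, a pthread mutex stands in for the channel spinlock, and list handling/ordering is simplified. Completed descriptors are detached to a private list while the lock is held, the lock is dropped, and only then are the client callbacks run, so a callback is free to submit new work that takes the same lock without deadlocking.

    /* Build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct desc {
            void (*callback)(void *param);
            void *param;
            struct desc *next;
    };

    struct chan {
            pthread_mutex_t lock;
            struct desc *running;           /* descriptors awaiting completion */
    };

    static void submit(struct chan *c, void (*cb)(void *), void *param)
    {
            struct desc *d = malloc(sizeof(*d));

            if (!d)
                    return;
            d->callback = cb;
            d->param = param;
            pthread_mutex_lock(&c->lock);   /* safe: callers never hold the lock here */
            d->next = c->running;
            c->running = d;
            pthread_mutex_unlock(&c->lock);
    }

    static void cleanup_descriptors(struct chan *c)
    {
            struct desc *completed, *d;

            pthread_mutex_lock(&c->lock);
            completed = c->running;         /* detach completed work under the lock */
            c->running = NULL;
            pthread_mutex_unlock(&c->lock);

            while ((d = completed)) {       /* callbacks run with the lock released */
                    completed = d->next;
                    d->callback(d->param);
                    free(d);
            }
    }

    static void client_cb(void *param)
    {
            /* A client callback may call submit() again from here. */
            printf("completed %s\n", (const char *)param);
    }

    int main(void)
    {
            struct chan c = { .lock = PTHREAD_MUTEX_INITIALIZER, .running = NULL };

            submit(&c, client_cb, "req-1");
            cleanup_descriptors(&c);
            return 0;
    }

The actual driver change below follows the same shape: xgene_dma_cleanup_descriptors() moves finished descriptors onto a local ld_completed list under chan->lock, unlocks, and only then runs the tx-complete actions.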
Diffstat (limited to 'drivers/dma/xgene-dma.c')
-rw-r--r--    drivers/dma/xgene-dma.c    33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index d1c8809a0810..0b82bc00b83a 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -763,12 +763,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
         struct xgene_dma_ring *ring = &chan->rx_ring;
         struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
         struct xgene_dma_desc_hw *desc_hw;
+        struct list_head ld_completed;
         u8 status;
 
+        INIT_LIST_HEAD(&ld_completed);
+
+        spin_lock_bh(&chan->lock);
+
         /* Clean already completed and acked descriptors */
         xgene_dma_clean_completed_descriptor(chan);
 
-        /* Run the callback for each descriptor, in order */
+        /* Move all completed descriptors to ld completed queue, in order */
         list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
                 /* Get subsequent hw descriptor from DMA rx ring */
                 desc_hw = &ring->desc_hw[ring->head];
@@ -811,15 +816,17 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                 /* Mark this hw descriptor as processed */
                 desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-                xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-                xgene_dma_clean_running_descriptor(chan, desc_sw);
-
                 /*
                  * Decrement the pending transaction count
                  * as we have processed one
                  */
                 chan->pending--;
+
+                /*
+                 * Delete this node from ld running queue and append it to
+                 * ld completed queue for further processing
+                 */
+                list_move_tail(&desc_sw->node, &ld_completed);
         }
 
         /*
@@ -828,6 +835,14 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
          * ahead and free the descriptors below.
          */
         xgene_chan_xfer_ld_pending(chan);
+
+        spin_unlock_bh(&chan->lock);
+
+        /* Run the callback for each descriptor, in order */
+        list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+                xgene_dma_run_tx_complete_actions(chan, desc_sw);
+                xgene_dma_clean_running_descriptor(chan, desc_sw);
+        }
 }
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -876,11 +891,11 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
         if (!chan->desc_pool)
                 return;
 
-        spin_lock_bh(&chan->lock);
-
         /* Process all running descriptor */
         xgene_dma_cleanup_descriptors(chan);
 
+        spin_lock_bh(&chan->lock);
+
         /* Clean all link descriptor queues */
         xgene_dma_free_desc_list(chan, &chan->ld_pending);
         xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1200,15 +1215,11 @@ static void xgene_dma_tasklet_cb(unsigned long data)
 {
         struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-        spin_lock_bh(&chan->lock);
-
         /* Run all cleanup for descriptors which have been completed */
         xgene_dma_cleanup_descriptors(chan);
 
         /* Re-enable DMA channel IRQ */
         enable_irq(chan->rx_irq);
-
-        spin_unlock_bh(&chan->lock);
 }
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)