author		Sinan Kaya <okaya@codeaurora.org>	2016-08-31 11:10:27 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-08-31 11:57:32 -0400
commit		8a31f8b5db65b860fd0d358dc27f6daf26074406 (patch)
tree		2464951e1e9b2eaa2930294abe5c2c2b685bb35c
parent		73fc45e3ce7838e6f47228dd51144c492931e8ad (diff)
dmaengine: qcom_hidma: release the descriptor before the callback
There is a race condition between the data transfer callback and the descriptor free code: the callback routine may decide to clear resources even though the descriptor has not yet been freed. Instead of calling the callback first and then releasing the memory, change the order: return the descriptor to the free pool first, then call the user-provided callback.

Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/qcom/hidma.c	18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 1197fbf8f30e..b8493bafdb3f 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
 	struct dma_async_tx_descriptor *desc;
 	dma_cookie_t last_cookie;
 	struct hidma_desc *mdesc;
+	struct hidma_desc *next;
 	unsigned long irqflags;
 	struct list_head list;
 
@@ -122,8 +123,9 @@ static void hidma_process_completed(struct hidma_chan *mchan)
 	spin_unlock_irqrestore(&mchan->lock, irqflags);
 
 	/* Execute callbacks and run dependencies */
-	list_for_each_entry(mdesc, &list, node) {
+	list_for_each_entry_safe(mdesc, next, &list, node) {
 		enum dma_status llstat;
+		struct dmaengine_desc_callback cb;
 
 		desc = &mdesc->desc;
 
@@ -132,18 +134,18 @@ static void hidma_process_completed(struct hidma_chan *mchan)
 		spin_unlock_irqrestore(&mchan->lock, irqflags);
 
 		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
-		if (llstat == DMA_COMPLETE)
-			dmaengine_desc_get_callback_invoke(desc, NULL);
+		dmaengine_desc_get_callback(desc, &cb);
 
 		last_cookie = desc->cookie;
 		dma_run_dependencies(desc);
-	}
 
-	/* Free descriptors */
-	spin_lock_irqsave(&mchan->lock, irqflags);
-	list_splice_tail_init(&list, &mchan->free);
-	spin_unlock_irqrestore(&mchan->lock, irqflags);
+		spin_lock_irqsave(&mchan->lock, irqflags);
+		list_move(&mdesc->node, &mchan->free);
+		spin_unlock_irqrestore(&mchan->lock, irqflags);
 
+		if (llstat == DMA_COMPLETE)
+			dmaengine_desc_callback_invoke(&cb, NULL);
+	}
 }
 
 /*
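The patch enforces a specific ordering: snapshot the callback, return the descriptor to the free pool under the channel lock, then invoke the callback outside the lock. Below is a minimal userspace sketch of that ordering, using pthreads; struct desc, desc_complete(), pool_lock, and free_list are illustrative assumptions for the sketch, not the driver's actual structures.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;		/* free-list linkage */
	void (*callback)(void *param);	/* user-provided completion hook */
	void *param;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *free_list;

static void desc_complete(struct desc *d)
{
	/* Snapshot the callback first: the descriptor may be reused the
	 * moment it is back on the free list.
	 */
	void (*cb)(void *param) = d->callback;
	void *param = d->param;

	/* Release the descriptor BEFORE the callback runs. */
	pthread_mutex_lock(&pool_lock);
	d->next = free_list;
	free_list = d;
	pthread_mutex_unlock(&pool_lock);

	/* Invoke the user callback last, outside the lock. */
	if (cb)
		cb(param);
}

static void done(void *param)
{
	printf("transfer %s complete\n", (const char *)param);
}

int main(void)
{
	struct desc *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->callback = done;
	d->param = "demo";
	desc_complete(d);	/* "demo" prints after d is recycled */
	free(free_list);
	return 0;
}

As in the patch, the callback runs only after the descriptor is back on the free list and the lock has been dropped, so a callback that recycles resources, or submits new work that takes the same lock, cannot race against a descriptor that is still considered in flight.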