author    Peter Ujfalusi <peter.ujfalusi@ti.com>    2017-11-14 09:32:04 -0500
committer Vinod Koul <vinod.koul@intel.com>         2017-12-04 12:03:51 -0500
commit    1c7f072d94e8b697fd9b70cdb268622a18faf522 (patch)
tree      706d68a16f1c1b17b99fde45d830b730e33e4932 /drivers
parent    6af149d2b1422e0e873d8558274713e6f63142c2 (diff)
dmaengine: virt-dma: Support for race free transfer termination
Even with the introduced vchan_synchronize() we can face a race when terminating a cyclic transfer.

If terminate_all is called after the interrupt handler has called vchan_cyclic_callback(), but before the vchan_complete tasklet runs, vc->cyclic is still set to the cyclic descriptor while the descriptor itself has already been freed in the driver's terminate_all() callback. When vchan_complete() is executed it will try to fetch the vc->cyclic vdesc, but the pointer now points to freed memory, leading to a (hard to reproduce) kernel crash.

In order to fix this, drivers should:
- call vchan_terminate_vdesc() from their terminate_all callback instead of calling their free_desc function to free up the descriptor
- implement the device_synchronize callback and call vchan_synchronize()

This way we can make sure that the descriptor is only freed after the vchan_complete tasklet has executed in a safe manner (see the sketch below).

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
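For illustration only (not part of this patch), a vchan-based driver adopting the new helpers might look roughly like the sketch below. All foo_dma_* names, the struct layout and the c->desc bookkeeping are hypothetical assumptions; vchan_terminate_vdesc(), vchan_synchronize(), vchan_get_all_descriptors() and vchan_dma_desc_free_list() are the existing virt-dma helpers.

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include "virt-dma.h"

/* Hypothetical driver types, for illustration only */
struct foo_dma_desc {
        struct virt_dma_desc vd;
        /* hardware specific fields ... */
};

struct foo_dma_chan {
        struct virt_dma_chan vc;
        struct foo_dma_desc *desc;      /* descriptor currently on hardware */
};

static int foo_dma_terminate_all(struct dma_chan *chan)
{
        struct foo_dma_chan *c = container_of(to_virt_chan(chan),
                                              struct foo_dma_chan, vc);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        if (c->desc) {
                /* stop the hardware here, then hand the in-flight descriptor
                 * over to virt-dma instead of freeing it directly */
                vchan_terminate_vdesc(&c->desc->vd);
                c->desc = NULL;
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static void foo_dma_synchronize(struct dma_chan *chan)
{
        struct foo_dma_chan *c = container_of(to_virt_chan(chan),
                                              struct foo_dma_chan, vc);

        /* kills the completion tasklet and frees the descriptor that was
         * handed over by vchan_terminate_vdesc() */
        vchan_synchronize(&c->vc);
}

A driver wiring this up would also set device_terminate_all = foo_dma_terminate_all and device_synchronize = foo_dma_synchronize on its struct dma_device before registering it.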
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/virt-dma.h | 30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 2edb05505102..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
         struct list_head desc_completed;
 
         struct virt_dma_desc *cyclic;
+        struct virt_dma_desc *vd_terminated;
 };
 
 static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -130,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
 }
 
 /**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+        /* free up stuck descriptor */
+        if (vc->vd_terminated)
+                vchan_vdesc_fini(vc->vd_terminated);
+
+        vc->vd_terminated = vd;
+        if (vc->cyclic == vd)
+                vc->cyclic = NULL;
+}
+
+/**
  * vchan_next_desc - peek at the next descriptor to be processed
  * @vc: virtual channel to obtain descriptor from
  *
@@ -182,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
  * Makes sure that all scheduled or active callbacks have finished running. For
  * proper operation the caller has to ensure that no new callbacks are scheduled
  * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
  */
 static inline void vchan_synchronize(struct virt_dma_chan *vc)
 {
+        unsigned long flags;
+
         tasklet_kill(&vc->task);
+
+        spin_lock_irqsave(&vc->lock, flags);
+        if (vc->vd_terminated) {
+                vchan_vdesc_fini(vc->vd_terminated);
+                vc->vd_terminated = NULL;
+        }
+        spin_unlock_irqrestore(&vc->lock, flags);
 }
 
 #endif