author     Ira Snyder <iws@ovro.caltech.edu>    2009-05-15 17:27:16 -0400
committer  Li Yang <leoli@freescale.com>        2009-05-22 04:51:28 -0400
commit     bcfb7465c03a8c62c89da374677df56f6b894d44 (patch)
tree       70194cbfab5fb172bcbf7b8443082a721589f04c /drivers/dma/fsldma.c
parent     138ef0185177a6d221d24b6aa8f12d867fbbef90 (diff)
fsldma: fix infinite loop on multi-descriptor DMA chain completion
When creating a DMA transaction with multiple descriptors, the async_tx
cookie is set to 0 for each descriptor in the chain, excluding the last
descriptor, whose cookie is set to -EBUSY.
When fsl_dma_tx_submit() runs, it assigns a cookie only to the first
descriptor. All of the remaining descriptors keep their original values,
including the last descriptor, which stays at -EBUSY.
After the DMA completes, the driver updates the last completed cookie to
-EBUSY, which is an error code rather than a valid cookie. This causes
dma_async_is_complete() to always return DMA_IN_PROGRESS (a sketch of that
check follows the sign-offs below).
The fsldma driver therefore never cleans up its queue of link descriptors,
and it re-runs the DMA transaction on the hardware each time it receives
the End-of-Chain interrupt, looping forever.
With this patch, fsl_dma_tx_submit() is changed to assign a cookie to every
descriptor in the chain. The rest of the code then works without problems.
Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Li Yang <leoli@freescale.com>
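
For context on why a negative cookie wedges completion polling: the dmaengine
core decides whether a cookie has completed by testing whether it falls inside
the (last_complete, last_used] window, wrapping around when the cookie counter
overflows. The standalone sketch below imitates that window test to show the
effect of recording -EBUSY as the last completed cookie; check_done() is a
made-up helper for this illustration, not the kernel's dma_async_is_complete(),
though it follows the same idea.

/*
 * Illustration only: a userspace imitation of the cookie-window test
 * performed when polling for DMA completion. check_done() is a made-up
 * name for this sketch, not a kernel symbol.
 */
#include <errno.h>
#include <stdio.h>

/* Return 1 if 'cookie' lies in the completed window, 0 otherwise. */
static int check_done(int cookie, int last_complete, int last_used)
{
	if (last_complete <= last_used)
		return cookie <= last_complete || cookie > last_used;
	/* cookie counter wrapped past the signed maximum */
	return cookie <= last_complete && cookie > last_used;
}

int main(void)
{
	/* Healthy case: cookies 1..5 issued, cookie 3 last completed. */
	printf("cookie 2 done? %d\n", check_done(2, 3, 5));      /* prints 1 */

	/*
	 * Buggy case: last completed cookie recorded as -EBUSY. No valid
	 * cookie ever satisfies the window test, so callers see
	 * "in progress" forever.
	 */
	printf("cookie 2 done? %d\n", check_done(2, -EBUSY, 5)); /* prints 0 */
	return 0;
}

Once every descriptor carries a real, positive cookie, the last-completed value
written back at End-of-Chain time falls inside the expected window and the
driver can reap its finished link descriptors.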
Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--   drivers/dma/fsldma.c   21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index a4151c3bb78b..7313a1ae5f83 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -313,8 +313,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -322,14 +322,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	desc->async_tx.cookie = cookie;
-	fsl_chan->common.cookie = desc->async_tx.cookie;
+	list_for_each_entry(desc, &tx->tx_list, node) {
+		cookie++;
+		if (cookie < 0)
+			cookie = 1;
 
-	append_ld_queue(fsl_chan, desc);
-	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+		desc->async_tx.cookie = cookie;
+	}
+
+	fsl_chan->common.cookie = cookie;
+	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 