author     Ira Snyder <iws@ovro.caltech.edu>   2009-05-19 18:42:13 -0400
committer  Li Yang <leoli@freescale.com>       2009-05-22 04:49:17 -0400
commit     138ef0185177a6d221d24b6aa8f12d867fbbef90
tree       4a48ab1154b7533dcaac831ff2dad0e10865efa1
parent     f47edc6dab11801c2e97088ba7bbce042ded867c
fsldma: fix "DMA halt timeout!" errors
When the DMA controller is used from multiple threads at the same time, many
"DMA halt timeout!" errors can be printed to the kernel log.
This is caused by a race between fsl_dma_memcpy_issue_pending() and the
interrupt handler, fsl_dma_chan_do_interrupt(). Both call
fsl_chan_xfer_ld_queue(), which does not protect its calls to dma_halt()
and dma_start() against concurrent execution.
Fix this by moving the existing spinlock so that it covers the dma_halt()
and dma_start() calls. Testing shows that the "DMA halt timeout!" errors
disappear.
Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Li Yang <leoli@freescale.com>
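
In outline, the patched function now takes the lock before the idle check and
releases it on a common exit path. The sketch below is a condensed paraphrase
of the function after this patch, not the driver source verbatim; the
descriptor-queue walk and register programming are elided:

static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
        unsigned long flags;

        /* Take the lock before the idle check, so the interrupt handler
         * and issue_pending cannot race between dma_is_idle() and
         * dma_halt()/dma_start(). */
        spin_lock_irqsave(&fsl_chan->desc_lock, flags);

        if (!dma_is_idle(fsl_chan))
                goto out_unlock;

        dma_halt(fsl_chan);

        /* ... find the first untransferred descriptor on
         * fsl_chan->ld_queue, program CDAR/NDAR and restart the
         * channel (elided) ... */

out_unlock:
        spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

The irqsave variant is kept because the same path also runs from
fsl_dma_chan_do_interrupt(), so the lock can be taken from hardirq context.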
 drivers/dma/fsldma.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 391b1bd7098b..a4151c3bb78b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -598,15 +598,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	dma_addr_t next_dest_addr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
 	if (!dma_is_idle(fsl_chan))
-		return;
+		goto out_unlock;
 
 	dma_halt(fsl_chan);
 
 	/* If there are some link descriptors
 	 * not transfered in queue. We need to start it.
 	 */
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	/* Find the first un-transfer desciptor */
 	for (ld_node = fsl_chan->ld_queue.next;
@@ -617,8 +618,6 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 		 fsl_chan->common.cookie) == DMA_SUCCESS);
 	     ld_node = ld_node->next);
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
@@ -630,6 +629,9 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 		set_cdar(fsl_chan, 0);
 		set_ndar(fsl_chan, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 }
 
 /**