author    Zhang Wei <wei.zhang@freescale.com>    2008-03-18 21:45:00 -0400
committer Dan Williams <dan.j.williams@intel.com>    2008-03-18 20:00:59 -0400
commit    f79abb627f033c85a6088231f20c85bc4a9bd757 (patch)
tree      151538a3a33026ae516606240a13404d1f1e7037 /drivers/dma/fsldma.c
parent    f920bb6f5fe21047e669381fe4dd346f6a9d3562 (diff)
fsldma: Fix the DMA halt when using DMA_INTERRUPT async_tx transfer.
The DMA_INTERRUPT async_tx is a NULL transfer, so the BCR (byte count register) is 0. When a transfer is started with a byte count of zero, the DMA controller triggers a PE (programming error) event and halts, rather than raising a normal end-of-transfer interrupt. I add special handling for the PE event and a DMA_INTERRUPT async_tx test.

Signed-off-by: Zhang Wei <wei.zhang@freescale.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
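For context, below is a minimal client-side sketch of how such a NULL transfer is issued through the dmaengine API of this kernel generation. It is an illustration, not part of the patch: the helper name is hypothetical, and the channel pointer is assumed to have been obtained beforehand through the dmaengine client API.

#include <linux/dmaengine.h>

/* Hypothetical helper: request and submit a DMA_INTERRUPT descriptor.
 * This descriptor moves no data, so on fsldma the BCR (byte count
 * register) stays 0 and the controller raises a PE event rather than
 * a normal end-of-transfer interrupt; that is exactly the case this
 * patch handles in fsl_dma_chan_do_interrupt().
 */
static dma_cookie_t issue_null_interrupt(struct dma_chan *chan)
{
	struct dma_async_tx_descriptor *tx;

	/* Ask the driver for an interrupt-only (NULL transfer) descriptor. */
	tx = chan->device->device_prep_dma_interrupt(chan);
	if (!tx)
		return -ENOMEM;

	/* Queue it; the PE handling added by this patch completes its cookie. */
	return tx->tx_submit(tx);
}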
Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--  drivers/dma/fsldma.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ad2f938597e2..72692309398a 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -123,6 +123,11 @@ static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
 }
 
+static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+{
+	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+}
+
 static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 {
 	u32 sr = get_sr(fsl_chan);
@@ -426,6 +431,9 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
 	new->async_tx.cookie = -EBUSY;
 	new->async_tx.ack = 0;
 
+	/* Insert the link descriptor into the LD ring */
+	list_add_tail(&new->node, &new->async_tx.tx_list);
+
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
 
@@ -701,6 +709,23 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	if (stat & FSL_DMA_SR_TE)
 		dev_err(fsl_chan->dev, "Transfer Error!\n");
 
+	/* Programming Error
+	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
+	 * trigger a PE interrupt.
+	 */
+	if (stat & FSL_DMA_SR_PE) {
+		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
+		if (get_bcr(fsl_chan) == 0) {
+			/* The BCR register is 0, so this is a DMA_INTERRUPT
+			 * async_tx.  Update the completed cookie and continue
+			 * with the next uncompleted transfer.
+			 */
+			fsl_dma_update_completed_cookie(fsl_chan);
+			fsl_chan_xfer_ld_queue(fsl_chan);
+		}
+		stat &= ~FSL_DMA_SR_PE;
+	}
+
 	/* If the link descriptor segment transfer finishes,
 	 * we will recycle the used descriptor.
 	 */
@@ -841,6 +866,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
 	async_tx_ack(tx3);
 
+	/* Interrupt tx test */
+	tx1 = fsl_dma_prep_interrupt(chan);
+	async_tx_ack(tx1);
+	cookie = fsl_dma_tx_submit(tx1);
+
 	/* Test exchanging the prepared tx sort */
 	cookie = fsl_dma_tx_submit(tx3);
 	cookie = fsl_dma_tx_submit(tx2);