aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorRobert Jarzmik <robert.jarzmik@free.fr>2016-03-28 17:32:24 -0400
committerVinod Koul <vinod.koul@intel.com>2016-04-25 23:33:57 -0400
commite093bf60ca498a03b4ea8f5d6cf1d520a68e5d2e (patch)
treef65c6272c314bb51dab1d3972533435739776722 /drivers/dma
parentf55532a0c0b8bb6148f4e07853b876ef73bc69ca (diff)
dmaengine: pxa: handle bus errors
In the current state, upon bus error the driver will spin endlessly, relaunching the last tx, which will fail again and again : - a bus error happens - pxad_chan_handler() is called - as PXA_DCSR_STOPSTATE is true, the last non-terminated transaction is launched, which is the one triggering the bus error, as it didn't terminate - moreover, the STOP interrupt fires anew, as the STOPIRQEN is still active Break this logic by stopping the automatic relaunch of a dma channel upon a bus error, even if there are still pending issued requests on it. As dma_cookie_status() seems unable to return DMA_ERROR in its current form, ie. there seems no way to mark a DMA_ERROR on a per-async-tx basis, it is chosen in this patch to remember on the channel which transaction failed, and report it in pxad_tx_status(). It's a bit misleading because if T1, T2, T3 and T4 were queued, and T1 was completed while T2 causes a bus error, the status of T3 and T4 will be reported as DMA_IN_PROGRESS, while the channel is actually stopped. Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/pxa_dma.c14
1 files changed, 13 insertions, 1 deletions
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 77c1c44009d8..6d17dfd67881 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -117,6 +117,7 @@ struct pxad_chan {
117 /* protected by vc->lock */ 117 /* protected by vc->lock */
118 struct pxad_phy *phy; 118 struct pxad_phy *phy;
119 struct dma_pool *desc_pool; /* Descriptors pool */ 119 struct dma_pool *desc_pool; /* Descriptors pool */
120 dma_cookie_t bus_error;
120}; 121};
121 122
122struct pxad_device { 123struct pxad_device {
@@ -563,6 +564,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
563 return; 564 return;
564 } 565 }
565 } 566 }
567 chan->bus_error = 0;
566 568
567 /* 569 /*
568 * Program the descriptor's address into the DMA controller, 570 * Program the descriptor's address into the DMA controller,
@@ -666,6 +668,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
666 struct virt_dma_desc *vd, *tmp; 668 struct virt_dma_desc *vd, *tmp;
667 unsigned int dcsr; 669 unsigned int dcsr;
668 unsigned long flags; 670 unsigned long flags;
671 dma_cookie_t last_started = 0;
669 672
670 BUG_ON(!chan); 673 BUG_ON(!chan);
671 674
@@ -678,6 +681,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
678 dev_dbg(&chan->vc.chan.dev->device, 681 dev_dbg(&chan->vc.chan.dev->device,
679 "%s(): checking txd %p[%x]: completed=%d\n", 682 "%s(): checking txd %p[%x]: completed=%d\n",
680 __func__, vd, vd->tx.cookie, is_desc_completed(vd)); 683 __func__, vd, vd->tx.cookie, is_desc_completed(vd));
684 last_started = vd->tx.cookie;
681 if (to_pxad_sw_desc(vd)->cyclic) { 685 if (to_pxad_sw_desc(vd)->cyclic) {
682 vchan_cyclic_callback(vd); 686 vchan_cyclic_callback(vd);
683 break; 687 break;
@@ -690,7 +694,12 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
690 } 694 }
691 } 695 }
692 696
693 if (dcsr & PXA_DCSR_STOPSTATE) { 697 if (dcsr & PXA_DCSR_BUSERR) {
698 chan->bus_error = last_started;
699 phy_disable(phy);
700 }
701
702 if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
694 dev_dbg(&chan->vc.chan.dev->device, 703 dev_dbg(&chan->vc.chan.dev->device,
695 "%s(): channel stopped, submitted_empty=%d issued_empty=%d", 704 "%s(): channel stopped, submitted_empty=%d issued_empty=%d",
696 __func__, 705 __func__,
@@ -1249,6 +1258,9 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
1249 struct pxad_chan *chan = to_pxad_chan(dchan); 1258 struct pxad_chan *chan = to_pxad_chan(dchan);
1250 enum dma_status ret; 1259 enum dma_status ret;
1251 1260
1261 if (cookie == chan->bus_error)
1262 return DMA_ERROR;
1263
1252 ret = dma_cookie_status(dchan, cookie, txstate); 1264 ret = dma_cookie_status(dchan, cookie, txstate);
1253 if (likely(txstate && (ret != DMA_ERROR))) 1265 if (likely(txstate && (ret != DMA_ERROR)))
1254 dma_set_residue(txstate, pxad_residue(chan, cookie)); 1266 dma_set_residue(txstate, pxad_residue(chan, cookie));