aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ioat
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2014-12-11 11:13:42 -0500
committerVinod Koul <vinod.koul@intel.com>2014-12-22 01:52:56 -0500
commitabf538ae0374ea827ac5fd51bf2a5184c50afd53 (patch)
tree07c568e910157f26e61a3cc033ab30e25feb99b6 /drivers/dma/ioat
parent681d15ecd7c3fafb5c9b8c0305343a5abbf834d6 (diff)
dmaengine: ioatdma: PQ err descriptors should callback with err results
The err completion callback is missing from the error handler. Two reasons we never hit this. On Xeon, because of the hw err workaround, the completion happens on a NULL descriptor, so we don't do a callback on the PQ descriptor. On Atom we have DWBES support and thus the callback already happened, or we don't halt on error, so that was taken care of. But this code needs to be corrected for future error handlers. Signed-off-by: Dave Jiang <dave.jiang@intel.com> Acked-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--drivers/dma/ioat/dma_v3.c11
1 file changed, 11 insertions, 0 deletions
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 32eae38291e5..be307182231e 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -489,6 +489,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
489 struct ioat_chan_common *chan = &ioat->base; 489 struct ioat_chan_common *chan = &ioat->base;
490 struct pci_dev *pdev = to_pdev(chan); 490 struct pci_dev *pdev = to_pdev(chan);
491 struct ioat_dma_descriptor *hw; 491 struct ioat_dma_descriptor *hw;
492 struct dma_async_tx_descriptor *tx;
492 u64 phys_complete; 493 u64 phys_complete;
493 struct ioat_ring_ent *desc; 494 struct ioat_ring_ent *desc;
494 u32 err_handled = 0; 495 u32 err_handled = 0;
@@ -534,6 +535,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
534 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", 535 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
535 __func__, chanerr, err_handled); 536 __func__, chanerr, err_handled);
536 BUG(); 537 BUG();
538 } else { /* cleanup the faulty descriptor */
539 tx = &desc->txd;
540 if (tx->cookie) {
541 dma_cookie_complete(tx);
542 dma_descriptor_unmap(tx);
543 if (tx->callback) {
544 tx->callback(tx->callback_param);
545 tx->callback = NULL;
546 }
547 }
537 } 548 }
538 549
539 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 550 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);