author		Vinod Koul <vinod.koul@intel.com>	2016-10-02 23:47:33 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-10-02 23:47:33 -0400
commit		11bfedff5594eef74617e6aa02986cf517526b98 (patch)
tree		2aa1a3b8d4e71abda7103b41edffb3a92ca26784
parent		0a98f4b857e9aedf426d8b5b07699a8526e07530 (diff)
parent		793ae66c7dcc7e6655029f6613221a111b15b58e (diff)

Merge branch 'topic/err_reporting' into for-linus

Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Conflicts:
	drivers/dma/cppi41.c
-rw-r--r--	Documentation/dmaengine/provider.txt	11
-rw-r--r--	drivers/dma/at_hdmac.c			11
-rw-r--r--	drivers/dma/at_xdmac.c			8
-rw-r--r--	drivers/dma/coh901318.c			9
-rw-r--r--	drivers/dma/cppi41.c			2
-rw-r--r--	drivers/dma/dmaengine.h			84
-rw-r--r--	drivers/dma/dw/core.c			14
-rw-r--r--	drivers/dma/ep93xx_dma.c		10
-rw-r--r--	drivers/dma/fsl_raid.c			10
-rw-r--r--	drivers/dma/fsldma.c			6
-rw-r--r--	drivers/dma/imx-dma.c			4
-rw-r--r--	drivers/dma/imx-sdma.c			7
-rw-r--r--	drivers/dma/ioat/dma.c			213
-rw-r--r--	drivers/dma/ioat/registers.h		2
-rw-r--r--	drivers/dma/iop-adma.c			3
-rw-r--r--	drivers/dma/ipu/ipu_idmac.c		18
-rw-r--r--	drivers/dma/mic_x100_dma.c		6
-rw-r--r--	drivers/dma/mmp_pdma.c			14
-rw-r--r--	drivers/dma/mmp_tdma.c			4
-rw-r--r--	drivers/dma/mpc512x_dma.c		3
-rw-r--r--	drivers/dma/mv_xor.c			5
-rw-r--r--	drivers/dma/mxs-dma.c			3
-rw-r--r--	drivers/dma/nbpfaxi.c			9
-rw-r--r--	drivers/dma/pch_dma.c			7
-rw-r--r--	drivers/dma/pl330.c			10
-rw-r--r--	drivers/dma/ppc4xx/adma.c		5
-rw-r--r--	drivers/dma/qcom/hidma.c		57
-rw-r--r--	drivers/dma/qcom/hidma.h		2
-rw-r--r--	drivers/dma/qcom/hidma_ll.c		32
-rw-r--r--	drivers/dma/sh/rcar-dmac.c		16
-rw-r--r--	drivers/dma/sh/shdma-base.c		12
-rw-r--r--	drivers/dma/sirf-dma.c			7
-rw-r--r--	drivers/dma/ste_dma40.c			10
-rw-r--r--	drivers/dma/tegra20-apb-dma.c		10
-rw-r--r--	drivers/dma/timb_dma.c			9
-rw-r--r--	drivers/dma/txx9dmac.c			9
-rw-r--r--	drivers/dma/virt-dma.c			17
-rw-r--r--	drivers/dma/xgene-dma.c			3
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c		10
-rw-r--r--	drivers/ntb/ntb_transport.c		193
-rw-r--r--	include/linux/dmaengine.h		16
41 files changed, 596 insertions, 285 deletions
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 91ce82d5f0c4..c4fd47540b31 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -282,6 +282,17 @@ supported.
      that is supposed to push the current
      transaction descriptor to a pending queue, waiting
      for issue_pending to be called.
+   - In this structure the function pointer callback_result can be
+     initialized in order for the submitter to be notified that a
+     transaction has completed. In the earlier code the function pointer
+     callback has been used. However it does not provide any status to the
+     transaction and will be deprecated. The result structure defined as
+     dmaengine_result that is passed in to callback_result has two fields:
+     + result: This provides the transfer result defined by
+       dmaengine_tx_result. Either success or some error
+       condition.
+     + residue: Provides the residue bytes of the transfer for those that
+       support residue.
 
   * device_issue_pending
     - Takes the first transaction descriptor in the pending queue,
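
A minimal client-side sketch of the interface described above (illustrative
only, not part of this merge; the handler name my_dma_done and the completion
variable are hypothetical):

	static void my_dma_done(void *param, const struct dmaengine_result *result)
	{
		struct completion *done = param;

		/* result->result is success or an error condition; residue
		 * is only meaningful on engines that report it */
		if (result->result != DMA_TRANS_NOERROR)
			pr_err("DMA failed: %d (residue %u)\n",
			       result->result, result->residue);
		complete(done);
	}

		/* at submission time, set callback_result instead of the
		 * older callback */
		tx->callback_result = my_dma_done;
		tx->callback_param = &done;
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);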
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 53d22eb73b56..a4c8f80db29d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
 	if (!atc_chan_is_cyclic(atchan)) {
-		dma_async_tx_callback	callback = txd->callback;
-		void			*param = txd->callback_param;
-
 		/*
 		 * The API requires that no submissions are done from a
 		 * callback, so we don't need to drop the lock here
 		 */
-		if (callback)
-			callback(param);
+		dmaengine_desc_get_callback_invoke(txd, NULL);
 	}
 
 	dma_run_dependencies(txd);
@@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 {
 	struct at_desc			*first = atc_first_active(atchan);
 	struct dma_async_tx_descriptor	*txd = &first->txd;
-	dma_async_tx_callback		callback = txd->callback;
-	void				*param = txd->callback_param;
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 			"new cyclic period llp 0x%08x\n",
 			channel_readl(atchan, DSCR));
 
-	if (callback)
-		callback(param);
+	dmaengine_desc_get_callback_invoke(txd, NULL);
 }
 
 /*-- IRQ & Tasklet ---------------------------------------------------*/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe7bc5c..2badc57a7f31 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 	txd = &desc->tx_dma_desc;
 
-	if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
-		txd->callback(txd->callback_param);
+	if (txd->flags & DMA_PREP_INTERRUPT)
+		dmaengine_desc_get_callback_invoke(txd, NULL);
 }
 
 static void at_xdmac_tasklet(unsigned long data)
@@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
 
 	if (!at_xdmac_chan_is_cyclic(atchan)) {
 		dma_cookie_complete(txd);
-		if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
-			txd->callback(txd->callback_param);
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);
 	}
 
 	dma_run_dependencies(txd);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 472be1d09586..74794c9859f6 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1875,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
 	struct coh901318_chan *cohc = (struct coh901318_chan *) data;
 	struct coh901318_desc *cohd_fin;
 	unsigned long flags;
-	dma_async_tx_callback callback;
-	void *callback_param;
+	struct dmaengine_desc_callback cb;
 
 	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
 		 " nbr_active_done %ld\n", __func__,
@@ -1891,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
 		goto err;
 
 	/* locate callback to client */
-	callback = cohd_fin->desc.callback;
-	callback_param = cohd_fin->desc.callback_param;
+	dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
 
 	/* sign this job as completed on the channel */
 	dma_cookie_complete(&cohd_fin->desc);
@@ -1907,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
 	/* Call the callback when we're done */
-	if (callback)
-		callback(callback_param);
+	dmaengine_desc_callback_invoke(&cb, NULL);
 
 	spin_lock_irqsave(&cohc->lock, flags);
 
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 97f4d6c1b6b9..bac5f023013b 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -336,7 +336,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 		c->residue = pd_trans_len(c->desc->pd6) - len;
 		dma_cookie_complete(&c->txd);
-		c->txd.callback(c->txd.callback_param);
+		dmaengine_desc_get_callback_invoke(&c->txd, NULL);
 
 		/* Paired with cppi41_dma_issue_pending */
 		pm_runtime_mark_last_busy(cdd->ddev.dev);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 17f983a4e9ba..882ff9448c3b 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
 	state->residue = residue;
 }
 
+struct dmaengine_desc_callback {
+	dma_async_tx_callback callback;
+	dma_async_tx_callback_result callback_result;
+	void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+			    struct dmaengine_desc_callback *cb)
+{
+	cb->callback = tx->callback;
+	cb->callback_result = tx->callback_result;
+	cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+			       const struct dmaengine_result *result)
+{
+	struct dmaengine_result dummy_result = {
+		.result = DMA_TRANS_NOERROR,
+		.residue = 0
+	};
+
+	if (cb->callback_result) {
+		if (!result)
+			result = &dummy_result;
+		cb->callback_result(cb->callback_param, result);
+	} else if (cb->callback) {
+		cb->callback(cb->callback_param);
+	}
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ *					then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+				   const struct dmaengine_result *result)
+{
+	struct dmaengine_desc_callback cb;
+
+	dmaengine_desc_get_callback(tx, &cb);
+	dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+	return (cb->callback) ? true : false;
+}
+
 #endif
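
Taken together, these helpers encode the pattern the per-driver patches below
apply: snapshot the callback fields while the descriptor is still owned
(typically under the channel lock), then invoke the copy after the lock is
dropped and the descriptor may have been recycled. A sketch of the intended
use in a hypothetical driver completion tasklet (foo_chan and its fields are
illustrative, not from this series):

	static void foo_dma_tasklet(unsigned long data)
	{
		struct foo_chan *fc = (struct foo_chan *)data;
		struct dmaengine_desc_callback cb;
		unsigned long flags;

		spin_lock_irqsave(&fc->lock, flags);
		dma_cookie_complete(&fc->active->txd);
		/* copy callback/callback_result/callback_param while locked */
		dmaengine_desc_get_callback(&fc->active->txd, &cb);
		spin_unlock_irqrestore(&fc->lock, flags);

		/* a NULL result is turned into DMA_TRANS_NOERROR for
		 * callback_result users; a legacy callback is called as
		 * before */
		dmaengine_desc_callback_invoke(&cb, NULL);
	}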
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index edf053f73a49..12eedd457193 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -270,20 +270,19 @@ static void
 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 			bool callback_required)
 {
-	dma_async_tx_callback	callback = NULL;
-	void			*param = NULL;
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
 	struct dw_desc		*child;
 	unsigned long		flags;
+	struct dmaengine_desc_callback	cb;
 
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
 	spin_lock_irqsave(&dwc->lock, flags);
 	dma_cookie_complete(txd);
-	if (callback_required) {
-		callback = txd->callback;
-		param = txd->callback_param;
-	}
+	if (callback_required)
+		dmaengine_desc_get_callback(txd, &cb);
+	else
+		memset(&cb, 0, sizeof(cb));
 
 	/* async_tx_ack */
 	list_for_each_entry(child, &desc->tx_list, desc_node)
@@ -292,8 +291,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	dwc_desc_put(dwc, desc);
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	if (callback)
-		callback(param);
+	dmaengine_desc_callback_invoke(&cb, NULL);
 }
 
 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index ca17e8751af2..d37e8dda8079 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -737,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
 {
 	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
 	struct ep93xx_dma_desc *desc, *d;
-	dma_async_tx_callback callback = NULL;
-	void *callback_param = NULL;
+	struct dmaengine_desc_callback cb;
 	LIST_HEAD(list);
 
+	memset(&cb, 0, sizeof(cb));
 	spin_lock_irq(&edmac->lock);
 	/*
 	 * If dma_terminate_all() was called before we get to run, the active
@@ -755,8 +755,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 			dma_cookie_complete(&desc->txd);
 			list_splice_init(&edmac->active, &list);
 		}
-		callback = desc->txd.callback;
-		callback_param = desc->txd.callback_param;
+		dmaengine_desc_get_callback(&desc->txd, &cb);
 	}
 	spin_unlock_irq(&edmac->lock);
 
@@ -769,8 +768,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 		ep93xx_dma_desc_put(edmac, desc);
 	}
 
-	if (callback)
-		callback(callback_param);
+	dmaengine_desc_callback_invoke(&cb, NULL);
 }
 
 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 496ff8e7d7f9..40c58ae80660 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -134,16 +134,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
 
 static void fsl_re_desc_done(struct fsl_re_desc *desc)
 {
-	dma_async_tx_callback callback;
-	void *callback_param;
-
 	dma_cookie_complete(&desc->async_tx);
-
-	callback = desc->async_tx.callback;
-	callback_param = desc->async_tx.callback_param;
-	if (callback)
-		callback(callback_param);
-
+	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 	dma_descriptor_unmap(&desc->async_tx);
 }
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6ccb787ba56d..87f6ab222d8c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -517,11 +517,7 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
 		ret = txd->cookie;
 
 		/* Run the link descriptor callback function */
-		if (txd->callback) {
-			chan_dbg(chan, "LD %p callback\n", desc);
-			txd->callback(txd->callback_param);
-		}
-
+		dmaengine_desc_get_callback_invoke(txd, NULL);
 		dma_descriptor_unmap(txd);
 	}
 
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a960608c0a4d..ab0fb804fb1e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data)
 out:
 	spin_unlock_irqrestore(&imxdma->lock, flags);
 
-	if (desc->desc.callback)
-		desc->desc.callback(desc->desc.callback_param);
-
+	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
 }
 
 static int imxdma_terminate_all(struct dma_chan *chan)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 03ec76fc22ff..624facb6c8f4 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -650,8 +650,7 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 
 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 {
-	if (sdmac->desc.callback)
-		sdmac->desc.callback(sdmac->desc.callback_param);
+	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 }
 
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -701,8 +700,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 	sdmac->status = DMA_COMPLETE;
 
 	dma_cookie_complete(&sdmac->desc);
-	if (sdmac->desc.callback)
-		sdmac->desc.callback(sdmac->desc.callback_param);
+
+	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 }
 
 static void sdma_tasklet(unsigned long data)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index bd09961443b1..49386ce04bf5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -38,8 +38,54 @@
 
 #include "../dmaengine.h"
 
+static char *chanerr_str[] = {
+	"DMA Transfer Destination Address Error",
+	"Next Descriptor Address Error",
+	"Descriptor Error",
+	"Chan Address Value Error",
+	"CHANCMD Error",
+	"Chipset Uncorrectable Data Integrity Error",
+	"DMA Uncorrectable Data Integrity Error",
+	"Read Data Error",
+	"Write Data Error",
+	"Descriptor Control Error",
+	"Descriptor Transfer Size Error",
+	"Completion Address Error",
+	"Interrupt Configuration Error",
+	"Super extended descriptor Address Error",
+	"Unaffiliated Error",
+	"CRC or XOR P Error",
+	"XOR Q Error",
+	"Descriptor Count Error",
+	"DIF All F detect Error",
+	"Guard Tag verification Error",
+	"Application Tag verification Error",
+	"Reference Tag verification Error",
+	"Bundle Bit Error",
+	"Result DIF All F detect Error",
+	"Result Guard Tag verification Error",
+	"Result Application Tag verification Error",
+	"Result Reference Tag verification Error",
+	NULL
+};
+
 static void ioat_eh(struct ioatdma_chan *ioat_chan);
 
+static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
+{
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		if ((chanerr >> i) & 1) {
+			if (chanerr_str[i]) {
+				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+					i, chanerr_str[i]);
+			} else
+				break;
+		}
+	}
+}
+
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
  * @irq: interrupt id
@@ -568,12 +614,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 
 		tx = &desc->txd;
 		if (tx->cookie) {
+			struct dmaengine_result res;
+
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
+			res.result = DMA_TRANS_NOERROR;
+			dmaengine_desc_get_callback_invoke(tx, NULL);
+			tx->callback = NULL;
+			tx->callback_result = NULL;
 		}
 
 		if (tx->phys == phys_complete)
@@ -622,7 +670,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
 	if (is_ioat_halted(*ioat_chan->completion)) {
 		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
-		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+		if (chanerr &
+		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
 			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
 			ioat_eh(ioat_chan);
 		}
@@ -652,6 +701,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
 	__ioat_restart_chan(ioat_chan);
 }
 
+
+static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
+{
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	u16 active;
+	int idx = ioat_chan->tail, i;
+
+	/*
+	 * We assume that the failed descriptor has been processed.
+	 * Now we are just returning all the remaining submitted
+	 * descriptors to abort.
+	 */
+	active = ioat_ring_active(ioat_chan);
+
+	/* we skip the failed descriptor that tail points to */
+	for (i = 1; i < active; i++) {
+		struct dma_async_tx_descriptor *tx;
+
+		smp_read_barrier_depends();
+		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+
+		tx = &desc->txd;
+		if (tx->cookie) {
+			struct dmaengine_result res;
+
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			res.result = DMA_TRANS_ABORTED;
+			dmaengine_desc_get_callback_invoke(tx, &res);
+			tx->callback = NULL;
+			tx->callback_result = NULL;
+		}
+
+		/* skip extended descriptors */
+		if (desc_has_ext(desc)) {
+			WARN_ON(i + 1 >= active);
+			i++;
+		}
+
+		/* cleanup super extended descriptors */
+		if (desc->sed) {
+			ioat_free_sed(ioat_dma, desc->sed);
+			desc->sed = NULL;
+		}
+	}
+
+	smp_mb(); /* finish all descriptor reads before incrementing tail */
+	ioat_chan->tail = idx + active;
+
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
+}
+
 static void ioat_eh(struct ioatdma_chan *ioat_chan)
 {
 	struct pci_dev *pdev = to_pdev(ioat_chan);
@@ -662,6 +766,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
 	u32 err_handled = 0;
 	u32 chanerr_int;
 	u32 chanerr;
+	bool abort = false;
+	struct dmaengine_result res;
 
 	/* cleanup so tail points to descriptor that caused the error */
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
@@ -697,30 +803,55 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
 		break;
 	}
 
+	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
+		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
+			res.result = DMA_TRANS_READ_FAILED;
+			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
+		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
+			res.result = DMA_TRANS_WRITE_FAILED;
+			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
+		}
+
+		abort = true;
+	} else
+		res.result = DMA_TRANS_NOERROR;
+
 	/* fault on unhandled error or spurious halt */
 	if (chanerr ^ err_handled || chanerr == 0) {
 		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
 			__func__, chanerr, err_handled);
+		dev_err(to_dev(ioat_chan), "Errors handled:\n");
+		ioat_print_chanerrs(ioat_chan, err_handled);
+		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
+		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
+
 		BUG();
-	} else { /* cleanup the faulty descriptor */
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
 	}
 
-	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+	/* cleanup the faulty descriptor since we are continuing */
+	tx = &desc->txd;
+	if (tx->cookie) {
+		dma_cookie_complete(tx);
+		dma_descriptor_unmap(tx);
+		dmaengine_desc_get_callback_invoke(tx, &res);
+		tx->callback = NULL;
+		tx->callback_result = NULL;
+	}
 
 	/* mark faulting descriptor as complete */
 	*ioat_chan->completion = desc->txd.phys;
 
 	spin_lock_bh(&ioat_chan->prep_lock);
+	/* we need abort all descriptors */
+	if (abort) {
+		ioat_abort_descs(ioat_chan);
+		/* clean up the channel, we could be in weird state */
+		ioat_reset_hw(ioat_chan);
+	}
+
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
 	ioat_restart_channel(ioat_chan);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 }
@@ -753,10 +884,28 @@ void ioat_timer_event(unsigned long data)
 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
 			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &ioat_chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
+		dev_err(to_dev(ioat_chan), "Errors:\n");
+		ioat_print_chanerrs(ioat_chan, chanerr);
+
+		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
+			spin_lock_bh(&ioat_chan->cleanup_lock);
+			spin_lock_bh(&ioat_chan->prep_lock);
+			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+			spin_unlock_bh(&ioat_chan->prep_lock);
+
+			ioat_abort_descs(ioat_chan);
+			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+			ioat_reset_hw(ioat_chan);
+			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+			ioat_restart_channel(ioat_chan);
+
+			spin_lock_bh(&ioat_chan->prep_lock);
+			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+			spin_unlock_bh(&ioat_chan->prep_lock);
+			spin_unlock_bh(&ioat_chan->cleanup_lock);
+		}
+
+		return;
 	}
 
 	spin_lock_bh(&ioat_chan->cleanup_lock);
@@ -780,14 +929,26 @@ void ioat_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
-		dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
-			 status, chanerr);
-		dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
-			 ioat_ring_active(ioat_chan));
+		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+			status, chanerr);
+		dev_err(to_dev(ioat_chan), "Errors:\n");
+		ioat_print_chanerrs(ioat_chan, chanerr);
+
+		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
+			ioat_ring_active(ioat_chan));
 
 		spin_lock_bh(&ioat_chan->prep_lock);
+		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+
+		ioat_abort_descs(ioat_chan);
+		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
+		ioat_reset_hw(ioat_chan);
+		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
 		ioat_restart_channel(ioat_chan);
+
+		spin_lock_bh(&ioat_chan->prep_lock);
+		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
 		spin_unlock_bh(&ioat_chan->prep_lock);
 		spin_unlock_bh(&ioat_chan->cleanup_lock);
 		return;
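
With the error strings, the IOAT_CHANERR_RECOVER_MASK handling and
ioat_abort_descs() above, ioat now completes the faulting descriptor with
DMA_TRANS_READ_FAILED or DMA_TRANS_WRITE_FAILED and flushes everything queued
behind it with DMA_TRANS_ABORTED. A sketch of a client handler consuming
those results (the handler name is hypothetical; the enum values come from
this series):

	static void my_copy_done(void *param, const struct dmaengine_result *res)
	{
		switch (res->result) {
		case DMA_TRANS_NOERROR:
			/* transfer completed, data is good */
			break;
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			/* this descriptor itself faulted */
			break;
		case DMA_TRANS_ABORTED:
			/* flushed behind a fault; safe to resubmit */
			break;
		}
	}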
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 70534981a49b..48fa4cf9f64a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -240,6 +240,8 @@
 #define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR	0x40000
 
 #define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
+#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \
+				   IOAT_CHANERR_WRITE_DATA_ERR)
 
 #define IOAT_CHANERR_MASK_OFFSET		0x2C	/* 32-bit Channel Error Register */
 
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index f039cfadf17b..a410657f7bcd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		/* call the callback (must not sleep or submit new
 		 * operations to this channel)
 		 */
-		if (tx->callback)
-			tx->callback(tx->callback_param);
+		dmaengine_desc_get_callback_invoke(tx, NULL);
 
 		dma_descriptor_unmap(tx);
 		if (desc->group_head)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index b54f62de9232..ed76044ce4b9 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	struct scatterlist **sg, *sgnext, *sgnew = NULL;
 	/* Next transfer descriptor */
 	struct idmac_tx_desc *desc, *descnew;
-	dma_async_tx_callback callback;
-	void *callback_param;
 	bool done = false;
 	u32 ready0, ready1, curbuf, err;
 	unsigned long flags;
+	struct dmaengine_desc_callback cb;
 
 	/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
 
@@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	if (likely(sgnew) &&
 	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-		callback = descnew->txd.callback;
-		callback_param = descnew->txd.callback_param;
+		dmaengine_desc_get_callback(&descnew->txd, &cb);
+
 		list_del_init(&descnew->list);
 		spin_unlock(&ichan->lock);
-		if (callback)
-			callback(callback_param);
+
+		dmaengine_desc_callback_invoke(&cb, NULL);
 		spin_lock(&ichan->lock);
 	}
 
@@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	if (done)
 		dma_cookie_complete(&desc->txd);
 
-	callback = desc->txd.callback;
-	callback_param = desc->txd.callback_param;
+	dmaengine_desc_get_callback(&desc->txd, &cb);
 
 	spin_unlock(&ichan->lock);
 
-	if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
-		callback(callback_param);
+	if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
+		dmaengine_desc_callback_invoke(&cb, NULL);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 1502b24b7c7d..818255844a3c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch)
 		tx = &ch->tx_array[last_tail];
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
+			dmaengine_desc_get_callback_invoke(tx, NULL);
+			tx->callback = NULL;
 		}
 		last_tail = mic_dma_hw_ring_inc(last_tail);
 	}
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index f4b25fb0d040..eb3a1f42ab06 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data)
 	struct mmp_pdma_desc_sw *desc, *_desc;
 	LIST_HEAD(chain_cleanup);
 	unsigned long flags;
+	struct dmaengine_desc_callback cb;
 
 	if (chan->cyclic_first) {
-		dma_async_tx_callback cb = NULL;
-		void *cb_data = NULL;
-
 		spin_lock_irqsave(&chan->desc_lock, flags);
 		desc = chan->cyclic_first;
-		cb = desc->async_tx.callback;
-		cb_data = desc->async_tx.callback_param;
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
 		spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-		if (cb)
-			cb(cb_data);
+		dmaengine_desc_callback_invoke(&cb, NULL);
 
 		return;
 	}
@@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data)
 		/* Remove from the list of transactions */
 		list_del(&desc->node);
 		/* Run the link descriptor callback function */
-		if (txd->callback)
-			txd->callback(txd->callback_param);
+		dmaengine_desc_get_callback(txd, &cb);
+		dmaengine_desc_callback_invoke(&cb, NULL);
 
 		dma_pool_free(chan->desc_pool, desc, txd->phys);
 	}
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d7422b1bf406..13c68b6434ce 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data)
 {
 	struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
 
-	if (tdmac->desc.callback)
-		tdmac->desc.callback(tdmac->desc.callback_param);
-
+	dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
 }
 
 static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 9dd99ba18fce..dde713461a95 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
 	list_for_each_entry(mdesc, &list, node) {
 		desc = &mdesc->desc;
 
-		if (desc->callback)
-			desc->callback(desc->callback_param);
+		dmaengine_desc_get_callback_invoke(desc, NULL);
 
 		last_cookie = desc->cookie;
 		dma_run_dependencies(desc);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f4c9f98ec35e..f8b5e7424b3a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -209,10 +209,7 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 		/* call the callback (must not sleep or submit new
 		 * operations to this channel)
 		 */
-		if (desc->async_tx.callback)
-			desc->async_tx.callback(
-				desc->async_tx.callback_param);
-
+		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 		dma_descriptor_unmap(&desc->async_tx);
 	}
 
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 60de35251da5..50e64e113ffb 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data)
 {
 	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
 
-	if (mxs_chan->desc.callback)
-		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
+	dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
 }
 
 static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 08c45c185549..09de71519d37 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data)
 {
 	struct nbpf_channel *chan = (struct nbpf_channel *)data;
 	struct nbpf_desc *desc, *tmp;
-	dma_async_tx_callback callback;
-	void *param;
+	struct dmaengine_desc_callback cb;
 
 	while (!list_empty(&chan->done)) {
 		bool found = false, must_put, recycling = false;
@@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data)
 			must_put = false;
 		}
 
-		callback = desc->async_tx.callback;
-		param = desc->async_tx.callback_param;
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
 
 		/* ack and callback completed descriptor */
 		spin_unlock_irq(&chan->lock);
 
-		if (callback)
-			callback(param);
+		dmaengine_desc_callback_invoke(&cb, NULL);
 
 		if (must_put)
 			nbpf_desc_put(desc);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 113605f6fe20..df95727dc2fb 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
 			       struct pch_dma_desc *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
-	dma_async_tx_callback callback = txd->callback;
-	void *param = txd->callback_param;
+	struct dmaengine_desc_callback cb;
 
+	dmaengine_desc_get_callback(txd, &cb);
 	list_splice_init(&desc->tx_list, &pd_chan->free_list);
 	list_move(&desc->desc_node, &pd_chan->free_list);
 
-	if (callback)
-		callback(param);
+	dmaengine_desc_callback_invoke(&cb, NULL);
 }
 
 static void pdc_complete_all(struct pch_dma_chan *pd_chan)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4fc3ffbd5ca0..1ecd4674aa23 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data)
 	}
 
 	while (!list_empty(&pch->completed_list)) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+		struct dmaengine_desc_callback cb;
 
 		desc = list_first_entry(&pch->completed_list,
 					struct dma_pl330_desc, node);
 
-		callback = desc->txd.callback;
-		callback_param = desc->txd.callback_param;
+		dmaengine_desc_get_callback(&desc->txd, &cb);
 
 		if (pch->cyclic) {
 			desc->status = PREP;
@@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data)
 
 		dma_descriptor_unmap(&desc->txd);
 
-		if (callback) {
+		if (dmaengine_desc_callback_valid(&cb)) {
 			spin_unlock_irqrestore(&pch->lock, flags);
-			callback(callback_param);
+			dmaengine_desc_callback_invoke(&cb, NULL);
 			spin_lock_irqsave(&pch->lock, flags);
 		}
 	}
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 66bd96724b2f..d45da34a0568 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1485,10 +1485,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 		/* call the callback (must not sleep or submit new
 		 * operations to this channel)
 		 */
-		if (desc->async_tx.callback)
-			desc->async_tx.callback(
-				desc->async_tx.callback_param);
-
+		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 		dma_descriptor_unmap(&desc->async_tx);
 	}
 
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index b2374cd91e45..e244e10a94b5 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
 	struct dma_async_tx_descriptor *desc;
 	dma_cookie_t last_cookie;
 	struct hidma_desc *mdesc;
+	struct hidma_desc *next;
 	unsigned long irqflags;
 	struct list_head list;
 
@@ -122,28 +123,36 @@ static void hidma_process_completed(struct hidma_chan *mchan)
 	spin_unlock_irqrestore(&mchan->lock, irqflags);
 
 	/* Execute callbacks and run dependencies */
-	list_for_each_entry(mdesc, &list, node) {
+	list_for_each_entry_safe(mdesc, next, &list, node) {
 		enum dma_status llstat;
+		struct dmaengine_desc_callback cb;
+		struct dmaengine_result result;
 
 		desc = &mdesc->desc;
+		last_cookie = desc->cookie;
 
 		spin_lock_irqsave(&mchan->lock, irqflags);
 		dma_cookie_complete(desc);
 		spin_unlock_irqrestore(&mchan->lock, irqflags);
 
 		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
-		if (desc->callback && (llstat == DMA_COMPLETE))
-			desc->callback(desc->callback_param);
+		dmaengine_desc_get_callback(desc, &cb);
 
-		last_cookie = desc->cookie;
 		dma_run_dependencies(desc);
-	}
 
-	/* Free descriptors */
-	spin_lock_irqsave(&mchan->lock, irqflags);
-	list_splice_tail_init(&list, &mchan->free);
-	spin_unlock_irqrestore(&mchan->lock, irqflags);
+		spin_lock_irqsave(&mchan->lock, irqflags);
+		list_move(&mdesc->node, &mchan->free);
+
+		if (llstat == DMA_COMPLETE) {
+			mchan->last_success = last_cookie;
+			result.result = DMA_TRANS_NOERROR;
+		} else
+			result.result = DMA_TRANS_ABORTED;
+
+		spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+		dmaengine_desc_callback_invoke(&cb, &result);
+	}
 }
 
 /*
@@ -238,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
 		hidma_ll_start(dmadev->lldev);
 }
 
+static inline bool hidma_txn_is_success(dma_cookie_t cookie,
+		dma_cookie_t last_success, dma_cookie_t last_used)
+{
+	if (last_success <= last_used) {
+		if ((cookie <= last_success) || (cookie > last_used))
+			return true;
+	} else {
+		if ((cookie <= last_success) && (cookie > last_used))
+			return true;
+	}
+	return false;
+}
+
 static enum dma_status hidma_tx_status(struct dma_chan *dmach,
 				       dma_cookie_t cookie,
 				       struct dma_tx_state *txstate)
@@ -246,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(dmach, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
+	if (ret == DMA_COMPLETE) {
+		bool is_success;
+
+		is_success = hidma_txn_is_success(cookie, mchan->last_success,
+						  dmach->cookie);
+		return is_success ? ret : DMA_ERROR;
+	}
 
 	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
 		unsigned long flags;
@@ -398,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
 	hidma_process_completed(mchan);
 
 	spin_lock_irqsave(&mchan->lock, irqflags);
+	mchan->last_success = 0;
 	list_splice_init(&mchan->active, &list);
 	list_splice_init(&mchan->prepared, &list);
 	list_splice_init(&mchan->completed, &list);
@@ -413,14 +441,9 @@ static int hidma_terminate_channel(struct dma_chan *chan)
 	/* return all user requests */
 	list_for_each_entry_safe(mdesc, tmp, &list, node) {
 		struct dma_async_tx_descriptor *txd = &mdesc->desc;
-		dma_async_tx_callback callback = mdesc->desc.callback;
-		void *param = mdesc->desc.callback_param;
 
 		dma_descriptor_unmap(txd);
-
-		if (callback)
-			callback(param);
-
+		dmaengine_desc_get_callback_invoke(txd, NULL);
 		dma_run_dependencies(txd);
 
 		/* move myself to free_list */
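
hidma_txn_is_success() above treats the cookie counter as a ring: anything
inside the half-open window (last_success, last_used] is still pending or has
failed, anything outside it completed successfully; the else branch covers
the case where the signed cookie counter has wrapped past last_success. A
worked example with hypothetical values:

	/* no wraparound: last_success = 10, last_used = 12 */
	hidma_txn_is_success(9, 10, 12);	/* true:  completed before the window */
	hidma_txn_is_success(10, 10, 12);	/* true:  the last known success */
	hidma_txn_is_success(11, 10, 12);	/* false: hidma_tx_status() -> DMA_ERROR */
	hidma_txn_is_success(12, 10, 12);	/* false: hidma_tx_status() -> DMA_ERROR */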
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index db413a5efc4e..e52e20716303 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -72,7 +72,6 @@ struct hidma_lldev {
 
 	u32 tre_write_offset;		/* TRE write location */
 	struct tasklet_struct task;	/* task delivering notifications */
-	struct tasklet_struct rst_task;	/* task to reset HW */
 	DECLARE_KFIFO_PTR(handoff_fifo,
 			  struct hidma_tre *);	/* pending TREs FIFO */
 };
@@ -89,6 +88,7 @@ struct hidma_chan {
 	bool				allocated;
 	char				dbg_name[16];
 	u32				dma_sig;
+	dma_cookie_t			last_success;
 
 	/*
 	 * active descriptor on this channel
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index ad20dfb64c71..3224f24c577b 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -381,27 +381,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
 }
 
 /*
- * Abort all transactions and perform a reset.
- */
-static void hidma_ll_abort(unsigned long arg)
-{
-	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
-	u8 err_code = HIDMA_EVRE_STATUS_ERROR;
-	u8 err_info = 0xFF;
-	int rc;
-
-	hidma_cleanup_pending_tre(lldev, err_info, err_code);
-
-	/* reset the channel for recovery */
-	rc = hidma_ll_setup(lldev);
-	if (rc) {
-		dev_err(lldev->dev, "channel reinitialize failed after error\n");
-		return;
-	}
-	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-}
-
-/*
  * The interrupt handler for HIDMA will try to consume as many pending
  * EVRE from the event queue as possible. Each EVRE has an associated
  * TRE that holds the user interface parameters. EVRE reports the
@@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 
 	while (cause) {
 		if (cause & HIDMA_ERR_INT_MASK) {
-			dev_err(lldev->dev, "error 0x%x, resetting...\n",
+			dev_err(lldev->dev, "error 0x%x, disabling...\n",
 				cause);
 
 			/* Clear out pending interrupts */
 			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
 
-			tasklet_schedule(&lldev->rst_task);
+			/* No further submissions. */
+			hidma_ll_disable(lldev);
+
+			/* Driver completes the txn and intimates the client.*/
+			hidma_cleanup_pending_tre(lldev, 0xFF,
+						  HIDMA_EVRE_STATUS_ERROR);
 			goto out;
 		}
 
@@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
 		return NULL;
 
 	spin_lock_init(&lldev->lock);
-	tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
 	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
 	lldev->initialized = 1;
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
@@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 
 	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
 	tasklet_kill(&lldev->task);
-	tasklet_kill(&lldev->rst_task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
 	lldev->pending_tre_count = 0;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0dd953884d1d..d1defa4646ba 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1389,21 +1389,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
 {
 	struct rcar_dmac_chan *chan = dev;
 	struct rcar_dmac_desc *desc;
+	struct dmaengine_desc_callback cb;
 
 	spin_lock_irq(&chan->lock);
 
 	/* For cyclic transfers notify the user after every chunk. */
 	if (chan->desc.running && chan->desc.running->cyclic) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
 		desc = chan->desc.running;
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
 
-		if (callback) {
+		if (dmaengine_desc_callback_valid(&cb)) {
 			spin_unlock_irq(&chan->lock);
-			callback(callback_param);
+			dmaengine_desc_callback_invoke(&cb, NULL);
 			spin_lock_irq(&chan->lock);
 		}
 	}
@@ -1418,14 +1415,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
 		dma_cookie_complete(&desc->async_tx);
 		list_del(&desc->node);
 
-		if (desc->async_tx.callback) {
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
 			spin_unlock_irq(&chan->lock);
 			/*
 			 * We own the only reference to this descriptor, we can
 			 * safely dereference it without holding the channel
 			 * lock.
 			 */
-			desc->async_tx.callback(desc->async_tx.callback_param);
+			dmaengine_desc_callback_invoke(&cb, NULL);
 			spin_lock_irq(&chan->lock);
 		}
 
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 10fcabad80f3..12fa48e380cf 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	bool head_acked = false;
 	dma_cookie_t cookie = 0;
 	dma_async_tx_callback callback = NULL;
-	void *param = NULL;
+	struct dmaengine_desc_callback cb;
 	unsigned long flags;
 	LIST_HEAD(cyclic_list);
 
+	memset(&cb, 0, sizeof(cb));
 	spin_lock_irqsave(&schan->chan_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 		/* Call callback on the last chunk */
 		if (desc->mark == DESC_COMPLETED && tx->callback) {
 			desc->mark = DESC_WAITING;
+			dmaengine_desc_get_callback(tx, &cb);
 			callback = tx->callback;
-			param = tx->callback_param;
 			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
 				tx->cookie, tx, schan->id);
 			BUG_ON(desc->chunks != 1);
@@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 
 	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
-	if (callback)
-		callback(param);
+	dmaengine_desc_callback_invoke(&cb, NULL);
 
 	return callback;
 }
@@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
 		/* Complete all */
 		list_for_each_entry(sdesc, &dl, node) {
 			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+
 			sdesc->mark = DESC_IDLE;
-			if (tx->callback)
-				tx->callback(tx->callback_param);
+			dmaengine_desc_get_callback_invoke(tx, NULL);
 		}
 
 		spin_lock(&schan->chan_lock);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d8bc3f2a71db..a96e4a480de5 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
360 list_for_each_entry(sdesc, &list, node) { 360 list_for_each_entry(sdesc, &list, node) {
361 desc = &sdesc->desc; 361 desc = &sdesc->desc;
362 362
363 if (desc->callback) 363 dmaengine_desc_get_callback_invoke(desc, NULL);
364 desc->callback(desc->callback_param);
365
366 last_cookie = desc->cookie; 364 last_cookie = desc->cookie;
367 dma_run_dependencies(desc); 365 dma_run_dependencies(desc);
368 } 366 }
@@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
388 386
389 desc = &sdesc->desc; 387 desc = &sdesc->desc;
390 while (happened_cyclic != schan->completed_cyclic) { 388 while (happened_cyclic != schan->completed_cyclic) {
391 if (desc->callback) 389 dmaengine_desc_get_callback_invoke(desc, NULL);
392 desc->callback(desc->callback_param);
393 schan->completed_cyclic++; 390 schan->completed_cyclic++;
394 } 391 }
395 } 392 }
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e43d2bbfd122..08f3d7be2df0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1570,8 +1570,7 @@ static void dma_tasklet(unsigned long data)
1570 struct d40_desc *d40d; 1570 struct d40_desc *d40d;
1571 unsigned long flags; 1571 unsigned long flags;
1572 bool callback_active; 1572 bool callback_active;
1573 dma_async_tx_callback callback; 1573 struct dmaengine_desc_callback cb;
1574 void *callback_param;
1575 1574
1576 spin_lock_irqsave(&d40c->lock, flags); 1575 spin_lock_irqsave(&d40c->lock, flags);
1577 1576
@@ -1598,8 +1597,7 @@ static void dma_tasklet(unsigned long data)
1598 1597
1599 /* Callback to client */ 1598 /* Callback to client */
1600 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); 1599 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1601 callback = d40d->txd.callback; 1600 dmaengine_desc_get_callback(&d40d->txd, &cb);
1602 callback_param = d40d->txd.callback_param;
1603 1601
1604 if (!d40d->cyclic) { 1602 if (!d40d->cyclic) {
1605 if (async_tx_test_ack(&d40d->txd)) { 1603 if (async_tx_test_ack(&d40d->txd)) {
@@ -1620,8 +1618,8 @@ static void dma_tasklet(unsigned long data)
1620 1618
1621 spin_unlock_irqrestore(&d40c->lock, flags); 1619 spin_unlock_irqrestore(&d40c->lock, flags);
1622 1620
1623 if (callback_active && callback) 1621 if (callback_active)
1624 callback(callback_param); 1622 dmaengine_desc_callback_invoke(&cb, NULL);
1625 1623
1626 return; 1624 return;
1627 1625
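
[Editor's note] In the ste_dma40 hunk the invocation stays gated on callback_active, i.e. on the client having requested notification at prep time; only the NULL-pointer check moves into the helper. For reference, a client opts in to the completion callback roughly like this (generic dmaengine API, shown only for context):

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
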
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 6ab9eb98588a..3722b9d8d9fe 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
655static void tegra_dma_tasklet(unsigned long data) 655static void tegra_dma_tasklet(unsigned long data)
656{ 656{
657 struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; 657 struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
658 dma_async_tx_callback callback = NULL; 658 struct dmaengine_desc_callback cb;
659 void *callback_param = NULL;
660 struct tegra_dma_desc *dma_desc; 659 struct tegra_dma_desc *dma_desc;
661 unsigned long flags; 660 unsigned long flags;
662 int cb_count; 661 int cb_count;
@@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data)
666 dma_desc = list_first_entry(&tdc->cb_desc, 665 dma_desc = list_first_entry(&tdc->cb_desc,
667 typeof(*dma_desc), cb_node); 666 typeof(*dma_desc), cb_node);
668 list_del(&dma_desc->cb_node); 667 list_del(&dma_desc->cb_node);
669 callback = dma_desc->txd.callback; 668 dmaengine_desc_get_callback(&dma_desc->txd, &cb);
670 callback_param = dma_desc->txd.callback_param;
671 cb_count = dma_desc->cb_count; 669 cb_count = dma_desc->cb_count;
672 dma_desc->cb_count = 0; 670 dma_desc->cb_count = 0;
673 spin_unlock_irqrestore(&tdc->lock, flags); 671 spin_unlock_irqrestore(&tdc->lock, flags);
674 while (cb_count-- && callback) 672 while (cb_count--)
675 callback(callback_param); 673 dmaengine_desc_callback_invoke(&cb, NULL);
676 spin_lock_irqsave(&tdc->lock, flags); 674 spin_lock_irqsave(&tdc->lock, flags);
677 } 675 }
678 spin_unlock_irqrestore(&tdc->lock, flags); 676 spin_unlock_irqrestore(&tdc->lock, flags);
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index e82745aa42a8..896bafb7a532 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
226 226
227static void __td_finish(struct timb_dma_chan *td_chan) 227static void __td_finish(struct timb_dma_chan *td_chan)
228{ 228{
229 dma_async_tx_callback callback; 229 struct dmaengine_desc_callback cb;
230 void *param;
231 struct dma_async_tx_descriptor *txd; 230 struct dma_async_tx_descriptor *txd;
232 struct timb_dma_desc *td_desc; 231 struct timb_dma_desc *td_desc;
233 232
@@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
252 dma_cookie_complete(txd); 251 dma_cookie_complete(txd);
253 td_chan->ongoing = false; 252 td_chan->ongoing = false;
254 253
255 callback = txd->callback; 254 dmaengine_desc_get_callback(txd, &cb);
256 param = txd->callback_param;
257 255
258 list_move(&td_desc->desc_node, &td_chan->free_list); 256 list_move(&td_desc->desc_node, &td_chan->free_list);
259 257
@@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
262 * The API requires that no submissions are done from a 260 * The API requires that no submissions are done from a
263 * callback, so we don't need to drop the lock here 261 * callback, so we don't need to drop the lock here
264 */ 262 */
265 if (callback) 263 dmaengine_desc_callback_invoke(&cb, NULL);
266 callback(param);
267} 264}
268 265
269static u32 __td_ier_mask(struct timb_dma *td) 266static u32 __td_ier_mask(struct timb_dma *td)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 7632290e7c14..4d8c7b9078fd 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -403,16 +403,14 @@ static void
403txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, 403txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
404 struct txx9dmac_desc *desc) 404 struct txx9dmac_desc *desc)
405{ 405{
406 dma_async_tx_callback callback; 406 struct dmaengine_desc_callback cb;
407 void *param;
408 struct dma_async_tx_descriptor *txd = &desc->txd; 407 struct dma_async_tx_descriptor *txd = &desc->txd;
409 408
410 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 409 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
411 txd->cookie, desc); 410 txd->cookie, desc);
412 411
413 dma_cookie_complete(txd); 412 dma_cookie_complete(txd);
414 callback = txd->callback; 413 dmaengine_desc_get_callback(txd, &cb);
415 param = txd->callback_param;
416 414
417 txx9dmac_sync_desc_for_cpu(dc, desc); 415 txx9dmac_sync_desc_for_cpu(dc, desc);
418 list_splice_init(&desc->tx_list, &dc->free_list); 416 list_splice_init(&desc->tx_list, &dc->free_list);
@@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
423 * The API requires that no submissions are done from a 421 * The API requires that no submissions are done from a
424 * callback, so we don't need to drop the lock here 422 * callback, so we don't need to drop the lock here
425 */ 423 */
426 if (callback) 424 dmaengine_desc_callback_invoke(&cb, NULL);
427 callback(param);
428 dma_run_dependencies(txd); 425 dma_run_dependencies(txd);
429} 426}
430 427
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index a35c211857dd..e47fc9b0944f 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg)
87{ 87{
88 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; 88 struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
89 struct virt_dma_desc *vd; 89 struct virt_dma_desc *vd;
90 dma_async_tx_callback cb = NULL; 90 struct dmaengine_desc_callback cb;
91 void *cb_data = NULL;
92 LIST_HEAD(head); 91 LIST_HEAD(head);
93 92
94 spin_lock_irq(&vc->lock); 93 spin_lock_irq(&vc->lock);
@@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg)
96 vd = vc->cyclic; 95 vd = vc->cyclic;
97 if (vd) { 96 if (vd) {
98 vc->cyclic = NULL; 97 vc->cyclic = NULL;
99 cb = vd->tx.callback; 98 dmaengine_desc_get_callback(&vd->tx, &cb);
100 cb_data = vd->tx.callback_param; 99 } else {
100 memset(&cb, 0, sizeof(cb));
101 } 101 }
102 spin_unlock_irq(&vc->lock); 102 spin_unlock_irq(&vc->lock);
103 103
104 if (cb) 104 dmaengine_desc_callback_invoke(&cb, NULL);
105 cb(cb_data);
106 105
107 while (!list_empty(&head)) { 106 while (!list_empty(&head)) {
108 vd = list_first_entry(&head, struct virt_dma_desc, node); 107 vd = list_first_entry(&head, struct virt_dma_desc, node);
109 cb = vd->tx.callback; 108 dmaengine_desc_get_callback(&vd->tx, &cb);
110 cb_data = vd->tx.callback_param;
111 109
112 list_del(&vd->node); 110 list_del(&vd->node);
113 if (dmaengine_desc_test_reuse(&vd->tx)) 111 if (dmaengine_desc_test_reuse(&vd->tx))
@@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg)
115 else 113 else
116 vc->desc_free(vd); 114 vc->desc_free(vd);
117 115
118 if (cb) 116 dmaengine_desc_callback_invoke(&cb, NULL);
119 cb(cb_data);
120 } 117 }
121} 118}
122 119
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 9cb93c5b655d..d66ed11baaec 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -608,8 +608,7 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
608 dma_cookie_complete(tx); 608 dma_cookie_complete(tx);
609 609
610 /* Run the link descriptor callback function */ 610 /* Run the link descriptor callback function */
611 if (tx->callback) 611 dmaengine_desc_get_callback_invoke(tx, NULL);
612 tx->callback(tx->callback_param);
613 612
614 dma_descriptor_unmap(tx); 613 dma_descriptor_unmap(tx);
615 614
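
[Editor's note] Call sites like xgene-dma above, which completed and invoked in one breath, collapse to the single dmaengine_desc_get_callback_invoke() helper. It is plausibly just the two earlier helpers composed (again a sketch, not the verbatim header):

	static inline void
	dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
					   const struct dmaengine_result *result)
	{
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(tx, &cb);
		dmaengine_desc_callback_invoke(&cb, result);
	}
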
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4e223d094433..8288fe4d17c3 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
755 spin_lock_irqsave(&chan->lock, flags); 755 spin_lock_irqsave(&chan->lock, flags);
756 756
757 list_for_each_entry_safe(desc, next, &chan->done_list, node) { 757 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
758 dma_async_tx_callback callback; 758 struct dmaengine_desc_callback cb;
759 void *callback_param;
760 759
761 if (desc->cyclic) { 760 if (desc->cyclic) {
762 xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 761 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
767 list_del(&desc->node); 766 list_del(&desc->node);
768 767
769 /* Run the link descriptor callback function */ 768 /* Run the link descriptor callback function */
770 callback = desc->async_tx.callback; 769 dmaengine_desc_get_callback(&desc->async_tx, &cb);
771 callback_param = desc->async_tx.callback_param; 770 if (dmaengine_desc_callback_valid(&cb)) {
772 if (callback) {
773 spin_unlock_irqrestore(&chan->lock, flags); 771 spin_unlock_irqrestore(&chan->lock, flags);
774 callback(callback_param); 772 dmaengine_desc_callback_invoke(&cb, NULL);
775 spin_lock_irqsave(&chan->lock, flags); 773 spin_lock_irqsave(&chan->lock, flags);
776 } 774 }
777 775
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index d5c5894f252e..8601c10acf74 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -102,13 +102,16 @@ struct ntb_queue_entry {
102 void *buf; 102 void *buf;
103 unsigned int len; 103 unsigned int len;
104 unsigned int flags; 104 unsigned int flags;
105 int retries;
106 int errors;
107 unsigned int tx_index;
108 unsigned int rx_index;
105 109
106 struct ntb_transport_qp *qp; 110 struct ntb_transport_qp *qp;
107 union { 111 union {
108 struct ntb_payload_header __iomem *tx_hdr; 112 struct ntb_payload_header __iomem *tx_hdr;
109 struct ntb_payload_header *rx_hdr; 113 struct ntb_payload_header *rx_hdr;
110 }; 114 };
111 unsigned int index;
112}; 115};
113 116
114struct ntb_rx_info { 117struct ntb_rx_info {
@@ -259,6 +262,12 @@ enum {
259static void ntb_transport_rxc_db(unsigned long data); 262static void ntb_transport_rxc_db(unsigned long data);
260static const struct ntb_ctx_ops ntb_transport_ops; 263static const struct ntb_ctx_ops ntb_transport_ops;
261static struct ntb_client ntb_transport_client; 264static struct ntb_client ntb_transport_client;
265static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
266 struct ntb_queue_entry *entry);
267static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
268static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
269static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
270
262 271
263static int ntb_transport_bus_match(struct device *dev, 272static int ntb_transport_bus_match(struct device *dev,
264 struct device_driver *drv) 273 struct device_driver *drv)
@@ -1229,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1229 break; 1238 break;
1230 1239
1231 entry->rx_hdr->flags = 0; 1240 entry->rx_hdr->flags = 0;
1232 iowrite32(entry->index, &qp->rx_info->entry); 1241 iowrite32(entry->rx_index, &qp->rx_info->entry);
1233 1242
1234 cb_data = entry->cb_data; 1243 cb_data = entry->cb_data;
1235 len = entry->len; 1244 len = entry->len;
@@ -1247,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1247 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); 1256 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1248} 1257}
1249 1258
1250static void ntb_rx_copy_callback(void *data) 1259static void ntb_rx_copy_callback(void *data,
1260 const struct dmaengine_result *res)
1251{ 1261{
1252 struct ntb_queue_entry *entry = data; 1262 struct ntb_queue_entry *entry = data;
1253 1263
 1264 	/* a result is only passed in on the DMA path; the CPU fallback passes NULL */
1265 if (res) {
1266 enum dmaengine_tx_result dma_err = res->result;
1267
1268 switch (dma_err) {
1269 case DMA_TRANS_READ_FAILED:
1270 case DMA_TRANS_WRITE_FAILED:
 1271 			entry->errors++; /* fall through */
1272 case DMA_TRANS_ABORTED:
1273 {
1274 struct ntb_transport_qp *qp = entry->qp;
1275 void *offset = qp->rx_buff + qp->rx_max_frame *
1276 qp->rx_index;
1277
1278 ntb_memcpy_rx(entry, offset);
1279 qp->rx_memcpy++;
1280 return;
1281 }
1282
1283 case DMA_TRANS_NOERROR:
1284 default:
1285 break;
1286 }
1287 }
1288
1254 entry->flags |= DESC_DONE_FLAG; 1289 entry->flags |= DESC_DONE_FLAG;
1255 1290
1256 ntb_complete_rxc(entry->qp); 1291 ntb_complete_rxc(entry->qp);
@@ -1266,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1266 /* Ensure that the data is fully copied out before clearing the flag */ 1301 /* Ensure that the data is fully copied out before clearing the flag */
1267 wmb(); 1302 wmb();
1268 1303
1269 ntb_rx_copy_callback(entry); 1304 ntb_rx_copy_callback(entry, NULL);
1270} 1305}
1271 1306
1272static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) 1307static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1273{ 1308{
1274 struct dma_async_tx_descriptor *txd; 1309 struct dma_async_tx_descriptor *txd;
1275 struct ntb_transport_qp *qp = entry->qp; 1310 struct ntb_transport_qp *qp = entry->qp;
@@ -1282,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1282 int retries = 0; 1317 int retries = 0;
1283 1318
1284 len = entry->len; 1319 len = entry->len;
1285
1286 if (!chan)
1287 goto err;
1288
1289 if (len < copy_bytes)
1290 goto err;
1291
1292 device = chan->device; 1320 device = chan->device;
1293 pay_off = (size_t)offset & ~PAGE_MASK; 1321 pay_off = (size_t)offset & ~PAGE_MASK;
1294 buff_off = (size_t)buf & ~PAGE_MASK; 1322 buff_off = (size_t)buf & ~PAGE_MASK;
@@ -1316,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1316 unmap->from_cnt = 1; 1344 unmap->from_cnt = 1;
1317 1345
1318 for (retries = 0; retries < DMA_RETRIES; retries++) { 1346 for (retries = 0; retries < DMA_RETRIES; retries++) {
1319 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], 1347 txd = device->device_prep_dma_memcpy(chan,
1348 unmap->addr[1],
1320 unmap->addr[0], len, 1349 unmap->addr[0], len,
1321 DMA_PREP_INTERRUPT); 1350 DMA_PREP_INTERRUPT);
1322 if (txd) 1351 if (txd)
@@ -1331,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1331 goto err_get_unmap; 1360 goto err_get_unmap;
1332 } 1361 }
1333 1362
1334 txd->callback = ntb_rx_copy_callback; 1363 txd->callback_result = ntb_rx_copy_callback;
1335 txd->callback_param = entry; 1364 txd->callback_param = entry;
1336 dma_set_unmap(txd, unmap); 1365 dma_set_unmap(txd, unmap);
1337 1366
@@ -1345,13 +1374,38 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1345 1374
1346 qp->rx_async++; 1375 qp->rx_async++;
1347 1376
1348 return; 1377 return 0;
1349 1378
1350err_set_unmap: 1379err_set_unmap:
1351 dmaengine_unmap_put(unmap); 1380 dmaengine_unmap_put(unmap);
1352err_get_unmap: 1381err_get_unmap:
1353 dmaengine_unmap_put(unmap); 1382 dmaengine_unmap_put(unmap);
1354err: 1383err:
1384 return -ENXIO;
1385}
1386
1387static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1388{
1389 struct ntb_transport_qp *qp = entry->qp;
1390 struct dma_chan *chan = qp->rx_dma_chan;
1391 int res;
1392
1393 if (!chan)
1394 goto err;
1395
1396 if (entry->len < copy_bytes)
1397 goto err;
1398
1399 res = ntb_async_rx_submit(entry, offset);
1400 if (res < 0)
1401 goto err;
1402
1403 if (!entry->retries)
1404 qp->rx_async++;
1405
1406 return;
1407
1408err:
1355 ntb_memcpy_rx(entry, offset); 1409 ntb_memcpy_rx(entry, offset);
1356 qp->rx_memcpy++; 1410 qp->rx_memcpy++;
1357} 1411}
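
[Editor's note] Both the RX path above and the TX path below are refactored along the same split: an *_submit() helper that talks only to the DMA engine and returns an error code, and a thin wrapper that owns the CPU-memcpy fallback. Condensed to its skeleton (hypothetical names, for illustration only):

	static void queue_copy(struct ctx *c, struct entry *e, void *offset)
	{
		/* fall back to the CPU when there is no channel, the
		 * transfer is too small to be worth the DMA setup cost,
		 * or submission to the engine fails */
		if (!c->chan || e->len < copy_bytes ||
		    submit_via_dma(c, e, offset) < 0) {
			memcpy_via_cpu(e, offset);
			c->cpu_copies++;
		}
	}
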
@@ -1397,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1397 } 1451 }
1398 1452
1399 entry->rx_hdr = hdr; 1453 entry->rx_hdr = hdr;
1400 entry->index = qp->rx_index; 1454 entry->rx_index = qp->rx_index;
1401 1455
1402 if (hdr->len > entry->len) { 1456 if (hdr->len > entry->len) {
1403 dev_dbg(&qp->ndev->pdev->dev, 1457 dev_dbg(&qp->ndev->pdev->dev,
@@ -1467,12 +1521,39 @@ static void ntb_transport_rxc_db(unsigned long data)
1467 } 1521 }
1468} 1522}
1469 1523
1470static void ntb_tx_copy_callback(void *data) 1524static void ntb_tx_copy_callback(void *data,
1525 const struct dmaengine_result *res)
1471{ 1526{
1472 struct ntb_queue_entry *entry = data; 1527 struct ntb_queue_entry *entry = data;
1473 struct ntb_transport_qp *qp = entry->qp; 1528 struct ntb_transport_qp *qp = entry->qp;
1474 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; 1529 struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1475 1530
 1531 	/* a result is only passed in on the DMA path; the CPU fallback passes NULL */
1532 if (res) {
1533 enum dmaengine_tx_result dma_err = res->result;
1534
1535 switch (dma_err) {
1536 case DMA_TRANS_READ_FAILED:
1537 case DMA_TRANS_WRITE_FAILED:
 1538 			entry->errors++; /* fall through */
1539 case DMA_TRANS_ABORTED:
1540 {
1541 void __iomem *offset =
1542 qp->tx_mw + qp->tx_max_frame *
1543 entry->tx_index;
1544
1545 /* resubmit via CPU */
1546 ntb_memcpy_tx(entry, offset);
1547 qp->tx_memcpy++;
1548 return;
1549 }
1550
1551 case DMA_TRANS_NOERROR:
1552 default:
1553 break;
1554 }
1555 }
1556
1476 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); 1557 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1477 1558
1478 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); 1559 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
@@ -1507,40 +1588,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
1507 /* Ensure that the data is fully copied out before setting the flags */ 1588 /* Ensure that the data is fully copied out before setting the flags */
1508 wmb(); 1589 wmb();
1509 1590
1510 ntb_tx_copy_callback(entry); 1591 ntb_tx_copy_callback(entry, NULL);
1511} 1592}
1512 1593
1513static void ntb_async_tx(struct ntb_transport_qp *qp, 1594static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1514 struct ntb_queue_entry *entry) 1595 struct ntb_queue_entry *entry)
1515{ 1596{
1516 struct ntb_payload_header __iomem *hdr;
1517 struct dma_async_tx_descriptor *txd; 1597 struct dma_async_tx_descriptor *txd;
1518 struct dma_chan *chan = qp->tx_dma_chan; 1598 struct dma_chan *chan = qp->tx_dma_chan;
1519 struct dma_device *device; 1599 struct dma_device *device;
1600 size_t len = entry->len;
1601 void *buf = entry->buf;
1520 size_t dest_off, buff_off; 1602 size_t dest_off, buff_off;
1521 struct dmaengine_unmap_data *unmap; 1603 struct dmaengine_unmap_data *unmap;
1522 dma_addr_t dest; 1604 dma_addr_t dest;
1523 dma_cookie_t cookie; 1605 dma_cookie_t cookie;
1524 void __iomem *offset;
1525 size_t len = entry->len;
1526 void *buf = entry->buf;
1527 int retries = 0; 1606 int retries = 0;
1528 1607
1529 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1530 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1531 entry->tx_hdr = hdr;
1532
1533 iowrite32(entry->len, &hdr->len);
1534 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1535
1536 if (!chan)
1537 goto err;
1538
1539 if (len < copy_bytes)
1540 goto err;
1541
1542 device = chan->device; 1608 device = chan->device;
1543 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; 1609 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
1544 buff_off = (size_t)buf & ~PAGE_MASK; 1610 buff_off = (size_t)buf & ~PAGE_MASK;
1545 dest_off = (size_t)dest & ~PAGE_MASK; 1611 dest_off = (size_t)dest & ~PAGE_MASK;
1546 1612
@@ -1560,8 +1626,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1560 unmap->to_cnt = 1; 1626 unmap->to_cnt = 1;
1561 1627
1562 for (retries = 0; retries < DMA_RETRIES; retries++) { 1628 for (retries = 0; retries < DMA_RETRIES; retries++) {
1563 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], 1629 txd = device->device_prep_dma_memcpy(chan, dest,
1564 len, DMA_PREP_INTERRUPT); 1630 unmap->addr[0], len,
1631 DMA_PREP_INTERRUPT);
1565 if (txd) 1632 if (txd)
1566 break; 1633 break;
1567 1634
@@ -1574,7 +1641,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1574 goto err_get_unmap; 1641 goto err_get_unmap;
1575 } 1642 }
1576 1643
1577 txd->callback = ntb_tx_copy_callback; 1644 txd->callback_result = ntb_tx_copy_callback;
1578 txd->callback_param = entry; 1645 txd->callback_param = entry;
1579 dma_set_unmap(txd, unmap); 1646 dma_set_unmap(txd, unmap);
1580 1647
@@ -1585,14 +1652,48 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
1585 dmaengine_unmap_put(unmap); 1652 dmaengine_unmap_put(unmap);
1586 1653
1587 dma_async_issue_pending(chan); 1654 dma_async_issue_pending(chan);
1588 qp->tx_async++;
1589 1655
1590 return; 1656 return 0;
1591err_set_unmap: 1657err_set_unmap:
1592 dmaengine_unmap_put(unmap); 1658 dmaengine_unmap_put(unmap);
1593err_get_unmap: 1659err_get_unmap:
1594 dmaengine_unmap_put(unmap); 1660 dmaengine_unmap_put(unmap);
1595err: 1661err:
1662 return -ENXIO;
1663}
1664
1665static void ntb_async_tx(struct ntb_transport_qp *qp,
1666 struct ntb_queue_entry *entry)
1667{
1668 struct ntb_payload_header __iomem *hdr;
1669 struct dma_chan *chan = qp->tx_dma_chan;
1670 void __iomem *offset;
1671 int res;
1672
1673 entry->tx_index = qp->tx_index;
1674 offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
1675 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1676 entry->tx_hdr = hdr;
1677
1678 iowrite32(entry->len, &hdr->len);
1679 iowrite32((u32)qp->tx_pkts, &hdr->ver);
1680
1681 if (!chan)
1682 goto err;
1683
1684 if (entry->len < copy_bytes)
1685 goto err;
1686
1687 res = ntb_async_tx_submit(qp, entry);
1688 if (res < 0)
1689 goto err;
1690
1691 if (!entry->retries)
1692 qp->tx_async++;
1693
1694 return;
1695
1696err:
1596 ntb_memcpy_tx(entry, offset); 1697 ntb_memcpy_tx(entry, offset);
1597 qp->tx_memcpy++; 1698 qp->tx_memcpy++;
1598} 1699}
@@ -1928,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1928 entry->buf = data; 2029 entry->buf = data;
1929 entry->len = len; 2030 entry->len = len;
1930 entry->flags = 0; 2031 entry->flags = 0;
2032 entry->retries = 0;
2033 entry->errors = 0;
2034 entry->rx_index = 0;
1931 2035
1932 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); 2036 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
1933 2037
@@ -1970,6 +2074,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1970 entry->buf = data; 2074 entry->buf = data;
1971 entry->len = len; 2075 entry->len = len;
1972 entry->flags = 0; 2076 entry->flags = 0;
2077 entry->errors = 0;
2078 entry->retries = 0;
2079 entry->tx_index = 0;
1973 2080
1974 rc = ntb_process_tx(qp, entry); 2081 rc = ntb_process_tx(qp, entry);
1975 if (rc) 2082 if (rc)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 30de0197263a..cc535a478bae 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
441 441
442typedef void (*dma_async_tx_callback)(void *dma_async_param); 442typedef void (*dma_async_tx_callback)(void *dma_async_param);
443 443
444enum dmaengine_tx_result {
445 DMA_TRANS_NOERROR = 0, /* SUCCESS */
446 DMA_TRANS_READ_FAILED, /* Source DMA read failed */
447 DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
448 DMA_TRANS_ABORTED, /* Op never submitted / aborted */
449};
450
451struct dmaengine_result {
452 enum dmaengine_tx_result result;
453 u32 residue;
454};
455
456typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
457 const struct dmaengine_result *result);
458
444struct dmaengine_unmap_data { 459struct dmaengine_unmap_data {
445 u8 map_cnt; 460 u8 map_cnt;
446 u8 to_cnt; 461 u8 to_cnt;
@@ -478,6 +493,7 @@ struct dma_async_tx_descriptor {
478 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 493 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
479 int (*desc_free)(struct dma_async_tx_descriptor *tx); 494 int (*desc_free)(struct dma_async_tx_descriptor *tx);
480 dma_async_tx_callback callback; 495 dma_async_tx_callback callback;
496 dma_async_tx_callback_result callback_result;
481 void *callback_param; 497 void *callback_param;
482 struct dmaengine_unmap_data *unmap; 498 struct dmaengine_unmap_data *unmap;
483#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 499#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
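
[Editor's note] With callback_result added to struct dma_async_tx_descriptor, a provider that detects a failed transfer can report status instead of silently firing the legacy callback. Schematically, assuming the helpers sketched earlier, with residue counting the bytes not transferred (names of locals are illustrative):

	struct dmaengine_result res = {
		.result  = DMA_TRANS_READ_FAILED,
		.residue = bytes_not_transferred,	/* hypothetical local */
	};

	dmaengine_desc_get_callback_invoke(txd, &res);
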