author     Vinod Koul <vinod.koul@intel.com>   2013-10-30 06:12:19 -0400
committer  Vinod Koul <vinod.koul@intel.com>   2013-10-30 06:12:19 -0400
commit     f0dad6e701cb66301287074c39183c7312139530 (patch)
tree       b4a38d8dcc49206fcedae7192e898df1011a6810
parent     b967aecf1714c10d1e6c045e43b6385884f1ca77 (diff)
parent     7db5f7274a0b065abdc358be2a44b4a911d75707 (diff)
Merge branch 'dma_complete' into next
-rw-r--r--  crypto/async_tx/async_tx.c       4
-rw-r--r--  drivers/dma/amba-pl08x.c         4
-rw-r--r--  drivers/dma/at_hdmac.c           2
-rw-r--r--  drivers/dma/coh901318.c          2
-rw-r--r--  drivers/dma/cppi41.c             2
-rw-r--r--  drivers/dma/dma-jz4740.c         2
-rw-r--r--  drivers/dma/dmaengine.c          2
-rw-r--r--  drivers/dma/dmatest.c            2
-rw-r--r--  drivers/dma/dw/core.c            4
-rw-r--r--  drivers/dma/edma.c               2
-rw-r--r--  drivers/dma/imx-dma.c            2
-rw-r--r--  drivers/dma/imx-sdma.c           2
-rw-r--r--  drivers/dma/intel_mid_dma.c      4
-rw-r--r--  drivers/dma/ioat/dma.c           4
-rw-r--r--  drivers/dma/ioat/dma_v3.c        8
-rw-r--r--  drivers/dma/iop-adma.c           16
-rw-r--r--  drivers/dma/k3dma.c              2
-rw-r--r--  drivers/dma/mmp_tdma.c           6
-rw-r--r--  drivers/dma/mv_xor.c             6
-rw-r--r--  drivers/dma/mxs-dma.c            6
-rw-r--r--  drivers/dma/omap-dma.c           2
-rw-r--r--  drivers/dma/ppc4xx/adma.c        2
-rw-r--r--  drivers/dma/sa11x0-dma.c         2
-rw-r--r--  drivers/dma/sh/shdma-base.c      2
-rw-r--r--  drivers/dma/ste_dma40.c          2
-rw-r--r--  drivers/dma/tegra20-apb-dma.c    4
-rw-r--r--  drivers/dma/txx9dmac.c           4
-rw-r--r--  drivers/tty/serial/sh-sci.c      2
-rw-r--r--  include/linux/dmaengine.h        12
-rw-r--r--  net/ipv4/tcp.c                   4
30 files changed, 59 insertions, 59 deletions
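
The change merged here is mechanical: the dma_status value DMA_SUCCESS is renamed to DMA_COMPLETE, and every driver's tx_status path is updated to match. For orientation, a minimal sketch of the pattern being converted is shown below; the names foo_dma_tx_status and foo_get_residue are illustrative only and do not come from this diff.

static enum dma_status foo_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	enum dma_status ret;

	/* dma_cookie_status() reports DMA_COMPLETE once the cookie has retired */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
		return ret;

	/* still in flight (or paused): report how many bytes remain */
	dma_set_residue(txstate, foo_get_residue(chan));
	return ret;
}
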
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 7be34248b450..39ea4791a3c9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 		}
 		device->device_issue_pending(chan);
 	} else {
-		if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
 			panic("%s: DMA error waiting for depend_tx\n",
 			      __func__);
 		tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
 		 * we are referring to the correct operation
 		 */
 		BUG_ON(async_tx_test_ack(*tx));
-		if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+		if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
 			panic("%s: DMA error waiting for transaction\n",
 			      __func__);
 		async_tx_ack(*tx);
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9b5025777ac8..4ee6533108f6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1252,7 +1252,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 	size_t bytes = 0;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	/*
@@ -1267,7 +1267,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&plchan->vc.lock, flags);
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS) {
+	if (ret != DMA_COMPLETE) {
 		vd = vchan_find_desc(&plchan->vc, cookie);
 		if (vd) {
 			/* On the issued list, so hasn't been processed yet */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c787f38a186a..1ef74579447d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1102,7 +1102,7 @@ atc_tx_status(struct dma_chan *chan,
 	int bytes = 0;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 	/*
 	 * There's no point calculating the residue if there's
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 3025b9107af2..3c6716e0b78e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	dma_set_residue(txstate, coh901318_get_bytes_left(chan));
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 167c0223ae9e..278b3058919a 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -353,7 +353,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
 
 	/* lock */
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (txstate && ret == DMA_SUCCESS)
+	if (txstate && ret == DMA_COMPLETE)
 		txstate->residue = c->residue;
 	/* unlock */
 
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index b0c0c8268d42..94c380f07538 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
 	unsigned long flags;
 
 	status = dma_cookie_status(c, cookie, state);
-	if (status == DMA_SUCCESS || !state)
+	if (status == DMA_COMPLETE || !state)
 		return status;
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..81d876528c70 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1062,7 +1062,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
 	if (!tx)
-		return DMA_SUCCESS;
+		return DMA_COMPLETE;
 
 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 92f796cdc6ab..59e287f56dfc 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -740,7 +740,7 @@ static int dmatest_func(void *data)
 					  len, 0);
 			failed_tests++;
 			continue;
-		} else if (status != DMA_SUCCESS) {
+		} else if (status != DMA_COMPLETE) {
 			enum dmatest_error_type type = (status == DMA_ERROR) ?
 				DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
 			thread_result_add(info, result, type,
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 89eb89f22284..2c29331571e4 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1098,13 +1098,13 @@ dwc_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS)
+	if (ret != DMA_COMPLETE)
 		dma_set_residue(txstate, dwc_get_residue(dwc));
 
 	if (dwc->paused && ret == DMA_IN_PROGRESS)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 8c612415867e..5dce96af9c46 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -640,7 +640,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS || !txstate)
+	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
 	spin_lock_irqsave(&echan->vchan.lock, flags);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 55852c026791..2af4028cc23e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -771,7 +771,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
 		desc->desc.tx_submit = imxdma_tx_submit;
 		/* txd.flags will be overwritten in prep funcs */
 		desc->desc.flags = DMA_CTRL_ACK;
-		desc->status = DMA_SUCCESS;
+		desc->status = DMA_COMPLETE;
 
 		list_add_tail(&desc->node, &imxdmac->ld_free);
 		imxdmac->descs_allocated++;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fc43603cf0bb..0e03b3146b32 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 	if (error)
 		sdmac->status = DMA_ERROR;
 	else
-		sdmac->status = DMA_SUCCESS;
+		sdmac->status = DMA_COMPLETE;
 
 	dma_cookie_complete(&sdmac->desc);
 	if (sdmac->desc.callback)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a975ebebea8a..1aab8130efa1 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 		callback_txd(param_txd);
 	}
 	if (midc->raw_tfr) {
-		desc->status = DMA_SUCCESS;
+		desc->status = DMA_COMPLETE;
 		if (desc->lli != NULL) {
 			pci_pool_free(desc->lli_pool, desc->lli,
 				      desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS) {
+	if (ret != DMA_COMPLETE) {
 		spin_lock_bh(&midc->lock);
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
 		spin_unlock_bh(&midc->lock);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 5ff6fc1819dc..a0f0fce5a84e 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -733,7 +733,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	device->cleanup_fn((unsigned long) c);
@@ -859,7 +859,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 
 	if (tmo == 0 ||
 	    dma->device_tx_status(dma_chan, cookie, NULL)
-					!= DMA_SUCCESS) {
+					!= DMA_COMPLETE) {
 		dev_err(dev, "Self-test copy timed out, disabling\n");
 		err = -ENODEV;
 		goto unmap_dma;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d8ececaf1b57..806b4ce5e38c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -807,7 +807,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	ioat3_cleanup(ioat);
@@ -1468,7 +1468,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test xor timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
@@ -1530,7 +1530,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test validate timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
@@ -1577,7 +1577,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dev, "Self-test 2nd validate timed out\n");
 		err = -ENODEV;
 		goto dma_unmap;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index dd8b44a56e5d..408fe6be15f4 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -864,7 +864,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
 	int ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +983,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	msleep(1);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
-			DMA_SUCCESS) {
+			DMA_COMPLETE) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy timed out, disabling\n");
 		err = -ENODEV;
@@ -1083,7 +1083,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
-			DMA_SUCCESS) {
+			DMA_COMPLETE) {
 		dev_err(dma_chan->device->dev,
 			"Self-test xor timed out, disabling\n");
 		err = -ENODEV;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dma_chan->device->dev,
 			"Self-test zero sum timed out, disabling\n");
 		err = -ENODEV;
@@ -1158,7 +1158,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
 		dev_err(dma_chan->device->dev,
 			"Self-test non-zero sum timed out, disabling\n");
 		err = -ENODEV;
@@ -1254,7 +1254,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
-			DMA_SUCCESS) {
+			DMA_COMPLETE) {
 		dev_err(dev, "Self-test pq timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1291,7 +1291,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
-			DMA_SUCCESS) {
+			DMA_COMPLETE) {
 		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
@@ -1323,7 +1323,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	msleep(8);
 
 	if (iop_adma_status(dma_chan, cookie, NULL) !=
-			DMA_SUCCESS) {
+			DMA_COMPLETE) {
 		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index da430623fbc4..e26075408e9b 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
 	size_t bytes = 0;
 
 	ret = dma_cookie_status(&c->vc.chan, cookie, state);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 8f3e865053d4..2b4026d1f31d 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -163,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
 	/* disable irq */
 	writel(0, tdmac->reg_base + TDIMR);
 
-	tdmac->status = DMA_SUCCESS;
+	tdmac->status = DMA_COMPLETE;
 }
 
 static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -398,7 +398,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
 	int num_periods = buf_len / period_len;
 	int i = 0, buf = 0;
 
-	if (tdmac->status != DMA_SUCCESS)
+	if (tdmac->status != DMA_COMPLETE)
 		return NULL;
 
 	if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -532,7 +532,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
 	tdmac->idx	   = idx;
 	tdmac->type	   = type;
 	tdmac->reg_base	   = (unsigned long)tdev->base + idx * 4;
-	tdmac->status = DMA_SUCCESS;
+	tdmac->status = DMA_COMPLETE;
 	tdev->tdmac[tdmac->idx] = tdmac;
 	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 536dcb8ba5fd..8d5bce9e867e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -749,7 +749,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS) {
+	if (ret == DMA_COMPLETE) {
 		mv_xor_clean_completed_slots(mv_chan);
 		return ret;
 	}
@@ -874,7 +874,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	msleep(1);
 
 	if (mv_xor_status(dma_chan, cookie, NULL) !=
-	    DMA_SUCCESS) {
+	    DMA_COMPLETE) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy timed out, disabling\n");
 		err = -ENODEV;
@@ -968,7 +968,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	msleep(8);
 
 	if (mv_xor_status(dma_chan, cookie, NULL) !=
-	    DMA_SUCCESS) {
+	    DMA_COMPLETE) {
 		dev_err(dma_chan->device->dev,
 			"Self-test xor timed out, disabling\n");
 		err = -ENODEV;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ccd13df841db..7ab7cecc48a4 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -224,7 +224,7 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-	mxs_chan->status = DMA_SUCCESS;
+	mxs_chan->status = DMA_COMPLETE;
 }
 
 static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -312,12 +312,12 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
 		if (mxs_chan->flags & MXS_DMA_SG_LOOP)
 			mxs_chan->status = DMA_IN_PROGRESS;
 		else
-			mxs_chan->status = DMA_SUCCESS;
+			mxs_chan->status = DMA_COMPLETE;
 	}
 
 	stat1 &= ~(1 << channel);
 
-	if (mxs_chan->status == DMA_SUCCESS)
+	if (mxs_chan->status == DMA_COMPLETE)
 		dma_cookie_complete(&mxs_chan->desc);
 
 	/* schedule tasklet on this channel */
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd9160..2f66cf4e54fe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS || !txstate)
+	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 370ff8265630..60e02ae38b04 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3891,7 +3891,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
 
 	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	ppc440spe_adma_slot_cleanup(ppc440spe_chan);
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 461a91ab70bb..ab26d46bbe15 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(&c->vc.chan, cookie, state);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	if (!state)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1b..2e7b394def80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
 	 * If we don't find cookie on the queue, it has been aborted and we have
 	 * to report error
 	 */
-	if (status != DMA_SUCCESS) {
+	if (status != DMA_COMPLETE) {
 		struct shdma_desc *sdesc;
 		status = DMA_ERROR;
 		list_for_each_entry(sdesc, &schan->ld_queue, node)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 3d5e4ee94f5f..b8c031b7de4e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2627,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 	}
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS)
+	if (ret != DMA_COMPLETE)
 		dma_set_residue(txstate, stedma40_residue(chan));
 
 	if (d40_is_paused(d40c))
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 67a6752bf863..73654e33f13b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
 
 	list_del(&sgreq->node);
 	if (sgreq->last_sg) {
-		dma_desc->dma_status = DMA_SUCCESS;
+		dma_desc->dma_status = DMA_COMPLETE;
 		dma_cookie_complete(&dma_desc->txd);
 		if (!dma_desc->cb_count)
 			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	unsigned int residual;
 
 	ret = dma_cookie_status(dc, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	spin_lock_irqsave(&tdc->lock, flags);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 71e8e775189e..c2829b481bf2 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,8 +962,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
-		return DMA_SUCCESS;
+	if (ret == DMA_COMPLETE)
+		return DMA_COMPLETE;
 
 	spin_lock_bh(&dc->lock);
 	txx9dmac_scan_descriptors(dc);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 537750261aaa..7d8103cd3e2e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work)
 	desc = s->desc_rx[new];
 
 	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-	    DMA_SUCCESS) {
+	    DMA_COMPLETE) {
 		/* Handle incomplete DMA receive */
 		struct dma_chan *chan = s->chan_rx;
 		struct shdma_desc *sh_desc = container_of(desc,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc727534108..4b460a683968 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-	DMA_SUCCESS,
+	DMA_COMPLETE,
 	DMA_IN_PROGRESS,
 	DMA_PAUSED,
 	DMA_ERROR,
@@ -979,10 +979,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
 	if (last_complete <= last_used) {
 		if ((cookie <= last_complete) || (cookie > last_used))
-			return DMA_SUCCESS;
+			return DMA_COMPLETE;
 	} else {
 		if ((cookie <= last_complete) && (cookie > last_used))
-			return DMA_SUCCESS;
+			return DMA_COMPLETE;
 	}
 	return DMA_IN_PROGRESS;
 }
@@ -1013,11 +1013,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-	return DMA_SUCCESS;
+	return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	return DMA_SUCCESS;
+	return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6e5617b9f9db..d2652fb3232e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1429,7 +1429,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
 	do {
 		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
 					     last_issued, &done,
-					     &used) == DMA_SUCCESS) {
+					     &used) == DMA_COMPLETE) {
 			/* Safe to free early-copied skbs now */
 			__skb_queue_purge(&sk->sk_async_wait_queue);
 			break;
@@ -1437,7 +1437,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
 			struct sk_buff *skb;
 			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
 			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_SUCCESS)) {
+						      used) == DMA_COMPLETE)) {
 				__skb_dequeue(&sk->sk_async_wait_queue);
 				kfree_skb(skb);
 			}