author:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>  2013-10-18 13:35:33 -0400
committer: Dan Williams <dan.j.williams@intel.com>  2013-11-14 14:04:38 -0500
commit:    0776ae7b89782124ddd72eafe0b1e0fdcdabe32e (patch)
tree:      f16e917b66a8a60a7341937a40021d683f3e27f0
parent:    54f8d501e842879143e867e70996574a54d1e130 (diff)
dmaengine: remove DMA unmap flags
Remove no longer needed DMA unmap flags:
 - DMA_COMPL_SKIP_SRC_UNMAP
 - DMA_COMPL_SKIP_DEST_UNMAP
 - DMA_COMPL_SRC_UNMAP_SINGLE
 - DMA_COMPL_DEST_UNMAP_SINGLE

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Acked-by: Mark Brown <broonie@linaro.org>
[djbw: clean up straggling skip unmap flags in ntb]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
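These flags are obsolete because DMA-unmap bookkeeping now travels with the
descriptor in a reference-counted struct dmaengine_unmap_data, as the
async_memcpy and ntb hunks below illustrate. For orientation, a minimal sketch
of the replacement pattern, assuming a memcpy-capable channel; src_page,
dst_page, and the surrounding locals are illustrative, not taken from the
patch:

/* Unmap state is attached to the descriptor via a refcounted
 * dmaengine_unmap_data; no per-submit skip-unmap flags are needed.
 */
struct dmaengine_unmap_data *unmap;
struct dma_async_tx_descriptor *tx;

unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
if (!unmap)
        return -ENOMEM;

unmap->addr[0] = dma_map_page(device->dev, src_page, src_off,
                              len, DMA_TO_DEVICE);
unmap->to_cnt = 1;
unmap->addr[1] = dma_map_page(device->dev, dst_page, dst_off,
                              len, DMA_FROM_DEVICE);
unmap->from_cnt = 1;
unmap->len = len;

tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                    unmap->addr[0], len,
                                    DMA_PREP_INTERRUPT);
if (tx)
        dma_set_unmap(tx, unmap);  /* core unmaps on completion */
dmaengine_unmap_put(unmap);        /* drop the local reference */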
-rw-r--r--  crypto/async_tx/async_memcpy.c             3
-rw-r--r--  crypto/async_tx/async_pq.c                 1
-rw-r--r--  crypto/async_tx/async_raid6_recov.c        8
-rw-r--r--  crypto/async_tx/async_xor.c                6
-rw-r--r--  drivers/ata/pata_arasan_cf.c               3
-rw-r--r--  drivers/dma/dmaengine.c                    3
-rw-r--r--  drivers/dma/dmatest.c                      3
-rw-r--r--  drivers/dma/ioat/dma.c                     3
-rw-r--r--  drivers/dma/ioat/dma_v3.c                  12
-rw-r--r--  drivers/media/platform/m2m-deinterlace.c   3
-rw-r--r--  drivers/media/platform/timblogiw.c         2
-rw-r--r--  drivers/misc/carma/carma-fpga.c            3
-rw-r--r--  drivers/mtd/nand/atmel_nand.c              3
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c               2
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c       6
-rw-r--r--  drivers/ntb/ntb_transport.c                11
-rw-r--r--  drivers/spi/spi-dw-mid.c                   4
-rw-r--r--  include/linux/dmaengine.h                  18
18 files changed, 27 insertions, 67 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 72750214f779..f8c0b8dbeb75 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -56,8 +56,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
         unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
 
         if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-                unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                               DMA_COMPL_SKIP_DEST_UNMAP;
+                unsigned long dma_prep_flags = 0;
 
                 if (submit->cb_fn)
                         dma_prep_flags |= DMA_PREP_INTERRUPT;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 4126b56fbc01..d05327caf69d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,7 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
         dma_addr_t dma_dest[2];
         int src_off = 0;
 
-        dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
         if (submit->flags & ASYNC_TX_FENCE)
                 dma_flags |= DMA_PREP_FENCE;
 
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a3a72a784421..934a84981495 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -47,9 +47,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
         struct device *dev = dma->dev;
         dma_addr_t pq[2];
         struct dma_async_tx_descriptor *tx;
-        enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                        DMA_COMPL_SKIP_DEST_UNMAP |
-                                        DMA_PREP_PQ_DISABLE_P;
+        enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
         if (submit->flags & ASYNC_TX_FENCE)
                 dma_flags |= DMA_PREP_FENCE;
@@ -113,9 +111,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
         dma_addr_t dma_dest[2];
         struct device *dev = dma->dev;
         struct dma_async_tx_descriptor *tx;
-        enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                        DMA_COMPL_SKIP_DEST_UNMAP |
-                                        DMA_PREP_PQ_DISABLE_P;
+        enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
         if (submit->flags & ASYNC_TX_FENCE)
                 dma_flags |= DMA_PREP_FENCE;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index d2cc77d501c7..3c562f5a60bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -41,7 +41,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
         dma_async_tx_callback cb_fn_orig = submit->cb_fn;
         void *cb_param_orig = submit->cb_param;
         enum async_tx_flags flags_orig = submit->flags;
-        enum dma_ctrl_flags dma_flags;
+        enum dma_ctrl_flags dma_flags = 0;
         int src_cnt = unmap->to_cnt;
         int xor_src_cnt;
         dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
@@ -55,7 +55,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
         /* if we are submitting additional xors, leave the chain open
          * and clear the callback parameters
          */
-        dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
         if (src_cnt > xor_src_cnt) {
                 submit->flags &= ~ASYNC_TX_ACK;
                 submit->flags |= ASYNC_TX_FENCE;
@@ -284,8 +283,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 
         if (unmap && src_cnt <= device->max_xor &&
             is_dma_xor_aligned(device, offset, 0, len)) {
-                unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                               DMA_COMPL_SKIP_DEST_UNMAP;
+                unsigned long dma_prep_flags = 0;
                 int i;
 
                 pr_debug("%s: (async) len: %zu\n", __func__, len);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 853f610af28f..e88690ebfd82 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
         struct dma_async_tx_descriptor *tx;
         struct dma_chan *chan = acdev->dma_chan;
         dma_cookie_t cookie;
-        unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        unsigned long flags = DMA_PREP_INTERRUPT;
         int ret = 0;
 
         tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f878c808466e..b69ac3892b86 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1065,8 +1065,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
         unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                       DMA_FROM_DEVICE);
         unmap->len = len;
-        flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        flags = DMA_CTRL_ACK;
         tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                          len, flags);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f4a2a25fae31..5791091c13ca 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -599,8 +599,7 @@ static int dmatest_func(void *data)
         /*
          * src and dst buffers are freed by ourselves below
          */
-        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
-                DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         while (!kthread_should_stop()
                && !(params->iterations && total_tests >= params->iterations)) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c123e32dbbb0..6fcf741ad91b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -818,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 
         dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
         dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-                DMA_PREP_INTERRUPT;
+        flags = DMA_PREP_INTERRUPT;
         tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                    IOAT_TEST_SIZE, flags);
         if (!tx) {
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 43386c171bba..a4798f0cc225 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1279,9 +1279,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                            DMA_TO_DEVICE);
         tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                       IOAT_NUM_SRC_TEST, PAGE_SIZE,
-                                      DMA_PREP_INTERRUPT |
-                                      DMA_COMPL_SKIP_SRC_UNMAP |
-                                      DMA_COMPL_SKIP_DEST_UNMAP);
+                                      DMA_PREP_INTERRUPT);
 
         if (!tx) {
                 dev_err(dev, "Self-test xor prep failed\n");
@@ -1342,9 +1340,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                            DMA_TO_DEVICE);
         tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                           IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-                                          &xor_val_result, DMA_PREP_INTERRUPT |
-                                          DMA_COMPL_SKIP_SRC_UNMAP |
-                                          DMA_COMPL_SKIP_DEST_UNMAP);
+                                          &xor_val_result, DMA_PREP_INTERRUPT);
         if (!tx) {
                 dev_err(dev, "Self-test zero prep failed\n");
                 err = -ENODEV;
@@ -1389,9 +1385,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                            DMA_TO_DEVICE);
         tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                           IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-                                          &xor_val_result, DMA_PREP_INTERRUPT |
-                                          DMA_COMPL_SKIP_SRC_UNMAP |
-                                          DMA_COMPL_SKIP_DEST_UNMAP);
+                                          &xor_val_result, DMA_PREP_INTERRUPT);
         if (!tx) {
                 dev_err(dev, "Self-test 2nd zero prep failed\n");
                 err = -ENODEV;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 540516ca872c..879ea6fdd1be 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
         ctx->xt->dir = DMA_MEM_TO_MEM;
         ctx->xt->src_sgl = false;
         ctx->xt->dst_sgl = true;
-        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
-                DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
         if (tx == NULL) {
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index b557caf5b1a4..59a95e3ab0e3 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
 
         desc = dmaengine_prep_slave_sg(fh->chan,
                 buf->sg, sg_elems, DMA_DEV_TO_MEM,
-                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                DMA_PREP_INTERRUPT);
         if (!desc) {
                 spin_lock_irq(&fh->queue_lock);
                 list_del_init(&vb->queue);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 7b56563f8b74..5335104e7c84 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -631,8 +631,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
         struct dma_async_tx_descriptor *tx;
         dma_cookie_t cookie;
         dma_addr_t dst, src;
-        unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
-                                  DMA_COMPL_SKIP_SRC_UNMAP;
+        unsigned long dma_flags = 0;
 
         dst_sg = buf->vb.sglist;
         dst_nents = buf->vb.sglen;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 060feeaf6b3e..2a837cb425d7 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
 
         dma_dev = host->dma_chan->device;
 
-        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
         if (dma_mapping_error(dma_dev->dev, phys_addr)) {
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 3dc1a7564d87..8b2752263db9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
         dma_dev = chan->device;
         dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
 
-        flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
-
         if (direction == DMA_TO_DEVICE) {
                 dma_src = dma_addr;
                 dma_dst = host->data_pa;
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0951f7aca1ef..822616e3c375 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
                 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
         ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
-                &ctl->sg, 1, DMA_MEM_TO_DEV,
-                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
         if (!ctl->adesc)
                 return NETDEV_TX_BUSY;
 
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
         sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
         ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
-                sg, 1, DMA_DEV_TO_MEM,
-                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
         if (!ctl->adesc)
                 goto out;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 222c2baa3a4b..d0222f13d154 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1037,7 +1037,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
         struct dmaengine_unmap_data *unmap;
         dma_cookie_t cookie;
         void *buf = entry->buf;
-        unsigned long flags;
 
         entry->len = len;
 
@@ -1073,10 +1072,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 
         unmap->from_cnt = 1;
 
-        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-                DMA_PREP_INTERRUPT;
         txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
-                                             unmap->addr[0], len, flags);
+                                             unmap->addr[0], len,
+                                             DMA_PREP_INTERRUPT);
         if (!txd)
                 goto err_get_unmap;
 
@@ -1266,7 +1264,6 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
         void __iomem *offset;
         size_t len = entry->len;
         void *buf = entry->buf;
-        unsigned long flags;
 
         offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
         hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1301,10 +1298,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 
         unmap->to_cnt = 1;
 
-        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-                DMA_PREP_INTERRUPT;
         txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
-                                             flags);
+                                             DMA_PREP_INTERRUPT);
         if (!txd)
                 goto err_get_unmap;
 
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192758d6..6d207afec8cb 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                                 &dws->tx_sgl,
                                 1,
                                 DMA_MEM_TO_DEV,
-                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+                                DMA_PREP_INTERRUPT);
         txdesc->callback = dw_spi_dma_done;
         txdesc->callback_param = dws;
 
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                                 &dws->rx_sgl,
                                 1,
                                 DMA_DEV_TO_MEM,
-                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+                                DMA_PREP_INTERRUPT);
         rxdesc->callback = dw_spi_dma_done;
         rxdesc->callback_param = dws;
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3782cdb782a8..491072cb5ba0 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *  (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *  (if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
         DMA_PREP_INTERRUPT = (1 << 0),
         DMA_CTRL_ACK = (1 << 1),
-        DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-        DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-        DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-        DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-        DMA_PREP_PQ_DISABLE_P = (1 << 6),
-        DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-        DMA_PREP_CONTINUE = (1 << 8),
-        DMA_PREP_FENCE = (1 << 9),
+        DMA_PREP_PQ_DISABLE_P = (1 << 2),
+        DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+        DMA_PREP_CONTINUE = (1 << 4),
+        DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
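Note that the surviving DMA_PREP_* flags keep their names but move from bits
6-9 down to bits 2-5; since in-tree users reference them symbolically, only
the numeric values change. A hedged compile-time check of the new values
(hypothetical, not part of the patch):

#include <linux/bug.h>
#include <linux/dmaengine.h>

/* After this patch the remaining control flags occupy bits 2..5. */
static inline void dma_ctrl_flags_check(void)
{
        BUILD_BUG_ON(DMA_PREP_PQ_DISABLE_P != (1 << 2));
        BUILD_BUG_ON(DMA_PREP_PQ_DISABLE_Q != (1 << 3));
        BUILD_BUG_ON(DMA_PREP_CONTINUE != (1 << 4));
        BUILD_BUG_ON(DMA_PREP_FENCE != (1 << 5));
}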