-rw-r--r--  Documentation/crypto/async-tx-api.txt    1
-rw-r--r--  arch/arm/mach-iop13xx/setup.c            3
-rw-r--r--  arch/arm/plat-iop/adma.c                 2
-rw-r--r--  arch/arm/plat-orion/common.c            10
-rw-r--r--  crypto/async_tx/Kconfig                  4
-rw-r--r--  crypto/async_tx/Makefile                 1
-rw-r--r--  crypto/async_tx/async_memset.c          89
-rw-r--r--  drivers/dma/dmaengine.c                  7
-rw-r--r--  drivers/dma/ioat/dma.c                   3
-rw-r--r--  drivers/dma/ioat/dma_v2.h                1
-rw-r--r--  drivers/dma/ioat/dma_v3.c              114
-rw-r--r--  drivers/dma/ioat/hw.h                   27
-rw-r--r--  drivers/dma/iop-adma.c                  66
-rw-r--r--  drivers/dma/mv_xor.c                    85
-rw-r--r--  drivers/dma/mv_xor.h                     1
-rw-r--r--  drivers/dma/ppc4xx/adma.c               47
-rw-r--r--  include/linux/async_tx.h                 4
-rw-r--r--  include/linux/dmaengine.h                5
18 files changed, 8 insertions, 462 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index ba046b8fa92f..7bf1be20d93a 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -222,5 +222,4 @@ drivers/dma/: location for offload engine drivers
 include/linux/async_tx.h: core header file for the async_tx api
 crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
 crypto/async_tx/async_memcpy.c: copy offload
-crypto/async_tx/async_memset.c: memory fill offload
 crypto/async_tx/async_xor.c: xor and xor zero sum offload
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 3181f61ea63e..1c5bd7637b05 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -469,7 +469,6 @@ void __init iop13xx_platform_init(void)
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
-		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		break;
 	case IOP13XX_INIT_ADMA_1:
@@ -479,7 +478,6 @@ void __init iop13xx_platform_init(void)
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
-		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		break;
 	case IOP13XX_INIT_ADMA_2:
@@ -489,7 +487,6 @@ void __init iop13xx_platform_init(void)
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
-		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		dma_cap_set(DMA_PQ, plat_data->cap_mask);
 		dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index 1ff6a37e893c..a4d1f8de3b5b 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -192,12 +192,10 @@ static int __init iop3xx_adma_cap_init(void)
 
 #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #else
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #endif
 
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index c019b7aaf776..c66d163d7a2a 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -666,14 +666,9 @@ void __init orion_xor0_init(unsigned long mapbase_low,
 	orion_xor0_shared_resources[3].start = irq_1;
 	orion_xor0_shared_resources[3].end = irq_1;
 
-	/*
-	 * two engines can't do memset simultaneously, this limitation
-	 * satisfied by removing memset support from one of the engines.
-	 */
 	dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask);
 
-	dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask);
 	dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask);
 
@@ -732,14 +727,9 @@ void __init orion_xor1_init(unsigned long mapbase_low,
 	orion_xor1_shared_resources[3].start = irq_1;
 	orion_xor1_shared_resources[3].end = irq_1;
 
-	/*
-	 * two engines can't do memset simultaneously, this limitation
-	 * satisfied by removing memset support from one of the engines.
-	 */
 	dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask);
 
-	dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask);
 	dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask);
 
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index 1b11abbb5c91..f38a58aef3ec 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -10,10 +10,6 @@ config ASYNC_XOR
 	select ASYNC_CORE
 	select XOR_BLOCKS
 
-config ASYNC_MEMSET
-	tristate
-	select ASYNC_CORE
-
 config ASYNC_PQ
 	tristate
 	select ASYNC_CORE
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile
index d1e0e6f72bc1..462e4abbfe69 100644
--- a/crypto/async_tx/Makefile
+++ b/crypto/async_tx/Makefile
@@ -1,6 +1,5 @@
 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
 obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
-obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
 obj-$(CONFIG_ASYNC_XOR) += async_xor.o
 obj-$(CONFIG_ASYNC_PQ) += async_pq.o
 obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
deleted file mode 100644
index 05a4d1e00148..000000000000
--- a/crypto/async_tx/async_memset.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * memory fill offload engine support
- *
- * Copyright © 2006, Intel Corporation.
- *
- * Dan Williams <dan.j.williams@intel.com>
- *
- * with architecture considerations by:
- * Neil Brown <neilb@suse.de>
- * Jeff Garzik <jeff@garzik.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/async_tx.h>
-
-/**
- * async_memset - attempt to fill memory with a dma engine.
- * @dest: destination page
- * @val: fill value
- * @offset: offset in pages to start transaction
- * @len: length in bytes
- *
- * honored flags: ASYNC_TX_ACK
- */
-struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset, size_t len,
-	     struct async_submit_ctl *submit)
-{
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
-						      &dest, 1, NULL, 0, len);
-	struct dma_device *device = chan ? chan->device : NULL;
-	struct dma_async_tx_descriptor *tx = NULL;
-
-	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
-		dma_addr_t dma_dest;
-		unsigned long dma_prep_flags = 0;
-
-		if (submit->cb_fn)
-			dma_prep_flags |= DMA_PREP_INTERRUPT;
-		if (submit->flags & ASYNC_TX_FENCE)
-			dma_prep_flags |= DMA_PREP_FENCE;
-		dma_dest = dma_map_page(device->dev, dest, offset, len,
-					DMA_FROM_DEVICE);
-
-		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
-						    dma_prep_flags);
-	}
-
-	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, submit);
-	} else { /* run the memset synchronously */
-		void *dest_buf;
-		pr_debug("%s: (sync) len: %zu\n", __func__, len);
-
-		dest_buf = page_address(dest) + offset;
-
-		/* wait for any prerequisite operations */
-		async_tx_quiesce(&submit->depend_tx);
-
-		memset(dest_buf, val, len);
-
-		async_tx_sync_epilog(submit);
-	}
-
-	return tx;
-}
-EXPORT_SYMBOL_GPL(async_memset);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("asynchronous memset api");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 93f7992bee5c..9e56745f87bf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -663,11 +663,6 @@ static bool device_has_all_tx_types(struct dma_device *device)
 		return false;
 #endif
 
-#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
-	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
-		return false;
-#endif
-
 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
@@ -729,8 +724,6 @@ int dma_async_device_register(struct dma_device *device)
 	       !device->device_prep_dma_pq);
 	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 	       !device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-	       !device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 	       !device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 17a2393b3e25..5ff6fc1819dc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1105,12 +1105,11 @@ static ssize_t cap_show(struct dma_chan *c, char *page)
 {
 	struct dma_device *dma = c->device;
 
-	return sprintf(page, "copy%s%s%s%s%s%s\n",
+	return sprintf(page, "copy%s%s%s%s%s\n",
 		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
 		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
 		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
 		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
-		       dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
 		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
 
 }
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 29bf9448035d..212d584fe427 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -123,7 +123,6 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len
 struct ioat_ring_ent {
 	union {
 		struct ioat_dma_descriptor *hw;
-		struct ioat_fill_descriptor *fill;
 		struct ioat_xor_descriptor *xor;
 		struct ioat_xor_ext_descriptor *xor_ex;
 		struct ioat_pq_descriptor *pq;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index ca6ea9b3551b..b642e035579b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -311,14 +311,6 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
 		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
 			ioat_dma_unmap(chan, flags, len, desc->hw);
 		break;
-	case IOAT_OP_FILL: {
-		struct ioat_fill_descriptor *hw = desc->fill;
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-			ioat_unmap(pdev, hw->dst_addr - offset, len,
-				   PCI_DMA_FROMDEVICE, flags, 1);
-		break;
-	}
 	case IOAT_OP_XOR_VAL:
 	case IOAT_OP_XOR: {
 		struct ioat_xor_descriptor *xor = desc->xor;
@@ -824,51 +816,6 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 }
 
 static struct dma_async_tx_descriptor *
-ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *desc;
-	size_t total_len = len;
-	struct ioat_fill_descriptor *fill;
-	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		fill = desc->fill;
-
-		fill->size = xfer_size;
-		fill->src_data = src_data;
-		fill->dst_addr = dest;
-		fill->ctl = 0;
-		fill->ctl_f.op = IOAT_OP_FILL;
-
-		len -= xfer_size;
-		dest += xfer_size;
-		dump_desc_dbg(ioat, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	fill->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
 __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
 		      size_t len, unsigned long flags)
@@ -1431,7 +1378,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
 	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
 	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dma_addr, dest_dma;
+	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
@@ -1598,56 +1545,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto free_resources;
 	}
 
-	/* skip memset if the capability is not present */
-	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	/* test memset */
-	op = IOAT_OP_FILL;
-
-	dma_addr = dma_map_page(dev, dest, 0,
-			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-					 DMA_PREP_INTERRUPT |
-					 DMA_COMPL_SKIP_SRC_UNMAP |
-					 DMA_COMPL_SKIP_DEST_UNMAP);
-	if (!tx) {
-		dev_err(dev, "Self-test memset prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test memset setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_err(dev, "Self-test memset timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i]) {
-			dev_err(dev, "Self-test memset failed compare\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-
 	/* test for non-zero parity sum */
 	op = IOAT_OP_XOR_VAL;
 
@@ -1706,8 +1603,7 @@ dma_unmap:
 		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
 			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
 				       DMA_TO_DEVICE);
-	} else if (op == IOAT_OP_FILL)
-		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	}
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
@@ -1944,12 +1840,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		}
 	}
 
-	if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
-		dma_cap_set(DMA_MEMSET, dma->cap_mask);
-		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
-	}
-
-
 	dma->device_tx_status = ioat3_tx_status;
 	device->cleanup_fn = ioat3_cleanup_event;
 	device->timer_fn = ioat3_timer_event;
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 5ee57d402a6e..62f83e983d8d 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -100,33 +100,6 @@ struct ioat_dma_descriptor {
 	uint64_t user2;
 };
 
-struct ioat_fill_descriptor {
-	uint32_t size;
-	union {
-		uint32_t ctl;
-		struct {
-			unsigned int int_en:1;
-			unsigned int rsvd:1;
-			unsigned int dest_snoop_dis:1;
-			unsigned int compl_write:1;
-			unsigned int fence:1;
-			unsigned int rsvd2:2;
-			unsigned int dest_brk:1;
-			unsigned int bundle:1;
-			unsigned int rsvd4:15;
-	#define IOAT_OP_FILL 0x01
-			unsigned int op:8;
-		} ctl_f;
-	};
-	uint64_t src_data;
-	uint64_t dst_addr;
-	uint64_t next;
-	uint64_t rsv1;
-	uint64_t next_dst_addr;
-	uint64_t user1;
-	uint64_t user2;
-};
-
 struct ioat_xor_descriptor {
 	uint32_t size;
 	union {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 7dafb9f3785f..c9cc08c2dbba 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -633,39 +633,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
-			 int value, size_t len, unsigned long flags)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	struct iop_adma_desc_slot *sw_desc, *grp_start;
-	int slot_cnt, slots_per_op;
-
-	if (unlikely(!len))
-		return NULL;
-	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
-
-	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__func__, len);
-
-	spin_lock_bh(&iop_chan->lock);
-	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
-	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
-	if (sw_desc) {
-		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, flags);
-		iop_desc_set_byte_count(grp_start, iop_chan, len);
-		iop_desc_set_block_fill_val(grp_start, value);
-		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-		sw_desc->async_tx.flags = flags;
-	}
-	spin_unlock_bh(&iop_chan->lock);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 		      unsigned long flags)
@@ -1176,33 +1143,6 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 		goto free_resources;
 	}
 
-	/* test memset */
-	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
-			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_err(dma_chan->device->dev,
-			"Self-test memset timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i]) {
-			dev_err(dma_chan->device->dev,
-				"Self-test memset failed compare, disabling\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
@@ -1487,8 +1427,6 @@ static int iop_adma_probe(struct platform_device *pdev)
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = iop_adma_get_max_xor();
 		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
@@ -1556,8 +1494,7 @@ static int iop_adma_probe(struct platform_device *pdev)
 		goto err_free_iop_chan;
 	}
 
-	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
-	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		ret = iop_adma_xor_val_self_test(adev);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
@@ -1584,7 +1521,6 @@ static int iop_adma_probe(struct platform_device *pdev)
 		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
-		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d64ae14f2706..200f1a3c9a44 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -89,11 +89,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
 	hw_desc->phy_next_desc = 0;
 }
 
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
-	desc->value = val;
-}
-
 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 				  dma_addr_t addr)
 {
@@ -128,22 +123,6 @@ static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
-	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
-	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
-	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
-	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
-}
-
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
 	u32 val = __raw_readl(XOR_INTR_MASK(chan));
@@ -186,8 +165,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc)
 
 	if (chain_old_tail->type != desc->type)
 		return 0;
-	if (desc->type == DMA_MEMSET)
-		return 0;
 
 	return 1;
 }
@@ -205,9 +182,6 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 	case DMA_MEMCPY:
 		op_mode = XOR_OPERATION_MODE_MEMCPY;
 		break;
-	case DMA_MEMSET:
-		op_mode = XOR_OPERATION_MODE_MEMSET;
-		break;
 	default:
 		dev_err(mv_chan_to_devp(chan),
 			"error: unsupported operation %d\n",
@@ -274,18 +248,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 	if (sw_desc->type != mv_chan->current_type)
 		mv_set_mode(mv_chan, sw_desc->type);
 
-	if (sw_desc->type == DMA_MEMSET) {
-		/* for memset requests we need to program the engine, no
-		 * descriptors used.
-		 */
-		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
-		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
-		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
-		mv_chan_set_value(mv_chan, sw_desc->value);
-	} else {
-		/* set the hardware chain */
-		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
-	}
+	/* set the hardware chain */
+	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+
 	mv_chan->pending += sw_desc->slot_cnt;
 	mv_xor_issue_pending(&mv_chan->dmachan);
 }
@@ -688,43 +653,6 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 }
 
 static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x len: %u flags: %ld\n",
-		__func__, dest, len, flags);
-	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
-		return NULL;
-
-	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memset_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
-	if (sw_desc) {
-		sw_desc->type = DMA_MEMSET;
-		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_block_fill_val(grp_start, value);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-	}
-	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p \n",
-		__func__, sw_desc, &sw_desc->async_tx);
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len, unsigned long flags)
 {
@@ -1137,8 +1065,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1187,9 +1113,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
-		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
@@ -1298,8 +1223,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 			dma_cap_set(DMA_MEMCPY, cap_mask);
 		if (of_property_read_bool(np, "dmacap,xor"))
 			dma_cap_set(DMA_XOR, cap_mask);
-		if (of_property_read_bool(np, "dmacap,memset"))
-			dma_cap_set(DMA_MEMSET, cap_mask);
 		if (of_property_read_bool(np, "dmacap,interrupt"))
 			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c632a4761fcf..c619359cb7fe 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -31,7 +31,6 @@
 
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
-#define XOR_OPERATION_MODE_MEMSET	4
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 5d3d95569a1e..1e220f8dfd8c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2323,47 +2323,6 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
 }
 
 /**
- * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
-		struct dma_chan *chan, dma_addr_t dma_dest, int value,
-		size_t len, unsigned long flags)
-{
-	struct ppc440spe_adma_chan *ppc440spe_chan;
-	struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
-	int slot_cnt, slots_per_op;
-
-	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
-	if (unlikely(!len))
-		return NULL;
-
-	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&ppc440spe_chan->lock);
-
-	dev_dbg(ppc440spe_chan->device->common.dev,
-		"ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
-		ppc440spe_chan->device->id, __func__, value, len,
-		flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
-	slot_cnt = slots_per_op = 1;
-	sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
-		slots_per_op);
-	if (sw_desc) {
-		group_start = sw_desc->group_head;
-		ppc440spe_desc_init_memset(group_start, value, flags);
-		ppc440spe_adma_set_dest(group_start, dma_dest, 0);
-		ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
-		sw_desc->unmap_len = len;
-		sw_desc->async_tx.flags = flags;
-	}
-	spin_unlock_bh(&ppc440spe_chan->lock);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
  * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
  */
 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
@@ -4125,7 +4084,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 	case PPC440SPE_DMA1_ID:
 		dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
 		dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
-		dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
 		dma_cap_set(DMA_PQ, adev->common.cap_mask);
 		dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
 		dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
@@ -4151,10 +4109,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 		adev->common.device_prep_dma_memcpy =
 			ppc440spe_adma_prep_dma_memcpy;
 	}
-	if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
-		adev->common.device_prep_dma_memset =
-			ppc440spe_adma_prep_dma_memset;
-	}
 	if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
 		adev->common.max_xor = XOR_MAX_OPS;
 		adev->common.device_prep_dma_xor =
@@ -4217,7 +4171,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 		dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
 		dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
 		dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
-		dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
 		dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
 }
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index a1c486a88e88..179b38ffd351 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -182,10 +182,6 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	     unsigned int src_offset, size_t len,
 	     struct async_submit_ctl *submit);
 
-struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
-	     size_t len, struct async_submit_ctl *submit);
-
 struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 96d3e4ab11a9..cb286b1acdb6 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,7 +66,6 @@ enum dma_transaction_type {
 	DMA_PQ,
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
-	DMA_MEMSET,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -520,7 +519,6 @@ struct dma_tx_state {
  * @device_prep_dma_xor_val: prepares a xor validation operation
  * @device_prep_dma_pq: prepares a pq operation
  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
- * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -573,9 +571,6 @@ struct dma_device {
 		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		unsigned int src_cnt, const unsigned char *scf, size_t len,
 		enum sum_check_flags *pqres, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
-		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
-		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(