 crypto/async_tx/async_memcpy.c |  27
 crypto/async_tx/async_memset.c |  20
 crypto/async_tx/async_xor.c    |  94
 drivers/dma/Kconfig            |   1
 drivers/dma/dmaengine.c        |  49
 drivers/dma/ioat_dma.c         |  39
 drivers/dma/iop-adma.c         | 124
 include/linux/dmaengine.h      |  20
 8 files changed, 178 insertions(+), 196 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index e8c8956ef1d..faca0bc5206 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -48,26 +48,25 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 {
 	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memcpy(chan, len,
-		int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memcpy asynchronously */
-		dma_addr_t addr;
+	if (device) {
+		dma_addr_t dma_dest, dma_src;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+					DMA_FROM_DEVICE);
 
-		addr = dma_map_page(device->dev, dest, dest_offset, len,
-				    DMA_FROM_DEVICE);
-		tx->tx_set_dest(addr, tx, 0);
+		dma_src = dma_map_page(device->dev, src, src_offset, len,
+				       DMA_TO_DEVICE);
 
-		addr = dma_map_page(device->dev, src, src_offset, len,
-				    DMA_TO_DEVICE);
-		tx->tx_set_src(addr, tx, 0);
+		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+						    len, cb_fn != NULL);
+	}
 
+	if (tx) {
+		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
-	} else { /* run the memcpy synchronously */
+	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
 
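The hunk above shows the direction of the whole patch: address setup moves out of the post-prep tx_set_src/tx_set_dest callbacks and into the prep call itself. A minimal, hypothetical consumer sketch of the resulting calling convention (the name copy_one_page is illustrative and not from the patch; error handling and unmapping are omitted):

/* Illustrative only: map both pages first, then hand the bus addresses
 * straight to the prep routine and submit the returned descriptor.
 */
static dma_cookie_t copy_one_page(struct dma_chan *chan, struct page *dst,
				  struct page *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dst, dma_src;

	dma_dst = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);
	dma_src = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, len, 0);
	if (!tx)
		return -ENOMEM;	/* a real caller would unmap here */

	tx->ack = 1;
	tx->callback = NULL;
	return tx->tx_submit(tx);
}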
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 76097280395..0c94851cfd3 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -48,20 +48,20 @@ async_memset(struct page *dest, int val, unsigned int offset,
 {
 	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memset(chan, val, len,
-		int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memset asynchronously */
-		dma_addr_t dma_addr;
+	if (device) {
+		dma_addr_t dma_dest;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
-		dma_addr = dma_map_page(device->dev, dest, offset, len,
+		dma_dest = dma_map_page(device->dev, dest, offset, len,
 					DMA_FROM_DEVICE);
-		tx->tx_set_dest(dma_addr, tx, 0);
 
+		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+						    cb_fn != NULL);
+	}
+
+	if (tx) {
+		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index cb41e6bbbc4..12cba1a4205 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -34,29 +34,46 @@
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
  */
-static __always_inline void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
 	struct dma_chan *chan, struct page *dest, struct page **src_list,
 	unsigned int offset, unsigned int src_cnt, size_t len,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	dma_addr_t dma_addr;
+	dma_addr_t dma_dest;
+	dma_addr_t *dma_src = (dma_addr_t *) src_list;
+	struct dma_async_tx_descriptor *tx;
 	int i;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-	dma_addr = dma_map_page(device->dev, dest, offset, len,
+	dma_dest = dma_map_page(device->dev, dest, offset, len,
 				DMA_FROM_DEVICE);
-	tx->tx_set_dest(dma_addr, tx, 0);
 
-	for (i = 0; i < src_cnt; i++) {
-		dma_addr = dma_map_page(device->dev, src_list[i],
-					offset, len, DMA_TO_DEVICE);
-		tx->tx_set_src(dma_addr, tx, i);
+	for (i = 0; i < src_cnt; i++)
+		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+					  len, DMA_TO_DEVICE);
+
+	/* Since we have clobbered the src_list we are committed
+	 * to doing this asynchronously.  Drivers force forward progress
+	 * in case they can not provide a descriptor
+	 */
+	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+					 cb_fn != NULL);
+	if (!tx) {
+		if (depend_tx)
+			dma_wait_for_async_tx(depend_tx);
+
+		while (!tx)
+			tx = device->device_prep_dma_xor(chan, dma_dest,
+							 dma_src, src_cnt, len,
+							 cb_fn != NULL);
 	}
 
 	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+	return tx;
 }
 
 static void
@@ -118,7 +135,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	void *_cb_param;
 	unsigned long local_flags;
 	int xor_src_cnt;
-	int i = 0, src_off = 0, int_en;
+	int i = 0, src_off = 0;
 
 	BUG_ON(src_cnt <= 1);
 
@@ -138,20 +155,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 				_cb_param = cb_param;
 			}
 
-			int_en = _cb_fn ? 1 : 0;
-
-			tx = device->device_prep_dma_xor(
-				chan, xor_src_cnt, len, int_en);
-
-			if (tx) {
-				do_async_xor(tx, device, chan, dest,
-					&src_list[src_off], offset, xor_src_cnt, len,
-					local_flags, depend_tx, _cb_fn,
-					_cb_param);
-			} else /* fall through */
-				goto xor_sync;
+			tx = do_async_xor(device, chan, dest,
+					  &src_list[src_off], offset,
+					  xor_src_cnt, len, local_flags,
+					  depend_tx, _cb_fn, _cb_param);
 		} else { /* run the xor synchronously */
-xor_sync:
 			/* in the sync case the dest is an implied source
 			 * (assumes the dest is at the src_off index)
 			 */
@@ -254,23 +262,31 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 {
 	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
-			int_en) : NULL;
-	int i;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (tx) {
-		dma_addr_t dma_addr;
+	if (device) {
+		dma_addr_t *dma_src = (dma_addr_t *) src_list;
+		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		for (i = 0; i < src_cnt; i++) {
-			dma_addr = dma_map_page(device->dev, src_list[i],
+		for (i = 0; i < src_cnt; i++)
+			dma_src[i] = dma_map_page(device->dev, src_list[i],
 					offset, len, DMA_TO_DEVICE);
-			tx->tx_set_src(dma_addr, tx, i);
+
+		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+						      len, result,
+						      cb_fn != NULL);
+		if (!tx) {
+			if (depend_tx)
+				dma_wait_for_async_tx(depend_tx);
+
+			while (!tx)
+				tx = device->device_prep_dma_zero_sum(chan,
+					dma_src, src_cnt, len, result,
+					cb_fn != NULL);
 		}
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -305,6 +321,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
 
 static int __init async_xor_init(void)
 {
+	#ifdef CONFIG_DMA_ENGINE
+	/* To conserve stack space the input src_list (array of page pointers)
+	 * is reused to hold the array of dma addresses passed to the driver.
+	 * This conversion is only possible when dma_addr_t is less than the
+	 * size of a pointer.  HIGHMEM64G is known to violate this
+	 * assumption.
+	 */
+	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+	#endif
+
 	return 0;
 }
 
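The BUILD_BUG_ON added above, together with the !HIGHMEM64G dependency in the Kconfig hunk that follows, guards the in-place reuse of src_list introduced in do_async_xor() and async_xor_zero_sum(). A hypothetical helper (not part of the patch) spelling out the trick: converting in ascending index order is safe because, with sizeof(dma_addr_t) <= sizeof(struct page *), the store to dma_src[i] never reaches src_list[j] for j > i, and src_list[i] itself is read before that store happens.

/* Illustrative only: reuse the caller's page-pointer array as the
 * dma_addr_t array that the new prep_dma_xor/prep_dma_zero_sum hooks take.
 */
static dma_addr_t *map_srcs_in_place(struct dma_device *device,
				     struct page **src_list,
				     unsigned int src_cnt,
				     unsigned int offset, size_t len)
{
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	unsigned int i;

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);
	return dma_src;
}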
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee..a703deffb79 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
 menuconfig DMADEVICES
 	bool "DMA Engine support"
 	depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+	depends on !HIGHMEM64G
 	help
 	  DMA engines can do asynchronous data transfers without
 	  involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df3033..29965231b91 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7..5bcfc55a277 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	return device->common.chancnt;
 }
 
-static void ioat_set_src(dma_addr_t addr,
-			 struct dma_async_tx_descriptor *tx,
-			 int index)
-{
-	tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
-			  struct dma_async_tx_descriptor *tx,
-			  int index)
-{
-	tx_to_ioat_desc(tx)->dst = addr;
-}
-
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *	descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
 	memset(desc, 0, sizeof(*desc));
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-	desc_sw->async_tx.tx_set_src = ioat_set_src;
-	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,6 +698,8 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 
 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
 						int int_en)
 {
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -733,6 +721,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
 						int int_en)
 {
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	u8 *dest;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int err = 0;
 
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		goto out;
 	}
 
-	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+				 DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+				  DMA_FROM_DEVICE);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+						   IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 	async_tx_ack(tx);
-	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-			      DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-			      DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
 	cookie = tx->tx_submit(tx);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index b011b5ae22a..eda841c6069 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	return cookie;
 }
 
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
-	/* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
-	iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = iop_adma_tx_submit;
-		slot->async_tx.tx_set_dest = iop_adma_set_dest;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+			 dma_addr_t dma_src, size_t len, int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -578,9 +557,10 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 		grp_start = sw_desc->group_head;
 		iop_desc_init_memcpy(grp_start, int_en);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
-	int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+			 int value, size_t len, int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -610,6 +590,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 		iop_desc_init_memset(grp_start, int_en);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
 	}
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
-	int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+		      int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -651,29 +623,22 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
 		grp_start = sw_desc->group_head;
 		iop_desc_init_xor(grp_start, src_cnt, int_en);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+		while (src_cnt--)
+			iop_desc_set_xor_src_addr(grp_start, src_cnt,
+						  dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
-			struct dma_async_tx_descriptor *tx,
-			int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
-	size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+			   unsigned int src_cnt, size_t len, u32 *result,
+			   int int_en)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -697,7 +662,9 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
 			__FUNCTION__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+		while (src_cnt--)
+			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+						       dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 		goto out;
 	}
 
-	tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
 	dest_dma = dma_map_single(dma_chan->device->dev, dest,
 				  IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
-	iop_adma_memcpy_set_src(src_dma, tx, 0);
+	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+				      IOP_ADMA_TEST_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	struct page *dest;
 	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
 	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 	dma_addr_t dma_addr, dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test xor */
-	tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
-				   PAGE_SIZE, 1);
 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
-
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
-					PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	zero_sum_result = 1;
 
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-					PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-					0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test memset */
-	tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dma_addr, tx, 0);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-					PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-					0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 5c84bf89759..b0864f5b729 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -209,8 +209,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  *	descriptors
  * @chan: target channel for this operation
  * @tx_submit: set the prepared descriptor(s) to be executed by the engine
- * @tx_set_dest: set a destination address in a hardware descriptor
- * @tx_set_src: set a source address in a hardware descriptor
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
@@ -227,10 +225,6 @@ struct dma_async_tx_descriptor {
 	struct list_head tx_list;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
-	void (*tx_set_dest)(dma_addr_t addr,
-			struct dma_async_tx_descriptor *tx, int index);
-	void (*tx_set_src)(dma_addr_t addr,
-			struct dma_async_tx_descriptor *tx, int index);
 	dma_async_tx_callback callback;
 	void *callback_param;
 	struct list_head depend_list;
@@ -279,15 +273,17 @@ struct dma_device {
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
-		struct dma_chan *chan, size_t len, int int_en);
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, int int_en);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
-		struct dma_chan *chan, unsigned int src_cnt, size_t len,
-		int int_en);
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+		unsigned int src_cnt, size_t len, int int_en);
 	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
-		struct dma_chan *chan, unsigned int src_cnt, size_t len,
-		u32 *result, int int_en);
+		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+		size_t len, u32 *result, int int_en);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
-		struct dma_chan *chan, int value, size_t len, int int_en);
+		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+		int int_en);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan);
 
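On the driver side, the prep hooks now receive the bus addresses up front, as the new prototypes in struct dma_device above show. A hypothetical skeleton (the foo_ names and descriptor fields are illustrative, not from any real driver) of what a memcpy prep routine looks like after this change:

/* Illustrative only: the destination/source addresses arrive as arguments
 * and are written straight into the driver's hardware descriptor, so the
 * old tx_set_src/tx_set_dest callbacks are no longer needed.
 */
static struct dma_async_tx_descriptor *
foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
		    dma_addr_t dma_src, size_t len, int int_en)
{
	struct foo_desc *desc = foo_alloc_desc(chan);	/* driver-specific */

	if (!desc)
		return NULL;	/* async_tx callers retry or fall back */

	desc->hw->dest_addr = dma_dest;
	desc->hw->src_addr = dma_src;
	desc->hw->byte_count = len;
	desc->hw->int_en = int_en;

	return &desc->async_tx;
}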