Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/async_memcpy.c  15
-rw-r--r--  crypto/async_tx/async_memset.c   8
-rw-r--r--  crypto/async_tx/async_xor.c     22
3 files changed, 14 insertions(+), 31 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 047e533fcc5b..e8c8956ef1dd 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
  * @src: src page
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
  * @depend_tx: memcpy depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -55,20 +55,15 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
 	if (tx) { /* run the memcpy asynchronously */
 		dma_addr_t addr;
-		enum dma_data_direction dir;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-
-		addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
+		addr = dma_map_page(device->dev, dest, dest_offset, len,
+				    DMA_FROM_DEVICE);
 		tx->tx_set_dest(addr, tx, 0);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
-		addr = dma_map_page(device->dev, src, src_offset, len, dir);
+		addr = dma_map_page(device->dev, src, src_offset, len,
+				    DMA_TO_DEVICE);
 		tx->tx_set_src(addr, tx, 0);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 66ef6351202e..760972803958 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: memset depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -55,13 +55,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 
 	if (tx) { /* run the memset asynchronously */
 		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
 
-		dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
+		dma_addr = dma_map_page(device->dev, dest, offset, len,
+					DMA_FROM_DEVICE);
 		tx->tx_set_dest(dma_addr, tx, 0);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 716885a87f07..cb41e6bbbc4d 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -42,23 +42,17 @@ do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
 		dma_async_tx_callback cb_fn, void *cb_param)
 {
 	dma_addr_t dma_addr;
-	enum dma_data_direction dir;
 	int i;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_FROM_DEVICE;
-
-	dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
+	dma_addr = dma_map_page(device->dev, dest, offset, len,
+				DMA_FROM_DEVICE);
 	tx->tx_set_dest(dma_addr, tx, 0);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_TO_DEVICE;
-
 	for (i = 0; i < src_cnt; i++) {
 		dma_addr = dma_map_page(device->dev, src_list[i],
-					offset, len, dir);
+					offset, len, DMA_TO_DEVICE);
 		tx->tx_set_src(dma_addr, tx, i);
 	}
 
@@ -106,7 +100,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- *	ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -246,7 +240,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -270,16 +264,12 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 	if (tx) {
 		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
 		for (i = 0; i < src_cnt; i++) {
 			dma_addr = dma_map_page(device->dev, src_list[i],
-						offset, len, dir);
+						offset, len, DMA_TO_DEVICE);
 			tx->tx_set_src(dma_addr, tx, i);
 		}
 
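
All four routines end up with the same fixed-direction mapping pattern: the destination page is always mapped DMA_FROM_DEVICE and each source page DMA_TO_DEVICE, with no ASYNC_TX_ASSUME_COHERENT / DMA_NONE special case left. The sketch below is illustrative only and is not part of the patch; the helper name is hypothetical, and it reuses only the dma_map_page() and tx_set_dest()/tx_set_src() calls that appear in the diff above.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/*
 * Illustrative sketch only (hypothetical helper, not in the patch):
 * map one destination page and src_cnt source pages for an async_tx
 * operation using the fixed DMA directions the patch converges on.
 */
static void example_map_for_async_tx(struct dma_async_tx_descriptor *tx,
				     struct dma_device *device,
				     struct page *dest,
				     struct page **src_list,
				     unsigned int offset, int src_cnt,
				     size_t len)
{
	dma_addr_t dma_addr;
	int i;

	/* the engine writes the destination: always DMA_FROM_DEVICE */
	dma_addr = dma_map_page(device->dev, dest, offset, len,
				DMA_FROM_DEVICE);
	tx->tx_set_dest(dma_addr, tx, 0);

	/* the engine reads the sources: always DMA_TO_DEVICE */
	for (i = 0; i < src_cnt; i++) {
		dma_addr = dma_map_page(device->dev, src_list[i],
					offset, len, DMA_TO_DEVICE);
		tx->tx_set_src(dma_addr, tx, i);
	}
}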