Diffstat (limited to 'crypto/async_tx')
-rw-r--r--  crypto/async_tx/async_memcpy.c |  38
-rw-r--r--  crypto/async_tx/async_memset.c |  28
-rw-r--r--  crypto/async_tx/async_tx.c     |   9
-rw-r--r--  crypto/async_tx/async_xor.c    | 124
4 files changed, 110 insertions, 89 deletions
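Every file in this diffstat gets the same two-part conversion: async_tx_find_channel() now receives the destination and source pages so a channel can be chosen per operation, and the driver prep callbacks take pre-mapped bus addresses plus a DMA_PREP_INTERRUPT flag instead of a bare int_en integer followed by tx_set_dest()/tx_set_src() fixups. A condensed before/after sketch of the memcpy path, assembled from the hunks below (illustrative only; the sync fallback and the removed ASYNC_TX_ASSUME_COHERENT special case are omitted):

	/* before: allocate the descriptor first, patch addresses in afterwards */
	tx = device->device_prep_dma_memcpy(chan, len, cb_fn ? 1 : 0);
	tx->tx_set_dest(dma_map_page(device->dev, dest, dest_offset, len,
				     DMA_FROM_DEVICE), tx, 0);
	tx->tx_set_src(dma_map_page(device->dev, src, src_offset, len,
				    DMA_TO_DEVICE), tx, 0);

	/* after: map first, then ask the driver for a fully formed descriptor */
	dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
				DMA_FROM_DEVICE);
	dma_src = dma_map_page(device->dev, src, src_offset, len,
			       DMA_TO_DEVICE);
	tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					    cb_fn ? DMA_PREP_INTERRUPT : 0);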
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 047e533fcc5b..0f6282207b32 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
  * @src: src page
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
  * @depend_tx: memcpy depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -46,33 +46,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memcpy(chan, len,
-		int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memcpy asynchronously */
-		dma_addr_t addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t dma_dest, dma_src;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-
-		addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
-		tx->tx_set_dest(addr, tx, 0);
+		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+					DMA_FROM_DEVICE);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
+		dma_src = dma_map_page(device->dev, src, src_offset, len,
+				       DMA_TO_DEVICE);
 
-		addr = dma_map_page(device->dev, src, src_offset, len, dir);
-		tx->tx_set_src(addr, tx, 0);
+		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+						    len, dma_prep_flags);
+	}
 
+	if (tx) {
+		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
-	} else { /* run the memcpy synchronously */
+	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
 
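Note that async_memcpy()'s own prototype is untouched by this patch; only its internals change. A hypothetical caller (names and constants are illustrative, not part of the patch) still looks like the sketch below, and falls back to the synchronous memcpy branch above when no DMA_MEMCPY channel is available:

	#include <linux/async_tx.h>

	static void copy_done(void *unused)
	{
		pr_debug("page copy complete\n");
	}

	/* hypothetical helper: duplicate one page into another */
	static void copy_one_page(struct page *dst, struct page *src)
	{
		async_memcpy(dst, src, 0, 0, PAGE_SIZE, ASYNC_TX_ACK,
			     NULL, copy_done, NULL);
		async_tx_issue_pending_all();
	}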
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 66ef6351202e..09c0e83664bc 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: memset depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -46,24 +46,24 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+						      &dest, 1, NULL, 0, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memset(chan, val, len,
-		int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memset asynchronously */
-		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t dma_dest;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
+		dma_dest = dma_map_page(device->dev, dest, offset, len,
+					DMA_FROM_DEVICE);
 
-		dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
-		tx->tx_set_dest(dma_addr, tx, 0);
+		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+						    dma_prep_flags);
+	}
 
+	if (tx) {
+		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index bc18cbb8ea79..562882189de5 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -57,8 +57,7 @@ static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
  */
 static spinlock_t async_tx_lock;
 
-static struct list_head
-async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
+static LIST_HEAD(async_tx_master_list);
 
 /* async_tx_issue_pending_all - start all transactions on all channels */
 void async_tx_issue_pending_all(void)
@@ -362,13 +361,13 @@ static void __exit async_tx_exit(void)
 }
 
 /**
- * async_tx_find_channel - find a channel to carry out the operation or let
+ * __async_tx_find_channel - find a channel to carry out the operation or let
  * the transaction execute synchronously
  * @depend_tx: transaction dependency
  * @tx_type: transaction type
  */
 struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type)
 {
 	/* see if we can keep the chain on one channel */
@@ -384,7 +383,7 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	} else
 		return NULL;
 }
-EXPORT_SYMBOL_GPL(async_tx_find_channel);
+EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
 static int __init async_tx_init(void)
 {
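The rename above means the function exported from this file is now __async_tx_find_channel(depend_tx, tx_type), while every caller in this series writes async_tx_find_channel(depend_tx, type, &dest, 1, src, src_cnt, len). The glue that bridges the two lives in include/linux/async_tx.h, which is outside this crypto/async_tx-limited diff; a plausible (hypothetical) form of that shim is a macro that simply drops the page arguments in the generic case, presumably leaving room for a channel-selection policy that does look at the buffers:

	/* hypothetical async_tx.h shim -- not part of the hunks shown here */
	#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
		__async_tx_find_channel(dep, type)

	struct dma_chan *
	__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
				enum dma_transaction_type tx_type);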
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2575f674dcd5..2259a4ff15cb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,35 +30,51 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-static void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+/* do_async_xor - dma map the pages and perform the xor with an engine.
+ * This routine is marked __always_inline so it can be compiled away
+ * when CONFIG_DMA_ENGINE=n
+ */
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
 	struct dma_chan *chan, struct page *dest, struct page **src_list,
 	unsigned int offset, unsigned int src_cnt, size_t len,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	dma_addr_t dma_addr;
-	enum dma_data_direction dir;
+	dma_addr_t dma_dest;
+	dma_addr_t *dma_src = (dma_addr_t *) src_list;
+	struct dma_async_tx_descriptor *tx;
 	int i;
+	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_FROM_DEVICE;
-
-	dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
-	tx->tx_set_dest(dma_addr, tx, 0);
-
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_TO_DEVICE;
+	dma_dest = dma_map_page(device->dev, dest, offset, len,
+				DMA_FROM_DEVICE);
 
-	for (i = 0; i < src_cnt; i++) {
-		dma_addr = dma_map_page(device->dev, src_list[i],
-			offset, len, dir);
-		tx->tx_set_src(dma_addr, tx, i);
+	for (i = 0; i < src_cnt; i++)
+		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+					  len, DMA_TO_DEVICE);
+
+	/* Since we have clobbered the src_list we are committed
+	 * to doing this asynchronously.  Drivers force forward progress
+	 * in case they can not provide a descriptor
+	 */
+	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+					 dma_prep_flags);
+	if (!tx) {
+		if (depend_tx)
+			dma_wait_for_async_tx(depend_tx);
+
+		while (!tx)
+			tx = device->device_prep_dma_xor(chan, dma_dest,
+							 dma_src, src_cnt, len,
+							 dma_prep_flags);
 	}
 
 	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+	return tx;
 }
 
 static void
@@ -102,7 +118,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- *	ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -113,14 +129,16 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+						      &dest, 1, src_list,
+						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_async_tx_callback _cb_fn;
 	void *_cb_param;
 	unsigned long local_flags;
 	int xor_src_cnt;
-	int i = 0, src_off = 0, int_en;
+	int i = 0, src_off = 0;
 
 	BUG_ON(src_cnt <= 1);
 
@@ -140,20 +158,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 				_cb_param = cb_param;
 			}
 
-			int_en = _cb_fn ? 1 : 0;
-
-			tx = device->device_prep_dma_xor(
-				chan, xor_src_cnt, len, int_en);
-
-			if (tx) {
-				do_async_xor(tx, device, chan, dest,
-					&src_list[src_off], offset, xor_src_cnt, len,
-					local_flags, depend_tx, _cb_fn,
-					_cb_param);
-			} else /* fall through */
-				goto xor_sync;
+			tx = do_async_xor(device, chan, dest,
+					  &src_list[src_off], offset,
+					  xor_src_cnt, len, local_flags,
+					  depend_tx, _cb_fn, _cb_param);
 		} else { /* run the xor synchronously */
-xor_sync:
 			/* in the sync case the dest is an implied source
 			 * (assumes the dest is at the src_off index)
 			 */
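With the descriptor-allocation retry folded into do_async_xor(), async_xor() itself reduces to channel selection plus per-chunk submission. A hypothetical caller computing RAID-style parity might look like the sketch below (names are illustrative; the parameter order follows the prototype in the context lines above, and the return value assumes async_xor() hands back the descriptor it submits, as the mainline prototype does):

	/* hypothetical caller: xor nblocks data pages into a parity page */
	static struct dma_async_tx_descriptor *
	compute_parity(struct page *parity, struct page **blocks, int nblocks)
	{
		return async_xor(parity, blocks, 0, nblocks, PAGE_SIZE,
				 ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
				 NULL, NULL, NULL);
	}

The returned descriptor can be chained into a follow-on operation via @depend_tx. One behavioural consequence of this patch for such callers: on the asynchronous path the blocks[] array is reused to hold dma addresses, so its contents are clobbered by the time the call returns.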
@@ -242,7 +251,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -254,29 +263,36 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+						      &dest, 1, src_list,
+						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
-			int_en) : NULL;
-	int i;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (tx) {
-		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t *dma_src = (dma_addr_t *) src_list;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
-		for (i = 0; i < src_cnt; i++) {
-			dma_addr = dma_map_page(device->dev, src_list[i],
-				offset, len, dir);
-			tx->tx_set_src(dma_addr, tx, i);
+		for (i = 0; i < src_cnt; i++)
+			dma_src[i] = dma_map_page(device->dev, src_list[i],
+						  offset, len, DMA_TO_DEVICE);
+
+		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+						      len, result,
+						      dma_prep_flags);
+		if (!tx) {
+			if (depend_tx)
+				dma_wait_for_async_tx(depend_tx);
+
+			while (!tx)
+				tx = device->device_prep_dma_zero_sum(chan,
+					dma_src, src_cnt, len, result,
+					dma_prep_flags);
 		}
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -311,6 +327,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
 
 static int __init async_xor_init(void)
 {
+	#ifdef CONFIG_DMA_ENGINE
+	/* To conserve stack space the input src_list (array of page pointers)
+	 * is reused to hold the array of dma addresses passed to the driver.
+	 * This conversion is only possible when dma_addr_t is less than the
+	 * the size of a pointer.  HIGHMEM64G is known to violate this
+	 * assumption.
+	 */
+	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+	#endif
+
 	return 0;
 }
 
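The new BUILD_BUG_ON documents the trick used by do_async_xor() and async_xor_zero_sum() above: the caller-supplied array of struct page pointers is overwritten in place with the dma addresses handed to the driver, which is only safe while a dma_addr_t is no wider than a pointer. A condensed restatement of that reuse (kernel-flavoured sketch, not additional patch content):

	struct page **src_list;                          /* what the caller passed   */
	dma_addr_t *dma_src = (dma_addr_t *) src_list;   /* same storage, new meaning */

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);

	/* slot i is read as a page pointer before it is overwritten with the
	 * mapped address; if sizeof(dma_addr_t) exceeded sizeof(struct page *)
	 * (e.g. a 32-bit HIGHMEM64G build with 64-bit bus addresses) the store
	 * would spill into neighbouring, not-yet-read slots -- exactly the
	 * configuration the BUILD_BUG_ON rejects at compile time.
	 */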