-rw-r--r--  crypto/async_tx/async_memcpy.c | 10
-rw-r--r--  crypto/async_tx/async_memset.c | 10
-rw-r--r--  crypto/async_tx/async_tx.c     | 29
-rw-r--r--  crypto/async_tx/async_xor.c    | 37
-rw-r--r--  include/linux/async_tx.h       |  2
5 files changed, 26 insertions(+), 62 deletions(-)
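
This patch promotes async_tx_quiesce() from a static helper in crypto/async_tx/async_xor.c to an exported function in crypto/async_tx/async_tx.c, declared in include/linux/async_tx.h. The five open-coded "BUG_ON a stale ack, wait for the dependency, panic on DMA_ERROR" sequences in async_memcpy(), async_memset(), async_trigger_callback(), async_xor(), and async_xor_zero_sum() each collapse into a single call to the helper.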
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e8427..06a7f4be9736 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 27a97dc90a7e..d48ed22ed1c3 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,15 +72,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		memset(dest_buf, val, len);
 
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 9325c61208a0..78a61e7f631a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -607,15 +607,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 	}
@@ -624,6 +616,25 @@
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
 module_init(async_tx_init);
 module_exit(async_tx_exit);
 
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 19d16e452bcc..689ecce73ee1 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,24 +30,6 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-/**
- * async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
- */
-static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
-{
-	if (*tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(*tx));
-		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
-			panic("DMA_ERROR waiting for transaction\n");
-		async_tx_ack(*tx);
-		*tx = NULL;
-	}
-}
-
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
@@ -219,15 +201,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		}
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		do_sync_xor(dest, src_list, offset, src_cnt, len,
 			flags, depend_tx, cb_fn, cb_param);
@@ -309,17 +283,10 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
 			depend_tx, NULL, NULL);
 
-		if (tx) {
-			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for tx\n",
-					__func__);
-			async_tx_ack(tx);
-		}
+		async_tx_quiesce(&tx);
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-		tx = NULL;
-
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 	}
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index eb640f0acfac..9f0e7bd5bdc9 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -152,4 +152,6 @@ struct dma_async_tx_descriptor *
 async_trigger_callback(enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */