path: root/crypto/async_tx/async_xor.c
author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:50 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:50 -0400
commit    0403e3827788d878163f9ef0541b748b0f88ca5d (patch)
tree      2dc73744bd92c268a1310f24668167f130877278 /crypto/async_tx/async_xor.c
parent    f9dd2134374c8de6b911e2b8652c6c9622eaa658 (diff)
dmaengine: add fence support
Some engines optimize operation by reading ahead in the descriptor chain such that descriptor2 may start execution before descriptor1 completes. If descriptor2 depends on the result from descriptor1 then a fence is required (on descriptor2) to disable this optimization. The async_tx api could implicitly identify dependencies via the 'depend_tx' parameter, but that would constrain cases where the dependency chain only specifies a completion order rather than a data dependency. So, provide an ASYNC_TX_FENCE to explicitly identify data dependencies.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
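As a caller-side illustration (not part of this patch), here is a minimal sketch of how a submitter might mark a data dependency between two chained operations using the flag this commit introduces. The init_async_submit()/async_memcpy()/async_xor() calls follow the async_tx API of this kernel series; the helper name and page parameters are hypothetical.

#include <linux/async_tx.h>

/*
 * Hypothetical helper (illustrative only): stage 'src' into 'tmp' with
 * async_memcpy(), then XOR 'tmp' and 'data' into 'parity'.  The XOR reads
 * the memcpy's output, so its submit context carries ASYNC_TX_FENCE and
 * is chained behind 'tx'; a read-ahead engine must then complete the copy
 * before it begins executing the XOR descriptor.
 */
static struct dma_async_tx_descriptor *
copy_then_xor(struct page *parity, struct page *tmp, struct page *data,
	      struct page *src, size_t len, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *xor_srcs[2] = { tmp, data };

	/* First operation: nothing depends on prior results, no fence. */
	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
	tx = async_memcpy(tmp, src, 0, 0, len, &submit);

	/* Second operation: data dependency on 'tmp', so fence it and
	 * chain it behind 'tx'.
	 */
	init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	return async_xor(parity, xor_srcs, 0, 2, len, &submit);
}

As the commit message notes, the 'depend_tx' chaining on its own only expresses a completion order; ASYNC_TX_FENCE is what marks the second descriptor as actually consuming the first one's data.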
Diffstat (limited to 'crypto/async_tx/async_xor.c')
-rw-r--r--  crypto/async_tx/async_xor.c  11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 56b5f98da463..db279872ef3d 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -69,6 +69,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		 */
 		if (src_cnt > xor_src_cnt) {
 			submit->flags &= ~ASYNC_TX_ACK;
+			submit->flags |= ASYNC_TX_FENCE;
 			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
 			submit->cb_fn = NULL;
 			submit->cb_param = NULL;
@@ -78,7 +79,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		}
 		if (submit->cb_fn)
 			dma_flags |= DMA_PREP_INTERRUPT;
-
+		if (submit->flags & ASYNC_TX_FENCE)
+			dma_flags |= DMA_PREP_FENCE;
 		/* Since we have clobbered the src_list we are committed
 		 * to doing this asynchronously.  Drivers force forward progress
 		 * in case they can not provide a descriptor
@@ -264,12 +266,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	dma_src = (dma_addr_t *) src_list;
 
 	if (dma_src && device && src_cnt <= device->max_xor) {
-		unsigned long dma_prep_flags;
+		unsigned long dma_prep_flags = 0;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
 
-		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
+		if (submit->cb_fn)
+			dma_prep_flags |= DMA_PREP_INTERRUPT;
+		if (submit->flags & ASYNC_TX_FENCE)
+			dma_prep_flags |= DMA_PREP_FENCE;
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
 						  offset, len, DMA_TO_DEVICE);
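For completeness, a rough sketch of the driver side that consumes the DMA_PREP_FENCE flag set above. The struct my_hw_desc layout and field names are hypothetical stand-ins for an engine-specific descriptor; only the flag test itself mirrors what a real prep routine would do.

#include <linux/types.h>
#include <linux/dmaengine.h>

/* Hypothetical hardware descriptor: 'ctl_fence' stands in for whatever
 * control bit a given engine uses to suppress read-ahead past this
 * descriptor until it has completed.
 */
struct my_hw_desc {
	u32 ctl_fence;
	/* ... addresses, transfer size, and other engine fields ... */
};

/* Called from a driver's ->device_prep_dma_xor() path: translate the
 * generic DMA_PREP_FENCE request into the engine-specific fence bit.
 */
static void my_desc_set_fence(struct my_hw_desc *hw, unsigned long flags)
{
	hw->ctl_fence = (flags & DMA_PREP_FENCE) ? 1 : 0;
}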