author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:50 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:42:50 -0400
commit    0403e3827788d878163f9ef0541b748b0f88ca5d (patch)
tree      2dc73744bd92c268a1310f24668167f130877278 /crypto/async_tx/async_pq.c
parent    f9dd2134374c8de6b911e2b8652c6c9622eaa658 (diff)
dmaengine: add fence support
Some engines optimize operation by reading ahead in the descriptor chain such that descriptor2 may start execution before descriptor1 completes. If descriptor2 depends on the result from descriptor1 then a fence is required (on descriptor2) to disable this optimization. The async_tx API could implicitly identify dependencies via the 'depend_tx' parameter, but that would constrain cases where the dependency chain only specifies a completion order rather than a data dependency. So, provide an ASYNC_TX_FENCE to explicitly identify data dependencies.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
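For illustration only (not part of this patch): a minimal sketch of how an async_tx client might use the new flag, assuming the struct async_submit_ctl interface from this patch series (init_async_submit(), async_xor(), async_gen_syndrome()). The function example_fenced_chain and the blocks/disks/len/scribble state are hypothetical, caller-provided placeholders.

#include <linux/async_tx.h>

static void example_fenced_chain(struct page **blocks, int disks,
				 size_t len, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* first operation: nothing depends on it yet, so no fence */
	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
	tx = async_xor(blocks[0], &blocks[1], 0, disks - 3, len, &submit);

	/* the syndrome computation reads the xor result: a true data
	 * dependency, so set ASYNC_TX_FENCE on the dependent operation
	 * to keep a read-ahead engine from starting it early
	 */
	init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, 0, disks, len, &submit);
	async_tx_issue_pending_all();
}

Note that the fence is requested on the second (dependent) descriptor, matching the commit description: the flag marks the consumer of the data, not the producer.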
Diffstat (limited to 'crypto/async_tx/async_pq.c')
-rw-r--r--  crypto/async_tx/async_pq.c  5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 108b21efb499..a25e290c39fb 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -101,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 */
 	if (src_cnt > pq_src_cnt) {
 		submit->flags &= ~ASYNC_TX_ACK;
+		submit->flags |= ASYNC_TX_FENCE;
 		dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
 		submit->cb_fn = NULL;
 		submit->cb_param = NULL;
@@ -111,6 +112,8 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 		if (cb_fn_orig)
 			dma_flags |= DMA_PREP_INTERRUPT;
 	}
+	if (submit->flags & ASYNC_TX_FENCE)
+		dma_flags |= DMA_PREP_FENCE;
 
 	/* Since we have clobbered the src_list we are committed
 	 * to doing this asynchronously.  Drivers force forward
@@ -282,6 +285,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		dma_flags |= DMA_PREP_PQ_DISABLE_P;
 	if (!Q(blocks, disks))
 		dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+	if (submit->flags & ASYNC_TX_FENCE)
+		dma_flags |= DMA_PREP_FENCE;
 	for (i = 0; i < disks; i++)
 		if (likely(blocks[i])) {
 			BUG_ON(is_raid6_zero_block(blocks[i]));
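Driver side, for context: DMA_PREP_FENCE is only a request; each dmaengine driver translates it into whatever mechanism its hardware provides for stalling descriptor read-ahead. A hedged sketch with a hypothetical descriptor layout (struct hw_desc and its fence bit are made up; real drivers such as ioatdma define their own formats):

/* hypothetical hardware descriptor: 'fence' stalls read-ahead */
struct hw_desc {
	unsigned int fence:1;
	/* ... remaining hardware-specific fields ... */
};

static void example_apply_fence(struct hw_desc *hw, unsigned long flags)
{
	/* honor the fence only when the client declared a true data
	 * dependency via DMA_PREP_FENCE
	 */
	hw->fence = !!(flags & DMA_PREP_FENCE);
}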