author		Dan Williams <dan.j.williams@intel.com>	2009-06-03 14:43:59 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-06-03 17:07:35 -0400
commit		a08abd8ca890a377521d65d493d174bebcaf694b (patch)
tree		987c149a2d7d6ab345f426ac28191627b4a02a3e /crypto/async_tx
parent		88ba2aa586c874681c072101287e15d40de7e6e2 (diff)
async_tx: structify submission arguments, add scribble
Prepare the api for the arrival of a new parameter, 'scribble'.  This
will allow callers to identify scratchpad memory for dma address or page
address conversions.  As this adds yet another parameter, take this
opportunity to convert the common submission parameters (flags,
dependency, callback, and callback argument) into an object that is
passed by reference.  Also, take this opportunity to fix up the
kerneldoc and add notes about the relevant ASYNC_TX_* flags for each
routine.

[ Impact: moves api pass-by-value parameters to a pass-by-reference struct ]

Signed-off-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
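For reference, the submission context this patch threads through the api
lives in include/linux/async_tx.h, outside this diffstat.  A sketch of the
structure and a caller-side initializer, inferred from the submit->flags /
submit->depend_tx / submit->cb_fn / submit->cb_param accesses visible in the
diff below; treat the field order and the initializer's exact prototype as
approximate:

	/* sketch: submission/completion modifiers passed by reference */
	struct async_submit_ctl {
		enum async_tx_flags flags;
		struct dma_async_tx_descriptor *depend_tx;
		dma_async_tx_callback cb_fn;
		void *cb_param;
		void *scribble;	/* scratchpad for dma/page address conversions */
	};

	/* pack the old pass-by-value arguments into the context */
	static inline void
	init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
			  struct dma_async_tx_descriptor *tx,
			  dma_async_tx_callback cb_fn, void *cb_param,
			  void *scribble)
	{
		args->flags = flags;
		args->depend_tx = tx;
		args->cb_fn = cb_fn;
		args->cb_param = cb_param;
		args->scribble = scribble;
	}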
Diffstat (limited to 'crypto/async_tx')
-rw-r--r--	crypto/async_tx/async_memcpy.c	26
-rw-r--r--	crypto/async_tx/async_memset.c	25
-rw-r--r--	crypto/async_tx/async_tx.c	51
-rw-r--r--	crypto/async_tx/async_xor.c	123
4 files changed, 111 insertions(+), 114 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 7117ec6f1b74..89e05556f3df 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -33,28 +33,28 @@
  * async_memcpy - attempt to copy memory with a dma engine.
  * @dest: destination page
  * @src: src page
- * @offset: offset in pages to start transaction
+ * @dest_offset: offset into 'dest' to start transaction
+ * @src_offset: offset into 'src' to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK
- * @depend_tx: memcpy depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
  */
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	     unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (device) {
 		dma_addr_t dma_dest, dma_src;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 
+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
 					DMA_FROM_DEVICE);
 
@@ -67,13 +67,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -83,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		kunmap_atomic(dest_buf, KM_USER0);
 		kunmap_atomic(src_buf, KM_USER1);
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}
 
 	return tx;
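For callers the conversion is mechanical.  A hedged before/after sketch for a
hypothetical async_memcpy call site ('copy_done' and 'ctx' are invented names
for illustration):

	/* old api: submission parameters passed by value */
	tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
			  depend_tx, copy_done, ctx);

	/* new api: the same parameters packed into a struct async_submit_ctl;
	 * the trailing NULL is the new (optional) scribble scratchpad
	 */
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, depend_tx, copy_done, ctx,
			  NULL);
	tx = async_memcpy(dest, src, 0, 0, len, &submit);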
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index b2f133885b7f..c14437238f4c 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,26 +35,23 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK
- * @depend_tx: memset depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ *
+ * honored flags: ASYNC_TX_ACK
  */
 struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
-	     size_t len, enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+async_memset(struct page *dest, int val, unsigned int offset, size_t len,
+	     struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
						      &dest, 1, NULL, 0, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (device) {
 		dma_addr_t dma_dest;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 
+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		dma_dest = dma_map_page(device->dev, dest, offset, len,
 					DMA_FROM_DEVICE);
 
@@ -64,19 +61,19 @@ async_memset(struct page *dest, int val, unsigned int offset,
 
 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
-		dest_buf = (void *) (((char *) page_address(dest)) + offset);
+		dest_buf = page_address(dest) + offset;
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
 		memset(dest_buf, val, len);
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}
 
 	return tx;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 3766bc3d7d89..802a5ce437d9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,13 +45,15 @@ static void __exit async_tx_exit(void)
 /**
  * __async_tx_find_channel - find a channel to carry out the operation or let
  * the transaction execute synchronously
- * @depend_tx: transaction dependency
+ * @submit: transaction dependency and submission modifiers
  * @tx_type: transaction type
  */
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type)
 {
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -144,13 +146,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 
 
 /**
- * submit_disposition - while holding depend_tx->lock we must avoid submitting
- * new operations to prevent a circular locking dependency with
- * drivers that already hold a channel lock when calling
- * async_tx_run_dependencies.
+ * submit_disposition - flags for routing an incoming operation
  * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ *
+ * while holding depend_tx->lock we must avoid submitting new operations
+ * to prevent a circular locking dependency with drivers that already
+ * hold a channel lock when calling async_tx_run_dependencies.
  */
 enum submit_disposition {
 	ASYNC_TX_SUBMITTED,
@@ -160,11 +163,12 @@ enum submit_disposition {
 
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-		enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-		dma_async_tx_callback cb_fn, void *cb_param)
+		struct async_submit_ctl *submit)
 {
-	tx->callback = cb_fn;
-	tx->callback_param = cb_param;
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
+	tx->callback = submit->cb_fn;
+	tx->callback_param = submit->cb_param;
 
 	if (depend_tx) {
 		enum submit_disposition s;
@@ -220,7 +224,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		tx->tx_submit(tx);
 	}
 
-	if (flags & ASYNC_TX_ACK)
+	if (submit->flags & ASYNC_TX_ACK)
 		async_tx_ack(tx);
 
 	if (depend_tx)
@@ -229,21 +233,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 EXPORT_SYMBOL_GPL(async_tx_submit);
 
 /**
- * async_trigger_callback - schedules the callback function to be run after
- * any dependent operations have been completed.
- * @flags: ASYNC_TX_ACK
- * @depend_tx: 'callback' requires the completion of this transaction
- * @cb_fn: function to call after depend_tx completes
- * @cb_param: parameter to pass to the callback routine
+ * async_trigger_callback - schedules the callback function to be run
+ * @submit: submission and completion parameters
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * The callback is run after any dependent operations have completed.
  */
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-		       struct dma_async_tx_descriptor *depend_tx,
-		       dma_async_tx_callback cb_fn, void *cb_param)
+async_trigger_callback(struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *tx;
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
 
 	if (depend_tx) {
 		chan = depend_tx->chan;
@@ -262,14 +265,14 @@ async_trigger_callback(enum async_tx_flags flags,
 	if (tx) {
 		pr_debug("%s: (async)\n", __func__);
 
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}
 
 	return tx;
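Because the context now travels by reference, fencing work behind a
dependency just means seeding submit->depend_tx before the call.  A sketch
with invented names 'done_fn' and 'done_arg':

	/* run done_fn(done_arg) only after 'tx' (the dependency) completes */
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, tx, done_fn, done_arg, NULL);
	tx = async_trigger_callback(&submit);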
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3cc5dc763b54..691fa98a18c4 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -34,18 +34,16 @@
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
	     unsigned int offset, int src_cnt, size_t len,
-	     enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+	     struct async_submit_ctl *submit)
 {
 	struct dma_device *dma = chan->device;
 	dma_addr_t *dma_src = (dma_addr_t *) src_list;
 	struct dma_async_tx_descriptor *tx = NULL;
 	int src_off = 0;
 	int i;
-	dma_async_tx_callback _cb_fn;
-	void *_cb_param;
-	enum async_tx_flags async_flags;
+	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+	void *cb_param_orig = submit->cb_param;
+	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
 	int xor_src_cnt;
 	dma_addr_t dma_dest;
@@ -63,7 +61,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	}
 
 	while (src_cnt) {
-		async_flags = flags;
+		submit->flags = flags_orig;
 		dma_flags = 0;
 		xor_src_cnt = min(src_cnt, dma->max_xor);
 		/* if we are submitting additional xors, leave the chain open,
@@ -71,15 +69,15 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		 * buffer mapped
 		 */
 		if (src_cnt > xor_src_cnt) {
-			async_flags &= ~ASYNC_TX_ACK;
+			submit->flags &= ~ASYNC_TX_ACK;
 			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
-			_cb_fn = NULL;
-			_cb_param = NULL;
+			submit->cb_fn = NULL;
+			submit->cb_param = NULL;
 		} else {
-			_cb_fn = cb_fn;
-			_cb_param = cb_param;
+			submit->cb_fn = cb_fn_orig;
+			submit->cb_param = cb_param_orig;
 		}
-		if (_cb_fn)
+		if (submit->cb_fn)
 			dma_flags |= DMA_PREP_INTERRUPT;
 
 		/* Since we have clobbered the src_list we are committed
@@ -90,7 +88,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
					      xor_src_cnt, len, dma_flags);
 
 		if (unlikely(!tx))
-			async_tx_quiesce(&depend_tx);
+			async_tx_quiesce(&submit->depend_tx);
 
 		/* spin wait for the preceeding transactions to complete */
 		while (unlikely(!tx)) {
@@ -101,10 +99,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
						      dma_flags);
 		}
 
-		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
-				_cb_param);
-
-		depend_tx = tx;
+		async_tx_submit(chan, tx, submit);
+		submit->depend_tx = tx;
 
 		if (src_cnt > xor_src_cnt) {
 			/* drop completed sources */
@@ -123,8 +119,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	    int src_cnt, size_t len, enum async_tx_flags flags,
-	    dma_async_tx_callback cb_fn, void *cb_param)
+	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
 	int xor_src_cnt;
@@ -139,7 +134,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
-	if (flags & ASYNC_TX_XOR_ZERO_DST)
+	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
 		memset(dest_buf, 0, len);
 
 	while (src_cnt > 0) {
@@ -152,33 +147,35 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		src_off += xor_src_cnt;
 	}
 
-	async_tx_sync_epilog(cb_fn, cb_param);
+	async_tx_sync_epilog(submit);
 }
 
 /**
  * async_xor - attempt to xor a set of blocks with a dma engine.
- * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
- * flag must be set to not include dest data in the calculation. The
- * assumption with dma eninges is that they only use the destination
- * buffer as a source when it is explicity specified in the source list.
  * @dest: destination page
- * @src_list: array of source pages (if the dest is also a source it must be
- * at index zero). The contents of this array may be overwritten.
- * @offset: offset in pages to start transaction
+ * @src_list: array of source pages
+ * @offset: common src/dst offset to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation. The assumption with dma eninges is that they only
+ * use the destination buffer as a source when it is explicity specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
  */
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	  int src_cnt, size_t len, enum async_tx_flags flags,
-	  struct dma_async_tx_descriptor *depend_tx,
-	  dma_async_tx_callback cb_fn, void *cb_param)
+	  int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
 	BUG_ON(src_cnt <= 1);
@@ -188,7 +185,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
 		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-				    flags, depend_tx, cb_fn, cb_param);
+				    submit);
 	} else {
 		/* run the xor synchronously */
 		pr_debug("%s (sync): len: %zu\n", __func__, len);
@@ -196,16 +193,15 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		/* in the sync case the dest is an implied source
 		 * (assumes the dest is the first source)
 		 */
-		if (flags & ASYNC_TX_XOR_DROP_DST) {
+		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
 			src_cnt--;
 			src_list++;
 		}
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
-		do_sync_xor(dest, src_list, offset, src_cnt, len,
-			    flags, cb_fn, cb_param);
+		do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
 
 		return NULL;
 	}
@@ -222,25 +218,25 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages. The dest page must be listed as a source
- * at index zero. The contents of this array may be overwritten.
+ * @src_list: array of source pages
  * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
  */
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list,
-	      unsigned int offset, int src_cnt, size_t len,
-	      u32 *result, enum async_tx_flags flags,
-	      struct dma_async_tx_descriptor *depend_tx,
-	      dma_async_tx_callback cb_fn, void *cb_param)
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, u32 *result,
+	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
						      &dest, 1, src_list,
						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -250,11 +246,12 @@ async_xor_val(struct page *dest, struct page **src_list,
 
 	if (device && src_cnt <= device->max_xor) {
 		dma_addr_t *dma_src = (dma_addr_t *) src_list;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
 
+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);
@@ -263,7 +260,7 @@ async_xor_val(struct page *dest, struct page **src_list,
						      len, result,
						      dma_prep_flags);
 		if (unlikely(!tx)) {
-			async_tx_quiesce(&depend_tx);
+			async_tx_quiesce(&submit->depend_tx);
 
 			while (!tx) {
 				dma_async_issue_pending(chan);
@@ -273,23 +270,23 @@ async_xor_val(struct page *dest, struct page **src_list,
 			}
 		}
 
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
-		unsigned long xor_flags = flags;
+		enum async_tx_flags flags_orig = submit->flags;
 
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
-		xor_flags |= ASYNC_TX_XOR_DROP_DST;
-		xor_flags &= ~ASYNC_TX_ACK;
+		submit->flags |= ASYNC_TX_XOR_DROP_DST;
+		submit->flags &= ~ASYNC_TX_ACK;
 
-		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
-			       depend_tx, NULL, NULL);
+		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
 
 		async_tx_quiesce(&tx);
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
+		submit->flags = flags_orig;
 	}
 
 	return tx;
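Note how do_async_xor() above reuses a single submit for every
hardware-sized chunk, restoring flags/cb_fn/cb_param from the *_orig copies
on each pass and advancing submit->depend_tx as it goes.  Callers can chain
the same way; a hypothetical raid-style sequence ('blocks', 'data_cnt',
'parity_done', and 'ctx' are invented names):

	struct async_submit_ctl submit;
	struct page *parity = blocks[0];	/* blocks[0] is the parity page */
	u32 bad;

	/* compute parity from the data blocks; zero the dest first since
	 * it is not in the source list
	 */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
			  NULL);
	tx = async_xor(parity, blocks + 1, 0, data_cnt, len, &submit);

	/* then validate it, fenced behind the xor via depend_tx; for the
	 * check the dest must sit at index zero of the source list
	 */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, parity_done, ctx, NULL);
	tx = async_xor_val(parity, blocks, 0, data_cnt + 1, len, &bad, &submit);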