author	Dan Williams <dan.j.williams@intel.com>	2009-06-03 14:43:59 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-06-03 17:07:35 -0400
commit	a08abd8ca890a377521d65d493d174bebcaf694b (patch)
tree	987c149a2d7d6ab345f426ac28191627b4a02a3e
parent	88ba2aa586c874681c072101287e15d40de7e6e2 (diff)
async_tx: structify submission arguments, add scribble
Prepare the api for the arrival of a new parameter, 'scribble'.  This
will allow callers to identify scratchpad memory for dma address or page
address conversions.  As this adds yet another parameter, take this
opportunity to convert the common submission parameters (flags,
dependency, callback, and callback argument) into an object that is
passed by reference.  Also, take this opportunity to fix up the kerneldoc
and add notes about the relevant ASYNC_TX_* flags for each routine.

[ Impact: moves api pass-by-value parameters to a pass-by-reference struct ]

Signed-off-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
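As an illustrative caller-side sketch of the conversion (the dest, src, len, depend_tx and callback values below are placeholders; init_async_submit(), struct async_submit_ctl and the new async_memcpy() signature are the interfaces introduced by this patch):

	/* before: common submission parameters passed by value on every call */
	tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK, depend_tx,
			  callback, callback_arg);

	/* after: pack them once into an async_submit_ctl passed by reference;
	 * the final argument is the new 'scribble' scratchpad (NULL when the
	 * caller has no dma/page address conversion space to offer)
	 */
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, depend_tx, callback,
			  callback_arg, NULL);
	tx = async_memcpy(dest, src, 0, 0, len, &submit);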
-rw-r--r--	Documentation/crypto/async-tx-api.txt	|   6
-rw-r--r--	crypto/async_tx/async_memcpy.c		|  26
-rw-r--r--	crypto/async_tx/async_memset.c		|  25
-rw-r--r--	crypto/async_tx/async_tx.c		|  51
-rw-r--r--	crypto/async_tx/async_xor.c		| 123
-rw-r--r--	drivers/md/raid5.c			|  59
-rw-r--r--	include/linux/async_tx.h		|  84
7 files changed, 200 insertions, 174 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 76feda8541dc..dfe0475f7919 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -54,11 +54,7 @@ features surfaced as a result:
 
 3.1 General format of the API:
 struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
-		  enum async_tx_flags flags,
-		  struct dma_async_tx_descriptor *dependency,
-		  dma_async_tx_callback callback_routine,
-		  void *callback_parameter);
+async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
 
 3.2 Supported operations:
 memcpy - memory copy between a source and a destination buffer
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 7117ec6f1b74..89e05556f3df 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -33,28 +33,28 @@
  * async_memcpy - attempt to copy memory with a dma engine.
  * @dest: destination page
  * @src: src page
- * @offset: offset in pages to start transaction
+ * @dest_offset: offset into 'dest' to start transaction
+ * @src_offset: offset into 'src' to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK
- * @depend_tx: memcpy depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
  */
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	     unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
 						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (device) {
 		dma_addr_t dma_dest, dma_src;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 
+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
 					DMA_FROM_DEVICE);
 
@@ -67,13 +67,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -83,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	kunmap_atomic(dest_buf, KM_USER0);
 	kunmap_atomic(src_buf, KM_USER1);
 
-	async_tx_sync_epilog(cb_fn, cb_param);
+	async_tx_sync_epilog(submit);
 	}
 
 	return tx;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index b2f133885b7f..c14437238f4c 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,26 +35,23 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK
- * @depend_tx: memset depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ *
+ * honored flags: ASYNC_TX_ACK
  */
 struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
-	     size_t len, enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+async_memset(struct page *dest, int val, unsigned int offset, size_t len,
+	     struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
 						      &dest, 1, NULL, 0, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (device) {
 		dma_addr_t dma_dest;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 
+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		dma_dest = dma_map_page(device->dev, dest, offset, len,
 					DMA_FROM_DEVICE);
 
@@ -64,19 +61,19 @@ async_memset(struct page *dest, int val, unsigned int offset,
 
 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
-		dest_buf = (void *) (((char *) page_address(dest)) + offset);
+		dest_buf = page_address(dest) + offset;
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
 		memset(dest_buf, val, len);
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}
 
 	return tx;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 3766bc3d7d89..802a5ce437d9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,13 +45,15 @@ static void __exit async_tx_exit(void)
 /**
  * __async_tx_find_channel - find a channel to carry out the operation or let
  *	the transaction execute synchronously
- * @depend_tx: transaction dependency
+ * @submit: transaction dependency and submission modifiers
  * @tx_type: transaction type
  */
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct async_submit_ctl *submit,
 			enum dma_transaction_type tx_type)
 {
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -144,13 +146,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 
 
 /**
- * submit_disposition - while holding depend_tx->lock we must avoid submitting
- *	new operations to prevent a circular locking dependency with
- *	drivers that already hold a channel lock when calling
- *	async_tx_run_dependencies.
+ * submit_disposition - flags for routing an incoming operation
  * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
  * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
  * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ *
+ * while holding depend_tx->lock we must avoid submitting new operations
+ * to prevent a circular locking dependency with drivers that already
+ * hold a channel lock when calling async_tx_run_dependencies.
  */
 enum submit_disposition {
 	ASYNC_TX_SUBMITTED,
@@ -160,11 +163,12 @@ enum submit_disposition {
 
 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-		enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-		dma_async_tx_callback cb_fn, void *cb_param)
+		struct async_submit_ctl *submit)
 {
-	tx->callback = cb_fn;
-	tx->callback_param = cb_param;
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
+	tx->callback = submit->cb_fn;
+	tx->callback_param = submit->cb_param;
 
 	if (depend_tx) {
 		enum submit_disposition s;
@@ -220,7 +224,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		tx->tx_submit(tx);
 	}
 
-	if (flags & ASYNC_TX_ACK)
+	if (submit->flags & ASYNC_TX_ACK)
 		async_tx_ack(tx);
 
 	if (depend_tx)
@@ -229,21 +233,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 EXPORT_SYMBOL_GPL(async_tx_submit);
 
 /**
- * async_trigger_callback - schedules the callback function to be run after
- * any dependent operations have been completed.
- * @flags: ASYNC_TX_ACK
- * @depend_tx: 'callback' requires the completion of this transaction
- * @cb_fn: function to call after depend_tx completes
- * @cb_param: parameter to pass to the callback routine
+ * async_trigger_callback - schedules the callback function to be run
+ * @submit: submission and completion parameters
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * The callback is run after any dependent operations have completed.
  */
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_param)
+async_trigger_callback(struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *tx;
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
 
 	if (depend_tx) {
 		chan = depend_tx->chan;
@@ -262,14 +265,14 @@ async_trigger_callback(enum async_tx_flags flags,
 	if (tx) {
 		pr_debug("%s: (async)\n", __func__);
 
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}
 
 	return tx;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3cc5dc763b54..691fa98a18c4 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -34,18 +34,16 @@
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	     unsigned int offset, int src_cnt, size_t len,
-	     enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+	     struct async_submit_ctl *submit)
 {
 	struct dma_device *dma = chan->device;
 	dma_addr_t *dma_src = (dma_addr_t *) src_list;
 	struct dma_async_tx_descriptor *tx = NULL;
 	int src_off = 0;
 	int i;
-	dma_async_tx_callback _cb_fn;
-	void *_cb_param;
-	enum async_tx_flags async_flags;
+	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+	void *cb_param_orig = submit->cb_param;
+	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
 	int xor_src_cnt;
 	dma_addr_t dma_dest;
@@ -63,7 +61,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	}
 
 	while (src_cnt) {
-		async_flags = flags;
+		submit->flags = flags_orig;
 		dma_flags = 0;
 		xor_src_cnt = min(src_cnt, dma->max_xor);
 		/* if we are submitting additional xors, leave the chain open,
@@ -71,15 +69,15 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		 * buffer mapped
 		 */
 		if (src_cnt > xor_src_cnt) {
-			async_flags &= ~ASYNC_TX_ACK;
+			submit->flags &= ~ASYNC_TX_ACK;
 			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
-			_cb_fn = NULL;
-			_cb_param = NULL;
+			submit->cb_fn = NULL;
+			submit->cb_param = NULL;
 		} else {
-			_cb_fn = cb_fn;
-			_cb_param = cb_param;
+			submit->cb_fn = cb_fn_orig;
+			submit->cb_param = cb_param_orig;
 		}
-		if (_cb_fn)
+		if (submit->cb_fn)
 			dma_flags |= DMA_PREP_INTERRUPT;
 
 		/* Since we have clobbered the src_list we are committed
@@ -90,7 +88,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 					      xor_src_cnt, len, dma_flags);
 
 		if (unlikely(!tx))
-			async_tx_quiesce(&depend_tx);
+			async_tx_quiesce(&submit->depend_tx);
 
 		/* spin wait for the preceeding transactions to complete */
 		while (unlikely(!tx)) {
@@ -101,10 +99,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 						      dma_flags);
 		}
 
-		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
-				_cb_param);
-
-		depend_tx = tx;
+		async_tx_submit(chan, tx, submit);
+		submit->depend_tx = tx;
 
 		if (src_cnt > xor_src_cnt) {
 			/* drop completed sources */
@@ -123,8 +119,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	    int src_cnt, size_t len, enum async_tx_flags flags,
-	    dma_async_tx_callback cb_fn, void *cb_param)
+	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
 	int xor_src_cnt;
@@ -139,7 +134,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
-	if (flags & ASYNC_TX_XOR_ZERO_DST)
+	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
 		memset(dest_buf, 0, len);
 
 	while (src_cnt > 0) {
@@ -152,33 +147,35 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		src_off += xor_src_cnt;
 	}
 
-	async_tx_sync_epilog(cb_fn, cb_param);
+	async_tx_sync_epilog(submit);
 }
 
 /**
  * async_xor - attempt to xor a set of blocks with a dma engine.
- * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
- * flag must be set to not include dest data in the calculation. The
- * assumption with dma eninges is that they only use the destination
- * buffer as a source when it is explicity specified in the source list.
  * @dest: destination page
- * @src_list: array of source pages (if the dest is also a source it must be
- * at index zero). The contents of this array may be overwritten.
- * @offset: offset in pages to start transaction
+ * @src_list: array of source pages
+ * @offset: common src/dst offset to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation. The assumption with dma eninges is that they only
+ * use the destination buffer as a source when it is explicity specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
  */
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	  int src_cnt, size_t len, enum async_tx_flags flags,
-	  struct dma_async_tx_descriptor *depend_tx,
-	  dma_async_tx_callback cb_fn, void *cb_param)
+	  int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
 	BUG_ON(src_cnt <= 1);
@@ -188,7 +185,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
 		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-				    flags, depend_tx, cb_fn, cb_param);
+				    submit);
 	} else {
 		/* run the xor synchronously */
 		pr_debug("%s (sync): len: %zu\n", __func__, len);
@@ -196,16 +193,15 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		/* in the sync case the dest is an implied source
 		 * (assumes the dest is the first source)
 		 */
-		if (flags & ASYNC_TX_XOR_DROP_DST) {
+		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
 			src_cnt--;
 			src_list++;
 		}
 
 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);
 
-		do_sync_xor(dest, src_list, offset, src_cnt, len,
-			    flags, cb_fn, cb_param);
+		do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
 
 		return NULL;
 	}
@@ -222,25 +218,25 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages. The dest page must be listed as a source
- * at index zero. The contents of this array may be overwritten.
+ * @src_list: array of source pages
  * @offset: offset in pages to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
  */
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list,
-	      unsigned int offset, int src_cnt, size_t len,
-	      u32 *result, enum async_tx_flags flags,
-	      struct dma_async_tx_descriptor *depend_tx,
-	      dma_async_tx_callback cb_fn, void *cb_param)
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, u32 *result,
+	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
 						      &dest, 1, src_list,
 						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -250,11 +246,12 @@ async_xor_val(struct page *dest, struct page **src_list,
 
 	if (device && src_cnt <= device->max_xor) {
 		dma_addr_t *dma_src = (dma_addr_t *) src_list;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
 
+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
 						  offset, len, DMA_TO_DEVICE);
@@ -263,7 +260,7 @@ async_xor_val(struct page *dest, struct page **src_list,
 						      len, result,
 						      dma_prep_flags);
 		if (unlikely(!tx)) {
-			async_tx_quiesce(&depend_tx);
+			async_tx_quiesce(&submit->depend_tx);
 
 			while (!tx) {
 				dma_async_issue_pending(chan);
@@ -273,23 +270,23 @@ async_xor_val(struct page *dest, struct page **src_list,
 			}
 		}
 
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
-		unsigned long xor_flags = flags;
+		enum async_tx_flags flags_orig = submit->flags;
 
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
-		xor_flags |= ASYNC_TX_XOR_DROP_DST;
-		xor_flags &= ~ASYNC_TX_ACK;
+		submit->flags |= ASYNC_TX_XOR_DROP_DST;
+		submit->flags &= ~ASYNC_TX_ACK;
 
-		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
-			       depend_tx, NULL, NULL);
+		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
 
 		async_tx_quiesce(&tx);
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
+		submit->flags = flags_orig;
 	}
 
 	return tx;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0ef5362c8d02..e1920f23579f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -499,11 +499,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 	struct page *bio_page;
 	int i;
 	int page_offset;
+	struct async_submit_ctl submit;
 
 	if (bio->bi_sector >= sector)
 		page_offset = (signed)(bio->bi_sector - sector) * 512;
 	else
 		page_offset = (signed)(sector - bio->bi_sector) * -512;
+
+	init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
 	bio_for_each_segment(bvl, bio, i) {
 		int len = bio_iovec_idx(bio, i)->bv_len;
 		int clen;
@@ -525,13 +528,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			bio_page = bio_iovec_idx(bio, i)->bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
-						  b_offset, clen, 0,
-						  tx, NULL, NULL);
+						  b_offset, clen, &submit);
 			else
 				tx = async_memcpy(bio_page, page, b_offset,
-						  page_offset, clen, 0,
-						  tx, NULL, NULL);
+						  page_offset, clen, &submit);
 		}
+		/* chain the operations */
+		submit.depend_tx = tx;
+
 		if (clen < len) /* hit end of page */
 			break;
 		page_offset += len;
@@ -590,6 +594,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
+	struct async_submit_ctl submit;
 	int i;
 
 	pr_debug("%s: stripe %llu\n", __func__,
@@ -613,7 +618,8 @@ static void ops_run_biofill(struct stripe_head *sh)
 	}
 
 	atomic_inc(&sh->count);
-	async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh);
+	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
+	async_trigger_callback(&submit);
 }
 
 static void ops_complete_compute5(void *stripe_head_ref)
@@ -645,6 +651,7 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
 	struct page *xor_dest = tgt->page;
 	int count = 0;
 	struct dma_async_tx_descriptor *tx;
+	struct async_submit_ctl submit;
 	int i;
 
 	pr_debug("%s: stripe %llu block: %d\n",
@@ -657,13 +664,12 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
 
 	atomic_inc(&sh->count);
 
+	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+			  ops_complete_compute5, sh, NULL);
 	if (unlikely(count == 1))
-		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-				  0, NULL, ops_complete_compute5, sh);
+		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
 	else
-		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			       ASYNC_TX_XOR_ZERO_DST, NULL,
-			       ops_complete_compute5, sh);
+		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 
 	return tx;
 }
@@ -683,6 +689,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	int disks = sh->disks;
 	struct page *xor_srcs[disks];
 	int count = 0, pd_idx = sh->pd_idx, i;
+	struct async_submit_ctl submit;
 
 	/* existing parity data subtracted */
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -697,9 +704,9 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			xor_srcs[count++] = dev->page;
 	}
 
-	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-		       ASYNC_TX_XOR_DROP_DST, tx,
-		       ops_complete_prexor, sh);
+	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx,
+			  ops_complete_prexor, sh, NULL);
+	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 
 	return tx;
 }
@@ -772,7 +779,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	/* kernel stack size limits the total number of disks */
 	int disks = sh->disks;
 	struct page *xor_srcs[disks];
-
+	struct async_submit_ctl submit;
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest;
 	int prexor = 0;
@@ -811,13 +818,11 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 
 	atomic_inc(&sh->count);
 
-	if (unlikely(count == 1)) {
-		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
-		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-				  flags, tx, ops_complete_postxor, sh);
-	} else
-		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			       flags, tx, ops_complete_postxor, sh);
+	init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, NULL);
+	if (unlikely(count == 1))
+		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
+	else
+		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 }
 
 static void ops_complete_check(void *stripe_head_ref)
@@ -838,6 +843,7 @@ static void ops_run_check(struct stripe_head *sh)
 	int disks = sh->disks;
 	struct page *xor_srcs[disks];
 	struct dma_async_tx_descriptor *tx;
+	struct async_submit_ctl submit;
 
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -851,12 +857,13 @@ static void ops_run_check(struct stripe_head *sh)
 			xor_srcs[count++] = dev->page;
 	}
 
+	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
 	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			   &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
+			   &sh->ops.zero_sum_result, &submit);
 
 	atomic_inc(&sh->count);
-	tx = async_trigger_callback(ASYNC_TX_ACK, tx,
-				    ops_complete_check, sh);
+	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
+	tx = async_trigger_callback(&submit);
 }
 
 static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -2664,6 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 		if (i != sh->pd_idx && i != sh->qd_idx) {
 			int dd_idx, j;
 			struct stripe_head *sh2;
+			struct async_submit_ctl submit;
 
 			sector_t bn = compute_blocknr(sh, i, 1);
 			sector_t s = raid5_compute_sector(conf, bn, 0,
@@ -2683,9 +2691,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			}
 
 			/* place all the copies on one channel */
+			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
 			tx = async_memcpy(sh2->dev[dd_idx].page,
 					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
-					  0, tx, NULL, NULL);
+					  &submit);
 
 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 9f14cd540cd2..00cfb637ddf2 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -65,6 +65,22 @@ enum async_tx_flags {
 	ASYNC_TX_ACK = (1 << 2),
 };
 
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+	enum async_tx_flags flags;
+	struct dma_async_tx_descriptor *depend_tx;
+	dma_async_tx_callback cb_fn;
+	void *cb_param;
+	void *scribble;
+};
+
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
@@ -73,8 +89,8 @@ enum async_tx_flags {
 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
 	__async_tx_find_channel(dep, type)
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type);
+__async_tx_find_channel(struct async_submit_ctl *submit,
+			enum dma_transaction_type tx_type);
 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
@@ -83,9 +99,10 @@ static inline void async_tx_issue_pending_all(void)
 }
 
 static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-	struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+		      enum dma_transaction_type tx_type, struct page **dst,
+		      int dst_count, struct page **src, int src_count,
+		      size_t len)
 {
 	return NULL;
 }
@@ -97,46 +114,53 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
+{
+	if (submit->cb_fn)
+		submit->cb_fn(submit->cb_param);
+}
+
+typedef union {
+	unsigned long addr;
+	struct page *page;
+	dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *tx,
+		  dma_async_tx_callback cb_fn, void *cb_param,
+		  addr_conv_t *scribble)
 {
-	if (cb_fn)
-		cb_fn(cb_fn_param);
+	args->flags = flags;
+	args->depend_tx = tx;
+	args->cb_fn = cb_fn;
+	args->cb_param = cb_param;
+	args->scribble = scribble;
 }
 
-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+		     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	int src_cnt, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	  int src_cnt, size_t len, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list,
-	unsigned int offset, int src_cnt, size_t len,
-	u32 *result, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, u32 *result,
+	      struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_memset(struct page *dest, int val, unsigned int offset,
-	size_t len, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+	     size_t len, struct async_submit_ctl *submit);
 
-struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
 
 void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */