author    Dan Williams <dan.j.williams@intel.com>  2009-06-03 14:43:59 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-06-03 17:07:35 -0400
commit    a08abd8ca890a377521d65d493d174bebcaf694b
tree      987c149a2d7d6ab345f426ac28191627b4a02a3e  /crypto/async_tx/async_xor.c
parent    88ba2aa586c874681c072101287e15d40de7e6e2
async_tx: structify submission arguments, add scribble
Prepare the api for the arrival of a new parameter, 'scribble'. This will
allow callers to identify scratchpad memory for dma address or page address
conversions. As this adds yet another parameter, take this opportunity to
convert the common submission parameters (flags, dependency, callback, and
callback argument) into an object that is passed by reference. Also, take
this opportunity to fix up the kerneldoc and add notes about the relevant
ASYNC_TX_* flags for each routine.

[ Impact: moves api pass-by-value parameters to a pass-by-reference struct ]

Signed-off-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
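For reference, the pass-by-reference object described above looks roughly like the sketch below. This is reconstructed from how 'submit->' is dereferenced in the diff that follows and from the commit text; the exact field types, the 'scribble' member, and the init helper's name and signature in include/linux/async_tx.h are assumptions and may differ.

/* Sketch (assumed layout) of the submission context that replaces the
 * flags/depend_tx/cb_fn/cb_param argument list.
 */
struct async_submit_ctl {
        enum async_tx_flags flags;                  /* ASYNC_TX_* modifiers */
        struct dma_async_tx_descriptor *depend_tx;  /* optional dependency */
        dma_async_tx_callback cb_fn;                /* completion callback */
        void *cb_param;                             /* argument passed to cb_fn */
        void *scribble;                             /* scratchpad for dma/page
                                                     * address conversions
                                                     * (type assumed) */
};

/* Hypothetical initializer; the in-tree helper may be named differently. */
static inline void
init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
                  struct dma_async_tx_descriptor *depend_tx,
                  dma_async_tx_callback cb_fn, void *cb_param, void *scribble)
{
        args->flags = flags;
        args->depend_tx = depend_tx;
        args->cb_fn = cb_fn;
        args->cb_param = cb_param;
        args->scribble = scribble;
}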
Diffstat (limited to 'crypto/async_tx/async_xor.c')
-rw-r--r--  crypto/async_tx/async_xor.c  |  123
1 file changed, 60 insertions(+), 63 deletions(-)
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3cc5dc763b54..691fa98a18c4 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -34,18 +34,16 @@
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
              unsigned int offset, int src_cnt, size_t len,
-             enum async_tx_flags flags,
-             struct dma_async_tx_descriptor *depend_tx,
-             dma_async_tx_callback cb_fn, void *cb_param)
+             struct async_submit_ctl *submit)
 {
         struct dma_device *dma = chan->device;
         dma_addr_t *dma_src = (dma_addr_t *) src_list;
         struct dma_async_tx_descriptor *tx = NULL;
         int src_off = 0;
         int i;
-        dma_async_tx_callback _cb_fn;
-        void *_cb_param;
-        enum async_tx_flags async_flags;
+        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+        void *cb_param_orig = submit->cb_param;
+        enum async_tx_flags flags_orig = submit->flags;
         enum dma_ctrl_flags dma_flags;
         int xor_src_cnt;
         dma_addr_t dma_dest;
@@ -63,7 +61,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
         }
 
         while (src_cnt) {
-                async_flags = flags;
+                submit->flags = flags_orig;
                 dma_flags = 0;
                 xor_src_cnt = min(src_cnt, dma->max_xor);
                 /* if we are submitting additional xors, leave the chain open,
@@ -71,15 +69,15 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
                  * buffer mapped
                  */
                 if (src_cnt > xor_src_cnt) {
-                        async_flags &= ~ASYNC_TX_ACK;
+                        submit->flags &= ~ASYNC_TX_ACK;
                         dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
-                        _cb_fn = NULL;
-                        _cb_param = NULL;
+                        submit->cb_fn = NULL;
+                        submit->cb_param = NULL;
                 } else {
-                        _cb_fn = cb_fn;
-                        _cb_param = cb_param;
+                        submit->cb_fn = cb_fn_orig;
+                        submit->cb_param = cb_param_orig;
                 }
-                if (_cb_fn)
+                if (submit->cb_fn)
                         dma_flags |= DMA_PREP_INTERRUPT;
 
                 /* Since we have clobbered the src_list we are committed
@@ -90,7 +88,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
                                               xor_src_cnt, len, dma_flags);
 
                 if (unlikely(!tx))
-                        async_tx_quiesce(&depend_tx);
+                        async_tx_quiesce(&submit->depend_tx);
 
                 /* spin wait for the preceeding transactions to complete */
                 while (unlikely(!tx)) {
@@ -101,10 +99,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
                                                       dma_flags);
                 }
 
-                async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
-                                _cb_param);
-
-                depend_tx = tx;
+                async_tx_submit(chan, tx, submit);
+                submit->depend_tx = tx;
 
                 if (src_cnt > xor_src_cnt) {
                         /* drop completed sources */
@@ -123,8 +119,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-            int src_cnt, size_t len, enum async_tx_flags flags,
-            dma_async_tx_callback cb_fn, void *cb_param)
+            int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
         int i;
         int xor_src_cnt;
@@ -139,7 +134,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
         /* set destination address */
         dest_buf = page_address(dest) + offset;
 
-        if (flags & ASYNC_TX_XOR_ZERO_DST)
+        if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
                 memset(dest_buf, 0, len);
 
         while (src_cnt > 0) {
@@ -152,33 +147,35 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
                 src_off += xor_src_cnt;
         }
 
-        async_tx_sync_epilog(cb_fn, cb_param);
+        async_tx_sync_epilog(submit);
 }
 
 /**
  * async_xor - attempt to xor a set of blocks with a dma engine.
- *      xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
- *      flag must be set to not include dest data in the calculation. The
- *      assumption with dma eninges is that they only use the destination
- *      buffer as a source when it is explicity specified in the source list.
  * @dest: destination page
- * @src_list: array of source pages (if the dest is also a source it must be
- *      at index zero). The contents of this array may be overwritten.
- * @offset: offset in pages to start transaction
+ * @src_list: array of source pages
+ * @offset: common src/dst offset to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation. The assumption with dma eninges is that they only
+ * use the destination buffer as a source when it is explicity specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
  */
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-          int src_cnt, size_t len, enum async_tx_flags flags,
-          struct dma_async_tx_descriptor *depend_tx,
-          dma_async_tx_callback cb_fn, void *cb_param)
+          int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
-        struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+        struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
                                                       &dest, 1, src_list,
                                                       src_cnt, len);
         BUG_ON(src_cnt <= 1);
@@ -188,7 +185,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
                 pr_debug("%s (async): len: %zu\n", __func__, len);
 
                 return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-                                    flags, depend_tx, cb_fn, cb_param);
+                                    submit);
         } else {
                 /* run the xor synchronously */
                 pr_debug("%s (sync): len: %zu\n", __func__, len);
@@ -196,16 +193,15 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
                 /* in the sync case the dest is an implied source
                  * (assumes the dest is the first source)
                  */
-                if (flags & ASYNC_TX_XOR_DROP_DST) {
+                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                         src_cnt--;
                         src_list++;
                 }
 
                 /* wait for any prerequisite operations */
-                async_tx_quiesce(&depend_tx);
+                async_tx_quiesce(&submit->depend_tx);
 
-                do_sync_xor(dest, src_list, offset, src_cnt, len,
-                            flags, cb_fn, cb_param);
+                do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
 
                 return NULL;
         }
@@ -222,25 +218,25 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages. The dest page must be listed as a source
- *      at index zero. The contents of this array may be overwritten.
+ * @src_list: array of source pages
  * @offset: offset in pages to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
  */
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list,
-              unsigned int offset, int src_cnt, size_t len,
-              u32 *result, enum async_tx_flags flags,
-              struct dma_async_tx_descriptor *depend_tx,
-              dma_async_tx_callback cb_fn, void *cb_param)
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+              int src_cnt, size_t len, u32 *result,
+              struct async_submit_ctl *submit)
 {
-        struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL,
+        struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
                                                       &dest, 1, src_list,
                                                       src_cnt, len);
         struct dma_device *device = chan ? chan->device : NULL;
@@ -250,11 +246,12 @@ async_xor_val(struct page *dest, struct page **src_list,
 
         if (device && src_cnt <= device->max_xor) {
                 dma_addr_t *dma_src = (dma_addr_t *) src_list;
-                unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+                unsigned long dma_prep_flags;
                 int i;
 
                 pr_debug("%s: (async) len: %zu\n", __func__, len);
 
+                dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
                 for (i = 0; i < src_cnt; i++)
                         dma_src[i] = dma_map_page(device->dev, src_list[i],
                                                   offset, len, DMA_TO_DEVICE);
@@ -263,7 +260,7 @@ async_xor_val(struct page *dest, struct page **src_list,
                                                      len, result,
                                                      dma_prep_flags);
                 if (unlikely(!tx)) {
-                        async_tx_quiesce(&depend_tx);
+                        async_tx_quiesce(&submit->depend_tx);
 
                         while (!tx) {
                                 dma_async_issue_pending(chan);
@@ -273,23 +270,23 @@ async_xor_val(struct page *dest, struct page **src_list,
                         }
                 }
 
-                async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+                async_tx_submit(chan, tx, submit);
         } else {
-                unsigned long xor_flags = flags;
+                enum async_tx_flags flags_orig = submit->flags;
 
                 pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
-                xor_flags |= ASYNC_TX_XOR_DROP_DST;
-                xor_flags &= ~ASYNC_TX_ACK;
+                submit->flags |= ASYNC_TX_XOR_DROP_DST;
+                submit->flags &= ~ASYNC_TX_ACK;
 
-                tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
-                               depend_tx, NULL, NULL);
+                tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
 
                 async_tx_quiesce(&tx);
 
                 *result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-                async_tx_sync_epilog(cb_fn, cb_param);
+                async_tx_sync_epilog(submit);
+                submit->flags = flags_orig;
         }
 
         return tx;
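
A caller-side view of the conversion, as a hedged sketch; the variable names and the init helper below are illustrative, not taken from the patch:

/* Before this patch: submission parameters passed by value. */
tx = async_xor(dest, srcs, 0, src_cnt, len,
               ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
               depend_tx, callback, callback_arg);

/* After this patch: the same parameters travel in a struct async_submit_ctl,
 * which also leaves room for the upcoming 'scribble' scratchpad pointer.
 */
struct async_submit_ctl submit;

init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
                  depend_tx, callback, callback_arg, NULL /* scribble */);
tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);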