author     Dan Williams <dan.j.williams@intel.com>   2009-06-03 17:22:28 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-06-03 17:22:28 -0400
commit     04ce9ab385dc97eb55299d533cd3af79b8fc7529 (patch)
tree       9b8d0b9c1eba820a8a107d05abc2e2f8d4d20a59 /crypto/async_tx/async_xor.c
parent     a08abd8ca890a377521d65d493d174bebcaf694b (diff)
async_xor: permit callers to pass in a 'dma/page scribble' region
async_xor() needs space to perform dma and page address conversions.  In
most cases the code can simply reuse the struct page * array because the
size of the native pointer matches the size of a dma/page address.  In
order to support archs where sizeof(dma_addr_t) is larger than
sizeof(struct page *), or to preserve the input parameters, we utilize a
memory region passed in by the caller.

Since the code is now prepared to handle the case where it cannot perform
address conversions on the stack, we no longer need the !HIGHMEM64G
dependency in drivers/dma/Kconfig.

[ Impact: don't clobber input buffers for address conversions ]

Reviewed-by: Andre Noll <maan@systemlinux.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
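For illustration only (not part of the patch): a minimal caller-side sketch of
handing async_xor() a scribble region through the submit context, assuming the
struct async_submit_ctl and init_async_submit() helper introduced by the parent
commit a08abd8ca890.  The example_xor() wrapper, the xor_srcs/cb/cb_param names,
and the sizing policy are hypothetical.

	#include <linux/async_tx.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	/* Hypothetical caller: size the scribble region for whichever
	 * conversion is larger -- dma_addr_t entries on the async path,
	 * void * buffer pointers on the sync path.
	 */
	static struct dma_async_tx_descriptor *
	example_xor(struct page *dest, struct page **xor_srcs, int src_cnt,
		    size_t len, dma_async_tx_callback cb, void *cb_param)
	{
		struct async_submit_ctl submit;
		size_t elem = max(sizeof(dma_addr_t), sizeof(struct page *));
		void *scribble = kmalloc(src_cnt * elem, GFP_KERNEL);

		/* A NULL scribble is tolerated: async_xor() falls back to
		 * reusing the src_list array when sizeof(dma_addr_t) <=
		 * sizeof(struct page *), and otherwise runs the xor
		 * synchronously (with a WARN_ONCE if a channel was found).
		 * The region must stay allocated until the operation
		 * completes; free it from the completion callback.
		 */
		init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
				  cb, cb_param, scribble);
		return async_xor(dest, xor_srcs, 0, src_cnt, len, &submit);
	}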
Diffstat (limited to 'crypto/async_tx/async_xor.c')
-rw-r--r--   crypto/async_tx/async_xor.c   60
1 files changed, 29 insertions, 31 deletions
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 691fa98a18c4..1e96c4df7061 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,11 +33,10 @@
 /* do_async_xor - dma map the pages and perform the xor with an engine */
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
-	     unsigned int offset, int src_cnt, size_t len,
+	     unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
 	     struct async_submit_ctl *submit)
 {
 	struct dma_device *dma = chan->device;
-	dma_addr_t *dma_src = (dma_addr_t *) src_list;
 	struct dma_async_tx_descriptor *tx = NULL;
 	int src_off = 0;
 	int i;
@@ -125,9 +124,14 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	int xor_src_cnt;
 	int src_off = 0;
 	void *dest_buf;
-	void **srcs = (void **) src_list;
+	void **srcs;
 
-	/* reuse the 'src_list' array to convert to buffer pointers */
+	if (submit->scribble)
+		srcs = submit->scribble;
+	else
+		srcs = (void **) src_list;
+
+	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
 		srcs[i] = page_address(src_list[i]) + offset;
 
@@ -178,17 +182,26 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
+	dma_addr_t *dma_src = NULL;
+
 	BUG_ON(src_cnt <= 1);
 
-	if (chan) {
+	if (submit->scribble)
+		dma_src = submit->scribble;
+	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+		dma_src = (dma_addr_t *) src_list;
+
+	if (dma_src && chan) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
 		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-				    submit);
+				    dma_src, submit);
 	} else {
 		/* run the xor synchronously */
 		pr_debug("%s (sync): len: %zu\n", __func__, len);
+		WARN_ONCE(chan, "%s: no space for dma address conversion\n",
+			  __func__);
 
 		/* in the sync case the dest is an implied source
 		 * (assumes the dest is the first source)
@@ -241,11 +254,16 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
+	dma_addr_t *dma_src = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (device && src_cnt <= device->max_xor) {
-		dma_addr_t *dma_src = (dma_addr_t *) src_list;
+	if (submit->scribble)
+		dma_src = submit->scribble;
+	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+		dma_src = (dma_addr_t *) src_list;
+
+	if (dma_src && device && src_cnt <= device->max_xor) {
 		unsigned long dma_prep_flags;
 		int i;
 
@@ -275,6 +293,9 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 		enum async_tx_flags flags_orig = submit->flags;
 
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
+		WARN_ONCE(device && src_cnt <= device->max_xor,
+			  "%s: no space for dma address conversion\n",
+			  __func__);
 
 		submit->flags |= ASYNC_TX_XOR_DROP_DST;
 		submit->flags &= ~ASYNC_TX_ACK;
@@ -293,29 +314,6 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 }
 EXPORT_SYMBOL_GPL(async_xor_val);
 
-static int __init async_xor_init(void)
-{
-	#ifdef CONFIG_DMA_ENGINE
-	/* To conserve stack space the input src_list (array of page pointers)
-	 * is reused to hold the array of dma addresses passed to the driver.
-	 * This conversion is only possible when dma_addr_t is less than the
-	 * the size of a pointer.  HIGHMEM64G is known to violate this
-	 * assumption.
-	 */
-	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
-	#endif
-
-	return 0;
-}
-
-static void __exit async_xor_exit(void)
-{
-	do { } while (0);
-}
-
-module_init(async_xor_init);
-module_exit(async_xor_exit);
-
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
 MODULE_LICENSE("GPL");