 Documentation/crypto/async-tx-api.txt | 42
 crypto/async_tx/async_xor.c           | 60
 drivers/dma/Kconfig                   |  2
 3 files changed, 57 insertions(+), 47 deletions(-)
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index dfe0475f7919..6b15e488c0e7 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -115,29 +115,41 @@ of an operation.
 Perform a xor->copy->xor operation where each operation depends on the
 result from the previous operation:
 
-void complete_xor_copy_xor(void *param)
+void callback(void *param)
 {
-        printk("complete\n");
+        struct completion *cmp = param;
+
+        complete(cmp);
 }
 
-int run_xor_copy_xor(struct page **xor_srcs,
-                     int xor_src_cnt,
-                     struct page *xor_dest,
-                     size_t xor_len,
-                     struct page *copy_src,
-                     struct page *copy_dest,
-                     size_t copy_len)
+void run_xor_copy_xor(struct page **xor_srcs,
+                      int xor_src_cnt,
+                      struct page *xor_dest,
+                      size_t xor_len,
+                      struct page *copy_src,
+                      struct page *copy_dest,
+                      size_t copy_len)
 {
         struct dma_async_tx_descriptor *tx;
+        addr_conv_t addr_conv[xor_src_cnt];
+        struct async_submit_ctl submit;
+        struct completion cmp;
+
+        init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
+                          addr_conv);
+        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
 
-        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-                       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-        tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, tx, NULL, NULL);
-        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-                       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
-                       tx, complete_xor_copy_xor, NULL);
+        submit.depend_tx = tx;
+        tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
+
+        init_completion(&cmp);
+        init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
+                          callback, &cmp, addr_conv);
+        tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
 
         async_tx_issue_pending_all();
 
+        wait_for_completion(&cmp);
 }
 
 See include/linux/async_tx.h for more information on the flags.  See the
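The example keeps the addr_conv scribble region on the stack, which is fine for
a one-off operation but not for a driver issuing many of them. A caller would
more typically size and allocate the region once per request. The following is
a minimal sketch of that pattern; it assumes nothing beyond init_async_submit()
and addr_conv_t from include/linux/async_tx.h, and struct xor_request and
xor_request_init() are hypothetical names, not part of the API:

#include <linux/async_tx.h>
#include <linux/slab.h>

/* hypothetical per-request state; not part of the async_tx API */
struct xor_request {
        struct async_submit_ctl submit;
        addr_conv_t *scribble;          /* one entry per xor source */
};

static int xor_request_init(struct xor_request *req, int src_cnt, gfp_t gfp)
{
        /* the scribble region needs one addr_conv_t per source page */
        req->scribble = kmalloc(src_cnt * sizeof(addr_conv_t), gfp);
        if (!req->scribble)
                return -ENOMEM;

        init_async_submit(&req->submit, ASYNC_TX_ACK, NULL, NULL, NULL,
                          req->scribble);
        return 0;
}

The region is reused by each operation issued through the submit context, so it
must remain allocated for as long as operations are being issued through it.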
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 691fa98a18c4..1e96c4df7061 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,11 +33,10 @@
 /* do_async_xor - dma map the pages and perform the xor with an engine */
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
-             unsigned int offset, int src_cnt, size_t len,
+             unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
              struct async_submit_ctl *submit)
 {
         struct dma_device *dma = chan->device;
-        dma_addr_t *dma_src = (dma_addr_t *) src_list;
         struct dma_async_tx_descriptor *tx = NULL;
         int src_off = 0;
         int i;
@@ -125,9 +124,14 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
         int xor_src_cnt;
         int src_off = 0;
         void *dest_buf;
-        void **srcs = (void **) src_list;
+        void **srcs;
 
-        /* reuse the 'src_list' array to convert to buffer pointers */
+        if (submit->scribble)
+                srcs = submit->scribble;
+        else
+                srcs = (void **) src_list;
+
+        /* convert to buffer pointers */
         for (i = 0; i < src_cnt; i++)
                 srcs[i] = page_address(src_list[i]) + offset;
 
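Note that the same caller-supplied scribble region backs both paths:
do_sync_xor() fills it with virtual addresses while do_async_xor() fills it
with dma addresses. This works because each element of the region is sized to
hold either representation. The union below is an illustrative stand-in for
that idea, with a made-up name; it is not the kernel's actual addr_conv_t
definition:

/* stand-in for the kernel's addr_conv_t: one slot, two views */
typedef union {
        void *ptr;              /* sync path: page_address(page) + offset */
        dma_addr_t dma;         /* async path: result of dma mapping */
} scribble_slot;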
@@ -178,17 +182,26 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
         struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
                                                       &dest, 1, src_list,
                                                       src_cnt, len);
+        dma_addr_t *dma_src = NULL;
+
         BUG_ON(src_cnt <= 1);
 
-        if (chan) {
+        if (submit->scribble)
+                dma_src = submit->scribble;
+        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+                dma_src = (dma_addr_t *) src_list;
+
+        if (dma_src && chan) {
                 /* run the xor asynchronously */
                 pr_debug("%s (async): len: %zu\n", __func__, len);
 
                 return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-                                    submit);
+                                    dma_src, submit);
         } else {
                 /* run the xor synchronously */
                 pr_debug("%s (sync): len: %zu\n", __func__, len);
+                WARN_ONCE(chan, "%s: no space for dma address conversion\n",
+                          __func__);
 
                 /* in the sync case the dest is an implied source
                  * (assumes the dest is the first source)
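The sizeof() comparison above takes over from the compile-time BUILD_BUG_ON
removed later in this patch: converting src_list in place overwrites one
element per iteration, which is only safe when a dma_addr_t fits in a
page-pointer slot. On i386 with CONFIG_HIGHMEM64G, dma_addr_t is 64 bits while
pointers are 32, so writing dma_src[0] would clobber src_list[1] before it is
read. A minimal userspace sketch of the same check, with illustrative types
standing in for struct page * and dma_addr_t:

#include <stdio.h>

int main(void)
{
        /* stand-ins: dma_addr_t under HIGHMEM64G vs. struct page * on i386 */
        size_t dma_size = sizeof(unsigned long long); /* 8 bytes */
        size_t ptr_size = sizeof(void *);             /* 4 bytes on i386 */

        if (dma_size <= ptr_size)
                printf("in-place reuse of the pointer array is safe\n");
        else
                printf("a separate scribble region is required\n");
        return 0;
}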
@@ -241,11 +254,16 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
                                               src_cnt, len);
         struct dma_device *device = chan ? chan->device : NULL;
         struct dma_async_tx_descriptor *tx = NULL;
+        dma_addr_t *dma_src = NULL;
 
         BUG_ON(src_cnt <= 1);
 
-        if (device && src_cnt <= device->max_xor) {
-                dma_addr_t *dma_src = (dma_addr_t *) src_list;
+        if (submit->scribble)
+                dma_src = submit->scribble;
+        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+                dma_src = (dma_addr_t *) src_list;
+
+        if (dma_src && device && src_cnt <= device->max_xor) {
                 unsigned long dma_prep_flags;
                 int i;
 
@@ -275,6 +293,9 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
                 enum async_tx_flags flags_orig = submit->flags;
 
                 pr_debug("%s: (sync) len: %zu\n", __func__, len);
+                WARN_ONCE(device && src_cnt <= device->max_xor,
+                          "%s: no space for dma address conversion\n",
+                          __func__);
 
                 submit->flags |= ASYNC_TX_XOR_DROP_DST;
                 submit->flags &= ~ASYNC_TX_ACK;
@@ -293,29 +314,6 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 }
 EXPORT_SYMBOL_GPL(async_xor_val);
 
-static int __init async_xor_init(void)
-{
-        #ifdef CONFIG_DMA_ENGINE
-        /* To conserve stack space the input src_list (array of page pointers)
-         * is reused to hold the array of dma addresses passed to the driver.
-         * This conversion is only possible when dma_addr_t is less than the
-         * size of a pointer.  HIGHMEM64G is known to violate this
-         * assumption.
-         */
-        BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
-        #endif
-
-        return 0;
-}
-
-static void __exit async_xor_exit(void)
-{
-        do { } while (0);
-}
-
-module_init(async_xor_init);
-module_exit(async_xor_exit);
-
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
 MODULE_LICENSE("GPL");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3b3c01b6f1ee..912a51b5cbd3 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig DMADEVICES
         bool "DMA Engine support"
-        depends on !HIGHMEM64G && HAS_DMA
+        depends on HAS_DMA
         help
           DMA engines can do asynchronous data transfers without
           involving the host CPU. Currently, this framework can be