Diffstat (limited to 'crypto')
 crypto/async_tx/Kconfig     |   4 +
 crypto/async_tx/Makefile    |   1 +
 crypto/async_tx/async_pq.c  | 388 ++++++++++++++++++++++++++++++++++++++++++
 crypto/async_tx/async_xor.c |   2 +-
 4 files changed, 394 insertions(+), 1 deletion(-)
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index d8fb39145986..cb6d7314f198 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -14,3 +14,7 @@ config ASYNC_MEMSET
 	tristate
 	select ASYNC_CORE
 
+config ASYNC_PQ
+	tristate
+	select ASYNC_CORE
+
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile
index 27baa7d52fbc..1b9926588259 100644
--- a/crypto/async_tx/Makefile
+++ b/crypto/async_tx/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o
 obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
 obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
 obj-$(CONFIG_ASYNC_XOR) += async_xor.o
+obj-$(CONFIG_ASYNC_PQ) += async_pq.o
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
new file mode 100644
index 000000000000..108b21efb499
--- /dev/null
+++ b/crypto/async_tx/async_pq.c
@@ -0,0 +1,388 @@
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>

/**
 * scribble - space to hold a throwaway P or Q buffer for synchronous
 * gen_syndrome
 */
static struct page *scribble;

static bool is_raid6_zero_block(struct page *p)
{
	return p == (void *) raid6_empty_zero_page;
}

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

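/* Illustrative sketch, not part of this patch: for a 6-device raid6
 * stripe the caller passes four data pages followed by the two
 * destinations, so P(blocks, 6) selects blocks[4] and Q(blocks, 6)
 * selects blocks[5].  Here 'd0'..'d3', 'p' and 'q' are hypothetical
 * caller-owned pages:
 *
 *	struct page *blocks[6] = { d0, d1, d2, d3, p, q };
 */
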
/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset, int disks,
		      size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum dma_ctrl_flags dma_flags = 0;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;
	int idx;
	int i;

	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
	if (P(blocks, disks))
		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_P;
	if (Q(blocks, disks))
		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_Q;

	/* convert source addresses, being careful to collapse 'empty'
	 * sources and update the coefficients accordingly
	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (is_raid6_zero_block(blocks[i]))
			continue;
		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
					    DMA_TO_DEVICE);
		coefs[idx] = scfs[i];
		idx++;
	}
	src_cnt = idx;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they cannot provide a descriptor
		 */
		for (;;) {
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &dma_src[src_off],
						     pq_src_cnt,
						     &coefs[src_off], len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (is_raid6_zero_block(blocks[i])) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = blocks[i];
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}
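
/* For reference (an illustrative note, not text from this patch):
 * over the GF(2^8) field described below, gen_syndrome() computes the
 * standard raid6 P and Q from data blocks D_0..D_{disks-3}:
 *
 *	P = D_0 ^ D_1 ^ ... ^ D_{disks-3}
 *	Q = g^0*D_0 ^ g^1*D_1 ^ ... ^ g^{disks-3}*D_{disks-3}
 *
 * where '^' is byte-wise xor, '*' is multiplication in the field and
 * the generator g = {02}.
 */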

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
	}

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = scribble;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = scribble;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);

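/* Usage sketch (illustrative only, not part of this patch): kick off
 * P+Q generation for a 6-device stripe.  'blocks' holds four data
 * pages plus the P and Q destinations, and 'addr_conv' is a
 * hypothetical caller-provided scratch area of at least six
 * addr_conv_t entries for the address conversions.
 */
static struct dma_async_tx_descriptor * __maybe_unused
example_gen_pq(struct page **blocks, size_t len, addr_conv_t *addr_conv)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, addr_conv);
	return async_gen_syndrome(blocks, 0, 6, len, &submit);
}
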
/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires
 * a temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
						      NULL, 0, blocks, disks,
						      len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks < 4);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
		struct device *dev = device->dev;
		dma_addr_t *pq = &dma_src[disks-2];
		int i;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		for (i = 0; i < disks; i++)
			if (likely(blocks[i])) {
				BUG_ON(is_raid6_zero_block(blocks[i]));
				dma_src[i] = dma_map_page(dev, blocks[i],
							  offset, len,
							  DMA_TO_DEVICE);
			}

		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
							    disks - 2,
							    raid6_gfexp,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see that the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
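
/* Usage sketch (illustrative only, not part of this patch): validate
 * P and Q for a 6-device stripe and report a mismatch once the check
 * has completed.  'spare' and 'addr_conv' are hypothetical
 * caller-provided resources, as the synchronous path requires.
 */
static int __maybe_unused
example_check_pq(struct page **blocks, size_t len, struct page *spare,
		 addr_conv_t *addr_conv)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	enum sum_check_flags pqres = 0;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, addr_conv);
	tx = async_syndrome_val(blocks, 0, 6, len, &pqres, spare, &submit);

	/* wait for the validation to complete before inspecting pqres */
	async_tx_quiesce(&tx);

	return pqres ? -EILSEQ : 0;
}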

static int __init async_pq_init(void)
{
	scribble = alloc_page(GFP_KERNEL);

	if (scribble)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(scribble);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 78fb7780272a..56b5f98da463 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -62,7 +62,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	while (src_cnt) {
 		submit->flags = flags_orig;
 		dma_flags = 0;
-		xor_src_cnt = min(src_cnt, dma->max_xor);
+		xor_src_cnt = min(src_cnt, (int)dma->max_xor);
 		/* if we are submitting additional xors, leave the chain open,
 		 * clear the callback parameters, and leave the destination
 		 * buffer mapped