author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-04 09:59:47 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-04 09:59:47 -0500
commit	5cb2faa6ede7ada9cb2bffc832c4ce60f53d6834 (patch)
tree	7b72b66081d042a41dc822575503133364857ce2 /crypto
parent	e0ee98513d1a2e24d2ddbdecf4216bcca29d1158 (diff)
parent	6060e8df517847bf445ebc61de7d4d9c7faae990 (diff)
Merge branch 'pending-misc' (early part) into devel
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/Kconfig	5
-rw-r--r--	crypto/async_tx/async_pq.c	74
-rw-r--r--	crypto/async_tx/async_raid6_recov.c	100
-rw-r--r--	crypto/async_tx/async_xor.c	33
-rw-r--r--	crypto/gcm.c	107
5 files changed, 214 insertions(+), 105 deletions(-)
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e5aeb2b79e6f..e28e276ac611 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_CORE
 	select ASYNC_PQ
 
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+	bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+	bool
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..ec87f53d5059 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_quiesce(&submit->depend_tx);
 
 	if (!P(blocks, disks)) {
-		P(blocks, disks) = scribble;
+		P(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	if (!Q(blocks, disks)) {
-		Q(blocks, disks) = scribble;
+		Q(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 }
 EXPORT_SYMBOL_GPL(async_gen_syndrome);
 
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+				     disks, len);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,13 +270,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
 		   struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
-						      NULL, 0, blocks, disks,
-						      len);
+	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -285,22 +295,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
+		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
 			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
@@ -373,9 +393,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-	scribble = alloc_page(GFP_KERNEL);
+	pq_scribble_page = alloc_page(GFP_KERNEL);
 
-	if (scribble)
+	if (pq_scribble_page)
 		return 0;
 
 	pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +405,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(scribble);
+	put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
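For illustration, a minimal hypothetical caller sketch of the NULL-source convention documented in the async_gen_syndrome() comment above: source slots set to NULL are substituted with the zero page on the synchronous path and omitted on the DMA path. The function and page names here are assumptions, not part of this diff.

#include <linux/async_tx.h>

/*
 * Hypothetical caller sketch (not part of this diff): blocks[] holds the
 * data sources followed by the P and Q destinations.  Sources the caller
 * knows to be all zeroes may now be passed as NULL instead of
 * raid6_empty_zero_page.
 */
static struct dma_async_tx_descriptor *
example_gen_syndrome(struct page *d0, struct page *d2,
		     struct page *p, struct page *q,
		     struct async_submit_ctl *submit)
{
	struct page *blocks[6] = {
		d0,
		NULL,	/* all-zero source, skipped on the DMA path */
		d2,
		NULL,	/* all-zero source */
		p,	/* P destination */
		q,	/* Q destination */
	};

	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, submit);
}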
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..943f2abac9b4 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
 
-	p = blocks[4-2];
-	q = blocks[4-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 
 	a = blocks[faila];
 	b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
-	int uninitialized_var(good);
-	int i;
+	int good_srcs, good, i;
 
-	for (i = 0; i < 3; i++) {
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (blocks[i] == NULL)
+			continue;
 		if (i == faila || i == failb)
 			continue;
-		else {
-			good = i;
-			break;
-		}
+		good = i;
+		good_srcs++;
 	}
-	BUG_ON(i >= 3);
+	BUG_ON(good_srcs > 1);
 
-	p = blocks[5-2];
-	q = blocks[5-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	g = blocks[good];
 
 	/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	int non_zero_srcs, i;
+
 	BUG_ON(faila == failb);
 	if (failb < faila)
 		swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 	 */
 	if (!submit->scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void *) raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 		return NULL;
 	}
 
-	switch (disks) {
-	case 4:
+	non_zero_srcs = 0;
+	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+		if (blocks[i])
+			non_zero_srcs++;
+	switch (non_zero_srcs) {
+	case 0:
+	case 1:
+		/* There must be at least 2 sources - the failed devices. */
+		BUG();
+
+	case 2:
 		/* dma devices do not uniformly understand a zero source pq
 		 * operation (in contrast to the synchronous case), so
-		 * explicitly handle the 4 disk special case
+		 * explicitly handle the special case of a 4 disk array with
+		 * both data disks missing.
 		 */
-		return __2data_recov_4(bytes, faila, failb, blocks, submit);
-	case 5:
+		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+	case 3:
 		/* dma devices do not uniformly understand a single
 		 * source pq operation (in contrast to the synchronous
-		 * case), so explicitly handle the 5 disk special case
+		 * case), so explicitly handle the special case of a 5 disk
+		 * array with 2 of 3 data disks missing.
 		 */
-		return __2data_recov_5(bytes, faila, failb, blocks, submit);
+		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
 	default:
 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
 	}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
+	int good_srcs, good, i;
 	struct page *srcs[2];
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 */
 	if (!scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 		return NULL;
 	}
 
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (i == faila)
+			continue;
+		if (blocks[i]) {
+			good = i;
+			good_srcs++;
+			if (good_srcs > 1)
+				break;
+		}
+	}
+	BUG_ON(good_srcs == 0);
+
 	p = blocks[disks-2];
 	q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
-	/* in the 4 disk case we only need to perform a single source
-	 * multiplication
+	/* in the 4-disk case we only need to perform a single source
+	 * multiplication with the one good data block.
 	 */
-	if (disks == 4) {
-		int good = faila == 0 ? 1 : 0;
+	if (good_srcs == 1) {
 		struct page *g = blocks[good];
 
 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
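To illustrate the new calling convention for the recovery paths above: the failed slots still carry the pages that receive the recovered data, data members absent from the stripe may be passed as NULL, and the routines now pick their strategy from the number of remaining non-NULL sources rather than from the raw disk count. All names in this sketch are assumptions, not part of this diff.

#include <linux/async_tx.h>

/*
 * Hypothetical caller sketch (not part of this diff): a 6-disk stripe with
 * data disks 1 and 3 failed.  blocks[1] and blocks[3] still point at the
 * pages that will receive the recovered data; the data member that is not
 * populated (disk 2 here) is passed as NULL.
 */
static struct dma_async_tx_descriptor *
example_recover_two(struct page *d0, struct page *dead1, struct page *dead3,
		    struct page *p, struct page *q,
		    struct async_submit_ctl *submit)
{
	struct page *blocks[6] = { d0, dead1, NULL, dead3, p, q };

	return async_raid6_2data_recov(6, PAGE_SIZE, 1, 3, blocks, submit);
}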
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
@@ -230,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 		memcmp(a, a + 4, len - 4) == 0);
 }
 
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+	     struct page **src_list, int src_cnt, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+				     src_cnt, len);
+}
+
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
@@ -251,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	      int src_cnt, size_t len, enum sum_check_flags *result,
 	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
-						      &dest, 1, src_list,
-						      src_cnt, len);
+	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_addr_t *dma_src = NULL;
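A similar hypothetical sketch for the xor path above: entries in src_list[] may now be NULL and are skipped while sources are mapped (or converted to buffer pointers), so callers no longer need to compact their source lists first. The names below are assumptions, not part of this diff.

#include <linux/async_tx.h>

/*
 * Hypothetical caller sketch (not part of this diff): the NULL entry is
 * skipped by both the DMA and the synchronous xor paths.
 */
static struct dma_async_tx_descriptor *
example_xor(struct page *dest, struct page *s0, struct page *s2,
	    struct async_submit_ctl *submit)
{
	struct page *src_list[3] = { s0, NULL /* skipped */, s2 };

	return async_xor(dest, src_list, 0, 3, PAGE_SIZE, submit);
}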
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 5fc3292483ef..c6547130624c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
 struct crypto_gcm_ghash_ctx {
 	unsigned int cryptlen;
 	struct scatterlist *src;
-	crypto_completion_t complete;
+	void (*complete)(struct aead_request *req, int err);
 };
 
 struct crypto_gcm_req_priv_ctx {
@@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
 	return crypto_ahash_final(ahreq);
 }
 
-static void gcm_hash_final_done(struct crypto_async_request *areq,
-				int err)
+static void __gcm_hash_final_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 
 	if (!err)
 		crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
 
-	gctx->complete(areq, err);
+	gctx->complete(req, err);
 }
 
-static void gcm_hash_len_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_final_done(req, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err) {
@@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_final_done(areq, err);
+	__gcm_hash_final_done(req, err);
 }
 
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_len_done(req, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err) {
@@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_len_done(areq, err);
+	__gcm_hash_len_done(req, err);
 }
 
-static void gcm_hash_crypt_done(struct crypto_async_request *areq,
-				int err)
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+				       int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_crypt_remain_done(req, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	unsigned int remain;
@@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_crypt_remain_done(areq, err);
+	__gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_crypt_done(req, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	crypto_completion_t complete;
@@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
 	}
 
 	if (remain)
-		gcm_hash_crypt_done(areq, err);
+		__gcm_hash_crypt_done(req, err);
 	else
-		gcm_hash_crypt_remain_done(areq, err);
+		__gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_done(struct crypto_async_request *areq,
-				int err)
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+				       int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_assoc_remain_done(req, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	unsigned int remain;
 
@@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
 		return;
 	}
 
-	gcm_hash_assoc_remain_done(areq, err);
+	__gcm_hash_assoc_remain_done(req, err);
 }
 
-static void gcm_hash_init_done(struct crypto_async_request *areq,
-			       int err)
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
+
+	__gcm_hash_assoc_done(req, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req, int err)
+{
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	crypto_completion_t complete;
 	unsigned int remain = 0;
@@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
 	}
 
 	if (remain)
-		gcm_hash_assoc_done(areq, err);
+		__gcm_hash_assoc_done(req, err);
 	else
-		gcm_hash_assoc_remain_done(areq, err);
+		__gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+	struct aead_request *req = areq->data;
+
+	__gcm_hash_init_done(req, err);
 }
 
 static int gcm_hash(struct aead_request *req,
@@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
 				 crypto_aead_authsize(aead), 1);
 }
 
-static void gcm_enc_hash_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err)
@@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
 	aead_request_complete(req, err);
 }
 
-static void gcm_encrypt_done(struct crypto_async_request *areq,
-			     int err)
+static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
 		err = gcm_hash(req, pctx);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err) {
+			crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+			gcm_enc_copy_hash(req, pctx);
+		}
 	}
 
-	gcm_enc_hash_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
 	aead_request_complete(req, err);
 }
 
-static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+static void gcm_dec_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct ablkcipher_request *abreq = &pctx->u.abreq;
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
 		err = crypto_ablkcipher_decrypt(abreq);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err)
+			err = crypto_gcm_verify(req, pctx);
 	}
 
-	gcm_decrypt_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_decrypt(struct aead_request *req)
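The gcm.c changes above follow one pattern: each completion handler is split into a __*_done() helper that takes the aead_request directly, plus a thin crypto_async_request wrapper for the async engine, so synchronous completion can call the helper (or aead_request_complete()) without routing through a crypto_async_request it does not own. A generic sketch of that split, with hypothetical names not taken from this diff:

#include <linux/crypto.h>

/* Illustrative sketch only; the example_* names are hypothetical. */
static void __example_step_done(struct aead_request *req, int err)
{
	/* shared completion work, callable from both sync and async paths */
}

static void example_step_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;	/* unwrap the async handle */

	__example_step_done(req, err);
}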