Diffstat (limited to 'crypto')
62 files changed, 7021 insertions, 2335 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 4dfdd03e708f..e4bac29a32e7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -23,6 +23,7 @@ comment "Crypto core or helper" | |||
23 | 23 | ||
24 | config CRYPTO_FIPS | 24 | config CRYPTO_FIPS |
25 | bool "FIPS 200 compliance" | 25 | bool "FIPS 200 compliance" |
26 | depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS | ||
26 | help | 27 | help |
27 | This option enables the fips boot option which is | 28 | This option enables the fips boot option which is |
28 | required if you want the system to operate in a FIPS 200 | 29 | required if you want the system to operate in a FIPS 200 |
@@ -78,6 +79,11 @@ config CRYPTO_RNG2 | |||
78 | 79 | ||
79 | config CRYPTO_PCOMP | 80 | config CRYPTO_PCOMP |
80 | tristate | 81 | tristate |
82 | select CRYPTO_PCOMP2 | ||
83 | select CRYPTO_ALGAPI | ||
84 | |||
85 | config CRYPTO_PCOMP2 | ||
86 | tristate | ||
81 | select CRYPTO_ALGAPI2 | 87 | select CRYPTO_ALGAPI2 |
82 | 88 | ||
83 | config CRYPTO_MANAGER | 89 | config CRYPTO_MANAGER |
@@ -92,7 +98,15 @@ config CRYPTO_MANAGER2 | |||
92 | select CRYPTO_AEAD2 | 98 | select CRYPTO_AEAD2 |
93 | select CRYPTO_HASH2 | 99 | select CRYPTO_HASH2 |
94 | select CRYPTO_BLKCIPHER2 | 100 | select CRYPTO_BLKCIPHER2 |
95 | select CRYPTO_PCOMP | 101 | select CRYPTO_PCOMP2 |
102 | |||
103 | config CRYPTO_MANAGER_DISABLE_TESTS | ||
104 | bool "Disable run-time self tests" | ||
105 | default y | ||
106 | depends on CRYPTO_MANAGER2 | ||
107 | help | ||
108 | Disable run-time self tests that normally take place at | ||
109 | algorithm registration. | ||
96 | 110 | ||
97 | config CRYPTO_GF128MUL | 111 | config CRYPTO_GF128MUL |
98 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" | 112 | tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" |
@@ -112,6 +126,16 @@ config CRYPTO_NULL | |||
112 | help | 126 | help |
113 | These are 'Null' algorithms, used by IPsec, which do nothing. | 127 | These are 'Null' algorithms, used by IPsec, which do nothing. |
114 | 128 | ||
129 | config CRYPTO_PCRYPT | ||
130 | tristate "Parallel crypto engine (EXPERIMENTAL)" | ||
131 | depends on SMP && EXPERIMENTAL | ||
132 | select PADATA | ||
133 | select CRYPTO_MANAGER | ||
134 | select CRYPTO_AEAD | ||
135 | help | ||
136 | This converts an arbitrary crypto algorithm into a parallel | ||
137 | algorithm that executes in kernel threads. | ||
138 | |||
115 | config CRYPTO_WORKQUEUE | 139 | config CRYPTO_WORKQUEUE |
116 | tristate | 140 | tristate |
117 | 141 | ||
@@ -156,7 +180,7 @@ config CRYPTO_GCM | |||
156 | tristate "GCM/GMAC support" | 180 | tristate "GCM/GMAC support" |
157 | select CRYPTO_CTR | 181 | select CRYPTO_CTR |
158 | select CRYPTO_AEAD | 182 | select CRYPTO_AEAD |
159 | select CRYPTO_GF128MUL | 183 | select CRYPTO_GHASH |
160 | help | 184 | help |
161 | Support for Galois/Counter Mode (GCM) and Galois Message | 185 | Support for Galois/Counter Mode (GCM) and Galois Message |
162 | Authentication Code (GMAC). Required for IPSec. | 186 | Authentication Code (GMAC). Required for IPSec. |
@@ -267,6 +291,18 @@ config CRYPTO_XCBC | |||
267 | http://csrc.nist.gov/encryption/modes/proposedmodes/ | 291 | http://csrc.nist.gov/encryption/modes/proposedmodes/ |
268 | xcbc-mac/xcbc-mac-spec.pdf | 292 | xcbc-mac/xcbc-mac-spec.pdf |
269 | 293 | ||
294 | config CRYPTO_VMAC | ||
295 | tristate "VMAC support" | ||
296 | depends on EXPERIMENTAL | ||
297 | select CRYPTO_HASH | ||
298 | select CRYPTO_MANAGER | ||
299 | help | ||
300 | VMAC is a message authentication algorithm designed for | ||
301 | very high speed on 64-bit architectures. | ||
302 | |||
303 | See also: | ||
304 | <http://fastcrypto.org/vmac> | ||
305 | |||
270 | comment "Digest" | 306 | comment "Digest" |
271 | 307 | ||
272 | config CRYPTO_CRC32C | 308 | config CRYPTO_CRC32C |
@@ -289,6 +325,13 @@ config CRYPTO_CRC32C_INTEL | |||
289 | gain performance compared with software implementation. | 325 | gain performance compared with software implementation. |
290 | Module will be crc32c-intel. | 326 | Module will be crc32c-intel. |
291 | 327 | ||
328 | config CRYPTO_GHASH | ||
329 | tristate "GHASH digest algorithm" | ||
330 | select CRYPTO_SHASH | ||
331 | select CRYPTO_GF128MUL | ||
332 | help | ||
333 | GHASH is a message digest algorithm for GCM (Galois/Counter Mode). | ||
334 | |||
292 | config CRYPTO_MD4 | 335 | config CRYPTO_MD4 |
293 | tristate "MD4 digest algorithm" | 336 | tristate "MD4 digest algorithm" |
294 | select CRYPTO_HASH | 337 | select CRYPTO_HASH |
@@ -321,7 +364,7 @@ config CRYPTO_RMD128 | |||
321 | RIPEMD-160 should be used. | 364 | RIPEMD-160 should be used. |
322 | 365 | ||
323 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 366 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
324 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 367 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
325 | 368 | ||
326 | config CRYPTO_RMD160 | 369 | config CRYPTO_RMD160 |
327 | tristate "RIPEMD-160 digest algorithm" | 370 | tristate "RIPEMD-160 digest algorithm" |
@@ -338,7 +381,7 @@ config CRYPTO_RMD160 | |||
338 | against RIPEMD-160. | 381 | against RIPEMD-160. |
339 | 382 | ||
340 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 383 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
341 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 384 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
342 | 385 | ||
343 | config CRYPTO_RMD256 | 386 | config CRYPTO_RMD256 |
344 | tristate "RIPEMD-256 digest algorithm" | 387 | tristate "RIPEMD-256 digest algorithm" |
@@ -350,7 +393,7 @@ config CRYPTO_RMD256 | |||
350 | (than RIPEMD-128). | 393 | (than RIPEMD-128). |
351 | 394 | ||
352 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 395 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
353 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 396 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
354 | 397 | ||
355 | config CRYPTO_RMD320 | 398 | config CRYPTO_RMD320 |
356 | tristate "RIPEMD-320 digest algorithm" | 399 | tristate "RIPEMD-320 digest algorithm" |
@@ -362,7 +405,7 @@ config CRYPTO_RMD320 | |||
362 | (than RIPEMD-160). | 405 | (than RIPEMD-160). |
363 | 406 | ||
364 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. | 407 | Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. |
365 | See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> | 408 | See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> |
366 | 409 | ||
367 | config CRYPTO_SHA1 | 410 | config CRYPTO_SHA1 |
368 | tristate "SHA1 digest algorithm" | 411 | tristate "SHA1 digest algorithm" |
@@ -417,7 +460,16 @@ config CRYPTO_WP512 | |||
417 | Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard | 460 | Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard |
418 | 461 | ||
419 | See also: | 462 | See also: |
420 | <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html> | 463 | <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html> |
464 | |||
465 | config CRYPTO_GHASH_CLMUL_NI_INTEL | ||
466 | tristate "GHASH digest algorithm (CLMUL-NI accelerated)" | ||
467 | depends on (X86 || UML_X86) && 64BIT | ||
468 | select CRYPTO_SHASH | ||
469 | select CRYPTO_CRYPTD | ||
470 | help | ||
471 | GHASH is a message digest algorithm for GCM (Galois/Counter Mode). | ||
472 | The implementation is accelerated by Intel's CLMUL-NI instructions. | ||
421 | 473 | ||
422 | comment "Ciphers" | 474 | comment "Ciphers" |
423 | 475 | ||
@@ -526,8 +578,8 @@ config CRYPTO_ANUBIS | |||
526 | in the NESSIE competition. | 578 | in the NESSIE competition. |
527 | 579 | ||
528 | See also: | 580 | See also: |
529 | <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/> | 581 | <https://www.cosic.esat.kuleuven.be/nessie/reports/> |
530 | <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html> | 582 | <http://www.larc.usp.br/~pbarreto/AnubisPage.html> |
531 | 583 | ||
532 | config CRYPTO_ARC4 | 584 | config CRYPTO_ARC4 |
533 | tristate "ARC4 cipher algorithm" | 585 | tristate "ARC4 cipher algorithm" |
@@ -606,7 +658,7 @@ config CRYPTO_KHAZAD | |||
606 | on 32-bit processors. Khazad uses a 128-bit key size. | 658 | on 32-bit processors. Khazad uses a 128-bit key size. |
607 | 659 | ||
608 | See also: | 660 | See also: |
609 | <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html> | 661 | <http://www.larc.usp.br/~pbarreto/KhazadPage.html> |
610 | 662 | ||
611 | config CRYPTO_SALSA20 | 663 | config CRYPTO_SALSA20 |
612 | tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" | 664 | tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" |
@@ -780,13 +832,14 @@ comment "Random Number Generation" | |||
780 | 832 | ||
781 | config CRYPTO_ANSI_CPRNG | 833 | config CRYPTO_ANSI_CPRNG |
782 | tristate "Pseudo Random Number Generation for Cryptographic modules" | 834 | tristate "Pseudo Random Number Generation for Cryptographic modules" |
835 | default m | ||
783 | select CRYPTO_AES | 836 | select CRYPTO_AES |
784 | select CRYPTO_RNG | 837 | select CRYPTO_RNG |
785 | select CRYPTO_FIPS | ||
786 | help | 838 | help |
787 | This option enables the generic pseudo random number generator | 839 | This option enables the generic pseudo random number generator |
788 | for cryptographic modules. Uses the Algorithm specified in | 840 | for cryptographic modules. Uses the Algorithm specified in |
789 | ANSI X9.31 A.2.4 | 841 | ANSI X9.31 A.2.4. Note that this option must be enabled if |
842 | CRYPTO_FIPS is selected. | ||
790 | 843 | ||
791 | source "drivers/crypto/Kconfig" | 844 | source "drivers/crypto/Kconfig" |
792 | 845 | ||
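The new CRYPTO_PCRYPT option above exposes the parallel engine as a "pcrypt" template wrapped around an existing AEAD algorithm. A minimal sketch of how a caller might instantiate it; the inner authenc(hmac(sha1),cbc(aes)) algorithm is only an illustrative choice, not something this patch mandates:

#include <crypto/aead.h>
#include <linux/err.h>

static struct crypto_aead *alloc_parallel_aead(void)
{
	struct crypto_aead *tfm;

	/* Wrap an ordinary AEAD in the pcrypt template so requests are
	 * spread across kernel threads on SMP systems. */
	tfm = crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
	if (IS_ERR(tfm))
		return NULL;

	return tfm;
}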
diff --git a/crypto/Makefile b/crypto/Makefile index 673d9f7c1bda..423b7de61f93 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_CRYPTO) += crypto.o | 5 | obj-$(CONFIG_CRYPTO) += crypto.o |
6 | crypto-objs := api.o cipher.o digest.o compress.o | 6 | crypto-objs := api.o cipher.o compress.o |
7 | 7 | ||
8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o | 8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o |
9 | 9 | ||
@@ -22,17 +22,17 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o | |||
22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o | 22 | obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o |
23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o | 23 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o |
24 | 24 | ||
25 | crypto_hash-objs := hash.o | ||
26 | crypto_hash-objs += ahash.o | 25 | crypto_hash-objs += ahash.o |
27 | crypto_hash-objs += shash.o | 26 | crypto_hash-objs += shash.o |
28 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o | 27 | obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o |
29 | 28 | ||
30 | obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o | 29 | obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o |
31 | 30 | ||
32 | cryptomgr-objs := algboss.o testmgr.o | 31 | cryptomgr-objs := algboss.o testmgr.o |
33 | 32 | ||
34 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o | 33 | obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o |
35 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o | 34 | obj-$(CONFIG_CRYPTO_HMAC) += hmac.o |
35 | obj-$(CONFIG_CRYPTO_VMAC) += vmac.o | ||
36 | obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o | 36 | obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o |
37 | obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o | 37 | obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o |
38 | obj-$(CONFIG_CRYPTO_MD4) += md4.o | 38 | obj-$(CONFIG_CRYPTO_MD4) += md4.o |
@@ -56,11 +56,12 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o | |||
56 | obj-$(CONFIG_CRYPTO_CTR) += ctr.o | 56 | obj-$(CONFIG_CRYPTO_CTR) += ctr.o |
57 | obj-$(CONFIG_CRYPTO_GCM) += gcm.o | 57 | obj-$(CONFIG_CRYPTO_GCM) += gcm.o |
58 | obj-$(CONFIG_CRYPTO_CCM) += ccm.o | 58 | obj-$(CONFIG_CRYPTO_CCM) += ccm.o |
59 | obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o | ||
59 | obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o | 60 | obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o |
60 | obj-$(CONFIG_CRYPTO_DES) += des_generic.o | 61 | obj-$(CONFIG_CRYPTO_DES) += des_generic.o |
61 | obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o | 62 | obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o |
62 | obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o | 63 | obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o |
63 | obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o | 64 | obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o |
64 | obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o | 65 | obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o |
65 | obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o | 66 | obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o |
66 | obj-$(CONFIG_CRYPTO_AES) += aes_generic.o | 67 | obj-$(CONFIG_CRYPTO_AES) += aes_generic.o |
@@ -83,6 +84,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o | |||
83 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o | 84 | obj-$(CONFIG_CRYPTO_RNG2) += krng.o |
84 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o | 85 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o |
85 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o | 86 | obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o |
87 | obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o | ||
86 | 88 | ||
87 | # | 89 | # |
88 | # generic algorithms and the async_tx api | 90 | # generic algorithms and the async_tx api |
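The Makefile now builds ghash-generic.o for the CRYPTO_GHASH option, and GCM selects GHASH rather than using the GF(2^128) multiplication helpers directly. A rough sketch of exercising the standalone "ghash" digest through the shash API, assuming a caller that already has a 16-byte hash key and a message buffer:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int ghash_digest_example(const u8 key[16], const u8 *msg,
				unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	/* shash_desc must be followed by the algorithm's descsize. */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, msg, len, out);
	kfree(desc);

out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}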
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index e11ce37c7104..a854df2a5a4b 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Asynchronous block chaining cipher operations. | 2 | * Asynchronous block chaining cipher operations. |
3 | * | 3 | * |
4 | * This is the asynchronous version of blkcipher.c indicating completion | 4 | * This is the asynchronous version of blkcipher.c indicating completion |
5 | * via a callback. | 5 | * via a callback. |
6 | * | 6 | * |
@@ -8,12 +8,13 @@ | |||
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 11 | * Software Foundation; either version 2 of the License, or (at your option) |
12 | * any later version. | 12 | * any later version. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <crypto/internal/skcipher.h> | 16 | #include <crypto/internal/skcipher.h> |
17 | #include <linux/cpumask.h> | ||
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
@@ -23,8 +24,287 @@ | |||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
25 | 26 | ||
27 | #include <crypto/scatterwalk.h> | ||
28 | |||
26 | #include "internal.h" | 29 | #include "internal.h" |
27 | 30 | ||
31 | static const char *skcipher_default_geniv __read_mostly; | ||
32 | |||
33 | struct ablkcipher_buffer { | ||
34 | struct list_head entry; | ||
35 | struct scatter_walk dst; | ||
36 | unsigned int len; | ||
37 | void *data; | ||
38 | }; | ||
39 | |||
40 | enum { | ||
41 | ABLKCIPHER_WALK_SLOW = 1 << 0, | ||
42 | }; | ||
43 | |||
44 | static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p) | ||
45 | { | ||
46 | scatterwalk_copychunks(p->data, &p->dst, p->len, 1); | ||
47 | } | ||
48 | |||
49 | void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) | ||
50 | { | ||
51 | struct ablkcipher_buffer *p, *tmp; | ||
52 | |||
53 | list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { | ||
54 | ablkcipher_buffer_write(p); | ||
55 | list_del(&p->entry); | ||
56 | kfree(p); | ||
57 | } | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete); | ||
60 | |||
61 | static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, | ||
62 | struct ablkcipher_buffer *p) | ||
63 | { | ||
64 | p->dst = walk->out; | ||
65 | list_add_tail(&p->entry, &walk->buffers); | ||
66 | } | ||
67 | |||
68 | /* Get a spot of the specified length that does not straddle a page. | ||
69 | * The caller needs to ensure that there is enough space for this operation. | ||
70 | */ | ||
71 | static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) | ||
72 | { | ||
73 | u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); | ||
74 | return max(start, end_page); | ||
75 | } | ||
76 | |||
77 | static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, | ||
78 | unsigned int bsize) | ||
79 | { | ||
80 | unsigned int n = bsize; | ||
81 | |||
82 | for (;;) { | ||
83 | unsigned int len_this_page = scatterwalk_pagelen(&walk->out); | ||
84 | |||
85 | if (len_this_page > n) | ||
86 | len_this_page = n; | ||
87 | scatterwalk_advance(&walk->out, n); | ||
88 | if (n == len_this_page) | ||
89 | break; | ||
90 | n -= len_this_page; | ||
91 | scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg)); | ||
92 | } | ||
93 | |||
94 | return bsize; | ||
95 | } | ||
96 | |||
97 | static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, | ||
98 | unsigned int n) | ||
99 | { | ||
100 | scatterwalk_advance(&walk->in, n); | ||
101 | scatterwalk_advance(&walk->out, n); | ||
102 | |||
103 | return n; | ||
104 | } | ||
105 | |||
106 | static int ablkcipher_walk_next(struct ablkcipher_request *req, | ||
107 | struct ablkcipher_walk *walk); | ||
108 | |||
109 | int ablkcipher_walk_done(struct ablkcipher_request *req, | ||
110 | struct ablkcipher_walk *walk, int err) | ||
111 | { | ||
112 | struct crypto_tfm *tfm = req->base.tfm; | ||
113 | unsigned int nbytes = 0; | ||
114 | |||
115 | if (likely(err >= 0)) { | ||
116 | unsigned int n = walk->nbytes - err; | ||
117 | |||
118 | if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) | ||
119 | n = ablkcipher_done_fast(walk, n); | ||
120 | else if (WARN_ON(err)) { | ||
121 | err = -EINVAL; | ||
122 | goto err; | ||
123 | } else | ||
124 | n = ablkcipher_done_slow(walk, n); | ||
125 | |||
126 | nbytes = walk->total - n; | ||
127 | err = 0; | ||
128 | } | ||
129 | |||
130 | scatterwalk_done(&walk->in, 0, nbytes); | ||
131 | scatterwalk_done(&walk->out, 1, nbytes); | ||
132 | |||
133 | err: | ||
134 | walk->total = nbytes; | ||
135 | walk->nbytes = nbytes; | ||
136 | |||
137 | if (nbytes) { | ||
138 | crypto_yield(req->base.flags); | ||
139 | return ablkcipher_walk_next(req, walk); | ||
140 | } | ||
141 | |||
142 | if (walk->iv != req->info) | ||
143 | memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); | ||
144 | if (walk->iv_buffer) | ||
145 | kfree(walk->iv_buffer); | ||
146 | |||
147 | return err; | ||
148 | } | ||
149 | EXPORT_SYMBOL_GPL(ablkcipher_walk_done); | ||
150 | |||
151 | static inline int ablkcipher_next_slow(struct ablkcipher_request *req, | ||
152 | struct ablkcipher_walk *walk, | ||
153 | unsigned int bsize, | ||
154 | unsigned int alignmask, | ||
155 | void **src_p, void **dst_p) | ||
156 | { | ||
157 | unsigned aligned_bsize = ALIGN(bsize, alignmask + 1); | ||
158 | struct ablkcipher_buffer *p; | ||
159 | void *src, *dst, *base; | ||
160 | unsigned int n; | ||
161 | |||
162 | n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1); | ||
163 | n += (aligned_bsize * 3 - (alignmask + 1) + | ||
164 | (alignmask & ~(crypto_tfm_ctx_alignment() - 1))); | ||
165 | |||
166 | p = kmalloc(n, GFP_ATOMIC); | ||
167 | if (!p) | ||
168 | return ablkcipher_walk_done(req, walk, -ENOMEM); | ||
169 | |||
170 | base = p + 1; | ||
171 | |||
172 | dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1); | ||
173 | src = dst = ablkcipher_get_spot(dst, bsize); | ||
174 | |||
175 | p->len = bsize; | ||
176 | p->data = dst; | ||
177 | |||
178 | scatterwalk_copychunks(src, &walk->in, bsize, 0); | ||
179 | |||
180 | ablkcipher_queue_write(walk, p); | ||
181 | |||
182 | walk->nbytes = bsize; | ||
183 | walk->flags |= ABLKCIPHER_WALK_SLOW; | ||
184 | |||
185 | *src_p = src; | ||
186 | *dst_p = dst; | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk, | ||
192 | struct crypto_tfm *tfm, | ||
193 | unsigned int alignmask) | ||
194 | { | ||
195 | unsigned bs = walk->blocksize; | ||
196 | unsigned int ivsize = tfm->crt_ablkcipher.ivsize; | ||
197 | unsigned aligned_bs = ALIGN(bs, alignmask + 1); | ||
198 | unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - | ||
199 | (alignmask + 1); | ||
200 | u8 *iv; | ||
201 | |||
202 | size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); | ||
203 | walk->iv_buffer = kmalloc(size, GFP_ATOMIC); | ||
204 | if (!walk->iv_buffer) | ||
205 | return -ENOMEM; | ||
206 | |||
207 | iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1); | ||
208 | iv = ablkcipher_get_spot(iv, bs) + aligned_bs; | ||
209 | iv = ablkcipher_get_spot(iv, bs) + aligned_bs; | ||
210 | iv = ablkcipher_get_spot(iv, ivsize); | ||
211 | |||
212 | walk->iv = memcpy(iv, walk->iv, ivsize); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static inline int ablkcipher_next_fast(struct ablkcipher_request *req, | ||
217 | struct ablkcipher_walk *walk) | ||
218 | { | ||
219 | walk->src.page = scatterwalk_page(&walk->in); | ||
220 | walk->src.offset = offset_in_page(walk->in.offset); | ||
221 | walk->dst.page = scatterwalk_page(&walk->out); | ||
222 | walk->dst.offset = offset_in_page(walk->out.offset); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int ablkcipher_walk_next(struct ablkcipher_request *req, | ||
228 | struct ablkcipher_walk *walk) | ||
229 | { | ||
230 | struct crypto_tfm *tfm = req->base.tfm; | ||
231 | unsigned int alignmask, bsize, n; | ||
232 | void *src, *dst; | ||
233 | int err; | ||
234 | |||
235 | alignmask = crypto_tfm_alg_alignmask(tfm); | ||
236 | n = walk->total; | ||
237 | if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) { | ||
238 | req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | ||
239 | return ablkcipher_walk_done(req, walk, -EINVAL); | ||
240 | } | ||
241 | |||
242 | walk->flags &= ~ABLKCIPHER_WALK_SLOW; | ||
243 | src = dst = NULL; | ||
244 | |||
245 | bsize = min(walk->blocksize, n); | ||
246 | n = scatterwalk_clamp(&walk->in, n); | ||
247 | n = scatterwalk_clamp(&walk->out, n); | ||
248 | |||
249 | if (n < bsize || | ||
250 | !scatterwalk_aligned(&walk->in, alignmask) || | ||
251 | !scatterwalk_aligned(&walk->out, alignmask)) { | ||
252 | err = ablkcipher_next_slow(req, walk, bsize, alignmask, | ||
253 | &src, &dst); | ||
254 | goto set_phys_lowmem; | ||
255 | } | ||
256 | |||
257 | walk->nbytes = n; | ||
258 | |||
259 | return ablkcipher_next_fast(req, walk); | ||
260 | |||
261 | set_phys_lowmem: | ||
262 | if (err >= 0) { | ||
263 | walk->src.page = virt_to_page(src); | ||
264 | walk->dst.page = virt_to_page(dst); | ||
265 | walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1)); | ||
266 | walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1)); | ||
267 | } | ||
268 | |||
269 | return err; | ||
270 | } | ||
271 | |||
272 | static int ablkcipher_walk_first(struct ablkcipher_request *req, | ||
273 | struct ablkcipher_walk *walk) | ||
274 | { | ||
275 | struct crypto_tfm *tfm = req->base.tfm; | ||
276 | unsigned int alignmask; | ||
277 | |||
278 | alignmask = crypto_tfm_alg_alignmask(tfm); | ||
279 | if (WARN_ON_ONCE(in_irq())) | ||
280 | return -EDEADLK; | ||
281 | |||
282 | walk->nbytes = walk->total; | ||
283 | if (unlikely(!walk->total)) | ||
284 | return 0; | ||
285 | |||
286 | walk->iv_buffer = NULL; | ||
287 | walk->iv = req->info; | ||
288 | if (unlikely(((unsigned long)walk->iv & alignmask))) { | ||
289 | int err = ablkcipher_copy_iv(walk, tfm, alignmask); | ||
290 | if (err) | ||
291 | return err; | ||
292 | } | ||
293 | |||
294 | scatterwalk_start(&walk->in, walk->in.sg); | ||
295 | scatterwalk_start(&walk->out, walk->out.sg); | ||
296 | |||
297 | return ablkcipher_walk_next(req, walk); | ||
298 | } | ||
299 | |||
300 | int ablkcipher_walk_phys(struct ablkcipher_request *req, | ||
301 | struct ablkcipher_walk *walk) | ||
302 | { | ||
303 | walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm); | ||
304 | return ablkcipher_walk_first(req, walk); | ||
305 | } | ||
306 | EXPORT_SYMBOL_GPL(ablkcipher_walk_phys); | ||
307 | |||
28 | static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, | 308 | static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, |
29 | unsigned int keylen) | 309 | unsigned int keylen) |
30 | { | 310 | { |
@@ -180,7 +460,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type); | |||
180 | 460 | ||
181 | const char *crypto_default_geniv(const struct crypto_alg *alg) | 461 | const char *crypto_default_geniv(const struct crypto_alg *alg) |
182 | { | 462 | { |
183 | return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv"; | 463 | if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == |
464 | CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : | ||
465 | alg->cra_ablkcipher.ivsize) != | ||
466 | alg->cra_blocksize) | ||
467 | return "chainiv"; | ||
468 | |||
469 | return alg->cra_flags & CRYPTO_ALG_ASYNC ? | ||
470 | "eseqiv" : skcipher_default_geniv; | ||
184 | } | 471 | } |
185 | 472 | ||
186 | static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | 473 | static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) |
@@ -201,8 +488,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | |||
201 | int err; | 488 | int err; |
202 | 489 | ||
203 | larval = crypto_larval_lookup(alg->cra_driver_name, | 490 | larval = crypto_larval_lookup(alg->cra_driver_name, |
491 | (type & ~CRYPTO_ALG_TYPE_MASK) | | ||
204 | CRYPTO_ALG_TYPE_GIVCIPHER, | 492 | CRYPTO_ALG_TYPE_GIVCIPHER, |
205 | CRYPTO_ALG_TYPE_MASK); | 493 | mask | CRYPTO_ALG_TYPE_MASK); |
206 | err = PTR_ERR(larval); | 494 | err = PTR_ERR(larval); |
207 | if (IS_ERR(larval)) | 495 | if (IS_ERR(larval)) |
208 | goto out; | 496 | goto out; |
@@ -360,3 +648,17 @@ err: | |||
360 | return ERR_PTR(err); | 648 | return ERR_PTR(err); |
361 | } | 649 | } |
362 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); | 650 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); |
651 | |||
652 | static int __init skcipher_module_init(void) | ||
653 | { | ||
654 | skcipher_default_geniv = num_possible_cpus() > 1 ? | ||
655 | "eseqiv" : "chainiv"; | ||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | static void skcipher_module_exit(void) | ||
660 | { | ||
661 | } | ||
662 | |||
663 | module_init(skcipher_module_init); | ||
664 | module_exit(skcipher_module_exit); | ||
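The ablkcipher_walk_* helpers added above give hardware drivers a page-safe way to iterate over the source and destination scatterlists of a request. A sketch of how a driver's encrypt path might use them; the per-chunk hardware call is a placeholder, and ablkcipher_walk_init is assumed to come from the matching crypto/algapi.h change that accompanies this file:

#include <crypto/algapi.h>

static int example_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_walk walk;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (!err && walk.nbytes) {
		/* walk.src.page/offset and walk.dst.page/offset now describe
		 * one chunk that does not straddle a page boundary; hand it
		 * to the hardware here (placeholder: process everything). */
		unsigned int done = walk.nbytes;

		/* Third argument is the number of bytes left unprocessed. */
		err = ablkcipher_walk_done(req, &walk, walk.nbytes - done);
	}

	return err;
}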
diff --git a/crypto/aead.c b/crypto/aead.c index d9aa733db164..6729e8ff68e7 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * AEAD: Authenticated Encryption with Associated Data | 2 | * AEAD: Authenticated Encryption with Associated Data |
3 | * | 3 | * |
4 | * This file provides API support for AEAD algorithms. | 4 | * This file provides API support for AEAD algorithms. |
5 | * | 5 | * |
6 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> | 6 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | 9 | * under the terms of the GNU General Public License as published by the Free |
10 | * Software Foundation; either version 2 of the License, or (at your option) | 10 | * Software Foundation; either version 2 of the License, or (at your option) |
11 | * any later version. | 11 | * any later version. |
12 | * | 12 | * |
13 | */ | 13 | */ |
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/sched.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
23 | 24 | ||
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index b8b66ec3883b..a68c73dae15a 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * AES Cipher Algorithm. | 4 | * AES Cipher Algorithm. |
@@ -1127,7 +1127,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); | |||
1127 | 1127 | ||
1128 | #define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) | 1128 | #define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) |
1129 | 1129 | ||
1130 | #define imix_col(y,x) do { \ | 1130 | #define imix_col(y, x) do { \ |
1131 | u = star_x(x); \ | 1131 | u = star_x(x); \ |
1132 | v = star_x(u); \ | 1132 | v = star_x(u); \ |
1133 | w = star_x(v); \ | 1133 | w = star_x(v); \ |
@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); | |||
1174 | ctx->key_enc[6 * i + 11] = t; \ | 1174 | ctx->key_enc[6 * i + 11] = t; \ |
1175 | } while (0) | 1175 | } while (0) |
1176 | 1176 | ||
1177 | #define loop8(i) do { \ | 1177 | #define loop8tophalf(i) do { \ |
1178 | t = ror32(t, 8); \ | 1178 | t = ror32(t, 8); \ |
1179 | t = ls_box(t) ^ rco_tab[i]; \ | 1179 | t = ls_box(t) ^ rco_tab[i]; \ |
1180 | t ^= ctx->key_enc[8 * i]; \ | 1180 | t ^= ctx->key_enc[8 * i]; \ |
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); | |||
1185 | ctx->key_enc[8 * i + 10] = t; \ | 1185 | ctx->key_enc[8 * i + 10] = t; \ |
1186 | t ^= ctx->key_enc[8 * i + 3]; \ | 1186 | t ^= ctx->key_enc[8 * i + 3]; \ |
1187 | ctx->key_enc[8 * i + 11] = t; \ | 1187 | ctx->key_enc[8 * i + 11] = t; \ |
1188 | } while (0) | ||
1189 | |||
1190 | #define loop8(i) do { \ | ||
1191 | loop8tophalf(i); \ | ||
1188 | t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ | 1192 | t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ |
1189 | ctx->key_enc[8 * i + 12] = t; \ | 1193 | ctx->key_enc[8 * i + 12] = t; \ |
1190 | t ^= ctx->key_enc[8 * i + 5]; \ | 1194 | t ^= ctx->key_enc[8 * i + 5]; \ |
@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, | |||
1245 | ctx->key_enc[5] = le32_to_cpu(key[5]); | 1249 | ctx->key_enc[5] = le32_to_cpu(key[5]); |
1246 | ctx->key_enc[6] = le32_to_cpu(key[6]); | 1250 | ctx->key_enc[6] = le32_to_cpu(key[6]); |
1247 | t = ctx->key_enc[7] = le32_to_cpu(key[7]); | 1251 | t = ctx->key_enc[7] = le32_to_cpu(key[7]); |
1248 | for (i = 0; i < 7; ++i) | 1252 | for (i = 0; i < 6; ++i) |
1249 | loop8(i); | 1253 | loop8(i); |
1254 | loop8tophalf(i); | ||
1250 | break; | 1255 | break; |
1251 | } | 1256 | } |
1252 | 1257 | ||
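The loop change in crypto_aes_expand_key above is easier to see with the word counts written out: an AES-256 schedule needs 4 * (14 + 1) = 60 32-bit words, the initial key supplies 8 of them, each full loop8() pass generates 8 more, and loop8tophalf() generates 4. The old code ran loop8() seven times, producing 8 + 7 * 8 = 64 words and writing four words past the end of key_enc; the new code runs six full passes plus the top half, producing exactly 8 + 6 * 8 + 4 = 60.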
diff --git a/crypto/ahash.c b/crypto/ahash.c index f3476374f764..f669822a7a44 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c | |||
@@ -24,6 +24,19 @@ | |||
24 | 24 | ||
25 | #include "internal.h" | 25 | #include "internal.h" |
26 | 26 | ||
27 | struct ahash_request_priv { | ||
28 | crypto_completion_t complete; | ||
29 | void *data; | ||
30 | u8 *result; | ||
31 | void *ubuf[] CRYPTO_MINALIGN_ATTR; | ||
32 | }; | ||
33 | |||
34 | static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) | ||
35 | { | ||
36 | return container_of(crypto_hash_alg_common(hash), struct ahash_alg, | ||
37 | halg); | ||
38 | } | ||
39 | |||
27 | static int hash_walk_next(struct crypto_hash_walk *walk) | 40 | static int hash_walk_next(struct crypto_hash_walk *walk) |
28 | { | 41 | { |
29 | unsigned int alignmask = walk->alignmask; | 42 | unsigned int alignmask = walk->alignmask; |
@@ -34,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk) | |||
34 | walk->data = crypto_kmap(walk->pg, 0); | 47 | walk->data = crypto_kmap(walk->pg, 0); |
35 | walk->data += offset; | 48 | walk->data += offset; |
36 | 49 | ||
37 | if (offset & alignmask) | 50 | if (offset & alignmask) { |
38 | nbytes = alignmask + 1 - (offset & alignmask); | 51 | unsigned int unaligned = alignmask + 1 - (offset & alignmask); |
52 | if (nbytes > unaligned) | ||
53 | nbytes = unaligned; | ||
54 | } | ||
39 | 55 | ||
40 | walk->entrylen -= nbytes; | 56 | walk->entrylen -= nbytes; |
41 | return nbytes; | 57 | return nbytes; |
@@ -65,7 +81,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) | |||
65 | walk->data -= walk->offset; | 81 | walk->data -= walk->offset; |
66 | 82 | ||
67 | if (nbytes && walk->offset & alignmask && !err) { | 83 | if (nbytes && walk->offset & alignmask && !err) { |
68 | walk->offset += alignmask - 1; | ||
69 | walk->offset = ALIGN(walk->offset, alignmask + 1); | 84 | walk->offset = ALIGN(walk->offset, alignmask + 1); |
70 | walk->data += walk->offset; | 85 | walk->data += walk->offset; |
71 | 86 | ||
@@ -132,36 +147,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, | |||
132 | static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, | 147 | static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, |
133 | unsigned int keylen) | 148 | unsigned int keylen) |
134 | { | 149 | { |
135 | struct ahash_alg *ahash = crypto_ahash_alg(tfm); | ||
136 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | 150 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
137 | int ret; | 151 | int ret; |
138 | u8 *buffer, *alignbuffer; | 152 | u8 *buffer, *alignbuffer; |
139 | unsigned long absize; | 153 | unsigned long absize; |
140 | 154 | ||
141 | absize = keylen + alignmask; | 155 | absize = keylen + alignmask; |
142 | buffer = kmalloc(absize, GFP_ATOMIC); | 156 | buffer = kmalloc(absize, GFP_KERNEL); |
143 | if (!buffer) | 157 | if (!buffer) |
144 | return -ENOMEM; | 158 | return -ENOMEM; |
145 | 159 | ||
146 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 160 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
147 | memcpy(alignbuffer, key, keylen); | 161 | memcpy(alignbuffer, key, keylen); |
148 | ret = ahash->setkey(tfm, alignbuffer, keylen); | 162 | ret = tfm->setkey(tfm, alignbuffer, keylen); |
149 | memset(alignbuffer, 0, keylen); | 163 | kzfree(buffer); |
150 | kfree(buffer); | ||
151 | return ret; | 164 | return ret; |
152 | } | 165 | } |
153 | 166 | ||
154 | static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | 167 | int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
155 | unsigned int keylen) | 168 | unsigned int keylen) |
156 | { | 169 | { |
157 | struct ahash_alg *ahash = crypto_ahash_alg(tfm); | ||
158 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | 170 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
159 | 171 | ||
160 | if ((unsigned long)key & alignmask) | 172 | if ((unsigned long)key & alignmask) |
161 | return ahash_setkey_unaligned(tfm, key, keylen); | 173 | return ahash_setkey_unaligned(tfm, key, keylen); |
162 | 174 | ||
163 | return ahash->setkey(tfm, key, keylen); | 175 | return tfm->setkey(tfm, key, keylen); |
164 | } | 176 | } |
177 | EXPORT_SYMBOL_GPL(crypto_ahash_setkey); | ||
165 | 178 | ||
166 | static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, | 179 | static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, |
167 | unsigned int keylen) | 180 | unsigned int keylen) |
@@ -169,44 +182,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, | |||
169 | return -ENOSYS; | 182 | return -ENOSYS; |
170 | } | 183 | } |
171 | 184 | ||
172 | int crypto_ahash_import(struct ahash_request *req, const u8 *in) | 185 | static inline unsigned int ahash_align_buffer_size(unsigned len, |
186 | unsigned long mask) | ||
187 | { | ||
188 | return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); | ||
189 | } | ||
190 | |||
191 | static void ahash_op_unaligned_finish(struct ahash_request *req, int err) | ||
192 | { | ||
193 | struct ahash_request_priv *priv = req->priv; | ||
194 | |||
195 | if (err == -EINPROGRESS) | ||
196 | return; | ||
197 | |||
198 | if (!err) | ||
199 | memcpy(priv->result, req->result, | ||
200 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); | ||
201 | |||
202 | kzfree(priv); | ||
203 | } | ||
204 | |||
205 | static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) | ||
206 | { | ||
207 | struct ahash_request *areq = req->data; | ||
208 | struct ahash_request_priv *priv = areq->priv; | ||
209 | crypto_completion_t complete = priv->complete; | ||
210 | void *data = priv->data; | ||
211 | |||
212 | ahash_op_unaligned_finish(areq, err); | ||
213 | |||
214 | complete(data, err); | ||
215 | } | ||
216 | |||
217 | static int ahash_op_unaligned(struct ahash_request *req, | ||
218 | int (*op)(struct ahash_request *)) | ||
173 | { | 219 | { |
174 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 220 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
175 | struct ahash_alg *alg = crypto_ahash_alg(tfm); | 221 | unsigned long alignmask = crypto_ahash_alignmask(tfm); |
222 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
223 | struct ahash_request_priv *priv; | ||
224 | int err; | ||
225 | |||
226 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), | ||
227 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
228 | GFP_KERNEL : GFP_ATOMIC); | ||
229 | if (!priv) | ||
230 | return -ENOMEM; | ||
176 | 231 | ||
177 | memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm)); | 232 | priv->result = req->result; |
233 | priv->complete = req->base.complete; | ||
234 | priv->data = req->base.data; | ||
178 | 235 | ||
179 | if (alg->reinit) | 236 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); |
180 | alg->reinit(req); | 237 | req->base.complete = ahash_op_unaligned_done; |
238 | req->base.data = req; | ||
239 | req->priv = priv; | ||
181 | 240 | ||
182 | return 0; | 241 | err = op(req); |
242 | ahash_op_unaligned_finish(req, err); | ||
243 | |||
244 | return err; | ||
183 | } | 245 | } |
184 | EXPORT_SYMBOL_GPL(crypto_ahash_import); | ||
185 | 246 | ||
186 | static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type, | 247 | static int crypto_ahash_op(struct ahash_request *req, |
187 | u32 mask) | 248 | int (*op)(struct ahash_request *)) |
188 | { | 249 | { |
189 | return alg->cra_ctxsize; | 250 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
251 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | ||
252 | |||
253 | if ((unsigned long)req->result & alignmask) | ||
254 | return ahash_op_unaligned(req, op); | ||
255 | |||
256 | return op(req); | ||
190 | } | 257 | } |
191 | 258 | ||
192 | static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | 259 | int crypto_ahash_final(struct ahash_request *req) |
193 | { | 260 | { |
194 | struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash; | 261 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); |
195 | struct ahash_tfm *crt = &tfm->crt_ahash; | 262 | } |
263 | EXPORT_SYMBOL_GPL(crypto_ahash_final); | ||
196 | 264 | ||
197 | if (alg->digestsize > PAGE_SIZE / 8) | 265 | int crypto_ahash_finup(struct ahash_request *req) |
198 | return -EINVAL; | 266 | { |
267 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); | ||
268 | } | ||
269 | EXPORT_SYMBOL_GPL(crypto_ahash_finup); | ||
270 | |||
271 | int crypto_ahash_digest(struct ahash_request *req) | ||
272 | { | ||
273 | return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); | ||
274 | } | ||
275 | EXPORT_SYMBOL_GPL(crypto_ahash_digest); | ||
276 | |||
277 | static void ahash_def_finup_finish2(struct ahash_request *req, int err) | ||
278 | { | ||
279 | struct ahash_request_priv *priv = req->priv; | ||
280 | |||
281 | if (err == -EINPROGRESS) | ||
282 | return; | ||
283 | |||
284 | if (!err) | ||
285 | memcpy(priv->result, req->result, | ||
286 | crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); | ||
287 | |||
288 | kzfree(priv); | ||
289 | } | ||
290 | |||
291 | static void ahash_def_finup_done2(struct crypto_async_request *req, int err) | ||
292 | { | ||
293 | struct ahash_request *areq = req->data; | ||
294 | struct ahash_request_priv *priv = areq->priv; | ||
295 | crypto_completion_t complete = priv->complete; | ||
296 | void *data = priv->data; | ||
297 | |||
298 | ahash_def_finup_finish2(areq, err); | ||
299 | |||
300 | complete(data, err); | ||
301 | } | ||
302 | |||
303 | static int ahash_def_finup_finish1(struct ahash_request *req, int err) | ||
304 | { | ||
305 | if (err) | ||
306 | goto out; | ||
307 | |||
308 | req->base.complete = ahash_def_finup_done2; | ||
309 | req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
310 | err = crypto_ahash_reqtfm(req)->final(req); | ||
311 | |||
312 | out: | ||
313 | ahash_def_finup_finish2(req, err); | ||
314 | return err; | ||
315 | } | ||
316 | |||
317 | static void ahash_def_finup_done1(struct crypto_async_request *req, int err) | ||
318 | { | ||
319 | struct ahash_request *areq = req->data; | ||
320 | struct ahash_request_priv *priv = areq->priv; | ||
321 | crypto_completion_t complete = priv->complete; | ||
322 | void *data = priv->data; | ||
323 | |||
324 | err = ahash_def_finup_finish1(areq, err); | ||
325 | |||
326 | complete(data, err); | ||
327 | } | ||
328 | |||
329 | static int ahash_def_finup(struct ahash_request *req) | ||
330 | { | ||
331 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
332 | unsigned long alignmask = crypto_ahash_alignmask(tfm); | ||
333 | unsigned int ds = crypto_ahash_digestsize(tfm); | ||
334 | struct ahash_request_priv *priv; | ||
199 | 335 | ||
200 | crt->init = alg->init; | 336 | priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), |
201 | crt->update = alg->update; | 337 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
202 | crt->final = alg->final; | 338 | GFP_KERNEL : GFP_ATOMIC); |
203 | crt->digest = alg->digest; | 339 | if (!priv) |
204 | crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; | 340 | return -ENOMEM; |
205 | crt->digestsize = alg->digestsize; | 341 | |
342 | priv->result = req->result; | ||
343 | priv->complete = req->base.complete; | ||
344 | priv->data = req->base.data; | ||
345 | |||
346 | req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); | ||
347 | req->base.complete = ahash_def_finup_done1; | ||
348 | req->base.data = req; | ||
349 | req->priv = priv; | ||
350 | |||
351 | return ahash_def_finup_finish1(req, tfm->update(req)); | ||
352 | } | ||
353 | |||
354 | static int ahash_no_export(struct ahash_request *req, void *out) | ||
355 | { | ||
356 | return -ENOSYS; | ||
357 | } | ||
358 | |||
359 | static int ahash_no_import(struct ahash_request *req, const void *in) | ||
360 | { | ||
361 | return -ENOSYS; | ||
362 | } | ||
363 | |||
364 | static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) | ||
365 | { | ||
366 | struct crypto_ahash *hash = __crypto_ahash_cast(tfm); | ||
367 | struct ahash_alg *alg = crypto_ahash_alg(hash); | ||
368 | |||
369 | hash->setkey = ahash_nosetkey; | ||
370 | hash->export = ahash_no_export; | ||
371 | hash->import = ahash_no_import; | ||
372 | |||
373 | if (tfm->__crt_alg->cra_type != &crypto_ahash_type) | ||
374 | return crypto_init_shash_ops_async(tfm); | ||
375 | |||
376 | hash->init = alg->init; | ||
377 | hash->update = alg->update; | ||
378 | hash->final = alg->final; | ||
379 | hash->finup = alg->finup ?: ahash_def_finup; | ||
380 | hash->digest = alg->digest; | ||
381 | |||
382 | if (alg->setkey) | ||
383 | hash->setkey = alg->setkey; | ||
384 | if (alg->export) | ||
385 | hash->export = alg->export; | ||
386 | if (alg->import) | ||
387 | hash->import = alg->import; | ||
206 | 388 | ||
207 | return 0; | 389 | return 0; |
208 | } | 390 | } |
209 | 391 | ||
392 | static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) | ||
393 | { | ||
394 | if (alg->cra_type == &crypto_ahash_type) | ||
395 | return alg->cra_ctxsize; | ||
396 | |||
397 | return sizeof(struct crypto_shash *); | ||
398 | } | ||
399 | |||
210 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) | 400 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) |
211 | __attribute__ ((unused)); | 401 | __attribute__ ((unused)); |
212 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) | 402 | static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) |
@@ -215,17 +405,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) | |||
215 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? | 405 | seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? |
216 | "yes" : "no"); | 406 | "yes" : "no"); |
217 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | 407 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); |
218 | seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize); | 408 | seq_printf(m, "digestsize : %u\n", |
409 | __crypto_hash_alg_common(alg)->digestsize); | ||
219 | } | 410 | } |
220 | 411 | ||
221 | const struct crypto_type crypto_ahash_type = { | 412 | const struct crypto_type crypto_ahash_type = { |
222 | .ctxsize = crypto_ahash_ctxsize, | 413 | .extsize = crypto_ahash_extsize, |
223 | .init = crypto_init_ahash_ops, | 414 | .init_tfm = crypto_ahash_init_tfm, |
224 | #ifdef CONFIG_PROC_FS | 415 | #ifdef CONFIG_PROC_FS |
225 | .show = crypto_ahash_show, | 416 | .show = crypto_ahash_show, |
226 | #endif | 417 | #endif |
418 | .maskclear = ~CRYPTO_ALG_TYPE_MASK, | ||
419 | .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, | ||
420 | .type = CRYPTO_ALG_TYPE_AHASH, | ||
421 | .tfmsize = offsetof(struct crypto_ahash, base), | ||
227 | }; | 422 | }; |
228 | EXPORT_SYMBOL_GPL(crypto_ahash_type); | 423 | EXPORT_SYMBOL_GPL(crypto_ahash_type); |
229 | 424 | ||
425 | struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, | ||
426 | u32 mask) | ||
427 | { | ||
428 | return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask); | ||
429 | } | ||
430 | EXPORT_SYMBOL_GPL(crypto_alloc_ahash); | ||
431 | |||
432 | static int ahash_prepare_alg(struct ahash_alg *alg) | ||
433 | { | ||
434 | struct crypto_alg *base = &alg->halg.base; | ||
435 | |||
436 | if (alg->halg.digestsize > PAGE_SIZE / 8 || | ||
437 | alg->halg.statesize > PAGE_SIZE / 8) | ||
438 | return -EINVAL; | ||
439 | |||
440 | base->cra_type = &crypto_ahash_type; | ||
441 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | ||
442 | base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | int crypto_register_ahash(struct ahash_alg *alg) | ||
448 | { | ||
449 | struct crypto_alg *base = &alg->halg.base; | ||
450 | int err; | ||
451 | |||
452 | err = ahash_prepare_alg(alg); | ||
453 | if (err) | ||
454 | return err; | ||
455 | |||
456 | return crypto_register_alg(base); | ||
457 | } | ||
458 | EXPORT_SYMBOL_GPL(crypto_register_ahash); | ||
459 | |||
460 | int crypto_unregister_ahash(struct ahash_alg *alg) | ||
461 | { | ||
462 | return crypto_unregister_alg(&alg->halg.base); | ||
463 | } | ||
464 | EXPORT_SYMBOL_GPL(crypto_unregister_ahash); | ||
465 | |||
466 | int ahash_register_instance(struct crypto_template *tmpl, | ||
467 | struct ahash_instance *inst) | ||
468 | { | ||
469 | int err; | ||
470 | |||
471 | err = ahash_prepare_alg(&inst->alg); | ||
472 | if (err) | ||
473 | return err; | ||
474 | |||
475 | return crypto_register_instance(tmpl, ahash_crypto_instance(inst)); | ||
476 | } | ||
477 | EXPORT_SYMBOL_GPL(ahash_register_instance); | ||
478 | |||
479 | void ahash_free_instance(struct crypto_instance *inst) | ||
480 | { | ||
481 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
482 | kfree(ahash_instance(inst)); | ||
483 | } | ||
484 | EXPORT_SYMBOL_GPL(ahash_free_instance); | ||
485 | |||
486 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | ||
487 | struct hash_alg_common *alg, | ||
488 | struct crypto_instance *inst) | ||
489 | { | ||
490 | return crypto_init_spawn2(&spawn->base, &alg->base, inst, | ||
491 | &crypto_ahash_type); | ||
492 | } | ||
493 | EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn); | ||
494 | |||
495 | struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | ||
496 | { | ||
497 | struct crypto_alg *alg; | ||
498 | |||
499 | alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask); | ||
500 | return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg); | ||
501 | } | ||
502 | EXPORT_SYMBOL_GPL(ahash_attr_alg); | ||
503 | |||
230 | MODULE_LICENSE("GPL"); | 504 | MODULE_LICENSE("GPL"); |
231 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); | 505 | MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |
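With crypto_alloc_ahash() and the crypto_ahash_final/finup/digest entry points exported above, a caller can drive an asynchronous hash without caring whether the backend is a native ahash or a wrapped shash. A minimal sketch, assuming a caller that tolerates the synchronous case only and picks "sha1" purely as an example algorithm name:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ahash_digest_example(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* This sketch only handles the synchronous case; a real caller
	 * would pass a completion callback above and wait when the call
	 * returns -EINPROGRESS or -EBUSY. */
	err = crypto_ahash_digest(req);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}

Note that an unaligned result buffer is handled transparently now: ahash_op_unaligned() copies into an aligned bounce buffer and back, so the caller no longer needs to align out itself.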
diff --git a/crypto/algapi.c b/crypto/algapi.c index 56c62e2858d5..c3cf1a69a47a 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/string.h> | 21 | #include <linux/string.h> |
21 | 22 | ||
22 | #include "internal.h" | 23 | #include "internal.h" |
@@ -81,16 +82,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg) | |||
81 | crypto_tmpl_put(tmpl); | 82 | crypto_tmpl_put(tmpl); |
82 | } | 83 | } |
83 | 84 | ||
85 | static struct list_head *crypto_more_spawns(struct crypto_alg *alg, | ||
86 | struct list_head *stack, | ||
87 | struct list_head *top, | ||
88 | struct list_head *secondary_spawns) | ||
89 | { | ||
90 | struct crypto_spawn *spawn, *n; | ||
91 | |||
92 | if (list_empty(stack)) | ||
93 | return NULL; | ||
94 | |||
95 | spawn = list_first_entry(stack, struct crypto_spawn, list); | ||
96 | n = list_entry(spawn->list.next, struct crypto_spawn, list); | ||
97 | |||
98 | if (spawn->alg && &n->list != stack && !n->alg) | ||
99 | n->alg = (n->list.next == stack) ? alg : | ||
100 | &list_entry(n->list.next, struct crypto_spawn, | ||
101 | list)->inst->alg; | ||
102 | |||
103 | list_move(&spawn->list, secondary_spawns); | ||
104 | |||
105 | return &n->list == stack ? top : &n->inst->alg.cra_users; | ||
106 | } | ||
107 | |||
84 | static void crypto_remove_spawn(struct crypto_spawn *spawn, | 108 | static void crypto_remove_spawn(struct crypto_spawn *spawn, |
85 | struct list_head *list, | 109 | struct list_head *list) |
86 | struct list_head *secondary_spawns) | ||
87 | { | 110 | { |
88 | struct crypto_instance *inst = spawn->inst; | 111 | struct crypto_instance *inst = spawn->inst; |
89 | struct crypto_template *tmpl = inst->tmpl; | 112 | struct crypto_template *tmpl = inst->tmpl; |
90 | 113 | ||
91 | list_del_init(&spawn->list); | ||
92 | spawn->alg = NULL; | ||
93 | |||
94 | if (crypto_is_dead(&inst->alg)) | 114 | if (crypto_is_dead(&inst->alg)) |
95 | return; | 115 | return; |
96 | 116 | ||
@@ -106,25 +126,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn, | |||
106 | hlist_del(&inst->list); | 126 | hlist_del(&inst->list); |
107 | inst->alg.cra_destroy = crypto_destroy_instance; | 127 | inst->alg.cra_destroy = crypto_destroy_instance; |
108 | 128 | ||
109 | list_splice(&inst->alg.cra_users, secondary_spawns); | 129 | BUG_ON(!list_empty(&inst->alg.cra_users)); |
110 | } | 130 | } |
111 | 131 | ||
112 | static void crypto_remove_spawns(struct list_head *spawns, | 132 | static void crypto_remove_spawns(struct crypto_alg *alg, |
113 | struct list_head *list, u32 new_type) | 133 | struct list_head *list, |
134 | struct crypto_alg *nalg) | ||
114 | { | 135 | { |
136 | u32 new_type = (nalg ?: alg)->cra_flags; | ||
115 | struct crypto_spawn *spawn, *n; | 137 | struct crypto_spawn *spawn, *n; |
116 | LIST_HEAD(secondary_spawns); | 138 | LIST_HEAD(secondary_spawns); |
139 | struct list_head *spawns; | ||
140 | LIST_HEAD(stack); | ||
141 | LIST_HEAD(top); | ||
117 | 142 | ||
143 | spawns = &alg->cra_users; | ||
118 | list_for_each_entry_safe(spawn, n, spawns, list) { | 144 | list_for_each_entry_safe(spawn, n, spawns, list) { |
119 | if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) | 145 | if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) |
120 | continue; | 146 | continue; |
121 | 147 | ||
122 | crypto_remove_spawn(spawn, list, &secondary_spawns); | 148 | list_move(&spawn->list, &top); |
123 | } | 149 | } |
124 | 150 | ||
125 | while (!list_empty(&secondary_spawns)) { | 151 | spawns = ⊤ |
126 | list_for_each_entry_safe(spawn, n, &secondary_spawns, list) | 152 | do { |
127 | crypto_remove_spawn(spawn, list, &secondary_spawns); | 153 | while (!list_empty(spawns)) { |
154 | struct crypto_instance *inst; | ||
155 | |||
156 | spawn = list_first_entry(spawns, struct crypto_spawn, | ||
157 | list); | ||
158 | inst = spawn->inst; | ||
159 | |||
160 | BUG_ON(&inst->alg == alg); | ||
161 | |||
162 | list_move(&spawn->list, &stack); | ||
163 | |||
164 | if (&inst->alg == nalg) | ||
165 | break; | ||
166 | |||
167 | spawn->alg = NULL; | ||
168 | spawns = &inst->alg.cra_users; | ||
169 | } | ||
170 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, | ||
171 | &secondary_spawns))); | ||
172 | |||
173 | list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { | ||
174 | if (spawn->alg) | ||
175 | list_move(&spawn->list, &spawn->alg->cra_users); | ||
176 | else | ||
177 | crypto_remove_spawn(spawn, list); | ||
128 | } | 178 | } |
129 | } | 179 | } |
130 | 180 | ||
@@ -181,7 +231,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) | |||
181 | list_add(&alg->cra_list, &crypto_alg_list); | 231 | list_add(&alg->cra_list, &crypto_alg_list); |
182 | list_add(&larval->alg.cra_list, &crypto_alg_list); | 232 | list_add(&larval->alg.cra_list, &crypto_alg_list); |
183 | 233 | ||
184 | out: | 234 | out: |
185 | return larval; | 235 | return larval; |
186 | 236 | ||
187 | free_larval: | 237 | free_larval: |
@@ -258,7 +308,7 @@ found: | |||
258 | q->cra_priority > alg->cra_priority) | 308 | q->cra_priority > alg->cra_priority) |
259 | continue; | 309 | continue; |
260 | 310 | ||
261 | crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags); | 311 | crypto_remove_spawns(q, &list, alg); |
262 | } | 312 | } |
263 | 313 | ||
264 | complete: | 314 | complete: |
@@ -330,7 +380,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) | |||
330 | 380 | ||
331 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); | 381 | crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); |
332 | list_del_init(&alg->cra_list); | 382 | list_del_init(&alg->cra_list); |
333 | crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags); | 383 | crypto_remove_spawns(alg, list, NULL); |
334 | 384 | ||
335 | return 0; | 385 | return 0; |
336 | } | 386 | } |
@@ -339,7 +389,7 @@ int crypto_unregister_alg(struct crypto_alg *alg) | |||
339 | { | 389 | { |
340 | int ret; | 390 | int ret; |
341 | LIST_HEAD(list); | 391 | LIST_HEAD(list); |
342 | 392 | ||
343 | down_write(&crypto_alg_sem); | 393 | down_write(&crypto_alg_sem); |
344 | ret = crypto_remove_alg(alg, &list); | 394 | ret = crypto_remove_alg(alg, &list); |
345 | up_write(&crypto_alg_sem); | 395 | up_write(&crypto_alg_sem); |
@@ -488,20 +538,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | |||
488 | } | 538 | } |
489 | EXPORT_SYMBOL_GPL(crypto_init_spawn); | 539 | EXPORT_SYMBOL_GPL(crypto_init_spawn); |
490 | 540 | ||
541 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | ||
542 | struct crypto_instance *inst, | ||
543 | const struct crypto_type *frontend) | ||
544 | { | ||
545 | int err = -EINVAL; | ||
546 | |||
547 | if ((alg->cra_flags ^ frontend->type) & frontend->maskset) | ||
548 | goto out; | ||
549 | |||
550 | spawn->frontend = frontend; | ||
551 | err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); | ||
552 | |||
553 | out: | ||
554 | return err; | ||
555 | } | ||
556 | EXPORT_SYMBOL_GPL(crypto_init_spawn2); | ||
557 | |||
491 | void crypto_drop_spawn(struct crypto_spawn *spawn) | 558 | void crypto_drop_spawn(struct crypto_spawn *spawn) |
492 | { | 559 | { |
560 | if (!spawn->alg) | ||
561 | return; | ||
562 | |||
493 | down_write(&crypto_alg_sem); | 563 | down_write(&crypto_alg_sem); |
494 | list_del(&spawn->list); | 564 | list_del(&spawn->list); |
495 | up_write(&crypto_alg_sem); | 565 | up_write(&crypto_alg_sem); |
496 | } | 566 | } |
497 | EXPORT_SYMBOL_GPL(crypto_drop_spawn); | 567 | EXPORT_SYMBOL_GPL(crypto_drop_spawn); |
498 | 568 | ||
499 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | 569 | static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) |
500 | u32 mask) | ||
501 | { | 570 | { |
502 | struct crypto_alg *alg; | 571 | struct crypto_alg *alg; |
503 | struct crypto_alg *alg2; | 572 | struct crypto_alg *alg2; |
504 | struct crypto_tfm *tfm; | ||
505 | 573 | ||
506 | down_read(&crypto_alg_sem); | 574 | down_read(&crypto_alg_sem); |
507 | alg = spawn->alg; | 575 | alg = spawn->alg; |
@@ -516,6 +584,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | |||
516 | return ERR_PTR(-EAGAIN); | 584 | return ERR_PTR(-EAGAIN); |
517 | } | 585 | } |
518 | 586 | ||
587 | return alg; | ||
588 | } | ||
589 | |||
590 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | ||
591 | u32 mask) | ||
592 | { | ||
593 | struct crypto_alg *alg; | ||
594 | struct crypto_tfm *tfm; | ||
595 | |||
596 | alg = crypto_spawn_alg(spawn); | ||
597 | if (IS_ERR(alg)) | ||
598 | return ERR_CAST(alg); | ||
599 | |||
519 | tfm = ERR_PTR(-EINVAL); | 600 | tfm = ERR_PTR(-EINVAL); |
520 | if (unlikely((alg->cra_flags ^ type) & mask)) | 601 | if (unlikely((alg->cra_flags ^ type) & mask)) |
521 | goto out_put_alg; | 602 | goto out_put_alg; |
@@ -532,6 +613,27 @@ out_put_alg: | |||
532 | } | 613 | } |
533 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm); | 614 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm); |
534 | 615 | ||
616 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn) | ||
617 | { | ||
618 | struct crypto_alg *alg; | ||
619 | struct crypto_tfm *tfm; | ||
620 | |||
621 | alg = crypto_spawn_alg(spawn); | ||
622 | if (IS_ERR(alg)) | ||
623 | return ERR_CAST(alg); | ||
624 | |||
625 | tfm = crypto_create_tfm(alg, spawn->frontend); | ||
626 | if (IS_ERR(tfm)) | ||
627 | goto out_put_alg; | ||
628 | |||
629 | return tfm; | ||
630 | |||
631 | out_put_alg: | ||
632 | crypto_mod_put(alg); | ||
633 | return tfm; | ||
634 | } | ||
635 | EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); | ||
636 | |||
535 | int crypto_register_notifier(struct notifier_block *nb) | 637 | int crypto_register_notifier(struct notifier_block *nb) |
536 | { | 638 | { |
537 | return blocking_notifier_chain_register(&crypto_chain, nb); | 639 | return blocking_notifier_chain_register(&crypto_chain, nb); |
@@ -595,7 +697,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta) | |||
595 | } | 697 | } |
596 | EXPORT_SYMBOL_GPL(crypto_attr_alg_name); | 698 | EXPORT_SYMBOL_GPL(crypto_attr_alg_name); |
597 | 699 | ||
598 | struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) | 700 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, |
701 | const struct crypto_type *frontend, | ||
702 | u32 type, u32 mask) | ||
599 | { | 703 | { |
600 | const char *name; | 704 | const char *name; |
601 | int err; | 705 | int err; |
@@ -605,9 +709,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) | |||
605 | if (IS_ERR(name)) | 709 | if (IS_ERR(name)) |
606 | return ERR_PTR(err); | 710 | return ERR_PTR(err); |
607 | 711 | ||
608 | return crypto_alg_mod_lookup(name, type, mask); | 712 | return crypto_find_alg(name, frontend, type, mask); |
609 | } | 713 | } |
610 | EXPORT_SYMBOL_GPL(crypto_attr_alg); | 714 | EXPORT_SYMBOL_GPL(crypto_attr_alg2); |
611 | 715 | ||
612 | int crypto_attr_u32(struct rtattr *rta, u32 *num) | 716 | int crypto_attr_u32(struct rtattr *rta, u32 *num) |
613 | { | 717 | { |
@@ -627,17 +731,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num) | |||
627 | } | 731 | } |
628 | EXPORT_SYMBOL_GPL(crypto_attr_u32); | 732 | EXPORT_SYMBOL_GPL(crypto_attr_u32); |
629 | 733 | ||
630 | struct crypto_instance *crypto_alloc_instance(const char *name, | 734 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, |
631 | struct crypto_alg *alg) | 735 | unsigned int head) |
632 | { | 736 | { |
633 | struct crypto_instance *inst; | 737 | struct crypto_instance *inst; |
634 | struct crypto_spawn *spawn; | 738 | char *p; |
635 | int err; | 739 | int err; |
636 | 740 | ||
637 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | 741 | p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), |
638 | if (!inst) | 742 | GFP_KERNEL); |
743 | if (!p) | ||
639 | return ERR_PTR(-ENOMEM); | 744 | return ERR_PTR(-ENOMEM); |
640 | 745 | ||
746 | inst = (void *)(p + head); | ||
747 | |||
641 | err = -ENAMETOOLONG; | 748 | err = -ENAMETOOLONG; |
642 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, | 749 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, |
643 | alg->cra_name) >= CRYPTO_MAX_ALG_NAME) | 750 | alg->cra_name) >= CRYPTO_MAX_ALG_NAME) |
@@ -647,6 +754,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name, | |||
647 | name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 754 | name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
648 | goto err_free_inst; | 755 | goto err_free_inst; |
649 | 756 | ||
757 | return p; | ||
758 | |||
759 | err_free_inst: | ||
760 | kfree(p); | ||
761 | return ERR_PTR(err); | ||
762 | } | ||
763 | EXPORT_SYMBOL_GPL(crypto_alloc_instance2); | ||
764 | |||
765 | struct crypto_instance *crypto_alloc_instance(const char *name, | ||
766 | struct crypto_alg *alg) | ||
767 | { | ||
768 | struct crypto_instance *inst; | ||
769 | struct crypto_spawn *spawn; | ||
770 | int err; | ||
771 | |||
772 | inst = crypto_alloc_instance2(name, alg, 0); | ||
773 | if (IS_ERR(inst)) | ||
774 | goto out; | ||
775 | |||
650 | spawn = crypto_instance_ctx(inst); | 776 | spawn = crypto_instance_ctx(inst); |
651 | err = crypto_init_spawn(spawn, alg, inst, | 777 | err = crypto_init_spawn(spawn, alg, inst, |
652 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | 778 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
@@ -658,7 +784,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name, | |||
658 | 784 | ||
659 | err_free_inst: | 785 | err_free_inst: |
660 | kfree(inst); | 786 | kfree(inst); |
661 | return ERR_PTR(err); | 787 | inst = ERR_PTR(err); |
788 | |||
789 | out: | ||
790 | return inst; | ||
662 | } | 791 | } |
663 | EXPORT_SYMBOL_GPL(crypto_alloc_instance); | 792 | EXPORT_SYMBOL_GPL(crypto_alloc_instance); |
664 | 793 | ||
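crypto_alloc_instance2() lays out 'head' caller-private bytes in front of the crypto_instance (with room for a spawn after it) and returns a pointer to the start of the whole allocation. A minimal sketch of how a caller might recover the instance from that pointer, assuming the usual crypto/algapi.h declarations; the head structure and function names are illustrative, not part of this patch:

	/* hypothetical caller-private data placed before the instance */
	struct example_head {
		unsigned int param;
	};

	static struct crypto_instance *example_alloc(const char *name,
						     struct crypto_alg *alg)
	{
		char *p = crypto_alloc_instance2(name, alg,
						 sizeof(struct example_head));

		if (IS_ERR(p))
			return ERR_CAST(p);

		/* layout: [example_head][crypto_instance][crypto_spawn] */
		return (struct crypto_instance *)(p + sizeof(struct example_head));
	}
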
@@ -692,7 +821,7 @@ out: | |||
692 | } | 821 | } |
693 | EXPORT_SYMBOL_GPL(crypto_enqueue_request); | 822 | EXPORT_SYMBOL_GPL(crypto_enqueue_request); |
694 | 823 | ||
695 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) | 824 | void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset) |
696 | { | 825 | { |
697 | struct list_head *request; | 826 | struct list_head *request; |
698 | 827 | ||
@@ -707,7 +836,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) | |||
707 | request = queue->list.next; | 836 | request = queue->list.next; |
708 | list_del(request); | 837 | list_del(request); |
709 | 838 | ||
710 | return list_entry(request, struct crypto_async_request, list); | 839 | return (char *)list_entry(request, struct crypto_async_request, list) - |
840 | offset; | ||
841 | } | ||
842 | EXPORT_SYMBOL_GPL(__crypto_dequeue_request); | ||
843 | |||
844 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) | ||
845 | { | ||
846 | return __crypto_dequeue_request(queue, 0); | ||
711 | } | 847 | } |
712 | EXPORT_SYMBOL_GPL(crypto_dequeue_request); | 848 | EXPORT_SYMBOL_GPL(crypto_dequeue_request); |
713 | 849 | ||
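The new offset parameter of __crypto_dequeue_request() lets callers queue request types that embed the generic crypto_async_request at a non-zero offset: the helper subtracts the offset so the surrounding structure, not the embedded member, is handed back. A hedged sketch with an illustrative wrapper type (not from this patch), assuming the crypto/algapi.h declarations:

	/* illustrative wrapper embedding the generic request mid-struct */
	struct example_request {
		unsigned int priv;			/* caller-private state */
		struct crypto_async_request base;	/* embedded generic request */
	};

	static inline struct example_request *
	example_dequeue(struct crypto_queue *queue)
	{
		/* returns the wrapper, or NULL when the queue is empty */
		return __crypto_dequeue_request(queue,
				offsetof(struct example_request, base));
	}
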
diff --git a/crypto/algboss.c b/crypto/algboss.c index 9908dd830c26..791d194958fa 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/notifier.h> | 19 | #include <linux/notifier.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | ||
22 | #include <linux/string.h> | 23 | #include <linux/string.h> |
23 | 24 | ||
24 | #include "internal.h" | 25 | #include "internal.h" |
@@ -68,6 +69,11 @@ static int cryptomgr_probe(void *data) | |||
68 | goto err; | 69 | goto err; |
69 | 70 | ||
70 | do { | 71 | do { |
72 | if (tmpl->create) { | ||
73 | err = tmpl->create(tmpl, param->tb); | ||
74 | continue; | ||
75 | } | ||
76 | |||
71 | inst = tmpl->alloc(param->tb); | 77 | inst = tmpl->alloc(param->tb); |
72 | if (IS_ERR(inst)) | 78 | if (IS_ERR(inst)) |
73 | err = PTR_ERR(inst); | 79 | err = PTR_ERR(inst); |
@@ -206,6 +212,10 @@ static int cryptomgr_test(void *data) | |||
206 | u32 type = param->type; | 212 | u32 type = param->type; |
207 | int err = 0; | 213 | int err = 0; |
208 | 214 | ||
215 | #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS | ||
216 | goto skiptest; | ||
217 | #endif | ||
218 | |||
209 | if (type & CRYPTO_ALG_TESTED) | 219 | if (type & CRYPTO_ALG_TESTED) |
210 | goto skiptest; | 220 | goto skiptest; |
211 | 221 | ||
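cryptomgr_probe() now prefers a template's ->create() hook over the older ->alloc()/instance-registration pair, and cryptomgr_test() skips the run-time self tests entirely when CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is set. A minimal sketch of a template wired up for the new hook, assuming the usual struct crypto_template fields from crypto/algapi.h; the body is a placeholder rather than a real algorithm:

	static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
	{
		/* a real ->create() builds and registers an instance here;
		 * -ENOSYS is only a placeholder for this sketch */
		return -ENOSYS;
	}

	static struct crypto_template example_tmpl = {
		.name	= "example",
		.create	= example_create,
		.module	= THIS_MODULE,
	};
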
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index d80ed4c1e009..2bc332142849 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c | |||
@@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2, | |||
85 | * Returns DEFAULT_BLK_SZ bytes of random data per call | 85 | * Returns DEFAULT_BLK_SZ bytes of random data per call |
86 | * returns 0 if generation succeeded, <0 if something went wrong | 86 | * returns 0 if generation succeeded, <0 if something went wrong |
87 | */ | 87 | */ |
88 | static int _get_more_prng_bytes(struct prng_context *ctx) | 88 | static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) |
89 | { | 89 | { |
90 | int i; | 90 | int i; |
91 | unsigned char tmp[DEFAULT_BLK_SZ]; | 91 | unsigned char tmp[DEFAULT_BLK_SZ]; |
@@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx) | |||
132 | */ | 132 | */ |
133 | if (!memcmp(ctx->rand_data, ctx->last_rand_data, | 133 | if (!memcmp(ctx->rand_data, ctx->last_rand_data, |
134 | DEFAULT_BLK_SZ)) { | 134 | DEFAULT_BLK_SZ)) { |
135 | if (fips_enabled) { | 135 | if (cont_test) { |
136 | panic("cprng %p Failed repetition check!\n", | 136 | panic("cprng %p Failed repetition check!\n", |
137 | ctx); | 137 | ctx); |
138 | } | 138 | } |
@@ -185,18 +185,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | /* Our exported functions */ | 187 | /* Our exported functions */ |
188 | static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) | 188 | static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx, |
189 | int do_cont_test) | ||
189 | { | 190 | { |
190 | unsigned long flags; | ||
191 | unsigned char *ptr = buf; | 191 | unsigned char *ptr = buf; |
192 | unsigned int byte_count = (unsigned int)nbytes; | 192 | unsigned int byte_count = (unsigned int)nbytes; |
193 | int err; | 193 | int err; |
194 | 194 | ||
195 | 195 | ||
196 | if (nbytes < 0) | 196 | spin_lock_bh(&ctx->prng_lock); |
197 | return -EINVAL; | ||
198 | |||
199 | spin_lock_irqsave(&ctx->prng_lock, flags); | ||
200 | 197 | ||
201 | err = -EINVAL; | 198 | err = -EINVAL; |
202 | if (ctx->flags & PRNG_NEED_RESET) | 199 | if (ctx->flags & PRNG_NEED_RESET) |
@@ -221,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) | |||
221 | 218 | ||
222 | remainder: | 219 | remainder: |
223 | if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { | 220 | if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { |
224 | if (_get_more_prng_bytes(ctx) < 0) { | 221 | if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { |
225 | memset(buf, 0, nbytes); | 222 | memset(buf, 0, nbytes); |
226 | err = -EINVAL; | 223 | err = -EINVAL; |
227 | goto done; | 224 | goto done; |
@@ -248,7 +245,7 @@ empty_rbuf: | |||
248 | */ | 245 | */ |
249 | for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { | 246 | for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { |
250 | if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { | 247 | if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { |
251 | if (_get_more_prng_bytes(ctx) < 0) { | 248 | if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { |
252 | memset(buf, 0, nbytes); | 249 | memset(buf, 0, nbytes); |
253 | err = -EINVAL; | 250 | err = -EINVAL; |
254 | goto done; | 251 | goto done; |
@@ -268,7 +265,7 @@ empty_rbuf: | |||
268 | goto remainder; | 265 | goto remainder; |
269 | 266 | ||
270 | done: | 267 | done: |
271 | spin_unlock_irqrestore(&ctx->prng_lock, flags); | 268 | spin_unlock_bh(&ctx->prng_lock); |
272 | dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", | 269 | dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", |
273 | err, ctx); | 270 | err, ctx); |
274 | return err; | 271 | return err; |
@@ -284,10 +281,9 @@ static int reset_prng_context(struct prng_context *ctx, | |||
284 | unsigned char *V, unsigned char *DT) | 281 | unsigned char *V, unsigned char *DT) |
285 | { | 282 | { |
286 | int ret; | 283 | int ret; |
287 | int rc = -EINVAL; | ||
288 | unsigned char *prng_key; | 284 | unsigned char *prng_key; |
289 | 285 | ||
290 | spin_lock(&ctx->prng_lock); | 286 | spin_lock_bh(&ctx->prng_lock); |
291 | ctx->flags |= PRNG_NEED_RESET; | 287 | ctx->flags |= PRNG_NEED_RESET; |
292 | 288 | ||
293 | prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; | 289 | prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; |
@@ -308,34 +304,20 @@ static int reset_prng_context(struct prng_context *ctx, | |||
308 | memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); | 304 | memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); |
309 | memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); | 305 | memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); |
310 | 306 | ||
311 | if (ctx->tfm) | ||
312 | crypto_free_cipher(ctx->tfm); | ||
313 | |||
314 | ctx->tfm = crypto_alloc_cipher("aes", 0, 0); | ||
315 | if (IS_ERR(ctx->tfm)) { | ||
316 | dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", | ||
317 | ctx); | ||
318 | ctx->tfm = NULL; | ||
319 | goto out; | ||
320 | } | ||
321 | |||
322 | ctx->rand_data_valid = DEFAULT_BLK_SZ; | 307 | ctx->rand_data_valid = DEFAULT_BLK_SZ; |
323 | 308 | ||
324 | ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); | 309 | ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); |
325 | if (ret) { | 310 | if (ret) { |
326 | dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", | 311 | dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", |
327 | crypto_cipher_get_flags(ctx->tfm)); | 312 | crypto_cipher_get_flags(ctx->tfm)); |
328 | crypto_free_cipher(ctx->tfm); | ||
329 | goto out; | 313 | goto out; |
330 | } | 314 | } |
331 | 315 | ||
332 | rc = 0; | 316 | ret = 0; |
333 | ctx->flags &= ~PRNG_NEED_RESET; | 317 | ctx->flags &= ~PRNG_NEED_RESET; |
334 | out: | 318 | out: |
335 | spin_unlock(&ctx->prng_lock); | 319 | spin_unlock_bh(&ctx->prng_lock); |
336 | 320 | return ret; | |
337 | return rc; | ||
338 | |||
339 | } | 321 | } |
340 | 322 | ||
341 | static int cprng_init(struct crypto_tfm *tfm) | 323 | static int cprng_init(struct crypto_tfm *tfm) |
@@ -343,6 +325,12 @@ static int cprng_init(struct crypto_tfm *tfm) | |||
343 | struct prng_context *ctx = crypto_tfm_ctx(tfm); | 325 | struct prng_context *ctx = crypto_tfm_ctx(tfm); |
344 | 326 | ||
345 | spin_lock_init(&ctx->prng_lock); | 327 | spin_lock_init(&ctx->prng_lock); |
328 | ctx->tfm = crypto_alloc_cipher("aes", 0, 0); | ||
329 | if (IS_ERR(ctx->tfm)) { | ||
330 | dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", | ||
331 | ctx); | ||
332 | return PTR_ERR(ctx->tfm); | ||
333 | } | ||
346 | 334 | ||
347 | if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) | 335 | if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) |
348 | return -EINVAL; | 336 | return -EINVAL; |
@@ -366,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, | |||
366 | { | 354 | { |
367 | struct prng_context *prng = crypto_rng_ctx(tfm); | 355 | struct prng_context *prng = crypto_rng_ctx(tfm); |
368 | 356 | ||
369 | return get_prng_bytes(rdata, dlen, prng); | 357 | return get_prng_bytes(rdata, dlen, prng, 0); |
370 | } | 358 | } |
371 | 359 | ||
372 | /* | 360 | /* |
@@ -414,26 +402,79 @@ static struct crypto_alg rng_alg = { | |||
414 | } | 402 | } |
415 | }; | 403 | }; |
416 | 404 | ||
405 | #ifdef CONFIG_CRYPTO_FIPS | ||
406 | static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata, | ||
407 | unsigned int dlen) | ||
408 | { | ||
409 | struct prng_context *prng = crypto_rng_ctx(tfm); | ||
410 | |||
411 | return get_prng_bytes(rdata, dlen, prng, 1); | ||
412 | } | ||
413 | |||
414 | static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) | ||
415 | { | ||
416 | u8 rdata[DEFAULT_BLK_SZ]; | ||
417 | int rc; | ||
418 | |||
419 | struct prng_context *prng = crypto_rng_ctx(tfm); | ||
420 | |||
421 | rc = cprng_reset(tfm, seed, slen); | ||
422 | |||
423 | if (!rc) | ||
424 | goto out; | ||
425 | |||
426 | /* this primes our continuity test */ | ||
427 | rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0); | ||
428 | prng->rand_data_valid = DEFAULT_BLK_SZ; | ||
429 | |||
430 | out: | ||
431 | return rc; | ||
432 | } | ||
433 | |||
434 | static struct crypto_alg fips_rng_alg = { | ||
435 | .cra_name = "fips(ansi_cprng)", | ||
436 | .cra_driver_name = "fips_ansi_cprng", | ||
437 | .cra_priority = 300, | ||
438 | .cra_flags = CRYPTO_ALG_TYPE_RNG, | ||
439 | .cra_ctxsize = sizeof(struct prng_context), | ||
440 | .cra_type = &crypto_rng_type, | ||
441 | .cra_module = THIS_MODULE, | ||
442 | .cra_list = LIST_HEAD_INIT(rng_alg.cra_list), | ||
443 | .cra_init = cprng_init, | ||
444 | .cra_exit = cprng_exit, | ||
445 | .cra_u = { | ||
446 | .rng = { | ||
447 | .rng_make_random = fips_cprng_get_random, | ||
448 | .rng_reset = fips_cprng_reset, | ||
449 | .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, | ||
450 | } | ||
451 | } | ||
452 | }; | ||
453 | #endif | ||
417 | 454 | ||
418 | /* Module initialization */ | 455 | /* Module initialization */ |
419 | static int __init prng_mod_init(void) | 456 | static int __init prng_mod_init(void) |
420 | { | 457 | { |
421 | int ret = 0; | 458 | int rc = 0; |
422 | 459 | ||
423 | if (fips_enabled) | 460 | rc = crypto_register_alg(&rng_alg); |
424 | rng_alg.cra_priority += 200; | 461 | #ifdef CONFIG_CRYPTO_FIPS |
462 | if (rc) | ||
463 | goto out; | ||
425 | 464 | ||
426 | ret = crypto_register_alg(&rng_alg); | 465 | rc = crypto_register_alg(&fips_rng_alg); |
427 | 466 | ||
428 | if (ret) | ||
429 | goto out; | ||
430 | out: | 467 | out: |
431 | return 0; | 468 | #endif |
469 | return rc; | ||
432 | } | 470 | } |
433 | 471 | ||
434 | static void __exit prng_mod_fini(void) | 472 | static void __exit prng_mod_fini(void) |
435 | { | 473 | { |
436 | crypto_unregister_alg(&rng_alg); | 474 | crypto_unregister_alg(&rng_alg); |
475 | #ifdef CONFIG_CRYPTO_FIPS | ||
476 | crypto_unregister_alg(&fips_rng_alg); | ||
477 | #endif | ||
437 | return; | 478 | return; |
438 | } | 479 | } |
439 | 480 | ||
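The cont_test flag threaded through get_prng_bytes() and _get_more_prng_bytes() enables the FIPS-style continuous test: each freshly generated block is compared with the previous one and a repeat is treated as a failure (the fips(ansi_cprng) variant passes 1, the plain RNG passes 0, and fips_cprng_reset() generates one discarded block to prime the comparison). A simplified illustration of the check, not the driver code itself:

	/* simplified continuity check: fail if two consecutive blocks repeat */
	static int continuity_check(const u8 *new_blk, u8 *last_blk, size_t blksz)
	{
		if (!memcmp(new_blk, last_blk, blksz))
			return -EINVAL;			/* identical to the previous block */
		memcpy(last_blk, new_blk, blksz);	/* remember for the next call */
		return 0;
	}
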
diff --git a/crypto/anubis.c b/crypto/anubis.c index e42c3a8ba4aa..77530d571c96 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c | |||
@@ -469,14 +469,13 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, | |||
469 | u32 kappa[ANUBIS_MAX_N]; | 469 | u32 kappa[ANUBIS_MAX_N]; |
470 | u32 inter[ANUBIS_MAX_N]; | 470 | u32 inter[ANUBIS_MAX_N]; |
471 | 471 | ||
472 | switch (key_len) | 472 | switch (key_len) { |
473 | { | ||
474 | case 16: case 20: case 24: case 28: | 473 | case 16: case 20: case 24: case 28: |
475 | case 32: case 36: case 40: | 474 | case 32: case 36: case 40: |
476 | break; | 475 | break; |
477 | default: | 476 | default: |
478 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 477 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
479 | return - EINVAL; | 478 | return -EINVAL; |
480 | } | 479 | } |
481 | 480 | ||
482 | ctx->key_len = key_len * 8; | 481 | ctx->key_len = key_len * 8; |
@@ -530,23 +529,24 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, | |||
530 | /* | 529 | /* |
531 | * compute kappa^{r+1} from kappa^r: | 530 | * compute kappa^{r+1} from kappa^r: |
532 | */ | 531 | */ |
533 | if (r == R) { | 532 | if (r == R) |
534 | break; | 533 | break; |
535 | } | ||
536 | for (i = 0; i < N; i++) { | 534 | for (i = 0; i < N; i++) { |
537 | int j = i; | 535 | int j = i; |
538 | inter[i] = T0[(kappa[j--] >> 24) ]; | 536 | inter[i] = T0[(kappa[j--] >> 24) ]; |
539 | if (j < 0) j = N - 1; | 537 | if (j < 0) |
538 | j = N - 1; | ||
540 | inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; | 539 | inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; |
541 | if (j < 0) j = N - 1; | 540 | if (j < 0) |
541 | j = N - 1; | ||
542 | inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; | 542 | inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; |
543 | if (j < 0) j = N - 1; | 543 | if (j < 0) |
544 | j = N - 1; | ||
544 | inter[i] ^= T3[(kappa[j ] ) & 0xff]; | 545 | inter[i] ^= T3[(kappa[j ] ) & 0xff]; |
545 | } | 546 | } |
546 | kappa[0] = inter[0] ^ rc[r]; | 547 | kappa[0] = inter[0] ^ rc[r]; |
547 | for (i = 1; i < N; i++) { | 548 | for (i = 1; i < N; i++) |
548 | kappa[i] = inter[i]; | 549 | kappa[i] = inter[i]; |
549 | } | ||
550 | } | 550 | } |
551 | 551 | ||
552 | /* | 552 | /* |
@@ -690,7 +690,7 @@ static struct crypto_alg anubis_alg = { | |||
690 | static int __init anubis_mod_init(void) | 690 | static int __init anubis_mod_init(void) |
691 | { | 691 | { |
692 | int ret = 0; | 692 | int ret = 0; |
693 | 693 | ||
694 | ret = crypto_register_alg(&anubis_alg); | 694 | ret = crypto_register_alg(&anubis_alg); |
695 | return ret; | 695 | return ret; |
696 | } | 696 | } |
diff --git a/crypto/api.c b/crypto/api.c index d5944f92b416..033a7147e5eb 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
13 | * Software Foundation; either version 2 of the License, or (at your option) | 13 | * Software Foundation; either version 2 of the License, or (at your option) |
14 | * any later version. | 14 | * any later version. |
15 | * | 15 | * |
16 | */ | 16 | */ |
@@ -285,21 +285,14 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
285 | switch (crypto_tfm_alg_type(tfm)) { | 285 | switch (crypto_tfm_alg_type(tfm)) { |
286 | case CRYPTO_ALG_TYPE_CIPHER: | 286 | case CRYPTO_ALG_TYPE_CIPHER: |
287 | return crypto_init_cipher_ops(tfm); | 287 | return crypto_init_cipher_ops(tfm); |
288 | |||
289 | case CRYPTO_ALG_TYPE_DIGEST: | ||
290 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != | ||
291 | CRYPTO_ALG_TYPE_HASH_MASK) | ||
292 | return crypto_init_digest_ops_async(tfm); | ||
293 | else | ||
294 | return crypto_init_digest_ops(tfm); | ||
295 | 288 | ||
296 | case CRYPTO_ALG_TYPE_COMPRESS: | 289 | case CRYPTO_ALG_TYPE_COMPRESS: |
297 | return crypto_init_compress_ops(tfm); | 290 | return crypto_init_compress_ops(tfm); |
298 | 291 | ||
299 | default: | 292 | default: |
300 | break; | 293 | break; |
301 | } | 294 | } |
302 | 295 | ||
303 | BUG(); | 296 | BUG(); |
304 | return -EINVAL; | 297 | return -EINVAL; |
305 | } | 298 | } |
@@ -318,18 +311,13 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) | |||
318 | case CRYPTO_ALG_TYPE_CIPHER: | 311 | case CRYPTO_ALG_TYPE_CIPHER: |
319 | crypto_exit_cipher_ops(tfm); | 312 | crypto_exit_cipher_ops(tfm); |
320 | break; | 313 | break; |
321 | 314 | ||
322 | case CRYPTO_ALG_TYPE_DIGEST: | ||
323 | crypto_exit_digest_ops(tfm); | ||
324 | break; | ||
325 | |||
326 | case CRYPTO_ALG_TYPE_COMPRESS: | 315 | case CRYPTO_ALG_TYPE_COMPRESS: |
327 | crypto_exit_compress_ops(tfm); | 316 | crypto_exit_compress_ops(tfm); |
328 | break; | 317 | break; |
329 | 318 | ||
330 | default: | 319 | default: |
331 | BUG(); | 320 | BUG(); |
332 | |||
333 | } | 321 | } |
334 | } | 322 | } |
335 | 323 | ||
@@ -349,11 +337,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) | |||
349 | case CRYPTO_ALG_TYPE_CIPHER: | 337 | case CRYPTO_ALG_TYPE_CIPHER: |
350 | len += crypto_cipher_ctxsize(alg); | 338 | len += crypto_cipher_ctxsize(alg); |
351 | break; | 339 | break; |
352 | 340 | ||
353 | case CRYPTO_ALG_TYPE_DIGEST: | ||
354 | len += crypto_digest_ctxsize(alg); | ||
355 | break; | ||
356 | |||
357 | case CRYPTO_ALG_TYPE_COMPRESS: | 341 | case CRYPTO_ALG_TYPE_COMPRESS: |
358 | len += crypto_compress_ctxsize(alg); | 342 | len += crypto_compress_ctxsize(alg); |
359 | break; | 343 | break; |
@@ -472,7 +456,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, | |||
472 | int err = -ENOMEM; | 456 | int err = -ENOMEM; |
473 | 457 | ||
474 | tfmsize = frontend->tfmsize; | 458 | tfmsize = frontend->tfmsize; |
475 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); | 459 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); |
476 | 460 | ||
477 | mem = kzalloc(total, GFP_KERNEL); | 461 | mem = kzalloc(total, GFP_KERNEL); |
478 | if (mem == NULL) | 462 | if (mem == NULL) |
@@ -481,7 +465,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, | |||
481 | tfm = (struct crypto_tfm *)(mem + tfmsize); | 465 | tfm = (struct crypto_tfm *)(mem + tfmsize); |
482 | tfm->__crt_alg = alg; | 466 | tfm->__crt_alg = alg; |
483 | 467 | ||
484 | err = frontend->init_tfm(tfm, frontend); | 468 | err = frontend->init_tfm(tfm); |
485 | if (err) | 469 | if (err) |
486 | goto out_free_tfm; | 470 | goto out_free_tfm; |
487 | 471 | ||
@@ -503,6 +487,27 @@ out: | |||
503 | } | 487 | } |
504 | EXPORT_SYMBOL_GPL(crypto_create_tfm); | 488 | EXPORT_SYMBOL_GPL(crypto_create_tfm); |
505 | 489 | ||
490 | struct crypto_alg *crypto_find_alg(const char *alg_name, | ||
491 | const struct crypto_type *frontend, | ||
492 | u32 type, u32 mask) | ||
493 | { | ||
494 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) = | ||
495 | crypto_alg_mod_lookup; | ||
496 | |||
497 | if (frontend) { | ||
498 | type &= frontend->maskclear; | ||
499 | mask &= frontend->maskclear; | ||
500 | type |= frontend->type; | ||
501 | mask |= frontend->maskset; | ||
502 | |||
503 | if (frontend->lookup) | ||
504 | lookup = frontend->lookup; | ||
505 | } | ||
506 | |||
507 | return lookup(alg_name, type, mask); | ||
508 | } | ||
509 | EXPORT_SYMBOL_GPL(crypto_find_alg); | ||
510 | |||
506 | /* | 511 | /* |
507 | * crypto_alloc_tfm - Locate algorithm and allocate transform | 512 | * crypto_alloc_tfm - Locate algorithm and allocate transform |
508 | * @alg_name: Name of algorithm | 513 | * @alg_name: Name of algorithm |
@@ -526,21 +531,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm); | |||
526 | void *crypto_alloc_tfm(const char *alg_name, | 531 | void *crypto_alloc_tfm(const char *alg_name, |
527 | const struct crypto_type *frontend, u32 type, u32 mask) | 532 | const struct crypto_type *frontend, u32 type, u32 mask) |
528 | { | 533 | { |
529 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | ||
530 | void *tfm; | 534 | void *tfm; |
531 | int err; | 535 | int err; |
532 | 536 | ||
533 | type &= frontend->maskclear; | ||
534 | mask &= frontend->maskclear; | ||
535 | type |= frontend->type; | ||
536 | mask |= frontend->maskset; | ||
537 | |||
538 | lookup = frontend->lookup ?: crypto_alg_mod_lookup; | ||
539 | |||
540 | for (;;) { | 537 | for (;;) { |
541 | struct crypto_alg *alg; | 538 | struct crypto_alg *alg; |
542 | 539 | ||
543 | alg = lookup(alg_name, type, mask); | 540 | alg = crypto_find_alg(alg_name, frontend, type, mask); |
544 | if (IS_ERR(alg)) { | 541 | if (IS_ERR(alg)) { |
545 | err = PTR_ERR(alg); | 542 | err = PTR_ERR(alg); |
546 | goto err; | 543 | goto err; |
@@ -595,12 +592,12 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) | |||
595 | { | 592 | { |
596 | int ret = 0; | 593 | int ret = 0; |
597 | struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); | 594 | struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); |
598 | 595 | ||
599 | if (!IS_ERR(alg)) { | 596 | if (!IS_ERR(alg)) { |
600 | crypto_mod_put(alg); | 597 | crypto_mod_put(alg); |
601 | ret = 1; | 598 | ret = 1; |
602 | } | 599 | } |
603 | 600 | ||
604 | return ret; | 601 | return ret; |
605 | } | 602 | } |
606 | EXPORT_SYMBOL_GPL(crypto_has_alg); | 603 | EXPORT_SYMBOL_GPL(crypto_has_alg); |
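crypto_find_alg() centralizes the type/mask adjustment and lookup that crypto_alloc_tfm() previously did inline; a NULL frontend falls back to a plain crypto_alg_mod_lookup(). A hedged usage sketch, assuming the declaration is visible inside the crypto layer (e.g. via crypto/internal.h); the algorithm name and flags are examples only:

	static int example_lookup(void)
	{
		struct crypto_alg *alg;

		/* NULL frontend: behaves like crypto_alg_mod_lookup() */
		alg = crypto_find_alg("cbc(aes)", NULL, CRYPTO_ALG_TYPE_BLKCIPHER,
				      CRYPTO_ALG_TYPE_MASK);
		if (IS_ERR(alg))
			return PTR_ERR(alg);

		crypto_mod_put(alg);	/* drop the reference taken by the lookup */
		return 0;
	}
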
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index d8fb39145986..1b11abbb5c91 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig | |||
@@ -14,3 +14,18 @@ config ASYNC_MEMSET | |||
14 | tristate | 14 | tristate |
15 | select ASYNC_CORE | 15 | select ASYNC_CORE |
16 | 16 | ||
17 | config ASYNC_PQ | ||
18 | tristate | ||
19 | select ASYNC_CORE | ||
20 | |||
21 | config ASYNC_RAID6_RECOV | ||
22 | tristate | ||
23 | select ASYNC_CORE | ||
24 | select ASYNC_PQ | ||
25 | select ASYNC_XOR | ||
26 | |||
27 | config ASYNC_TX_DISABLE_PQ_VAL_DMA | ||
28 | bool | ||
29 | |||
30 | config ASYNC_TX_DISABLE_XOR_VAL_DMA | ||
31 | bool | ||
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 27baa7d52fbc..d1e0e6f72bc1 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile | |||
@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o | |||
2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o | 2 | obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o |
3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o | 3 | obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o |
4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o | 4 | obj-$(CONFIG_ASYNC_XOR) += async_xor.o |
5 | obj-$(CONFIG_ASYNC_PQ) += async_pq.o | ||
6 | obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o | ||
7 | obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o | ||
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index ddccfb01c416..518c22bd9562 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c | |||
@@ -33,28 +33,31 @@ | |||
33 | * async_memcpy - attempt to copy memory with a dma engine. | 33 | * async_memcpy - attempt to copy memory with a dma engine. |
34 | * @dest: destination page | 34 | * @dest: destination page |
35 | * @src: src page | 35 | * @src: src page |
36 | * @offset: offset in pages to start transaction | 36 | * @dest_offset: offset into 'dest' to start transaction |
37 | * @src_offset: offset into 'src' to start transaction | ||
37 | * @len: length in bytes | 38 | * @len: length in bytes |
38 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK, | 39 | * @submit: submission / completion modifiers |
39 | * @depend_tx: memcpy depends on the result of this transaction | 40 | * |
40 | * @cb_fn: function to call when the memcpy completes | 41 | * honored flags: ASYNC_TX_ACK |
41 | * @cb_param: parameter to pass to the callback routine | ||
42 | */ | 42 | */ |
43 | struct dma_async_tx_descriptor * | 43 | struct dma_async_tx_descriptor * |
44 | async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | 44 | async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, |
45 | unsigned int src_offset, size_t len, enum async_tx_flags flags, | 45 | unsigned int src_offset, size_t len, |
46 | struct dma_async_tx_descriptor *depend_tx, | 46 | struct async_submit_ctl *submit) |
47 | dma_async_tx_callback cb_fn, void *cb_param) | ||
48 | { | 47 | { |
49 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY, | 48 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, |
50 | &dest, 1, &src, 1, len); | 49 | &dest, 1, &src, 1, len); |
51 | struct dma_device *device = chan ? chan->device : NULL; | 50 | struct dma_device *device = chan ? chan->device : NULL; |
52 | struct dma_async_tx_descriptor *tx = NULL; | 51 | struct dma_async_tx_descriptor *tx = NULL; |
53 | 52 | ||
54 | if (device) { | 53 | if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { |
55 | dma_addr_t dma_dest, dma_src; | 54 | dma_addr_t dma_dest, dma_src; |
56 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 55 | unsigned long dma_prep_flags = 0; |
57 | 56 | ||
57 | if (submit->cb_fn) | ||
58 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
59 | if (submit->flags & ASYNC_TX_FENCE) | ||
60 | dma_prep_flags |= DMA_PREP_FENCE; | ||
58 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, | 61 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, |
59 | DMA_FROM_DEVICE); | 62 | DMA_FROM_DEVICE); |
60 | 63 | ||
@@ -67,42 +70,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
67 | 70 | ||
68 | if (tx) { | 71 | if (tx) { |
69 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 72 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
70 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 73 | async_tx_submit(chan, tx, submit); |
71 | } else { | 74 | } else { |
72 | void *dest_buf, *src_buf; | 75 | void *dest_buf, *src_buf; |
73 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 76 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
74 | 77 | ||
75 | /* wait for any prerequisite operations */ | 78 | /* wait for any prerequisite operations */ |
76 | async_tx_quiesce(&depend_tx); | 79 | async_tx_quiesce(&submit->depend_tx); |
77 | 80 | ||
78 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; | 81 | dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; |
79 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; | 82 | src_buf = kmap_atomic(src, KM_USER1) + src_offset; |
80 | 83 | ||
81 | memcpy(dest_buf, src_buf, len); | 84 | memcpy(dest_buf, src_buf, len); |
82 | 85 | ||
83 | kunmap_atomic(dest_buf, KM_USER0); | ||
84 | kunmap_atomic(src_buf, KM_USER1); | 86 | kunmap_atomic(src_buf, KM_USER1); |
87 | kunmap_atomic(dest_buf, KM_USER0); | ||
85 | 88 | ||
86 | async_tx_sync_epilog(cb_fn, cb_param); | 89 | async_tx_sync_epilog(submit); |
87 | } | 90 | } |
88 | 91 | ||
89 | return tx; | 92 | return tx; |
90 | } | 93 | } |
91 | EXPORT_SYMBOL_GPL(async_memcpy); | 94 | EXPORT_SYMBOL_GPL(async_memcpy); |
92 | 95 | ||
93 | static int __init async_memcpy_init(void) | ||
94 | { | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static void __exit async_memcpy_exit(void) | ||
99 | { | ||
100 | do { } while (0); | ||
101 | } | ||
102 | |||
103 | module_init(async_memcpy_init); | ||
104 | module_exit(async_memcpy_exit); | ||
105 | |||
106 | MODULE_AUTHOR("Intel Corporation"); | 96 | MODULE_AUTHOR("Intel Corporation"); |
107 | MODULE_DESCRIPTION("asynchronous memcpy api"); | 97 | MODULE_DESCRIPTION("asynchronous memcpy api"); |
108 | MODULE_LICENSE("GPL"); | 98 | MODULE_LICENSE("GPL"); |
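The flags/depend_tx/cb_fn/cb_param arguments of async_memcpy() (and of the other async_tx entry points converted below) now travel in a struct async_submit_ctl prepared with init_async_submit(). A usage sketch with illustrative page variables, assuming linux/async_tx.h is included:

	static void example_copy(struct page *dest_page, struct page *src_page)
	{
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx;

		/* no dependency, no completion callback, no scribble buffer */
		init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
		tx = async_memcpy(dest_page, src_page, 0, 0, PAGE_SIZE, &submit);
		async_tx_quiesce(&tx);	/* wait if the copy went down the DMA path */
	}
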
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index 5b5eb99bb244..58e4a8752aee 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c | |||
@@ -35,26 +35,26 @@ | |||
35 | * @val: fill value | 35 | * @val: fill value |
36 | * @offset: offset in pages to start transaction | 36 | * @offset: offset in pages to start transaction |
37 | * @len: length in bytes | 37 | * @len: length in bytes |
38 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 38 | * |
39 | * @depend_tx: memset depends on the result of this transaction | 39 | * honored flags: ASYNC_TX_ACK |
40 | * @cb_fn: function to call when the memcpy completes | ||
41 | * @cb_param: parameter to pass to the callback routine | ||
42 | */ | 40 | */ |
43 | struct dma_async_tx_descriptor * | 41 | struct dma_async_tx_descriptor * |
44 | async_memset(struct page *dest, int val, unsigned int offset, | 42 | async_memset(struct page *dest, int val, unsigned int offset, size_t len, |
45 | size_t len, enum async_tx_flags flags, | 43 | struct async_submit_ctl *submit) |
46 | struct dma_async_tx_descriptor *depend_tx, | ||
47 | dma_async_tx_callback cb_fn, void *cb_param) | ||
48 | { | 44 | { |
49 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET, | 45 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET, |
50 | &dest, 1, NULL, 0, len); | 46 | &dest, 1, NULL, 0, len); |
51 | struct dma_device *device = chan ? chan->device : NULL; | 47 | struct dma_device *device = chan ? chan->device : NULL; |
52 | struct dma_async_tx_descriptor *tx = NULL; | 48 | struct dma_async_tx_descriptor *tx = NULL; |
53 | 49 | ||
54 | if (device) { | 50 | if (device && is_dma_fill_aligned(device, offset, 0, len)) { |
55 | dma_addr_t dma_dest; | 51 | dma_addr_t dma_dest; |
56 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 52 | unsigned long dma_prep_flags = 0; |
57 | 53 | ||
54 | if (submit->cb_fn) | ||
55 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
56 | if (submit->flags & ASYNC_TX_FENCE) | ||
57 | dma_prep_flags |= DMA_PREP_FENCE; | ||
58 | dma_dest = dma_map_page(device->dev, dest, offset, len, | 58 | dma_dest = dma_map_page(device->dev, dest, offset, len, |
59 | DMA_FROM_DEVICE); | 59 | DMA_FROM_DEVICE); |
60 | 60 | ||
@@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset, | |||
64 | 64 | ||
65 | if (tx) { | 65 | if (tx) { |
66 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 66 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
67 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 67 | async_tx_submit(chan, tx, submit); |
68 | } else { /* run the memset synchronously */ | 68 | } else { /* run the memset synchronously */ |
69 | void *dest_buf; | 69 | void *dest_buf; |
70 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 70 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
71 | 71 | ||
72 | dest_buf = (void *) (((char *) page_address(dest)) + offset); | 72 | dest_buf = page_address(dest) + offset; |
73 | 73 | ||
74 | /* wait for any prerequisite operations */ | 74 | /* wait for any prerequisite operations */ |
75 | async_tx_quiesce(&depend_tx); | 75 | async_tx_quiesce(&submit->depend_tx); |
76 | 76 | ||
77 | memset(dest_buf, val, len); | 77 | memset(dest_buf, val, len); |
78 | 78 | ||
79 | async_tx_sync_epilog(cb_fn, cb_param); | 79 | async_tx_sync_epilog(submit); |
80 | } | 80 | } |
81 | 81 | ||
82 | return tx; | 82 | return tx; |
83 | } | 83 | } |
84 | EXPORT_SYMBOL_GPL(async_memset); | 84 | EXPORT_SYMBOL_GPL(async_memset); |
85 | 85 | ||
86 | static int __init async_memset_init(void) | ||
87 | { | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static void __exit async_memset_exit(void) | ||
92 | { | ||
93 | do { } while (0); | ||
94 | } | ||
95 | |||
96 | module_init(async_memset_init); | ||
97 | module_exit(async_memset_exit); | ||
98 | |||
99 | MODULE_AUTHOR("Intel Corporation"); | 86 | MODULE_AUTHOR("Intel Corporation"); |
100 | MODULE_DESCRIPTION("asynchronous memset api"); | 87 | MODULE_DESCRIPTION("asynchronous memset api"); |
101 | MODULE_LICENSE("GPL"); | 88 | MODULE_LICENSE("GPL"); |
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c new file mode 100644 index 000000000000..fdd8257d35d9 --- /dev/null +++ b/crypto/async_tx/async_pq.c | |||
@@ -0,0 +1,416 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com> | ||
3 | * Copyright(c) 2009 Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the Free | ||
7 | * Software Foundation; either version 2 of the License, or (at your option) | ||
8 | * any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
17 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | * | ||
19 | * The full GNU General Public License is included in this distribution in the | ||
20 | * file called COPYING. | ||
21 | */ | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/raid/pq.h> | ||
26 | #include <linux/async_tx.h> | ||
27 | #include <linux/gfp.h> | ||
28 | |||
29 | /** | ||
30 | * pq_scribble_page - space to hold throwaway P or Q buffer for | ||
31 | * synchronous gen_syndrome | ||
32 | */ | ||
33 | static struct page *pq_scribble_page; | ||
34 | |||
35 | /* the struct page *blocks[] parameter passed to async_gen_syndrome() | ||
36 | * and async_syndrome_val() contains the 'P' destination address at | ||
37 | * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] | ||
38 | * | ||
39 | * note: these are macros as they are used as lvalues | ||
40 | */ | ||
41 | #define P(b, d) (b[d-2]) | ||
42 | #define Q(b, d) (b[d-1]) | ||
43 | |||
44 | /** | ||
45 | * do_async_gen_syndrome - asynchronously calculate P and/or Q | ||
46 | */ | ||
47 | static __async_inline struct dma_async_tx_descriptor * | ||
48 | do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, | ||
49 | const unsigned char *scfs, unsigned int offset, int disks, | ||
50 | size_t len, dma_addr_t *dma_src, | ||
51 | struct async_submit_ctl *submit) | ||
52 | { | ||
53 | struct dma_async_tx_descriptor *tx = NULL; | ||
54 | struct dma_device *dma = chan->device; | ||
55 | enum dma_ctrl_flags dma_flags = 0; | ||
56 | enum async_tx_flags flags_orig = submit->flags; | ||
57 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | ||
58 | dma_async_tx_callback cb_param_orig = submit->cb_param; | ||
59 | int src_cnt = disks - 2; | ||
60 | unsigned char coefs[src_cnt]; | ||
61 | unsigned short pq_src_cnt; | ||
62 | dma_addr_t dma_dest[2]; | ||
63 | int src_off = 0; | ||
64 | int idx; | ||
65 | int i; | ||
66 | |||
67 | /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ | ||
68 | if (P(blocks, disks)) | ||
69 | dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, | ||
70 | len, DMA_BIDIRECTIONAL); | ||
71 | else | ||
72 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
73 | if (Q(blocks, disks)) | ||
74 | dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, | ||
75 | len, DMA_BIDIRECTIONAL); | ||
76 | else | ||
77 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
78 | |||
79 | /* convert source addresses being careful to collapse 'empty' | ||
80 | * sources and update the coefficients accordingly | ||
81 | */ | ||
82 | for (i = 0, idx = 0; i < src_cnt; i++) { | ||
83 | if (blocks[i] == NULL) | ||
84 | continue; | ||
85 | dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, | ||
86 | DMA_TO_DEVICE); | ||
87 | coefs[idx] = scfs[i]; | ||
88 | idx++; | ||
89 | } | ||
90 | src_cnt = idx; | ||
91 | |||
92 | while (src_cnt > 0) { | ||
93 | submit->flags = flags_orig; | ||
94 | pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); | ||
95 | /* if we are submitting additional pqs, leave the chain open, | ||
96 | * clear the callback parameters, and leave the destination | ||
97 | * buffers mapped | ||
98 | */ | ||
99 | if (src_cnt > pq_src_cnt) { | ||
100 | submit->flags &= ~ASYNC_TX_ACK; | ||
101 | submit->flags |= ASYNC_TX_FENCE; | ||
102 | dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; | ||
103 | submit->cb_fn = NULL; | ||
104 | submit->cb_param = NULL; | ||
105 | } else { | ||
106 | dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; | ||
107 | submit->cb_fn = cb_fn_orig; | ||
108 | submit->cb_param = cb_param_orig; | ||
109 | if (cb_fn_orig) | ||
110 | dma_flags |= DMA_PREP_INTERRUPT; | ||
111 | } | ||
112 | if (submit->flags & ASYNC_TX_FENCE) | ||
113 | dma_flags |= DMA_PREP_FENCE; | ||
114 | |||
115 | /* Since we have clobbered the src_list we are committed | ||
116 | * to doing this asynchronously. Drivers force forward | ||
117 | * progress in case they can not provide a descriptor | ||
118 | */ | ||
119 | for (;;) { | ||
120 | tx = dma->device_prep_dma_pq(chan, dma_dest, | ||
121 | &dma_src[src_off], | ||
122 | pq_src_cnt, | ||
123 | &coefs[src_off], len, | ||
124 | dma_flags); | ||
125 | if (likely(tx)) | ||
126 | break; | ||
127 | async_tx_quiesce(&submit->depend_tx); | ||
128 | dma_async_issue_pending(chan); | ||
129 | } | ||
130 | |||
131 | async_tx_submit(chan, tx, submit); | ||
132 | submit->depend_tx = tx; | ||
133 | |||
134 | /* drop completed sources */ | ||
135 | src_cnt -= pq_src_cnt; | ||
136 | src_off += pq_src_cnt; | ||
137 | |||
138 | dma_flags |= DMA_PREP_CONTINUE; | ||
139 | } | ||
140 | |||
141 | return tx; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome | ||
146 | */ | ||
147 | static void | ||
148 | do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | ||
149 | size_t len, struct async_submit_ctl *submit) | ||
150 | { | ||
151 | void **srcs; | ||
152 | int i; | ||
153 | |||
154 | if (submit->scribble) | ||
155 | srcs = submit->scribble; | ||
156 | else | ||
157 | srcs = (void **) blocks; | ||
158 | |||
159 | for (i = 0; i < disks; i++) { | ||
160 | if (blocks[i] == NULL) { | ||
161 | BUG_ON(i > disks - 3); /* P or Q can't be zero */ | ||
162 | srcs[i] = (void*)raid6_empty_zero_page; | ||
163 | } else | ||
164 | srcs[i] = page_address(blocks[i]) + offset; | ||
165 | } | ||
166 | raid6_call.gen_syndrome(disks, len, srcs); | ||
167 | async_tx_sync_epilog(submit); | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * async_gen_syndrome - asynchronously calculate a raid6 syndrome | ||
172 | * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 | ||
173 | * @offset: common offset into each block (src and dest) to start transaction | ||
174 | * @disks: number of blocks (including missing P or Q, see below) | ||
175 | * @len: length of operation in bytes | ||
176 | * @submit: submission/completion modifiers | ||
177 | * | ||
178 | * General note: This routine assumes a field of GF(2^8) with a | ||
179 | * primitive polynomial of 0x11d and a generator of {02}. | ||
180 | * | ||
181 | * 'disks' note: callers can optionally omit either P or Q (but not | ||
182 | * both) from the calculation by setting blocks[disks-2] or | ||
183 | * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= | ||
184 | * PAGE_SIZE as a temporary buffer of this size is used in the | ||
185 | * synchronous path. 'disks' always accounts for both destination | ||
186 | * buffers. If any source buffers (blocks[i] where i < disks - 2) are | ||
187 | * set to NULL those buffers will be replaced with the raid6_zero_page | ||
188 | * in the synchronous path and omitted in the hardware-asynchronous | ||
189 | * path. | ||
190 | * | ||
191 | * 'blocks' note: if submit->scribble is NULL then the contents of | ||
192 | * 'blocks' may be overwritten to perform address conversions | ||
193 | * (dma_map_page() or page_address()). | ||
194 | */ | ||
195 | struct dma_async_tx_descriptor * | ||
196 | async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | ||
197 | size_t len, struct async_submit_ctl *submit) | ||
198 | { | ||
199 | int src_cnt = disks - 2; | ||
200 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
201 | &P(blocks, disks), 2, | ||
202 | blocks, src_cnt, len); | ||
203 | struct dma_device *device = chan ? chan->device : NULL; | ||
204 | dma_addr_t *dma_src = NULL; | ||
205 | |||
206 | BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); | ||
207 | |||
208 | if (submit->scribble) | ||
209 | dma_src = submit->scribble; | ||
210 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
211 | dma_src = (dma_addr_t *) blocks; | ||
212 | |||
213 | if (dma_src && device && | ||
214 | (src_cnt <= dma_maxpq(device, 0) || | ||
215 | dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && | ||
216 | is_dma_pq_aligned(device, offset, 0, len)) { | ||
217 | /* run the p+q asynchronously */ | ||
218 | pr_debug("%s: (async) disks: %d len: %zu\n", | ||
219 | __func__, disks, len); | ||
220 | return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, | ||
221 | disks, len, dma_src, submit); | ||
222 | } | ||
223 | |||
224 | /* run the pq synchronously */ | ||
225 | pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); | ||
226 | |||
227 | /* wait for any prerequisite operations */ | ||
228 | async_tx_quiesce(&submit->depend_tx); | ||
229 | |||
230 | if (!P(blocks, disks)) { | ||
231 | P(blocks, disks) = pq_scribble_page; | ||
232 | BUG_ON(len + offset > PAGE_SIZE); | ||
233 | } | ||
234 | if (!Q(blocks, disks)) { | ||
235 | Q(blocks, disks) = pq_scribble_page; | ||
236 | BUG_ON(len + offset > PAGE_SIZE); | ||
237 | } | ||
238 | do_sync_gen_syndrome(blocks, offset, disks, len, submit); | ||
239 | |||
240 | return NULL; | ||
241 | } | ||
242 | EXPORT_SYMBOL_GPL(async_gen_syndrome); | ||
243 | |||
244 | static inline struct dma_chan * | ||
245 | pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) | ||
246 | { | ||
247 | #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA | ||
248 | return NULL; | ||
249 | #endif | ||
250 | return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks, | ||
251 | disks, len); | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * async_syndrome_val - asynchronously validate a raid6 syndrome | ||
256 | * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 | ||
257 | * @offset: common offset into each block (src and dest) to start transaction | ||
258 | * @disks: number of blocks (including missing P or Q, see below) | ||
259 | * @len: length of operation in bytes | ||
260 | * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set | ||
261 | * @spare: temporary result buffer for the synchronous case | ||
262 | * @submit: submission / completion modifiers | ||
263 | * | ||
264 | * The same notes from async_gen_syndrome apply to the 'blocks', | ||
265 | * and 'disks' parameters of this routine. The synchronous path | ||
266 | * requires a temporary result buffer and submit->scribble to be | ||
267 | * specified. | ||
268 | */ | ||
269 | struct dma_async_tx_descriptor * | ||
270 | async_syndrome_val(struct page **blocks, unsigned int offset, int disks, | ||
271 | size_t len, enum sum_check_flags *pqres, struct page *spare, | ||
272 | struct async_submit_ctl *submit) | ||
273 | { | ||
274 | struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); | ||
275 | struct dma_device *device = chan ? chan->device : NULL; | ||
276 | struct dma_async_tx_descriptor *tx; | ||
277 | unsigned char coefs[disks-2]; | ||
278 | enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | ||
279 | dma_addr_t *dma_src = NULL; | ||
280 | int src_cnt = 0; | ||
281 | |||
282 | BUG_ON(disks < 4); | ||
283 | |||
284 | if (submit->scribble) | ||
285 | dma_src = submit->scribble; | ||
286 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
287 | dma_src = (dma_addr_t *) blocks; | ||
288 | |||
289 | if (dma_src && device && disks <= dma_maxpq(device, 0) && | ||
290 | is_dma_pq_aligned(device, offset, 0, len)) { | ||
291 | struct device *dev = device->dev; | ||
292 | dma_addr_t *pq = &dma_src[disks-2]; | ||
293 | int i; | ||
294 | |||
295 | pr_debug("%s: (async) disks: %d len: %zu\n", | ||
296 | __func__, disks, len); | ||
297 | if (!P(blocks, disks)) | ||
298 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
299 | else | ||
300 | pq[0] = dma_map_page(dev, P(blocks, disks), | ||
301 | offset, len, | ||
302 | DMA_TO_DEVICE); | ||
303 | if (!Q(blocks, disks)) | ||
304 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
305 | else | ||
306 | pq[1] = dma_map_page(dev, Q(blocks, disks), | ||
307 | offset, len, | ||
308 | DMA_TO_DEVICE); | ||
309 | |||
310 | if (submit->flags & ASYNC_TX_FENCE) | ||
311 | dma_flags |= DMA_PREP_FENCE; | ||
312 | for (i = 0; i < disks-2; i++) | ||
313 | if (likely(blocks[i])) { | ||
314 | dma_src[src_cnt] = dma_map_page(dev, blocks[i], | ||
315 | offset, len, | ||
316 | DMA_TO_DEVICE); | ||
317 | coefs[src_cnt] = raid6_gfexp[i]; | ||
318 | src_cnt++; | ||
319 | } | ||
320 | |||
321 | for (;;) { | ||
322 | tx = device->device_prep_dma_pq_val(chan, pq, dma_src, | ||
323 | src_cnt, | ||
324 | coefs, | ||
325 | len, pqres, | ||
326 | dma_flags); | ||
327 | if (likely(tx)) | ||
328 | break; | ||
329 | async_tx_quiesce(&submit->depend_tx); | ||
330 | dma_async_issue_pending(chan); | ||
331 | } | ||
332 | async_tx_submit(chan, tx, submit); | ||
333 | |||
334 | return tx; | ||
335 | } else { | ||
336 | struct page *p_src = P(blocks, disks); | ||
337 | struct page *q_src = Q(blocks, disks); | ||
338 | enum async_tx_flags flags_orig = submit->flags; | ||
339 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | ||
340 | void *scribble = submit->scribble; | ||
341 | void *cb_param_orig = submit->cb_param; | ||
342 | void *p, *q, *s; | ||
343 | |||
344 | pr_debug("%s: (sync) disks: %d len: %zu\n", | ||
345 | __func__, disks, len); | ||
346 | |||
347 | /* caller must provide a temporary result buffer and | ||
348 | * allow the input parameters to be preserved | ||
349 | */ | ||
350 | BUG_ON(!spare || !scribble); | ||
351 | |||
352 | /* wait for any prerequisite operations */ | ||
353 | async_tx_quiesce(&submit->depend_tx); | ||
354 | |||
355 | /* recompute p and/or q into the temporary buffer and then | ||
356 | * check to see the result matches the current value | ||
357 | */ | ||
358 | tx = NULL; | ||
359 | *pqres = 0; | ||
360 | if (p_src) { | ||
361 | init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, | ||
362 | NULL, NULL, scribble); | ||
363 | tx = async_xor(spare, blocks, offset, disks-2, len, submit); | ||
364 | async_tx_quiesce(&tx); | ||
365 | p = page_address(p_src) + offset; | ||
366 | s = page_address(spare) + offset; | ||
367 | *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; | ||
368 | } | ||
369 | |||
370 | if (q_src) { | ||
371 | P(blocks, disks) = NULL; | ||
372 | Q(blocks, disks) = spare; | ||
373 | init_async_submit(submit, 0, NULL, NULL, NULL, scribble); | ||
374 | tx = async_gen_syndrome(blocks, offset, disks, len, submit); | ||
375 | async_tx_quiesce(&tx); | ||
376 | q = page_address(q_src) + offset; | ||
377 | s = page_address(spare) + offset; | ||
378 | *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; | ||
379 | } | ||
380 | |||
381 | /* restore P, Q and submit */ | ||
382 | P(blocks, disks) = p_src; | ||
383 | Q(blocks, disks) = q_src; | ||
384 | |||
385 | submit->cb_fn = cb_fn_orig; | ||
386 | submit->cb_param = cb_param_orig; | ||
387 | submit->flags = flags_orig; | ||
388 | async_tx_sync_epilog(submit); | ||
389 | |||
390 | return NULL; | ||
391 | } | ||
392 | } | ||
393 | EXPORT_SYMBOL_GPL(async_syndrome_val); | ||
394 | |||
395 | static int __init async_pq_init(void) | ||
396 | { | ||
397 | pq_scribble_page = alloc_page(GFP_KERNEL); | ||
398 | |||
399 | if (pq_scribble_page) | ||
400 | return 0; | ||
401 | |||
402 | pr_err("%s: failed to allocate required spare page\n", __func__); | ||
403 | |||
404 | return -ENOMEM; | ||
405 | } | ||
406 | |||
407 | static void __exit async_pq_exit(void) | ||
408 | { | ||
409 | put_page(pq_scribble_page); | ||
410 | } | ||
411 | |||
412 | module_init(async_pq_init); | ||
413 | module_exit(async_pq_exit); | ||
414 | |||
415 | MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation"); | ||
416 | MODULE_LICENSE("GPL"); | ||
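A hedged usage sketch for the new syndrome interface: P and Q are computed over the sources in blocks[0..disks-3], with the destinations at blocks[disks-2] and blocks[disks-1] as documented above. Passing a NULL scribble means blocks[] may be overwritten for address conversion, so real callers usually supply a scratch area; the function name below is illustrative:

	static void example_syndrome(struct page **blocks, int disks)
	{
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx;

		/* NULL scribble: blocks[] may be clobbered for address conversion */
		init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
		tx = async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);
		async_tx_quiesce(&tx);	/* wait for P and Q to be written */
	}
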
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c new file mode 100644 index 000000000000..ce038d861eb9 --- /dev/null +++ b/crypto/async_tx/async_raid6_recov.c | |||
@@ -0,0 +1,505 @@ | |||
1 | /* | ||
2 | * Asynchronous RAID-6 recovery calculations ASYNC_TX API. | ||
3 | * Copyright(c) 2009 Intel Corporation | ||
4 | * | ||
5 | * based on raid6recov.c: | ||
6 | * Copyright 2002 H. Peter Anvin | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 51 | ||
20 | * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/dma-mapping.h> | ||
26 | #include <linux/raid/pq.h> | ||
27 | #include <linux/async_tx.h> | ||
28 | |||
29 | static struct dma_async_tx_descriptor * | ||
30 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | ||
31 | size_t len, struct async_submit_ctl *submit) | ||
32 | { | ||
33 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
34 | &dest, 1, srcs, 2, len); | ||
35 | struct dma_device *dma = chan ? chan->device : NULL; | ||
36 | const u8 *amul, *bmul; | ||
37 | u8 ax, bx; | ||
38 | u8 *a, *b, *c; | ||
39 | |||
40 | if (dma) { | ||
41 | dma_addr_t dma_dest[2]; | ||
42 | dma_addr_t dma_src[2]; | ||
43 | struct device *dev = dma->dev; | ||
44 | struct dma_async_tx_descriptor *tx; | ||
45 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | ||
46 | |||
47 | if (submit->flags & ASYNC_TX_FENCE) | ||
48 | dma_flags |= DMA_PREP_FENCE; | ||
49 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
50 | dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); | ||
51 | dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); | ||
52 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, | ||
53 | len, dma_flags); | ||
54 | if (tx) { | ||
55 | async_tx_submit(chan, tx, submit); | ||
56 | return tx; | ||
57 | } | ||
58 | |||
59 | /* could not get a descriptor, unmap and fall through to | ||
60 | * the synchronous path | ||
61 | */ | ||
62 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | ||
63 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
64 | dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); | ||
65 | } | ||
66 | |||
67 | /* run the operation synchronously */ | ||
68 | async_tx_quiesce(&submit->depend_tx); | ||
69 | amul = raid6_gfmul[coef[0]]; | ||
70 | bmul = raid6_gfmul[coef[1]]; | ||
71 | a = page_address(srcs[0]); | ||
72 | b = page_address(srcs[1]); | ||
73 | c = page_address(dest); | ||
74 | |||
75 | while (len--) { | ||
76 | ax = amul[*a++]; | ||
77 | bx = bmul[*b++]; | ||
78 | *c++ = ax ^ bx; | ||
79 | } | ||
80 | |||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | static struct dma_async_tx_descriptor * | ||
85 | async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | ||
86 | struct async_submit_ctl *submit) | ||
87 | { | ||
88 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | ||
89 | &dest, 1, &src, 1, len); | ||
90 | struct dma_device *dma = chan ? chan->device : NULL; | ||
91 | const u8 *qmul; /* Q multiplier table */ | ||
92 | u8 *d, *s; | ||
93 | |||
94 | if (dma) { | ||
95 | dma_addr_t dma_dest[2]; | ||
96 | dma_addr_t dma_src[1]; | ||
97 | struct device *dev = dma->dev; | ||
98 | struct dma_async_tx_descriptor *tx; | ||
99 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | ||
100 | |||
101 | if (submit->flags & ASYNC_TX_FENCE) | ||
102 | dma_flags |= DMA_PREP_FENCE; | ||
103 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
104 | dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); | ||
105 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, | ||
106 | len, dma_flags); | ||
107 | if (tx) { | ||
108 | async_tx_submit(chan, tx, submit); | ||
109 | return tx; | ||
110 | } | ||
111 | |||
112 | /* could not get a descriptor, unmap and fall through to | ||
113 | * the synchronous path | ||
114 | */ | ||
115 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | ||
116 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
117 | } | ||
118 | |||
119 | /* no channel available, or failed to allocate a descriptor, so | ||
120 | * perform the operation synchronously | ||
121 | */ | ||
122 | async_tx_quiesce(&submit->depend_tx); | ||
123 | qmul = raid6_gfmul[coef]; | ||
124 | d = page_address(dest); | ||
125 | s = page_address(src); | ||
126 | |||
127 | while (len--) | ||
128 | *d++ = qmul[*s++]; | ||
129 | |||
130 | return NULL; | ||
131 | } | ||
132 | |||
133 | static struct dma_async_tx_descriptor * | ||
134 | __2data_recov_4(int disks, size_t bytes, int faila, int failb, | ||
135 | struct page **blocks, struct async_submit_ctl *submit) | ||
136 | { | ||
137 | struct dma_async_tx_descriptor *tx = NULL; | ||
138 | struct page *p, *q, *a, *b; | ||
139 | struct page *srcs[2]; | ||
140 | unsigned char coef[2]; | ||
141 | enum async_tx_flags flags = submit->flags; | ||
142 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
143 | void *cb_param = submit->cb_param; | ||
144 | void *scribble = submit->scribble; | ||
145 | |||
146 | p = blocks[disks-2]; | ||
147 | q = blocks[disks-1]; | ||
148 | |||
149 | a = blocks[faila]; | ||
150 | b = blocks[failb]; | ||
151 | |||
152 | /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ | ||
153 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
154 | srcs[0] = p; | ||
155 | srcs[1] = q; | ||
156 | coef[0] = raid6_gfexi[failb-faila]; | ||
157 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
158 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
159 | tx = async_sum_product(b, srcs, coef, bytes, submit); | ||
160 | |||
161 | /* Dy = P+Pxy+Dx */ | ||
162 | srcs[0] = p; | ||
163 | srcs[1] = b; | ||
164 | init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, | ||
165 | cb_param, scribble); | ||
166 | tx = async_xor(a, srcs, 0, 2, bytes, submit); | ||
167 | |||
168 | return tx; | ||
169 | |||
170 | } | ||
171 | |||
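The same algebra written as a plain synchronous loop may help when reading the two calls above; this is only a sketch built on the lookup tables from <linux/raid/pq.h>, with a/b pointing at the pages for faila/failb and p/q at the parity pages (all names illustrative):

/* 4-disk case: there are no surviving data disks, so P+Pxy == P and
 * Q+Qxy == Q and the parity pages can be used directly
 */
static void sync_2data_recov_4(size_t bytes, int faila, int failb,
			       u8 *a, u8 *b, const u8 *p, const u8 *q)
{
	const u8 *amul = raid6_gfmul[raid6_gfexi[failb-faila]];
	const u8 *bmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila] ^
						 raid6_gfexp[failb]]];
	size_t i;

	for (i = 0; i < bytes; i++) {
		u8 dx = amul[p[i]] ^ bmul[q[i]];	/* Dx = A*P + B*Q */

		b[i] = dx;		/* the failb page receives Dx */
		a[i] = p[i] ^ dx;	/* the faila page: Dy = P + Dx */
	}
}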
172 | static struct dma_async_tx_descriptor * | ||
173 | __2data_recov_5(int disks, size_t bytes, int faila, int failb, | ||
174 | struct page **blocks, struct async_submit_ctl *submit) | ||
175 | { | ||
176 | struct dma_async_tx_descriptor *tx = NULL; | ||
177 | struct page *p, *q, *g, *dp, *dq; | ||
178 | struct page *srcs[2]; | ||
179 | unsigned char coef[2]; | ||
180 | enum async_tx_flags flags = submit->flags; | ||
181 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
182 | void *cb_param = submit->cb_param; | ||
183 | void *scribble = submit->scribble; | ||
184 | int good_srcs, good, i; | ||
185 | |||
186 | good_srcs = 0; | ||
187 | good = -1; | ||
188 | for (i = 0; i < disks-2; i++) { | ||
189 | if (blocks[i] == NULL) | ||
190 | continue; | ||
191 | if (i == faila || i == failb) | ||
192 | continue; | ||
193 | good = i; | ||
194 | good_srcs++; | ||
195 | } | ||
196 | BUG_ON(good_srcs > 1); | ||
197 | |||
198 | p = blocks[disks-2]; | ||
199 | q = blocks[disks-1]; | ||
200 | g = blocks[good]; | ||
201 | |||
202 | /* Compute syndrome with zero for the missing data pages | ||
203 | * Use the dead data pages as temporary storage for delta p and | ||
204 | * delta q | ||
205 | */ | ||
206 | dp = blocks[faila]; | ||
207 | dq = blocks[failb]; | ||
208 | |||
209 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
210 | tx = async_memcpy(dp, g, 0, 0, bytes, submit); | ||
211 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
212 | tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); | ||
213 | |||
214 | /* compute P + Pxy */ | ||
215 | srcs[0] = dp; | ||
216 | srcs[1] = p; | ||
217 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
218 | NULL, NULL, scribble); | ||
219 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
220 | |||
221 | /* compute Q + Qxy */ | ||
222 | srcs[0] = dq; | ||
223 | srcs[1] = q; | ||
224 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
225 | NULL, NULL, scribble); | ||
226 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
227 | |||
228 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
229 | srcs[0] = dp; | ||
230 | srcs[1] = dq; | ||
231 | coef[0] = raid6_gfexi[failb-faila]; | ||
232 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
233 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
234 | tx = async_sum_product(dq, srcs, coef, bytes, submit); | ||
235 | |||
236 | /* Dy = P+Pxy+Dx */ | ||
237 | srcs[0] = dp; | ||
238 | srcs[1] = dq; | ||
239 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
240 | cb_param, scribble); | ||
241 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
242 | |||
243 | return tx; | ||
244 | } | ||
245 | |||
246 | static struct dma_async_tx_descriptor * | ||
247 | __2data_recov_n(int disks, size_t bytes, int faila, int failb, | ||
248 | struct page **blocks, struct async_submit_ctl *submit) | ||
249 | { | ||
250 | struct dma_async_tx_descriptor *tx = NULL; | ||
251 | struct page *p, *q, *dp, *dq; | ||
252 | struct page *srcs[2]; | ||
253 | unsigned char coef[2]; | ||
254 | enum async_tx_flags flags = submit->flags; | ||
255 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
256 | void *cb_param = submit->cb_param; | ||
257 | void *scribble = submit->scribble; | ||
258 | |||
259 | p = blocks[disks-2]; | ||
260 | q = blocks[disks-1]; | ||
261 | |||
262 | /* Compute syndrome with zero for the missing data pages | ||
263 | * Use the dead data pages as temporary storage for | ||
264 | * delta p and delta q | ||
265 | */ | ||
266 | dp = blocks[faila]; | ||
267 | blocks[faila] = NULL; | ||
268 | blocks[disks-2] = dp; | ||
269 | dq = blocks[failb]; | ||
270 | blocks[failb] = NULL; | ||
271 | blocks[disks-1] = dq; | ||
272 | |||
273 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
274 | tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); | ||
275 | |||
276 | /* Restore pointer table */ | ||
277 | blocks[faila] = dp; | ||
278 | blocks[failb] = dq; | ||
279 | blocks[disks-2] = p; | ||
280 | blocks[disks-1] = q; | ||
281 | |||
282 | /* compute P + Pxy */ | ||
283 | srcs[0] = dp; | ||
284 | srcs[1] = p; | ||
285 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
286 | NULL, NULL, scribble); | ||
287 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
288 | |||
289 | /* compute Q + Qxy */ | ||
290 | srcs[0] = dq; | ||
291 | srcs[1] = q; | ||
292 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
293 | NULL, NULL, scribble); | ||
294 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
295 | |||
296 | /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ | ||
297 | srcs[0] = dp; | ||
298 | srcs[1] = dq; | ||
299 | coef[0] = raid6_gfexi[failb-faila]; | ||
300 | coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; | ||
301 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
302 | tx = async_sum_product(dq, srcs, coef, bytes, submit); | ||
303 | |||
304 | /* Dy = P+Pxy+Dx */ | ||
305 | srcs[0] = dp; | ||
306 | srcs[1] = dq; | ||
307 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
308 | cb_param, scribble); | ||
309 | tx = async_xor(dp, srcs, 0, 2, bytes, submit); | ||
310 | |||
311 | return tx; | ||
312 | } | ||
313 | |||
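To make the pointer juggling above concrete, consider a hypothetical 6-disk array (D0..D3 followed by P and Q) with faila=1 and failb=3; "D1 page"/"D3 page" below mean the pages that held the lost data and now serve as scratch space:

/*
 * entry:    blocks[] = { D0, D1 page, D2, D3 page, P, Q }
 * syndrome: blocks[] = { D0, NULL,    D2, NULL,    D1 page, D3 page }
 *           (the D1/D3 pages collect the partial syndromes P' and Q'
 *            computed over the surviving data only)
 * restored: blocks[] = { D0, D1 page, D2, D3 page, P, Q }
 *
 * The two async_xor calls then fold the real P and Q into P' and Q',
 * producing the P+Pxy and Q+Qxy terms consumed by async_sum_product.
 */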
314 | /** | ||
315 | * async_raid6_2data_recov - asynchronously calculate two missing data blocks | ||
316 | * @disks: number of disks in the RAID-6 array | ||
317 | * @bytes: block size | ||
318 | * @faila: first failed drive index | ||
319 | * @failb: second failed drive index | ||
320 | * @blocks: array of source pointers where the last two entries are p and q | ||
321 | * @submit: submission/completion modifiers | ||
322 | */ | ||
323 | struct dma_async_tx_descriptor * | ||
324 | async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | ||
325 | struct page **blocks, struct async_submit_ctl *submit) | ||
326 | { | ||
327 | void *scribble = submit->scribble; | ||
328 | int non_zero_srcs, i; | ||
329 | |||
330 | BUG_ON(faila == failb); | ||
331 | if (failb < faila) | ||
332 | swap(faila, failb); | ||
333 | |||
334 | pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); | ||
335 | |||
336 | /* if a dma resource is not available or a scribble buffer is not | ||
337 | * available, punt to the synchronous path. In the 'dma not | ||
338 | * available' case be sure to use the scribble buffer to | ||
339 | * preserve the content of 'blocks' as the caller intended. | ||
340 | */ | ||
341 | if (!async_dma_find_channel(DMA_PQ) || !scribble) { | ||
342 | void **ptrs = scribble ? scribble : (void **) blocks; | ||
343 | |||
344 | async_tx_quiesce(&submit->depend_tx); | ||
345 | for (i = 0; i < disks; i++) | ||
346 | if (blocks[i] == NULL) | ||
347 | ptrs[i] = (void *) raid6_empty_zero_page; | ||
348 | else | ||
349 | ptrs[i] = page_address(blocks[i]); | ||
350 | |||
351 | raid6_2data_recov(disks, bytes, faila, failb, ptrs); | ||
352 | |||
353 | async_tx_sync_epilog(submit); | ||
354 | |||
355 | return NULL; | ||
356 | } | ||
357 | |||
358 | non_zero_srcs = 0; | ||
359 | for (i = 0; i < disks-2 && non_zero_srcs < 4; i++) | ||
360 | if (blocks[i]) | ||
361 | non_zero_srcs++; | ||
362 | switch (non_zero_srcs) { | ||
363 | case 0: | ||
364 | case 1: | ||
365 | /* There must be at least 2 sources - the failed devices. */ | ||
366 | BUG(); | ||
367 | |||
368 | case 2: | ||
369 | /* dma devices do not uniformly understand a zero source pq | ||
370 | * operation (in contrast to the synchronous case), so | ||
371 | * explicitly handle the special case of a 4 disk array with | ||
372 | * both data disks missing. | ||
373 | */ | ||
374 | return __2data_recov_4(disks, bytes, faila, failb, blocks, submit); | ||
375 | case 3: | ||
376 | /* dma devices do not uniformly understand a single | ||
377 | * source pq operation (in contrast to the synchronous | ||
378 | * case), so explicitly handle the special case of a 5 disk | ||
379 | * array with 2 of 3 data disks missing. | ||
380 | */ | ||
381 | return __2data_recov_5(disks, bytes, faila, failb, blocks, submit); | ||
382 | default: | ||
383 | return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); | ||
384 | } | ||
385 | } | ||
386 | EXPORT_SYMBOL_GPL(async_raid6_2data_recov); | ||
387 | |||
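As a usage reference, a minimal caller sketch for the routine just exported, in the same style as the raid6test module below; the wrapper name rebuild_two and its arguments are hypothetical:

/* ptrs[] holds one page per device with P and Q last; the pages at
 * faila/failb are overwritten with the recovered data blocks
 */
static struct dma_async_tx_descriptor *
rebuild_two(struct page **ptrs, int disks, size_t bytes, int faila, int failb,
	    addr_conv_t *addr_conv)
{
	struct async_submit_ctl submit;

	/* no dependency and no callback: the caller either chains further
	 * operations off the returned descriptor or quiesces it
	 */
	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
	return async_raid6_2data_recov(disks, bytes, faila, failb, ptrs,
				       &submit);
}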
388 | /** | ||
389 | * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block | ||
390 | * @disks: number of disks in the RAID-6 array | ||
391 | * @bytes: block size | ||
392 | * @faila: failed drive index | ||
393 | * @blocks: array of source pointers where the last two entries are p and q | ||
394 | * @submit: submission/completion modifiers | ||
395 | */ | ||
396 | struct dma_async_tx_descriptor * | ||
397 | async_raid6_datap_recov(int disks, size_t bytes, int faila, | ||
398 | struct page **blocks, struct async_submit_ctl *submit) | ||
399 | { | ||
400 | struct dma_async_tx_descriptor *tx = NULL; | ||
401 | struct page *p, *q, *dq; | ||
402 | u8 coef; | ||
403 | enum async_tx_flags flags = submit->flags; | ||
404 | dma_async_tx_callback cb_fn = submit->cb_fn; | ||
405 | void *cb_param = submit->cb_param; | ||
406 | void *scribble = submit->scribble; | ||
407 | int good_srcs, good, i; | ||
408 | struct page *srcs[2]; | ||
409 | |||
410 | pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); | ||
411 | |||
412 | /* if a dma resource is not available or a scribble buffer is not | ||
413 | * available, punt to the synchronous path. In the 'dma not | ||
414 | * available' case be sure to use the scribble buffer to | ||
415 | * preserve the content of 'blocks' as the caller intended. | ||
416 | */ | ||
417 | if (!async_dma_find_channel(DMA_PQ) || !scribble) { | ||
418 | void **ptrs = scribble ? scribble : (void **) blocks; | ||
419 | |||
420 | async_tx_quiesce(&submit->depend_tx); | ||
421 | for (i = 0; i < disks; i++) | ||
422 | if (blocks[i] == NULL) | ||
423 | ptrs[i] = (void*)raid6_empty_zero_page; | ||
424 | else | ||
425 | ptrs[i] = page_address(blocks[i]); | ||
426 | |||
427 | raid6_datap_recov(disks, bytes, faila, ptrs); | ||
428 | |||
429 | async_tx_sync_epilog(submit); | ||
430 | |||
431 | return NULL; | ||
432 | } | ||
433 | |||
434 | good_srcs = 0; | ||
435 | good = -1; | ||
436 | for (i = 0; i < disks-2; i++) { | ||
437 | if (i == faila) | ||
438 | continue; | ||
439 | if (blocks[i]) { | ||
440 | good = i; | ||
441 | good_srcs++; | ||
442 | if (good_srcs > 1) | ||
443 | break; | ||
444 | } | ||
445 | } | ||
446 | BUG_ON(good_srcs == 0); | ||
447 | |||
448 | p = blocks[disks-2]; | ||
449 | q = blocks[disks-1]; | ||
450 | |||
451 | /* Compute syndrome with zero for the missing data page | ||
452 | * Use the dead data page as temporary storage for delta q | ||
453 | */ | ||
454 | dq = blocks[faila]; | ||
455 | blocks[faila] = NULL; | ||
456 | blocks[disks-1] = dq; | ||
457 | |||
458 | /* in the 4-disk case we only need to perform a single source | ||
459 | * multiplication with the one good data block. | ||
460 | */ | ||
461 | if (good_srcs == 1) { | ||
462 | struct page *g = blocks[good]; | ||
463 | |||
464 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, | ||
465 | scribble); | ||
466 | tx = async_memcpy(p, g, 0, 0, bytes, submit); | ||
467 | |||
468 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, | ||
469 | scribble); | ||
470 | tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); | ||
471 | } else { | ||
472 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, | ||
473 | scribble); | ||
474 | tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); | ||
475 | } | ||
476 | |||
477 | /* Restore pointer table */ | ||
478 | blocks[faila] = dq; | ||
479 | blocks[disks-1] = q; | ||
480 | |||
481 | /* calculate g^{-faila} */ | ||
482 | coef = raid6_gfinv[raid6_gfexp[faila]]; | ||
483 | |||
484 | srcs[0] = dq; | ||
485 | srcs[1] = q; | ||
486 | init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, | ||
487 | NULL, NULL, scribble); | ||
488 | tx = async_xor(dq, srcs, 0, 2, bytes, submit); | ||
489 | |||
490 | init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); | ||
491 | tx = async_mult(dq, dq, coef, bytes, submit); | ||
492 | |||
493 | srcs[0] = p; | ||
494 | srcs[1] = dq; | ||
495 | init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, | ||
496 | cb_param, scribble); | ||
497 | tx = async_xor(p, srcs, 0, 2, bytes, submit); | ||
498 | |||
499 | return tx; | ||
500 | } | ||
501 | EXPORT_SYMBOL_GPL(async_raid6_datap_recov); | ||
502 | |||
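A matching sketch for the data+P case; the wrapper is hypothetical, and note that async_tx_quiesce() both waits for and acks the descriptor when the operation went through a dma channel:

static void rebuild_data_and_p(struct page **ptrs, int disks, size_t bytes,
			       int faila, addr_conv_t *addr_conv)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
	tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
	async_tx_issue_pending(tx);
	async_tx_quiesce(&tx);	/* tx is NULL here if the op ran synchronously */
}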
503 | MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); | ||
504 | MODULE_DESCRIPTION("asynchronous RAID-6 recovery api"); | ||
505 | MODULE_LICENSE("GPL"); | ||
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 06eb6cc09fef..7f2c00a45205 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c | |||
@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void) | |||
42 | async_dmaengine_put(); | 42 | async_dmaengine_put(); |
43 | } | 43 | } |
44 | 44 | ||
45 | module_init(async_tx_init); | ||
46 | module_exit(async_tx_exit); | ||
47 | |||
45 | /** | 48 | /** |
46 | * __async_tx_find_channel - find a channel to carry out the operation or let | 49 | * __async_tx_find_channel - find a channel to carry out the operation or let |
47 | * the transaction execute synchronously | 50 | * the transaction execute synchronously |
48 | * @depend_tx: transaction dependency | 51 | * @submit: transaction dependency and submission modifiers |
49 | * @tx_type: transaction type | 52 | * @tx_type: transaction type |
50 | */ | 53 | */ |
51 | struct dma_chan * | 54 | struct dma_chan * |
52 | __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 55 | __async_tx_find_channel(struct async_submit_ctl *submit, |
53 | enum dma_transaction_type tx_type) | 56 | enum dma_transaction_type tx_type) |
54 | { | 57 | { |
58 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; | ||
59 | |||
55 | /* see if we can keep the chain on one channel */ | 60 | /* see if we can keep the chain on one channel */ |
56 | if (depend_tx && | 61 | if (depend_tx && |
57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) | 62 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) |
@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | |||
59 | return async_dma_find_channel(tx_type); | 64 | return async_dma_find_channel(tx_type); |
60 | } | 65 | } |
61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); | 66 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); |
62 | #else | ||
63 | static int __init async_tx_init(void) | ||
64 | { | ||
65 | printk(KERN_INFO "async_tx: api initialized (sync-only)\n"); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void __exit async_tx_exit(void) | ||
70 | { | ||
71 | do { } while (0); | ||
72 | } | ||
73 | #endif | 67 | #endif |
74 | 68 | ||
75 | 69 | ||
@@ -83,24 +77,23 @@ static void | |||
83 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | 77 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, |
84 | struct dma_async_tx_descriptor *tx) | 78 | struct dma_async_tx_descriptor *tx) |
85 | { | 79 | { |
86 | struct dma_chan *chan; | 80 | struct dma_chan *chan = depend_tx->chan; |
87 | struct dma_device *device; | 81 | struct dma_device *device = chan->device; |
88 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; | 82 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; |
89 | 83 | ||
90 | /* first check to see if we can still append to depend_tx */ | 84 | /* first check to see if we can still append to depend_tx */ |
91 | spin_lock_bh(&depend_tx->lock); | 85 | txd_lock(depend_tx); |
92 | if (depend_tx->parent && depend_tx->chan == tx->chan) { | 86 | if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { |
93 | tx->parent = depend_tx; | 87 | txd_chain(depend_tx, tx); |
94 | depend_tx->next = tx; | ||
95 | intr_tx = NULL; | 88 | intr_tx = NULL; |
96 | } | 89 | } |
97 | spin_unlock_bh(&depend_tx->lock); | 90 | txd_unlock(depend_tx); |
98 | 91 | ||
99 | if (!intr_tx) | 92 | /* attached dependency, flush the parent channel */ |
93 | if (!intr_tx) { | ||
94 | device->device_issue_pending(chan); | ||
100 | return; | 95 | return; |
101 | 96 | } | |
102 | chan = depend_tx->chan; | ||
103 | device = chan->device; | ||
104 | 97 | ||
105 | /* see if we can schedule an interrupt | 98 | /* see if we can schedule an interrupt |
106 | * otherwise poll for completion | 99 | * otherwise poll for completion |
@@ -113,27 +106,26 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
113 | if (intr_tx) { | 106 | if (intr_tx) { |
114 | intr_tx->callback = NULL; | 107 | intr_tx->callback = NULL; |
115 | intr_tx->callback_param = NULL; | 108 | intr_tx->callback_param = NULL; |
116 | tx->parent = intr_tx; | 109 | /* safe to chain outside the lock since we know we are |
117 | /* safe to set ->next outside the lock since we know we are | ||
118 | * not submitted yet | 110 | * not submitted yet |
119 | */ | 111 | */ |
120 | intr_tx->next = tx; | 112 | txd_chain(intr_tx, tx); |
121 | 113 | ||
122 | /* check if we need to append */ | 114 | /* check if we need to append */ |
123 | spin_lock_bh(&depend_tx->lock); | 115 | txd_lock(depend_tx); |
124 | if (depend_tx->parent) { | 116 | if (txd_parent(depend_tx)) { |
125 | intr_tx->parent = depend_tx; | 117 | txd_chain(depend_tx, intr_tx); |
126 | depend_tx->next = intr_tx; | ||
127 | async_tx_ack(intr_tx); | 118 | async_tx_ack(intr_tx); |
128 | intr_tx = NULL; | 119 | intr_tx = NULL; |
129 | } | 120 | } |
130 | spin_unlock_bh(&depend_tx->lock); | 121 | txd_unlock(depend_tx); |
131 | 122 | ||
132 | if (intr_tx) { | 123 | if (intr_tx) { |
133 | intr_tx->parent = NULL; | 124 | txd_clear_parent(intr_tx); |
134 | intr_tx->tx_submit(intr_tx); | 125 | intr_tx->tx_submit(intr_tx); |
135 | async_tx_ack(intr_tx); | 126 | async_tx_ack(intr_tx); |
136 | } | 127 | } |
128 | device->device_issue_pending(chan); | ||
137 | } else { | 129 | } else { |
138 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | 130 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) |
139 | panic("%s: DMA_ERROR waiting for depend_tx\n", | 131 | panic("%s: DMA_ERROR waiting for depend_tx\n", |
@@ -144,13 +136,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
144 | 136 | ||
145 | 137 | ||
146 | /** | 138 | /** |
147 | * submit_disposition - while holding depend_tx->lock we must avoid submitting | 139 | * submit_disposition - flags for routing an incoming operation |
148 | * new operations to prevent a circular locking dependency with | ||
149 | * drivers that already hold a channel lock when calling | ||
150 | * async_tx_run_dependencies. | ||
151 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock | 140 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock |
152 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch | 141 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch |
153 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly | 142 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly |
143 | * | ||
144 | * while holding depend_tx->lock we must avoid submitting new operations | ||
145 | * to prevent a circular locking dependency with drivers that already | ||
146 | * hold a channel lock when calling async_tx_run_dependencies. | ||
154 | */ | 147 | */ |
155 | enum submit_disposition { | 148 | enum submit_disposition { |
156 | ASYNC_TX_SUBMITTED, | 149 | ASYNC_TX_SUBMITTED, |
@@ -160,11 +153,12 @@ enum submit_disposition { | |||
160 | 153 | ||
161 | void | 154 | void |
162 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | 155 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, |
163 | enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, | 156 | struct async_submit_ctl *submit) |
164 | dma_async_tx_callback cb_fn, void *cb_param) | ||
165 | { | 157 | { |
166 | tx->callback = cb_fn; | 158 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; |
167 | tx->callback_param = cb_param; | 159 | |
160 | tx->callback = submit->cb_fn; | ||
161 | tx->callback_param = submit->cb_param; | ||
168 | 162 | ||
169 | if (depend_tx) { | 163 | if (depend_tx) { |
170 | enum submit_disposition s; | 164 | enum submit_disposition s; |
@@ -175,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
175 | * 2/ dependencies are 1:1 i.e. two transactions can | 169 | * 2/ dependencies are 1:1 i.e. two transactions can |
176 | * not depend on the same parent | 170 | * not depend on the same parent |
177 | */ | 171 | */ |
178 | BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || | 172 | BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || |
179 | tx->parent); | 173 | txd_parent(tx)); |
180 | 174 | ||
181 | /* the lock prevents async_tx_run_dependencies from missing | 175 | /* the lock prevents async_tx_run_dependencies from missing |
182 | * the setting of ->next when ->parent != NULL | 176 | * the setting of ->next when ->parent != NULL |
183 | */ | 177 | */ |
184 | spin_lock_bh(&depend_tx->lock); | 178 | txd_lock(depend_tx); |
185 | if (depend_tx->parent) { | 179 | if (txd_parent(depend_tx)) { |
186 | /* we have a parent so we can not submit directly | 180 | /* we have a parent so we can not submit directly |
187 | * if we are staying on the same channel: append | 181 | * if we are staying on the same channel: append |
188 | * else: channel switch | 182 | * else: channel switch |
189 | */ | 183 | */ |
190 | if (depend_tx->chan == chan) { | 184 | if (depend_tx->chan == chan) { |
191 | tx->parent = depend_tx; | 185 | txd_chain(depend_tx, tx); |
192 | depend_tx->next = tx; | ||
193 | s = ASYNC_TX_SUBMITTED; | 186 | s = ASYNC_TX_SUBMITTED; |
194 | } else | 187 | } else |
195 | s = ASYNC_TX_CHANNEL_SWITCH; | 188 | s = ASYNC_TX_CHANNEL_SWITCH; |
@@ -202,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
202 | else | 195 | else |
203 | s = ASYNC_TX_CHANNEL_SWITCH; | 196 | s = ASYNC_TX_CHANNEL_SWITCH; |
204 | } | 197 | } |
205 | spin_unlock_bh(&depend_tx->lock); | 198 | txd_unlock(depend_tx); |
206 | 199 | ||
207 | switch (s) { | 200 | switch (s) { |
208 | case ASYNC_TX_SUBMITTED: | 201 | case ASYNC_TX_SUBMITTED: |
@@ -211,39 +204,38 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | |||
211 | async_tx_channel_switch(depend_tx, tx); | 204 | async_tx_channel_switch(depend_tx, tx); |
212 | break; | 205 | break; |
213 | case ASYNC_TX_DIRECT_SUBMIT: | 206 | case ASYNC_TX_DIRECT_SUBMIT: |
214 | tx->parent = NULL; | 207 | txd_clear_parent(tx); |
215 | tx->tx_submit(tx); | 208 | tx->tx_submit(tx); |
216 | break; | 209 | break; |
217 | } | 210 | } |
218 | } else { | 211 | } else { |
219 | tx->parent = NULL; | 212 | txd_clear_parent(tx); |
220 | tx->tx_submit(tx); | 213 | tx->tx_submit(tx); |
221 | } | 214 | } |
222 | 215 | ||
223 | if (flags & ASYNC_TX_ACK) | 216 | if (submit->flags & ASYNC_TX_ACK) |
224 | async_tx_ack(tx); | 217 | async_tx_ack(tx); |
225 | 218 | ||
226 | if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) | 219 | if (depend_tx) |
227 | async_tx_ack(depend_tx); | 220 | async_tx_ack(depend_tx); |
228 | } | 221 | } |
229 | EXPORT_SYMBOL_GPL(async_tx_submit); | 222 | EXPORT_SYMBOL_GPL(async_tx_submit); |
230 | 223 | ||
231 | /** | 224 | /** |
232 | * async_trigger_callback - schedules the callback function to be run after | 225 | * async_trigger_callback - schedules the callback function to be run |
233 | * any dependent operations have been completed. | 226 | * @submit: submission and completion parameters |
234 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 227 | * |
235 | * @depend_tx: 'callback' requires the completion of this transaction | 228 | * honored flags: ASYNC_TX_ACK |
236 | * @cb_fn: function to call after depend_tx completes | 229 | * |
237 | * @cb_param: parameter to pass to the callback routine | 230 | * The callback is run after any dependent operations have completed. |
238 | */ | 231 | */ |
239 | struct dma_async_tx_descriptor * | 232 | struct dma_async_tx_descriptor * |
240 | async_trigger_callback(enum async_tx_flags flags, | 233 | async_trigger_callback(struct async_submit_ctl *submit) |
241 | struct dma_async_tx_descriptor *depend_tx, | ||
242 | dma_async_tx_callback cb_fn, void *cb_param) | ||
243 | { | 234 | { |
244 | struct dma_chan *chan; | 235 | struct dma_chan *chan; |
245 | struct dma_device *device; | 236 | struct dma_device *device; |
246 | struct dma_async_tx_descriptor *tx; | 237 | struct dma_async_tx_descriptor *tx; |
238 | struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; | ||
247 | 239 | ||
248 | if (depend_tx) { | 240 | if (depend_tx) { |
249 | chan = depend_tx->chan; | 241 | chan = depend_tx->chan; |
@@ -262,14 +254,14 @@ async_trigger_callback(enum async_tx_flags flags, | |||
262 | if (tx) { | 254 | if (tx) { |
263 | pr_debug("%s: (async)\n", __func__); | 255 | pr_debug("%s: (async)\n", __func__); |
264 | 256 | ||
265 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 257 | async_tx_submit(chan, tx, submit); |
266 | } else { | 258 | } else { |
267 | pr_debug("%s: (sync)\n", __func__); | 259 | pr_debug("%s: (sync)\n", __func__); |
268 | 260 | ||
269 | /* wait for any prerequisite operations */ | 261 | /* wait for any prerequisite operations */ |
270 | async_tx_quiesce(&depend_tx); | 262 | async_tx_quiesce(&submit->depend_tx); |
271 | 263 | ||
272 | async_tx_sync_epilog(cb_fn, cb_param); | 264 | async_tx_sync_epilog(submit); |
273 | } | 265 | } |
274 | 266 | ||
275 | return tx; | 267 | return tx; |
@@ -295,9 +287,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |||
295 | } | 287 | } |
296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); | 288 | EXPORT_SYMBOL_GPL(async_tx_quiesce); |
297 | 289 | ||
298 | module_init(async_tx_init); | ||
299 | module_exit(async_tx_exit); | ||
300 | |||
301 | MODULE_AUTHOR("Intel Corporation"); | 290 | MODULE_AUTHOR("Intel Corporation"); |
302 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); | 291 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); |
303 | MODULE_LICENSE("GPL"); | 292 | MODULE_LICENSE("GPL"); |
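The hunks above fold the old flags/depend_tx/cb_fn/cb_param argument lists into a single struct async_submit_ctl. A sketch of the resulting calling convention for async_trigger_callback(), where done_fn and arg come from a hypothetical caller:

/* run done_fn(arg) once 'tx' and everything it depends on has completed */
static void notify_when_done(struct dma_async_tx_descriptor *tx,
			     dma_async_tx_callback done_fn, void *arg)
{
	struct async_submit_ctl submit;

	/* a pure callback/interrupt operation needs no scribble space */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, done_fn, arg, NULL);
	tx = async_trigger_callback(&submit);
	async_tx_issue_pending(tx);	/* no-op when the callback already ran */
}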
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 95fe2c8d6c51..079ae8ca590b 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c | |||
@@ -33,55 +33,57 @@ | |||
33 | /* do_async_xor - dma map the pages and perform the xor with an engine */ | 33 | /* do_async_xor - dma map the pages and perform the xor with an engine */ |
34 | static __async_inline struct dma_async_tx_descriptor * | 34 | static __async_inline struct dma_async_tx_descriptor * |
35 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | 35 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, |
36 | unsigned int offset, int src_cnt, size_t len, | 36 | unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, |
37 | enum async_tx_flags flags, | 37 | struct async_submit_ctl *submit) |
38 | struct dma_async_tx_descriptor *depend_tx, | ||
39 | dma_async_tx_callback cb_fn, void *cb_param) | ||
40 | { | 38 | { |
41 | struct dma_device *dma = chan->device; | 39 | struct dma_device *dma = chan->device; |
42 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | ||
43 | struct dma_async_tx_descriptor *tx = NULL; | 40 | struct dma_async_tx_descriptor *tx = NULL; |
44 | int src_off = 0; | 41 | int src_off = 0; |
45 | int i; | 42 | int i; |
46 | dma_async_tx_callback _cb_fn; | 43 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; |
47 | void *_cb_param; | 44 | void *cb_param_orig = submit->cb_param; |
48 | enum async_tx_flags async_flags; | 45 | enum async_tx_flags flags_orig = submit->flags; |
49 | enum dma_ctrl_flags dma_flags; | 46 | enum dma_ctrl_flags dma_flags; |
50 | int xor_src_cnt; | 47 | int xor_src_cnt = 0; |
51 | dma_addr_t dma_dest; | 48 | dma_addr_t dma_dest; |
52 | 49 | ||
53 | /* map the dest bidirectionally in case it is re-used as a source */ | 50 | /* map the dest bidirectionally in case it is re-used as a source */ |
54 | dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); | 51 | dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); |
55 | for (i = 0; i < src_cnt; i++) { | 52 | for (i = 0; i < src_cnt; i++) { |
56 | /* only map the dest once */ | 53 | /* only map the dest once */ |
54 | if (!src_list[i]) | ||
55 | continue; | ||
57 | if (unlikely(src_list[i] == dest)) { | 56 | if (unlikely(src_list[i] == dest)) { |
58 | dma_src[i] = dma_dest; | 57 | dma_src[xor_src_cnt++] = dma_dest; |
59 | continue; | 58 | continue; |
60 | } | 59 | } |
61 | dma_src[i] = dma_map_page(dma->dev, src_list[i], offset, | 60 | dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, |
62 | len, DMA_TO_DEVICE); | 61 | len, DMA_TO_DEVICE); |
63 | } | 62 | } |
63 | src_cnt = xor_src_cnt; | ||
64 | 64 | ||
65 | while (src_cnt) { | 65 | while (src_cnt) { |
66 | async_flags = flags; | 66 | submit->flags = flags_orig; |
67 | dma_flags = 0; | 67 | dma_flags = 0; |
68 | xor_src_cnt = min(src_cnt, dma->max_xor); | 68 | xor_src_cnt = min(src_cnt, (int)dma->max_xor); |
69 | /* if we are submitting additional xors, leave the chain open, | 69 | /* if we are submitting additional xors, leave the chain open, |
70 | * clear the callback parameters, and leave the destination | 70 | * clear the callback parameters, and leave the destination |
71 | * buffer mapped | 71 | * buffer mapped |
72 | */ | 72 | */ |
73 | if (src_cnt > xor_src_cnt) { | 73 | if (src_cnt > xor_src_cnt) { |
74 | async_flags &= ~ASYNC_TX_ACK; | 74 | submit->flags &= ~ASYNC_TX_ACK; |
75 | submit->flags |= ASYNC_TX_FENCE; | ||
75 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; | 76 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; |
76 | _cb_fn = NULL; | 77 | submit->cb_fn = NULL; |
77 | _cb_param = NULL; | 78 | submit->cb_param = NULL; |
78 | } else { | 79 | } else { |
79 | _cb_fn = cb_fn; | 80 | submit->cb_fn = cb_fn_orig; |
80 | _cb_param = cb_param; | 81 | submit->cb_param = cb_param_orig; |
81 | } | 82 | } |
82 | if (_cb_fn) | 83 | if (submit->cb_fn) |
83 | dma_flags |= DMA_PREP_INTERRUPT; | 84 | dma_flags |= DMA_PREP_INTERRUPT; |
84 | 85 | if (submit->flags & ASYNC_TX_FENCE) | |
86 | dma_flags |= DMA_PREP_FENCE; | ||
85 | /* Since we have clobbered the src_list we are committed | 87 | /* Since we have clobbered the src_list we are committed |
86 | * to doing this asynchronously. Drivers force forward progress | 88 | * to doing this asynchronously. Drivers force forward progress |
87 | * in case they can not provide a descriptor | 89 | * in case they can not provide a descriptor |
@@ -90,7 +92,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
90 | xor_src_cnt, len, dma_flags); | 92 | xor_src_cnt, len, dma_flags); |
91 | 93 | ||
92 | if (unlikely(!tx)) | 94 | if (unlikely(!tx)) |
93 | async_tx_quiesce(&depend_tx); | 95 | async_tx_quiesce(&submit->depend_tx); |
94 | 96 | ||
95 | /* spin wait for the preceding transactions to complete */ | 97 | /* spin wait for the preceding transactions to complete */ |
96 | while (unlikely(!tx)) { | 98 | while (unlikely(!tx)) { |
@@ -101,11 +103,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
101 | dma_flags); | 103 | dma_flags); |
102 | } | 104 | } |
103 | 105 | ||
104 | async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, | 106 | async_tx_submit(chan, tx, submit); |
105 | _cb_param); | 107 | submit->depend_tx = tx; |
106 | |||
107 | depend_tx = tx; | ||
108 | flags |= ASYNC_TX_DEP_ACK; | ||
109 | 108 | ||
110 | if (src_cnt > xor_src_cnt) { | 109 | if (src_cnt > xor_src_cnt) { |
111 | /* drop completed sources */ | 110 | /* drop completed sources */ |
@@ -124,23 +123,28 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
124 | 123 | ||
125 | static void | 124 | static void |
126 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | 125 | do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, |
127 | int src_cnt, size_t len, enum async_tx_flags flags, | 126 | int src_cnt, size_t len, struct async_submit_ctl *submit) |
128 | dma_async_tx_callback cb_fn, void *cb_param) | ||
129 | { | 127 | { |
130 | int i; | 128 | int i; |
131 | int xor_src_cnt; | 129 | int xor_src_cnt = 0; |
132 | int src_off = 0; | 130 | int src_off = 0; |
133 | void *dest_buf; | 131 | void *dest_buf; |
134 | void **srcs = (void **) src_list; | 132 | void **srcs; |
135 | 133 | ||
136 | /* reuse the 'src_list' array to convert to buffer pointers */ | 134 | if (submit->scribble) |
137 | for (i = 0; i < src_cnt; i++) | 135 | srcs = submit->scribble; |
138 | srcs[i] = page_address(src_list[i]) + offset; | 136 | else |
137 | srcs = (void **) src_list; | ||
139 | 138 | ||
139 | /* convert to buffer pointers */ | ||
140 | for (i = 0; i < src_cnt; i++) | ||
141 | if (src_list[i]) | ||
142 | srcs[xor_src_cnt++] = page_address(src_list[i]) + offset; | ||
143 | src_cnt = xor_src_cnt; | ||
140 | /* set destination address */ | 144 | /* set destination address */ |
141 | dest_buf = page_address(dest) + offset; | 145 | dest_buf = page_address(dest) + offset; |
142 | 146 | ||
143 | if (flags & ASYNC_TX_XOR_ZERO_DST) | 147 | if (submit->flags & ASYNC_TX_XOR_ZERO_DST) |
144 | memset(dest_buf, 0, len); | 148 | memset(dest_buf, 0, len); |
145 | 149 | ||
146 | while (src_cnt > 0) { | 150 | while (src_cnt > 0) { |
@@ -153,61 +157,70 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, | |||
153 | src_off += xor_src_cnt; | 157 | src_off += xor_src_cnt; |
154 | } | 158 | } |
155 | 159 | ||
156 | async_tx_sync_epilog(cb_fn, cb_param); | 160 | async_tx_sync_epilog(submit); |
157 | } | 161 | } |
158 | 162 | ||
159 | /** | 163 | /** |
160 | * async_xor - attempt to xor a set of blocks with a dma engine. | 164 | * async_xor - attempt to xor a set of blocks with a dma engine. |
161 | * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST | ||
162 | * flag must be set to not include dest data in the calculation. The | ||
163 | * assumption with dma eninges is that they only use the destination | ||
164 | * buffer as a source when it is explicity specified in the source list. | ||
165 | * @dest: destination page | 165 | * @dest: destination page |
166 | * @src_list: array of source pages (if the dest is also a source it must be | 166 | * @src_list: array of source pages |
167 | * at index zero). The contents of this array may be overwritten. | 167 | * @offset: common src/dst offset to start transaction |
168 | * @offset: offset in pages to start transaction | ||
169 | * @src_cnt: number of source pages | 168 | * @src_cnt: number of source pages |
170 | * @len: length in bytes | 169 | * @len: length in bytes |
171 | * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, | 170 | * @submit: submission / completion modifiers |
172 | * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 171 | * |
173 | * @depend_tx: xor depends on the result of this transaction. | 172 | * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST |
174 | * @cb_fn: function to call when the xor completes | 173 | * |
175 | * @cb_param: parameter to pass to the callback routine | 174 | * xor_blocks always uses the dest as a source so the |
175 | * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in | ||
176 | * the calculation. The assumption with dma engines is that they only | ||
177 | * use the destination buffer as a source when it is explicitly specified | ||
178 | * in the source list. | ||
179 | * | ||
180 | * src_list note: if the dest is also a source it must be at index zero. | ||
181 | * The contents of this array will be overwritten if a scribble region | ||
182 | * is not specified. | ||
176 | */ | 183 | */ |
177 | struct dma_async_tx_descriptor * | 184 | struct dma_async_tx_descriptor * |
178 | async_xor(struct page *dest, struct page **src_list, unsigned int offset, | 185 | async_xor(struct page *dest, struct page **src_list, unsigned int offset, |
179 | int src_cnt, size_t len, enum async_tx_flags flags, | 186 | int src_cnt, size_t len, struct async_submit_ctl *submit) |
180 | struct dma_async_tx_descriptor *depend_tx, | ||
181 | dma_async_tx_callback cb_fn, void *cb_param) | ||
182 | { | 187 | { |
183 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, | 188 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, |
184 | &dest, 1, src_list, | 189 | &dest, 1, src_list, |
185 | src_cnt, len); | 190 | src_cnt, len); |
191 | dma_addr_t *dma_src = NULL; | ||
192 | |||
186 | BUG_ON(src_cnt <= 1); | 193 | BUG_ON(src_cnt <= 1); |
187 | 194 | ||
188 | if (chan) { | 195 | if (submit->scribble) |
196 | dma_src = submit->scribble; | ||
197 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
198 | dma_src = (dma_addr_t *) src_list; | ||
199 | |||
200 | if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { | ||
189 | /* run the xor asynchronously */ | 201 | /* run the xor asynchronously */ |
190 | pr_debug("%s (async): len: %zu\n", __func__, len); | 202 | pr_debug("%s (async): len: %zu\n", __func__, len); |
191 | 203 | ||
192 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, | 204 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, |
193 | flags, depend_tx, cb_fn, cb_param); | 205 | dma_src, submit); |
194 | } else { | 206 | } else { |
195 | /* run the xor synchronously */ | 207 | /* run the xor synchronously */ |
196 | pr_debug("%s (sync): len: %zu\n", __func__, len); | 208 | pr_debug("%s (sync): len: %zu\n", __func__, len); |
209 | WARN_ONCE(chan, "%s: no space for dma address conversion\n", | ||
210 | __func__); | ||
197 | 211 | ||
198 | /* in the sync case the dest is an implied source | 212 | /* in the sync case the dest is an implied source |
199 | * (assumes the dest is the first source) | 213 | * (assumes the dest is the first source) |
200 | */ | 214 | */ |
201 | if (flags & ASYNC_TX_XOR_DROP_DST) { | 215 | if (submit->flags & ASYNC_TX_XOR_DROP_DST) { |
202 | src_cnt--; | 216 | src_cnt--; |
203 | src_list++; | 217 | src_list++; |
204 | } | 218 | } |
205 | 219 | ||
206 | /* wait for any prerequisite operations */ | 220 | /* wait for any prerequisite operations */ |
207 | async_tx_quiesce(&depend_tx); | 221 | async_tx_quiesce(&submit->depend_tx); |
208 | 222 | ||
209 | do_sync_xor(dest, src_list, offset, src_cnt, len, | 223 | do_sync_xor(dest, src_list, offset, src_cnt, len, submit); |
210 | flags, cb_fn, cb_param); | ||
211 | 224 | ||
212 | return NULL; | 225 | return NULL; |
213 | } | 226 | } |
@@ -221,105 +234,104 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) | |||
221 | memcmp(a, a + 4, len - 4) == 0); | 234 | memcmp(a, a + 4, len - 4) == 0); |
222 | } | 235 | } |
223 | 236 | ||
237 | static inline struct dma_chan * | ||
238 | xor_val_chan(struct async_submit_ctl *submit, struct page *dest, | ||
239 | struct page **src_list, int src_cnt, size_t len) | ||
240 | { | ||
241 | #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA | ||
242 | return NULL; | ||
243 | #endif | ||
244 | return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list, | ||
245 | src_cnt, len); | ||
246 | } | ||
247 | |||
224 | /** | 248 | /** |
225 | * async_xor_zero_sum - attempt a xor parity check with a dma engine. | 249 | * async_xor_val - attempt a xor parity check with a dma engine. |
226 | * @dest: destination page used if the xor is performed synchronously | 250 | * @dest: destination page used if the xor is performed synchronously |
227 | * @src_list: array of source pages. The dest page must be listed as a source | 251 | * @src_list: array of source pages |
228 | * at index zero. The contents of this array may be overwritten. | ||
229 | * @offset: offset in pages to start transaction | 252 | * @offset: offset in pages to start transaction |
230 | * @src_cnt: number of source pages | 253 | * @src_cnt: number of source pages |
231 | * @len: length in bytes | 254 | * @len: length in bytes |
232 | * @result: 0 if sum == 0 else non-zero | 255 | * @result: 0 if sum == 0 else non-zero |
233 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 256 | * @submit: submission / completion modifiers |
234 | * @depend_tx: xor depends on the result of this transaction. | 257 | * |
235 | * @cb_fn: function to call when the xor completes | 258 | * honored flags: ASYNC_TX_ACK |
236 | * @cb_param: parameter to pass to the callback routine | 259 | * |
260 | * src_list note: if the dest is also a source it must be at index zero. | ||
261 | * The contents of this array will be overwritten if a scribble region | ||
262 | * is not specified. | ||
237 | */ | 263 | */ |
238 | struct dma_async_tx_descriptor * | 264 | struct dma_async_tx_descriptor * |
239 | async_xor_zero_sum(struct page *dest, struct page **src_list, | 265 | async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, |
240 | unsigned int offset, int src_cnt, size_t len, | 266 | int src_cnt, size_t len, enum sum_check_flags *result, |
241 | u32 *result, enum async_tx_flags flags, | 267 | struct async_submit_ctl *submit) |
242 | struct dma_async_tx_descriptor *depend_tx, | ||
243 | dma_async_tx_callback cb_fn, void *cb_param) | ||
244 | { | 268 | { |
245 | struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM, | 269 | struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); |
246 | &dest, 1, src_list, | ||
247 | src_cnt, len); | ||
248 | struct dma_device *device = chan ? chan->device : NULL; | 270 | struct dma_device *device = chan ? chan->device : NULL; |
249 | struct dma_async_tx_descriptor *tx = NULL; | 271 | struct dma_async_tx_descriptor *tx = NULL; |
272 | dma_addr_t *dma_src = NULL; | ||
250 | 273 | ||
251 | BUG_ON(src_cnt <= 1); | 274 | BUG_ON(src_cnt <= 1); |
252 | 275 | ||
253 | if (device && src_cnt <= device->max_xor) { | 276 | if (submit->scribble) |
254 | dma_addr_t *dma_src = (dma_addr_t *) src_list; | 277 | dma_src = submit->scribble; |
255 | unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; | 278 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) |
279 | dma_src = (dma_addr_t *) src_list; | ||
280 | |||
281 | if (dma_src && device && src_cnt <= device->max_xor && | ||
282 | is_dma_xor_aligned(device, offset, 0, len)) { | ||
283 | unsigned long dma_prep_flags = 0; | ||
256 | int i; | 284 | int i; |
257 | 285 | ||
258 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 286 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
259 | 287 | ||
288 | if (submit->cb_fn) | ||
289 | dma_prep_flags |= DMA_PREP_INTERRUPT; | ||
290 | if (submit->flags & ASYNC_TX_FENCE) | ||
291 | dma_prep_flags |= DMA_PREP_FENCE; | ||
260 | for (i = 0; i < src_cnt; i++) | 292 | for (i = 0; i < src_cnt; i++) |
261 | dma_src[i] = dma_map_page(device->dev, src_list[i], | 293 | dma_src[i] = dma_map_page(device->dev, src_list[i], |
262 | offset, len, DMA_TO_DEVICE); | 294 | offset, len, DMA_TO_DEVICE); |
263 | 295 | ||
264 | tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, | 296 | tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, |
265 | len, result, | 297 | len, result, |
266 | dma_prep_flags); | 298 | dma_prep_flags); |
267 | if (unlikely(!tx)) { | 299 | if (unlikely(!tx)) { |
268 | async_tx_quiesce(&depend_tx); | 300 | async_tx_quiesce(&submit->depend_tx); |
269 | 301 | ||
270 | while (!tx) { | 302 | while (!tx) { |
271 | dma_async_issue_pending(chan); | 303 | dma_async_issue_pending(chan); |
272 | tx = device->device_prep_dma_zero_sum(chan, | 304 | tx = device->device_prep_dma_xor_val(chan, |
273 | dma_src, src_cnt, len, result, | 305 | dma_src, src_cnt, len, result, |
274 | dma_prep_flags); | 306 | dma_prep_flags); |
275 | } | 307 | } |
276 | } | 308 | } |
277 | 309 | ||
278 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 310 | async_tx_submit(chan, tx, submit); |
279 | } else { | 311 | } else { |
280 | unsigned long xor_flags = flags; | 312 | enum async_tx_flags flags_orig = submit->flags; |
281 | 313 | ||
282 | pr_debug("%s: (sync) len: %zu\n", __func__, len); | 314 | pr_debug("%s: (sync) len: %zu\n", __func__, len); |
315 | WARN_ONCE(device && src_cnt <= device->max_xor, | ||
316 | "%s: no space for dma address conversion\n", | ||
317 | __func__); | ||
283 | 318 | ||
284 | xor_flags |= ASYNC_TX_XOR_DROP_DST; | 319 | submit->flags |= ASYNC_TX_XOR_DROP_DST; |
285 | xor_flags &= ~ASYNC_TX_ACK; | 320 | submit->flags &= ~ASYNC_TX_ACK; |
286 | 321 | ||
287 | tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, | 322 | tx = async_xor(dest, src_list, offset, src_cnt, len, submit); |
288 | depend_tx, NULL, NULL); | ||
289 | 323 | ||
290 | async_tx_quiesce(&tx); | 324 | async_tx_quiesce(&tx); |
291 | 325 | ||
292 | *result = page_is_zero(dest, offset, len) ? 0 : 1; | 326 | *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P; |
293 | 327 | ||
294 | async_tx_sync_epilog(cb_fn, cb_param); | 328 | async_tx_sync_epilog(submit); |
329 | submit->flags = flags_orig; | ||
295 | } | 330 | } |
296 | 331 | ||
297 | return tx; | 332 | return tx; |
298 | } | 333 | } |
299 | EXPORT_SYMBOL_GPL(async_xor_zero_sum); | 334 | EXPORT_SYMBOL_GPL(async_xor_val); |
300 | |||
301 | static int __init async_xor_init(void) | ||
302 | { | ||
303 | #ifdef CONFIG_DMA_ENGINE | ||
304 | /* To conserve stack space the input src_list (array of page pointers) | ||
305 | * is reused to hold the array of dma addresses passed to the driver. | ||
306 | * This conversion is only possible when dma_addr_t is less than the | ||
307 | * the size of a pointer. HIGHMEM64G is known to violate this | ||
308 | * assumption. | ||
309 | */ | ||
310 | BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *)); | ||
311 | #endif | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | static void __exit async_xor_exit(void) | ||
317 | { | ||
318 | do { } while (0); | ||
319 | } | ||
320 | |||
321 | module_init(async_xor_init); | ||
322 | module_exit(async_xor_exit); | ||
323 | 335 | ||
324 | MODULE_AUTHOR("Intel Corporation"); | 336 | MODULE_AUTHOR("Intel Corporation"); |
325 | MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); | 337 | MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); |
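A short usage sketch for the renamed validation entry point; per the src_list note above, the page being checked also sits at index zero of the source array, and check_parity/scribble are illustrative names (scribble must provide src_cnt entries for the dma address conversion). Generating parity in the first place is the same pattern with async_xor() and ASYNC_TX_XOR_ZERO_DST.

/* check that parity page 'p' equals the xor of the data pages;
 * srcs[0] is p itself, srcs[1..src_cnt-1] are the data pages
 */
static int check_parity(struct page *p, struct page **srcs, int src_cnt,
			size_t len, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	enum sum_check_flags result = 0;

	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
	tx = async_xor_val(p, srcs, 0, src_cnt, len, &result, &submit);
	async_tx_issue_pending(tx);
	async_tx_quiesce(&tx);

	return (result & (1 << SUM_CHECK_P)) ? -EIO : 0;
}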
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c new file mode 100644 index 000000000000..c1321935ebcc --- /dev/null +++ b/crypto/async_tx/raid6test.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * asynchronous raid6 recovery self test | ||
3 | * Copyright (c) 2009, Intel Corporation. | ||
4 | * | ||
5 | * based on drivers/md/raid6test/test.c: | ||
6 | * Copyright 2002-2007 H. Peter Anvin | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms and conditions of the GNU General Public License, | ||
10 | * version 2, as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | * | ||
21 | */ | ||
22 | #include <linux/async_tx.h> | ||
23 | #include <linux/gfp.h> | ||
24 | #include <linux/random.h> | ||
25 | |||
26 | #undef pr | ||
27 | #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) | ||
28 | |||
29 | #define NDISKS 16 /* Including P and Q */ | ||
30 | |||
31 | static struct page *dataptrs[NDISKS]; | ||
32 | static addr_conv_t addr_conv[NDISKS]; | ||
33 | static struct page *data[NDISKS+3]; | ||
34 | static struct page *spare; | ||
35 | static struct page *recovi; | ||
36 | static struct page *recovj; | ||
37 | |||
38 | static void callback(void *param) | ||
39 | { | ||
40 | struct completion *cmp = param; | ||
41 | |||
42 | complete(cmp); | ||
43 | } | ||
44 | |||
45 | static void makedata(int disks) | ||
46 | { | ||
47 | int i, j; | ||
48 | |||
49 | for (i = 0; i < disks; i++) { | ||
50 | for (j = 0; j < PAGE_SIZE/sizeof(u32); j += sizeof(u32)) { | ||
51 | u32 *p = page_address(data[i]) + j; | ||
52 | |||
53 | *p = random32(); | ||
54 | } | ||
55 | |||
56 | dataptrs[i] = data[i]; | ||
57 | } | ||
58 | } | ||
59 | |||
60 | static char disk_type(int d, int disks) | ||
61 | { | ||
62 | if (d == disks - 2) | ||
63 | return 'P'; | ||
64 | else if (d == disks - 1) | ||
65 | return 'Q'; | ||
66 | else | ||
67 | return 'D'; | ||
68 | } | ||
69 | |||
70 | /* Recover two failed blocks. */ | ||
71 | static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs) | ||
72 | { | ||
73 | struct async_submit_ctl submit; | ||
74 | struct completion cmp; | ||
75 | struct dma_async_tx_descriptor *tx = NULL; | ||
76 | enum sum_check_flags result = ~0; | ||
77 | |||
78 | if (faila > failb) | ||
79 | swap(faila, failb); | ||
80 | |||
81 | if (failb == disks-1) { | ||
82 | if (faila == disks-2) { | ||
83 | /* P+Q failure. Just rebuild the syndrome. */ | ||
84 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
85 | tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); | ||
86 | } else { | ||
87 | struct page *blocks[disks]; | ||
88 | struct page *dest; | ||
89 | int count = 0; | ||
90 | int i; | ||
91 | |||
92 | /* data+Q failure. Reconstruct data from P, | ||
93 | * then rebuild syndrome | ||
94 | */ | ||
95 | for (i = disks; i-- ; ) { | ||
96 | if (i == faila || i == failb) | ||
97 | continue; | ||
98 | blocks[count++] = ptrs[i]; | ||
99 | } | ||
100 | dest = ptrs[faila]; | ||
101 | init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, | ||
102 | NULL, NULL, addr_conv); | ||
103 | tx = async_xor(dest, blocks, 0, count, bytes, &submit); | ||
104 | |||
105 | init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); | ||
106 | tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); | ||
107 | } | ||
108 | } else { | ||
109 | if (failb == disks-2) { | ||
110 | /* data+P failure. */ | ||
111 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
112 | tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); | ||
113 | } else { | ||
114 | /* data+data failure. */ | ||
115 | init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); | ||
116 | tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit); | ||
117 | } | ||
118 | } | ||
119 | init_completion(&cmp); | ||
120 | init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv); | ||
121 | tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit); | ||
122 | async_tx_issue_pending(tx); | ||
123 | |||
124 | if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) | ||
125 | pr("%s: timeout! (faila: %d failb: %d disks: %d)\n", | ||
126 | __func__, faila, failb, disks); | ||
127 | |||
128 | if (result != 0) | ||
129 | pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n", | ||
130 | __func__, faila, failb, result); | ||
131 | } | ||
132 | |||
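When a finer report than the raw sum_check_flags value is useful, the result can be split into its P and Q components (the same bits async_syndrome_val() sets); a small helper sketch using the file's pr() macro:

static void report_pq(enum sum_check_flags result, int faila, int failb)
{
	if (result & (1 << SUM_CHECK_P))
		pr("P mismatch (faila: %d failb: %d)\n", faila, failb);
	if (result & (1 << SUM_CHECK_Q))
		pr("Q mismatch (faila: %d failb: %d)\n", faila, failb);
}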
133 | static int test_disks(int i, int j, int disks) | ||
134 | { | ||
135 | int erra, errb; | ||
136 | |||
137 | memset(page_address(recovi), 0xf0, PAGE_SIZE); | ||
138 | memset(page_address(recovj), 0xba, PAGE_SIZE); | ||
139 | |||
140 | dataptrs[i] = recovi; | ||
141 | dataptrs[j] = recovj; | ||
142 | |||
143 | raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs); | ||
144 | |||
145 | erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); | ||
146 | errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); | ||
147 | |||
148 | pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n", | ||
149 | __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks), | ||
150 | (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB"); | ||
151 | |||
152 | dataptrs[i] = data[i]; | ||
153 | dataptrs[j] = data[j]; | ||
154 | |||
155 | return erra || errb; | ||
156 | } | ||
157 | |||
158 | static int test(int disks, int *tests) | ||
159 | { | ||
160 | struct dma_async_tx_descriptor *tx; | ||
161 | struct async_submit_ctl submit; | ||
162 | struct completion cmp; | ||
163 | int err = 0; | ||
164 | int i, j; | ||
165 | |||
166 | recovi = data[disks]; | ||
167 | recovj = data[disks+1]; | ||
168 | spare = data[disks+2]; | ||
169 | |||
170 | makedata(disks); | ||
171 | |||
172 | /* Nuke syndromes */ | ||
173 | memset(page_address(data[disks-2]), 0xee, PAGE_SIZE); | ||
174 | memset(page_address(data[disks-1]), 0xee, PAGE_SIZE); | ||
175 | |||
176 | /* Generate assumed good syndrome */ | ||
177 | init_completion(&cmp); | ||
178 | init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv); | ||
179 | tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit); | ||
180 | async_tx_issue_pending(tx); | ||
181 | |||
182 | if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) { | ||
183 | pr("error: initial gen_syndrome(%d) timed out\n", disks); | ||
184 | return 1; | ||
185 | } | ||
186 | |||
187 | pr("testing the %d-disk case...\n", disks); | ||
188 | for (i = 0; i < disks-1; i++) | ||
189 | for (j = i+1; j < disks; j++) { | ||
190 | (*tests)++; | ||
191 | err += test_disks(i, j, disks); | ||
192 | } | ||
193 | |||
194 | return err; | ||
195 | } | ||
196 | |||
197 | |||
198 | static int raid6_test(void) | ||
199 | { | ||
200 | int err = 0; | ||
201 | int tests = 0; | ||
202 | int i; | ||
203 | |||
204 | for (i = 0; i < NDISKS+3; i++) { | ||
205 | data[i] = alloc_page(GFP_KERNEL); | ||
206 | if (!data[i]) { | ||
207 | while (i--) | ||
208 | put_page(data[i]); | ||
209 | return -ENOMEM; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | /* the 4-disk and 5-disk cases are special for the recovery code */ | ||
214 | if (NDISKS > 4) | ||
215 | err += test(4, &tests); | ||
216 | if (NDISKS > 5) | ||
217 | err += test(5, &tests); | ||
218 | /* the 11 and 12 disk cases are special for ioatdma (p-disabled | ||
219 | * q-continuation without extended descriptor) | ||
220 | */ | ||
221 | if (NDISKS > 12) { | ||
222 | err += test(11, &tests); | ||
223 | err += test(12, &tests); | ||
224 | } | ||
225 | err += test(NDISKS, &tests); | ||
226 | |||
227 | pr("\n"); | ||
228 | pr("complete (%d tests, %d failure%s)\n", | ||
229 | tests, err, err == 1 ? "" : "s"); | ||
230 | |||
231 | for (i = 0; i < NDISKS+3; i++) | ||
232 | put_page(data[i]); | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static void raid6_test_exit(void) | ||
238 | { | ||
239 | } | ||
240 | |||
241 | /* when compiled-in, wait for drivers to load first (assumes dma drivers | ||
242 | * are also compiled-in) | ||
243 | */ | ||
244 | late_initcall(raid6_test); | ||
245 | module_exit(raid6_test_exit); | ||
246 | MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>"); | ||
247 | MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests"); | ||
248 | MODULE_LICENSE("GPL"); | ||
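The recovery path taken by raid6_dual_recov() above is selected purely by whether the two failed slots land on the P (disks-2) and Q (disks-1) positions. As a reading aid only, a stand-alone sketch of that dispatch (the enum and function names are illustrative, not part of the patch):

/* Mirrors the branch structure of raid6_dual_recov(); assumes
 * faila < failb, as the function swaps them first. */
enum recov_path { RECOV_PQ, RECOV_DATA_Q, RECOV_DATA_P, RECOV_DATA_DATA };

static enum recov_path classify_failure(int disks, int faila, int failb)
{
	if (failb == disks - 1)		/* Q is one of the failures */
		return faila == disks - 2 ? RECOV_PQ : RECOV_DATA_Q;
	if (failb == disks - 2)		/* P failed alongside a data block */
		return RECOV_DATA_P;
	return RECOV_DATA_DATA;		/* two data blocks failed */
}

RECOV_PQ just regenerates the syndrome, RECOV_DATA_Q rebuilds the data block from P by XOR and then regenerates Q, and the remaining two cases map to async_raid6_datap_recov() and async_raid6_2data_recov(); in every case the result is then checked with async_syndrome_val().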
diff --git a/crypto/authenc.c b/crypto/authenc.c index 5793b64c81a8..a5a22cfcd07b 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -23,24 +23,42 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | 25 | ||
26 | typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags); | ||
27 | |||
26 | struct authenc_instance_ctx { | 28 | struct authenc_instance_ctx { |
27 | struct crypto_spawn auth; | 29 | struct crypto_ahash_spawn auth; |
28 | struct crypto_skcipher_spawn enc; | 30 | struct crypto_skcipher_spawn enc; |
29 | }; | 31 | }; |
30 | 32 | ||
31 | struct crypto_authenc_ctx { | 33 | struct crypto_authenc_ctx { |
32 | spinlock_t auth_lock; | 34 | unsigned int reqoff; |
33 | struct crypto_hash *auth; | 35 | struct crypto_ahash *auth; |
34 | struct crypto_ablkcipher *enc; | 36 | struct crypto_ablkcipher *enc; |
35 | }; | 37 | }; |
36 | 38 | ||
39 | struct authenc_request_ctx { | ||
40 | unsigned int cryptlen; | ||
41 | struct scatterlist *sg; | ||
42 | struct scatterlist asg[2]; | ||
43 | struct scatterlist cipher[2]; | ||
44 | crypto_completion_t complete; | ||
45 | crypto_completion_t update_complete; | ||
46 | char tail[]; | ||
47 | }; | ||
48 | |||
49 | static void authenc_request_complete(struct aead_request *req, int err) | ||
50 | { | ||
51 | if (err != -EINPROGRESS) | ||
52 | aead_request_complete(req, err); | ||
53 | } | ||
54 | |||
37 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | 55 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, |
38 | unsigned int keylen) | 56 | unsigned int keylen) |
39 | { | 57 | { |
40 | unsigned int authkeylen; | 58 | unsigned int authkeylen; |
41 | unsigned int enckeylen; | 59 | unsigned int enckeylen; |
42 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 60 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
43 | struct crypto_hash *auth = ctx->auth; | 61 | struct crypto_ahash *auth = ctx->auth; |
44 | struct crypto_ablkcipher *enc = ctx->enc; | 62 | struct crypto_ablkcipher *enc = ctx->enc; |
45 | struct rtattr *rta = (void *)key; | 63 | struct rtattr *rta = (void *)key; |
46 | struct crypto_authenc_key_param *param; | 64 | struct crypto_authenc_key_param *param; |
@@ -64,11 +82,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
64 | 82 | ||
65 | authkeylen = keylen - enckeylen; | 83 | authkeylen = keylen - enckeylen; |
66 | 84 | ||
67 | crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | 85 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
68 | crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) & | 86 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & |
69 | CRYPTO_TFM_REQ_MASK); | 87 | CRYPTO_TFM_REQ_MASK); |
70 | err = crypto_hash_setkey(auth, key, authkeylen); | 88 | err = crypto_ahash_setkey(auth, key, authkeylen); |
71 | crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) & | 89 | crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & |
72 | CRYPTO_TFM_RES_MASK); | 90 | CRYPTO_TFM_RES_MASK); |
73 | 91 | ||
74 | if (err) | 92 | if (err) |
@@ -103,40 +121,202 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, | |||
103 | sg_mark_end(head); | 121 | sg_mark_end(head); |
104 | } | 122 | } |
105 | 123 | ||
106 | static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, | 124 | static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, |
107 | struct scatterlist *cipher, | 125 | int err) |
108 | unsigned int cryptlen) | 126 | { |
127 | struct aead_request *req = areq->data; | ||
128 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
129 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
130 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
131 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
132 | |||
133 | if (err) | ||
134 | goto out; | ||
135 | |||
136 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
137 | areq_ctx->cryptlen); | ||
138 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
139 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
140 | areq_ctx->complete, req); | ||
141 | |||
142 | err = crypto_ahash_finup(ahreq); | ||
143 | if (err) | ||
144 | goto out; | ||
145 | |||
146 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
147 | areq_ctx->cryptlen, | ||
148 | crypto_aead_authsize(authenc), 1); | ||
149 | |||
150 | out: | ||
151 | authenc_request_complete(req, err); | ||
152 | } | ||
153 | |||
154 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) | ||
155 | { | ||
156 | struct aead_request *req = areq->data; | ||
157 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
158 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
159 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
160 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
161 | |||
162 | if (err) | ||
163 | goto out; | ||
164 | |||
165 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
166 | areq_ctx->cryptlen, | ||
167 | crypto_aead_authsize(authenc), 1); | ||
168 | |||
169 | out: | ||
170 | aead_request_complete(req, err); | ||
171 | } | ||
172 | |||
173 | static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | ||
174 | int err) | ||
175 | { | ||
176 | u8 *ihash; | ||
177 | unsigned int authsize; | ||
178 | struct ablkcipher_request *abreq; | ||
179 | struct aead_request *req = areq->data; | ||
180 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
181 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
182 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
183 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
184 | unsigned int cryptlen = req->cryptlen; | ||
185 | |||
186 | if (err) | ||
187 | goto out; | ||
188 | |||
189 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
190 | areq_ctx->cryptlen); | ||
191 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
192 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
193 | areq_ctx->complete, req); | ||
194 | |||
195 | err = crypto_ahash_finup(ahreq); | ||
196 | if (err) | ||
197 | goto out; | ||
198 | |||
199 | authsize = crypto_aead_authsize(authenc); | ||
200 | cryptlen -= authsize; | ||
201 | ihash = ahreq->result + authsize; | ||
202 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
203 | authsize, 0); | ||
204 | |||
205 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
206 | if (err) | ||
207 | goto out; | ||
208 | |||
209 | abreq = aead_request_ctx(req); | ||
210 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
211 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
212 | req->base.complete, req->base.data); | ||
213 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
214 | cryptlen, req->iv); | ||
215 | |||
216 | err = crypto_ablkcipher_decrypt(abreq); | ||
217 | |||
218 | out: | ||
219 | authenc_request_complete(req, err); | ||
220 | } | ||
221 | |||
222 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, | ||
223 | int err) | ||
109 | { | 224 | { |
225 | u8 *ihash; | ||
226 | unsigned int authsize; | ||
227 | struct ablkcipher_request *abreq; | ||
228 | struct aead_request *req = areq->data; | ||
110 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 229 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
111 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 230 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
112 | struct crypto_hash *auth = ctx->auth; | 231 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
113 | struct hash_desc desc = { | 232 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
114 | .tfm = auth, | 233 | unsigned int cryptlen = req->cryptlen; |
115 | .flags = aead_request_flags(req) & flags, | 234 | |
116 | }; | 235 | if (err) |
117 | u8 *hash = aead_request_ctx(req); | 236 | goto out; |
237 | |||
238 | authsize = crypto_aead_authsize(authenc); | ||
239 | cryptlen -= authsize; | ||
240 | ihash = ahreq->result + authsize; | ||
241 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
242 | authsize, 0); | ||
243 | |||
244 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | ||
245 | if (err) | ||
246 | goto out; | ||
247 | |||
248 | abreq = aead_request_ctx(req); | ||
249 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
250 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
251 | req->base.complete, req->base.data); | ||
252 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
253 | cryptlen, req->iv); | ||
254 | |||
255 | err = crypto_ablkcipher_decrypt(abreq); | ||
256 | |||
257 | out: | ||
258 | authenc_request_complete(req, err); | ||
259 | } | ||
260 | |||
261 | static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) | ||
262 | { | ||
263 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
264 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
265 | struct crypto_ahash *auth = ctx->auth; | ||
266 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
267 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
268 | u8 *hash = areq_ctx->tail; | ||
118 | int err; | 269 | int err; |
119 | 270 | ||
120 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), | 271 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), |
121 | crypto_hash_alignmask(auth) + 1); | 272 | crypto_ahash_alignmask(auth) + 1); |
273 | |||
274 | ahash_request_set_tfm(ahreq, auth); | ||
122 | 275 | ||
123 | spin_lock_bh(&ctx->auth_lock); | 276 | err = crypto_ahash_init(ahreq); |
124 | err = crypto_hash_init(&desc); | ||
125 | if (err) | 277 | if (err) |
126 | goto auth_unlock; | 278 | return ERR_PTR(err); |
279 | |||
280 | ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); | ||
281 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
282 | areq_ctx->update_complete, req); | ||
127 | 283 | ||
128 | err = crypto_hash_update(&desc, req->assoc, req->assoclen); | 284 | err = crypto_ahash_update(ahreq); |
129 | if (err) | 285 | if (err) |
130 | goto auth_unlock; | 286 | return ERR_PTR(err); |
131 | 287 | ||
132 | err = crypto_hash_update(&desc, cipher, cryptlen); | 288 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, |
289 | areq_ctx->cryptlen); | ||
290 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
291 | areq_ctx->complete, req); | ||
292 | |||
293 | err = crypto_ahash_finup(ahreq); | ||
133 | if (err) | 294 | if (err) |
134 | goto auth_unlock; | 295 | return ERR_PTR(err); |
135 | 296 | ||
136 | err = crypto_hash_final(&desc, hash); | 297 | return hash; |
137 | auth_unlock: | 298 | } |
138 | spin_unlock_bh(&ctx->auth_lock); | ||
139 | 299 | ||
300 | static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) | ||
301 | { | ||
302 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
303 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
304 | struct crypto_ahash *auth = ctx->auth; | ||
305 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
306 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
307 | u8 *hash = areq_ctx->tail; | ||
308 | int err; | ||
309 | |||
310 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), | ||
311 | crypto_ahash_alignmask(auth) + 1); | ||
312 | |||
313 | ahash_request_set_tfm(ahreq, auth); | ||
314 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | ||
315 | areq_ctx->cryptlen); | ||
316 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
317 | areq_ctx->complete, req); | ||
318 | |||
319 | err = crypto_ahash_digest(ahreq); | ||
140 | if (err) | 320 | if (err) |
141 | return ERR_PTR(err); | 321 | return ERR_PTR(err); |
142 | 322 | ||
@@ -147,11 +327,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
147 | unsigned int flags) | 327 | unsigned int flags) |
148 | { | 328 | { |
149 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 329 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
330 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
150 | struct scatterlist *dst = req->dst; | 331 | struct scatterlist *dst = req->dst; |
151 | struct scatterlist cipher[2]; | 332 | struct scatterlist *assoc = req->assoc; |
152 | struct page *dstp; | 333 | struct scatterlist *cipher = areq_ctx->cipher; |
334 | struct scatterlist *asg = areq_ctx->asg; | ||
153 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 335 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
154 | unsigned int cryptlen; | 336 | unsigned int cryptlen = req->cryptlen; |
337 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
338 | struct page *dstp; | ||
155 | u8 *vdst; | 339 | u8 *vdst; |
156 | u8 *hash; | 340 | u8 *hash; |
157 | 341 | ||
@@ -163,10 +347,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
163 | sg_set_buf(cipher, iv, ivsize); | 347 | sg_set_buf(cipher, iv, ivsize); |
164 | authenc_chain(cipher, dst, vdst == iv + ivsize); | 348 | authenc_chain(cipher, dst, vdst == iv + ivsize); |
165 | dst = cipher; | 349 | dst = cipher; |
350 | cryptlen += ivsize; | ||
351 | } | ||
352 | |||
353 | if (sg_is_last(assoc)) { | ||
354 | authenc_ahash_fn = crypto_authenc_ahash; | ||
355 | sg_init_table(asg, 2); | ||
356 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
357 | authenc_chain(asg, dst, 0); | ||
358 | dst = asg; | ||
359 | cryptlen += req->assoclen; | ||
166 | } | 360 | } |
167 | 361 | ||
168 | cryptlen = req->cryptlen + ivsize; | 362 | areq_ctx->cryptlen = cryptlen; |
169 | hash = crypto_authenc_hash(req, flags, dst, cryptlen); | 363 | areq_ctx->sg = dst; |
364 | |||
365 | areq_ctx->complete = authenc_geniv_ahash_done; | ||
366 | areq_ctx->update_complete = authenc_geniv_ahash_update_done; | ||
367 | |||
368 | hash = authenc_ahash_fn(req, flags); | ||
170 | if (IS_ERR(hash)) | 369 | if (IS_ERR(hash)) |
171 | return PTR_ERR(hash); | 370 | return PTR_ERR(hash); |
172 | 371 | ||
@@ -190,18 +389,20 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req, | |||
190 | err = crypto_authenc_genicv(areq, iv, 0); | 389 | err = crypto_authenc_genicv(areq, iv, 0); |
191 | } | 390 | } |
192 | 391 | ||
193 | aead_request_complete(areq, err); | 392 | authenc_request_complete(areq, err); |
194 | } | 393 | } |
195 | 394 | ||
196 | static int crypto_authenc_encrypt(struct aead_request *req) | 395 | static int crypto_authenc_encrypt(struct aead_request *req) |
197 | { | 396 | { |
198 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 397 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
199 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 398 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
200 | struct ablkcipher_request *abreq = aead_request_ctx(req); | 399 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
201 | struct crypto_ablkcipher *enc = ctx->enc; | 400 | struct crypto_ablkcipher *enc = ctx->enc; |
202 | struct scatterlist *dst = req->dst; | 401 | struct scatterlist *dst = req->dst; |
203 | unsigned int cryptlen = req->cryptlen; | 402 | unsigned int cryptlen = req->cryptlen; |
204 | u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc); | 403 | struct ablkcipher_request *abreq = (void *)(areq_ctx->tail |
404 | + ctx->reqoff); | ||
405 | u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc); | ||
205 | int err; | 406 | int err; |
206 | 407 | ||
207 | ablkcipher_request_set_tfm(abreq, enc); | 408 | ablkcipher_request_set_tfm(abreq, enc); |
@@ -229,7 +430,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, | |||
229 | err = crypto_authenc_genicv(areq, greq->giv, 0); | 430 | err = crypto_authenc_genicv(areq, greq->giv, 0); |
230 | } | 431 | } |
231 | 432 | ||
232 | aead_request_complete(areq, err); | 433 | authenc_request_complete(areq, err); |
233 | } | 434 | } |
234 | 435 | ||
235 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | 436 | static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) |
@@ -256,33 +457,40 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | |||
256 | } | 457 | } |
257 | 458 | ||
258 | static int crypto_authenc_verify(struct aead_request *req, | 459 | static int crypto_authenc_verify(struct aead_request *req, |
259 | struct scatterlist *cipher, | 460 | authenc_ahash_t authenc_ahash_fn) |
260 | unsigned int cryptlen) | ||
261 | { | 461 | { |
262 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 462 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
463 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
263 | u8 *ohash; | 464 | u8 *ohash; |
264 | u8 *ihash; | 465 | u8 *ihash; |
265 | unsigned int authsize; | 466 | unsigned int authsize; |
266 | 467 | ||
267 | ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher, | 468 | areq_ctx->complete = authenc_verify_ahash_done; |
268 | cryptlen); | 469 | areq_ctx->update_complete = authenc_verify_ahash_update_done; |
470 | |||
471 | ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
269 | if (IS_ERR(ohash)) | 472 | if (IS_ERR(ohash)) |
270 | return PTR_ERR(ohash); | 473 | return PTR_ERR(ohash); |
271 | 474 | ||
272 | authsize = crypto_aead_authsize(authenc); | 475 | authsize = crypto_aead_authsize(authenc); |
273 | ihash = ohash + authsize; | 476 | ihash = ohash + authsize; |
274 | scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0); | 477 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
275 | return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; | 478 | authsize, 0); |
479 | return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; | ||
276 | } | 480 | } |
277 | 481 | ||
278 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | 482 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, |
279 | unsigned int cryptlen) | 483 | unsigned int cryptlen) |
280 | { | 484 | { |
281 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 485 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
486 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
282 | struct scatterlist *src = req->src; | 487 | struct scatterlist *src = req->src; |
283 | struct scatterlist cipher[2]; | 488 | struct scatterlist *assoc = req->assoc; |
284 | struct page *srcp; | 489 | struct scatterlist *cipher = areq_ctx->cipher; |
490 | struct scatterlist *asg = areq_ctx->asg; | ||
285 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 491 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
492 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
493 | struct page *srcp; | ||
286 | u8 *vsrc; | 494 | u8 *vsrc; |
287 | 495 | ||
288 | srcp = sg_page(src); | 496 | srcp = sg_page(src); |
@@ -293,9 +501,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
293 | sg_set_buf(cipher, iv, ivsize); | 501 | sg_set_buf(cipher, iv, ivsize); |
294 | authenc_chain(cipher, src, vsrc == iv + ivsize); | 502 | authenc_chain(cipher, src, vsrc == iv + ivsize); |
295 | src = cipher; | 503 | src = cipher; |
504 | cryptlen += ivsize; | ||
505 | } | ||
506 | |||
507 | if (sg_is_last(assoc)) { | ||
508 | authenc_ahash_fn = crypto_authenc_ahash; | ||
509 | sg_init_table(asg, 2); | ||
510 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
511 | authenc_chain(asg, src, 0); | ||
512 | src = asg; | ||
513 | cryptlen += req->assoclen; | ||
296 | } | 514 | } |
297 | 515 | ||
298 | return crypto_authenc_verify(req, src, cryptlen + ivsize); | 516 | areq_ctx->cryptlen = cryptlen; |
517 | areq_ctx->sg = src; | ||
518 | |||
519 | return crypto_authenc_verify(req, authenc_ahash_fn); | ||
299 | } | 520 | } |
300 | 521 | ||
301 | static int crypto_authenc_decrypt(struct aead_request *req) | 522 | static int crypto_authenc_decrypt(struct aead_request *req) |
@@ -326,38 +547,42 @@ static int crypto_authenc_decrypt(struct aead_request *req) | |||
326 | 547 | ||
327 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | 548 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) |
328 | { | 549 | { |
329 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 550 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
330 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); | 551 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); |
331 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 552 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
332 | struct crypto_hash *auth; | 553 | struct crypto_ahash *auth; |
333 | struct crypto_ablkcipher *enc; | 554 | struct crypto_ablkcipher *enc; |
334 | int err; | 555 | int err; |
335 | 556 | ||
336 | auth = crypto_spawn_hash(&ictx->auth); | 557 | auth = crypto_spawn_ahash(&ictx->auth); |
337 | if (IS_ERR(auth)) | 558 | if (IS_ERR(auth)) |
338 | return PTR_ERR(auth); | 559 | return PTR_ERR(auth); |
339 | 560 | ||
340 | enc = crypto_spawn_skcipher(&ictx->enc); | 561 | enc = crypto_spawn_skcipher(&ictx->enc); |
341 | err = PTR_ERR(enc); | 562 | err = PTR_ERR(enc); |
342 | if (IS_ERR(enc)) | 563 | if (IS_ERR(enc)) |
343 | goto err_free_hash; | 564 | goto err_free_ahash; |
344 | 565 | ||
345 | ctx->auth = auth; | 566 | ctx->auth = auth; |
346 | ctx->enc = enc; | 567 | ctx->enc = enc; |
347 | tfm->crt_aead.reqsize = max_t(unsigned int, | ||
348 | (crypto_hash_alignmask(auth) & | ||
349 | ~(crypto_tfm_ctx_alignment() - 1)) + | ||
350 | crypto_hash_digestsize(auth) * 2, | ||
351 | sizeof(struct skcipher_givcrypt_request) + | ||
352 | crypto_ablkcipher_reqsize(enc) + | ||
353 | crypto_ablkcipher_ivsize(enc)); | ||
354 | 568 | ||
355 | spin_lock_init(&ctx->auth_lock); | 569 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + |
570 | crypto_ahash_alignmask(auth), | ||
571 | crypto_ahash_alignmask(auth) + 1) + | ||
572 | crypto_ablkcipher_ivsize(enc); | ||
573 | |||
574 | tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) + | ||
575 | ctx->reqoff + | ||
576 | max_t(unsigned int, | ||
577 | crypto_ahash_reqsize(auth) + | ||
578 | sizeof(struct ahash_request), | ||
579 | sizeof(struct skcipher_givcrypt_request) + | ||
580 | crypto_ablkcipher_reqsize(enc)); | ||
356 | 581 | ||
357 | return 0; | 582 | return 0; |
358 | 583 | ||
359 | err_free_hash: | 584 | err_free_ahash: |
360 | crypto_free_hash(auth); | 585 | crypto_free_ahash(auth); |
361 | return err; | 586 | return err; |
362 | } | 587 | } |
363 | 588 | ||
@@ -365,7 +590,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) | |||
365 | { | 590 | { |
366 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 591 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
367 | 592 | ||
368 | crypto_free_hash(ctx->auth); | 593 | crypto_free_ahash(ctx->auth); |
369 | crypto_free_ablkcipher(ctx->enc); | 594 | crypto_free_ablkcipher(ctx->enc); |
370 | } | 595 | } |
371 | 596 | ||
@@ -373,7 +598,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
373 | { | 598 | { |
374 | struct crypto_attr_type *algt; | 599 | struct crypto_attr_type *algt; |
375 | struct crypto_instance *inst; | 600 | struct crypto_instance *inst; |
376 | struct crypto_alg *auth; | 601 | struct hash_alg_common *auth; |
602 | struct crypto_alg *auth_base; | ||
377 | struct crypto_alg *enc; | 603 | struct crypto_alg *enc; |
378 | struct authenc_instance_ctx *ctx; | 604 | struct authenc_instance_ctx *ctx; |
379 | const char *enc_name; | 605 | const char *enc_name; |
@@ -387,10 +613,12 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
387 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 613 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
388 | return ERR_PTR(-EINVAL); | 614 | return ERR_PTR(-EINVAL); |
389 | 615 | ||
390 | auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 616 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
391 | CRYPTO_ALG_TYPE_HASH_MASK); | 617 | CRYPTO_ALG_TYPE_AHASH_MASK); |
392 | if (IS_ERR(auth)) | 618 | if (IS_ERR(auth)) |
393 | return ERR_PTR(PTR_ERR(auth)); | 619 | return ERR_CAST(auth); |
620 | |||
621 | auth_base = &auth->base; | ||
394 | 622 | ||
395 | enc_name = crypto_attr_alg_name(tb[2]); | 623 | enc_name = crypto_attr_alg_name(tb[2]); |
396 | err = PTR_ERR(enc_name); | 624 | err = PTR_ERR(enc_name); |
@@ -404,7 +632,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
404 | 632 | ||
405 | ctx = crypto_instance_ctx(inst); | 633 | ctx = crypto_instance_ctx(inst); |
406 | 634 | ||
407 | err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); | 635 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); |
408 | if (err) | 636 | if (err) |
409 | goto err_free_inst; | 637 | goto err_free_inst; |
410 | 638 | ||
@@ -419,28 +647,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
419 | 647 | ||
420 | err = -ENAMETOOLONG; | 648 | err = -ENAMETOOLONG; |
421 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 649 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, |
422 | "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= | 650 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= |
423 | CRYPTO_MAX_ALG_NAME) | 651 | CRYPTO_MAX_ALG_NAME) |
424 | goto err_drop_enc; | 652 | goto err_drop_enc; |
425 | 653 | ||
426 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 654 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
427 | "authenc(%s,%s)", auth->cra_driver_name, | 655 | "authenc(%s,%s)", auth_base->cra_driver_name, |
428 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 656 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
429 | goto err_drop_enc; | 657 | goto err_drop_enc; |
430 | 658 | ||
431 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 659 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
432 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 660 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; |
433 | inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; | 661 | inst->alg.cra_priority = enc->cra_priority * |
662 | 10 + auth_base->cra_priority; | ||
434 | inst->alg.cra_blocksize = enc->cra_blocksize; | 663 | inst->alg.cra_blocksize = enc->cra_blocksize; |
435 | inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; | 664 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; |
436 | inst->alg.cra_type = &crypto_aead_type; | 665 | inst->alg.cra_type = &crypto_aead_type; |
437 | 666 | ||
438 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 667 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; |
439 | inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? | 668 | inst->alg.cra_aead.maxauthsize = auth->digestsize; |
440 | auth->cra_hash.digestsize : | ||
441 | auth->cra_type ? | ||
442 | __crypto_shash_alg(auth)->digestsize : | ||
443 | auth->cra_digest.dia_digestsize; | ||
444 | 669 | ||
445 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); | 670 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); |
446 | 671 | ||
@@ -453,13 +678,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
453 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; | 678 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; |
454 | 679 | ||
455 | out: | 680 | out: |
456 | crypto_mod_put(auth); | 681 | crypto_mod_put(auth_base); |
457 | return inst; | 682 | return inst; |
458 | 683 | ||
459 | err_drop_enc: | 684 | err_drop_enc: |
460 | crypto_drop_skcipher(&ctx->enc); | 685 | crypto_drop_skcipher(&ctx->enc); |
461 | err_drop_auth: | 686 | err_drop_auth: |
462 | crypto_drop_spawn(&ctx->auth); | 687 | crypto_drop_ahash(&ctx->auth); |
463 | err_free_inst: | 688 | err_free_inst: |
464 | kfree(inst); | 689 | kfree(inst); |
465 | out_put_auth: | 690 | out_put_auth: |
@@ -472,7 +697,7 @@ static void crypto_authenc_free(struct crypto_instance *inst) | |||
472 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); | 697 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); |
473 | 698 | ||
474 | crypto_drop_skcipher(&ctx->enc); | 699 | crypto_drop_skcipher(&ctx->enc); |
475 | crypto_drop_spawn(&ctx->auth); | 700 | crypto_drop_ahash(&ctx->auth); |
476 | kfree(inst); | 701 | kfree(inst); |
477 | } | 702 | } |
478 | 703 | ||
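The reqoff/reqsize arithmetic in crypto_authenc_init_tfm() above lays the per-request scratch area out as the authenc_request_ctx itself, then (inside tail[]) room for two digests aligned for the hash, the cipher IV, and finally whichever sub-request is active (ahash or skcipher givcrypt); crypto_authenc_encrypt() relies on this when it locates the IV at (u8 *)abreq - ivsize. A hypothetical stand-alone rendering of the same offset calculation, with generic size parameters instead of real transform handles:

#include <stddef.h>

/* Round x up to a multiple of the power-of-two a, like the kernel's ALIGN(). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Offset from areq_ctx->tail to the embedded sub-request: space for the
 * computed and received ICVs (aligned for the hash) plus the cipher IV. */
static size_t authenc_reqoff(size_t digestsize, size_t hash_alignmask,
			     size_t ivsize)
{
	return ALIGN_UP(2 * digestsize + hash_alignmask, hash_alignmask + 1) +
	       ivsize;
}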
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 90d26c91f4e9..7a7219266e3c 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, | |||
89 | memcpy(walk->dst.virt.addr, walk->page, n); | 89 | memcpy(walk->dst.virt.addr, walk->page, n); |
90 | blkcipher_unmap_dst(walk); | 90 | blkcipher_unmap_dst(walk); |
91 | } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { | 91 | } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { |
92 | blkcipher_unmap_src(walk); | ||
93 | if (walk->flags & BLKCIPHER_WALK_DIFF) | 92 | if (walk->flags & BLKCIPHER_WALK_DIFF) |
94 | blkcipher_unmap_dst(walk); | 93 | blkcipher_unmap_dst(walk); |
94 | blkcipher_unmap_src(walk); | ||
95 | } | 95 | } |
96 | 96 | ||
97 | scatterwalk_advance(&walk->in, n); | 97 | scatterwalk_advance(&walk->in, n); |
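The blkcipher.c hunk above only reorders the unmaps: when BLKCIPHER_WALK_DIFF is set, source and destination were mapped with two separate atomic kmaps, and the destination (mapped last) is now released first, matching the LIFO discipline that stack-based kmap_atomic() slots require. A minimal illustration of that discipline, using the one-argument kmap_atomic() API and a hypothetical helper that is not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy n bytes between two (possibly highmem) pages, releasing the
 * atomic mappings in the reverse order they were taken. */
static void copy_page_bytes(struct page *dst_page, struct page *src_page,
			    size_t n)
{
	void *src = kmap_atomic(src_page);	/* mapped first */
	void *dst = kmap_atomic(dst_page);	/* mapped second */

	memcpy(dst, src, n);

	kunmap_atomic(dst);			/* released first ... */
	kunmap_atomic(src);			/* ... then the earlier one */
}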
diff --git a/crypto/blowfish.c b/crypto/blowfish.c index 6f5b48731922..a67d52ee0580 100644 --- a/crypto/blowfish.c +++ b/crypto/blowfish.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * Blowfish Cipher Algorithm, by Bruce Schneier. | 4 | * Blowfish Cipher Algorithm, by Bruce Schneier. |
@@ -299,7 +299,7 @@ static const u32 bf_sbox[256 * 4] = { | |||
299 | 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, | 299 | 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, |
300 | }; | 300 | }; |
301 | 301 | ||
302 | /* | 302 | /* |
303 | * Round loop unrolling macros, S is a pointer to a S-Box array | 303 | * Round loop unrolling macros, S is a pointer to a S-Box array |
304 | * organized in 4 unsigned longs at a row. | 304 | * organized in 4 unsigned longs at a row. |
305 | */ | 305 | */ |
@@ -315,7 +315,7 @@ static const u32 bf_sbox[256 * 4] = { | |||
315 | 315 | ||
316 | /* | 316 | /* |
317 | * The blowfish encipher processes 64-bit blocks. | 317 | * The blowfish encipher processes 64-bit blocks. |
318 | * NOTE: This function MUSTN'T respect endianness | 318 | * NOTE: This function MUSTN'T respect endianness |
319 | */ | 319 | */ |
320 | static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) | 320 | static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) |
321 | { | 321 | { |
@@ -395,7 +395,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
395 | out_blk[1] = cpu_to_be32(yl); | 395 | out_blk[1] = cpu_to_be32(yl); |
396 | } | 396 | } |
397 | 397 | ||
398 | /* | 398 | /* |
399 | * Calculates the blowfish S and P boxes for encryption and decryption. | 399 | * Calculates the blowfish S and P boxes for encryption and decryption. |
400 | */ | 400 | */ |
401 | static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) | 401 | static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) |
@@ -417,10 +417,10 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) | |||
417 | 417 | ||
418 | /* Actual subkey generation */ | 418 | /* Actual subkey generation */ |
419 | for (j = 0, i = 0; i < 16 + 2; i++) { | 419 | for (j = 0, i = 0; i < 16 + 2; i++) { |
420 | temp = (((u32 )key[j] << 24) | | 420 | temp = (((u32)key[j] << 24) | |
421 | ((u32 )key[(j + 1) % keylen] << 16) | | 421 | ((u32)key[(j + 1) % keylen] << 16) | |
422 | ((u32 )key[(j + 2) % keylen] << 8) | | 422 | ((u32)key[(j + 2) % keylen] << 8) | |
423 | ((u32 )key[(j + 3) % keylen])); | 423 | ((u32)key[(j + 3) % keylen])); |
424 | 424 | ||
425 | P[i] = P[i] ^ temp; | 425 | P[i] = P[i] ^ temp; |
426 | j = (j + 4) % keylen; | 426 | j = (j + 4) % keylen; |
@@ -444,7 +444,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) | |||
444 | S[count + 1] = data[1]; | 444 | S[count + 1] = data[1]; |
445 | } | 445 | } |
446 | } | 446 | } |
447 | 447 | ||
448 | /* Bruce says not to bother with the weak key check. */ | 448 | /* Bruce says not to bother with the weak key check. */ |
449 | return 0; | 449 | return 0; |
450 | } | 450 | } |
diff --git a/crypto/camellia.c b/crypto/camellia.c index 964635d163f4..64cff46ea5e4 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c | |||
@@ -39,271 +39,271 @@ | |||
39 | #include <asm/unaligned.h> | 39 | #include <asm/unaligned.h> |
40 | 40 | ||
41 | static const u32 camellia_sp1110[256] = { | 41 | static const u32 camellia_sp1110[256] = { |
42 | 0x70707000,0x82828200,0x2c2c2c00,0xececec00, | 42 | 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00, |
43 | 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500, | 43 | 0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500, |
44 | 0xe4e4e400,0x85858500,0x57575700,0x35353500, | 44 | 0xe4e4e400, 0x85858500, 0x57575700, 0x35353500, |
45 | 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100, | 45 | 0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100, |
46 | 0x23232300,0xefefef00,0x6b6b6b00,0x93939300, | 46 | 0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300, |
47 | 0x45454500,0x19191900,0xa5a5a500,0x21212100, | 47 | 0x45454500, 0x19191900, 0xa5a5a500, 0x21212100, |
48 | 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00, | 48 | 0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00, |
49 | 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00, | 49 | 0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00, |
50 | 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00, | 50 | 0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00, |
51 | 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00, | 51 | 0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00, |
52 | 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00, | 52 | 0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00, |
53 | 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00, | 53 | 0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00, |
54 | 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00, | 54 | 0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00, |
55 | 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00, | 55 | 0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00, |
56 | 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600, | 56 | 0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600, |
57 | 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00, | 57 | 0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00, |
58 | 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600, | 58 | 0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600, |
59 | 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00, | 59 | 0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00, |
60 | 0x74747400,0x12121200,0x2b2b2b00,0x20202000, | 60 | 0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000, |
61 | 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900, | 61 | 0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900, |
62 | 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200, | 62 | 0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200, |
63 | 0x34343400,0x7e7e7e00,0x76767600,0x05050500, | 63 | 0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500, |
64 | 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100, | 64 | 0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100, |
65 | 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700, | 65 | 0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700, |
66 | 0x14141400,0x58585800,0x3a3a3a00,0x61616100, | 66 | 0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100, |
67 | 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00, | 67 | 0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00, |
68 | 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600, | 68 | 0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600, |
69 | 0x53535300,0x18181800,0xf2f2f200,0x22222200, | 69 | 0x53535300, 0x18181800, 0xf2f2f200, 0x22222200, |
70 | 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200, | 70 | 0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200, |
71 | 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100, | 71 | 0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100, |
72 | 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800, | 72 | 0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800, |
73 | 0x60606000,0xfcfcfc00,0x69696900,0x50505000, | 73 | 0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000, |
74 | 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00, | 74 | 0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00, |
75 | 0xa1a1a100,0x89898900,0x62626200,0x97979700, | 75 | 0xa1a1a100, 0x89898900, 0x62626200, 0x97979700, |
76 | 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500, | 76 | 0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500, |
77 | 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200, | 77 | 0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200, |
78 | 0x10101000,0xc4c4c400,0x00000000,0x48484800, | 78 | 0x10101000, 0xc4c4c400, 0x00000000, 0x48484800, |
79 | 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00, | 79 | 0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00, |
80 | 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00, | 80 | 0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00, |
81 | 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400, | 81 | 0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400, |
82 | 0x87878700,0x5c5c5c00,0x83838300,0x02020200, | 82 | 0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200, |
83 | 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300, | 83 | 0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300, |
84 | 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300, | 84 | 0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300, |
85 | 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200, | 85 | 0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200, |
86 | 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600, | 86 | 0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600, |
87 | 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00, | 87 | 0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00, |
88 | 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00, | 88 | 0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00, |
89 | 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00, | 89 | 0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00, |
90 | 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00, | 90 | 0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00, |
91 | 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00, | 91 | 0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00, |
92 | 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600, | 92 | 0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600, |
93 | 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900, | 93 | 0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900, |
94 | 0x78787800,0x98989800,0x06060600,0x6a6a6a00, | 94 | 0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00, |
95 | 0xe7e7e700,0x46464600,0x71717100,0xbababa00, | 95 | 0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00, |
96 | 0xd4d4d400,0x25252500,0xababab00,0x42424200, | 96 | 0xd4d4d400, 0x25252500, 0xababab00, 0x42424200, |
97 | 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00, | 97 | 0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00, |
98 | 0x72727200,0x07070700,0xb9b9b900,0x55555500, | 98 | 0x72727200, 0x07070700, 0xb9b9b900, 0x55555500, |
99 | 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00, | 99 | 0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00, |
100 | 0x36363600,0x49494900,0x2a2a2a00,0x68686800, | 100 | 0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800, |
101 | 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400, | 101 | 0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400, |
102 | 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00, | 102 | 0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00, |
103 | 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100, | 103 | 0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100, |
104 | 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400, | 104 | 0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400, |
105 | 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00, | 105 | 0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00, |
106 | }; | 106 | }; |
107 | 107 | ||
108 | static const u32 camellia_sp0222[256] = { | 108 | static const u32 camellia_sp0222[256] = { |
109 | 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9, | 109 | 0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9, |
110 | 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb, | 110 | 0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb, |
111 | 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a, | 111 | 0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a, |
112 | 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282, | 112 | 0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282, |
113 | 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727, | 113 | 0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727, |
114 | 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242, | 114 | 0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242, |
115 | 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c, | 115 | 0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c, |
116 | 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b, | 116 | 0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b, |
117 | 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f, | 117 | 0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f, |
118 | 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d, | 118 | 0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d, |
119 | 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe, | 119 | 0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe, |
120 | 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434, | 120 | 0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434, |
121 | 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595, | 121 | 0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595, |
122 | 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a, | 122 | 0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a, |
123 | 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad, | 123 | 0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad, |
124 | 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a, | 124 | 0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a, |
125 | 0x00171717,0x001a1a1a,0x00353535,0x00cccccc, | 125 | 0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc, |
126 | 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a, | 126 | 0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a, |
127 | 0x00e8e8e8,0x00242424,0x00565656,0x00404040, | 127 | 0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040, |
128 | 0x00e1e1e1,0x00636363,0x00090909,0x00333333, | 128 | 0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333, |
129 | 0x00bfbfbf,0x00989898,0x00979797,0x00858585, | 129 | 0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585, |
130 | 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a, | 130 | 0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a, |
131 | 0x00dadada,0x006f6f6f,0x00535353,0x00626262, | 131 | 0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262, |
132 | 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf, | 132 | 0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf, |
133 | 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2, | 133 | 0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2, |
134 | 0x00bdbdbd,0x00363636,0x00222222,0x00383838, | 134 | 0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838, |
135 | 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c, | 135 | 0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c, |
136 | 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444, | 136 | 0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444, |
137 | 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565, | 137 | 0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565, |
138 | 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323, | 138 | 0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323, |
139 | 0x00484848,0x00101010,0x00d1d1d1,0x00515151, | 139 | 0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151, |
140 | 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0, | 140 | 0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0, |
141 | 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa, | 141 | 0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa, |
142 | 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f, | 142 | 0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f, |
143 | 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b, | 143 | 0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b, |
144 | 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5, | 144 | 0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5, |
145 | 0x00202020,0x00898989,0x00000000,0x00909090, | 145 | 0x00202020, 0x00898989, 0x00000000, 0x00909090, |
146 | 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7, | 146 | 0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7, |
147 | 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5, | 147 | 0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5, |
148 | 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929, | 148 | 0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929, |
149 | 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404, | 149 | 0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404, |
150 | 0x009b9b9b,0x00949494,0x00212121,0x00666666, | 150 | 0x009b9b9b, 0x00949494, 0x00212121, 0x00666666, |
151 | 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7, | 151 | 0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7, |
152 | 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5, | 152 | 0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5, |
153 | 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c, | 153 | 0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c, |
154 | 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676, | 154 | 0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676, |
155 | 0x00030303,0x002d2d2d,0x00dedede,0x00969696, | 155 | 0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696, |
156 | 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c, | 156 | 0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c, |
157 | 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919, | 157 | 0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919, |
158 | 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d, | 158 | 0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d, |
159 | 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d, | 159 | 0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d, |
160 | 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2, | 160 | 0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2, |
161 | 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4, | 161 | 0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4, |
162 | 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575, | 162 | 0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575, |
163 | 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484, | 163 | 0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484, |
164 | 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5, | 164 | 0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5, |
165 | 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa, | 165 | 0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa, |
166 | 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414, | 166 | 0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414, |
167 | 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0, | 167 | 0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0, |
168 | 0x00787878,0x00707070,0x00e3e3e3,0x00494949, | 168 | 0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949, |
169 | 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6, | 169 | 0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6, |
170 | 0x00777777,0x00939393,0x00868686,0x00838383, | 170 | 0x00777777, 0x00939393, 0x00868686, 0x00838383, |
171 | 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9, | 171 | 0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9, |
172 | 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d, | 172 | 0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d, |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static const u32 camellia_sp3033[256] = { | 175 | static const u32 camellia_sp3033[256] = { |
176 | 0x38003838,0x41004141,0x16001616,0x76007676, | 176 | 0x38003838, 0x41004141, 0x16001616, 0x76007676, |
177 | 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2, | 177 | 0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2, |
178 | 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a, | 178 | 0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a, |
179 | 0x75007575,0x06000606,0x57005757,0xa000a0a0, | 179 | 0x75007575, 0x06000606, 0x57005757, 0xa000a0a0, |
180 | 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9, | 180 | 0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9, |
181 | 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090, | 181 | 0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090, |
182 | 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727, | 182 | 0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727, |
183 | 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede, | 183 | 0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede, |
184 | 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7, | 184 | 0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7, |
185 | 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767, | 185 | 0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767, |
186 | 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf, | 186 | 0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf, |
187 | 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d, | 187 | 0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d, |
188 | 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565, | 188 | 0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565, |
189 | 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e, | 189 | 0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e, |
190 | 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b, | 190 | 0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b, |
191 | 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6, | 191 | 0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6, |
192 | 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333, | 192 | 0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333, |
193 | 0xfd00fdfd,0x66006666,0x58005858,0x96009696, | 193 | 0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696, |
194 | 0x3a003a3a,0x09000909,0x95009595,0x10001010, | 194 | 0x3a003a3a, 0x09000909, 0x95009595, 0x10001010, |
195 | 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc, | 195 | 0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc, |
196 | 0xef00efef,0x26002626,0xe500e5e5,0x61006161, | 196 | 0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161, |
197 | 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282, | 197 | 0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282, |
198 | 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898, | 198 | 0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898, |
199 | 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb, | 199 | 0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb, |
200 | 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0, | 200 | 0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0, |
201 | 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e, | 201 | 0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e, |
202 | 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b, | 202 | 0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b, |
203 | 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111, | 203 | 0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111, |
204 | 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959, | 204 | 0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959, |
205 | 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8, | 205 | 0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8, |
206 | 0x12001212,0x04000404,0x74007474,0x54005454, | 206 | 0x12001212, 0x04000404, 0x74007474, 0x54005454, |
207 | 0x30003030,0x7e007e7e,0xb400b4b4,0x28002828, | 207 | 0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828, |
208 | 0x55005555,0x68006868,0x50005050,0xbe00bebe, | 208 | 0x55005555, 0x68006868, 0x50005050, 0xbe00bebe, |
209 | 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb, | 209 | 0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb, |
210 | 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca, | 210 | 0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca, |
211 | 0x70007070,0xff00ffff,0x32003232,0x69006969, | 211 | 0x70007070, 0xff00ffff, 0x32003232, 0x69006969, |
212 | 0x08000808,0x62006262,0x00000000,0x24002424, | 212 | 0x08000808, 0x62006262, 0x00000000, 0x24002424, |
213 | 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded, | 213 | 0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded, |
214 | 0x45004545,0x81008181,0x73007373,0x6d006d6d, | 214 | 0x45004545, 0x81008181, 0x73007373, 0x6d006d6d, |
215 | 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a, | 215 | 0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a, |
216 | 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101, | 216 | 0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101, |
217 | 0xe600e6e6,0x25002525,0x48004848,0x99009999, | 217 | 0xe600e6e6, 0x25002525, 0x48004848, 0x99009999, |
218 | 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9, | 218 | 0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9, |
219 | 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171, | 219 | 0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171, |
220 | 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313, | 220 | 0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313, |
221 | 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d, | 221 | 0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d, |
222 | 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5, | 222 | 0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5, |
223 | 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717, | 223 | 0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717, |
224 | 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646, | 224 | 0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646, |
225 | 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747, | 225 | 0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747, |
226 | 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b, | 226 | 0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b, |
227 | 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac, | 227 | 0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac, |
228 | 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535, | 228 | 0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535, |
229 | 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d, | 229 | 0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d, |
230 | 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121, | 230 | 0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121, |
231 | 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d, | 231 | 0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d, |
232 | 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa, | 232 | 0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa, |
233 | 0x7c007c7c,0x77007777,0x56005656,0x05000505, | 233 | 0x7c007c7c, 0x77007777, 0x56005656, 0x05000505, |
234 | 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434, | 234 | 0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434, |
235 | 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252, | 235 | 0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252, |
236 | 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd, | 236 | 0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd, |
237 | 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0, | 237 | 0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0, |
238 | 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a, | 238 | 0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a, |
239 | 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f, | 239 | 0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f, |
240 | }; | 240 | }; |
241 | 241 | ||
242 | static const u32 camellia_sp4404[256] = { | 242 | static const u32 camellia_sp4404[256] = { |
243 | 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0, | 243 | 0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0, |
244 | 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae, | 244 | 0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae, |
245 | 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5, | 245 | 0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5, |
246 | 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092, | 246 | 0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092, |
247 | 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f, | 247 | 0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f, |
248 | 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b, | 248 | 0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b, |
249 | 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d, | 249 | 0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d, |
250 | 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c, | 250 | 0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c, |
251 | 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0, | 251 | 0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0, |
252 | 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084, | 252 | 0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084, |
253 | 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076, | 253 | 0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076, |
254 | 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004, | 254 | 0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004, |
255 | 0x14140014,0x3a3a003a,0xdede00de,0x11110011, | 255 | 0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011, |
256 | 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2, | 256 | 0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2, |
257 | 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a, | 257 | 0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a, |
258 | 0x24240024,0xe8e800e8,0x60600060,0x69690069, | 258 | 0x24240024, 0xe8e800e8, 0x60600060, 0x69690069, |
259 | 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062, | 259 | 0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062, |
260 | 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064, | 260 | 0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064, |
261 | 0x10100010,0x00000000,0xa3a300a3,0x75750075, | 261 | 0x10100010, 0x00000000, 0xa3a300a3, 0x75750075, |
262 | 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd, | 262 | 0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd, |
263 | 0x87870087,0x83830083,0xcdcd00cd,0x90900090, | 263 | 0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090, |
264 | 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf, | 264 | 0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf, |
265 | 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6, | 265 | 0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6, |
266 | 0x81810081,0x6f6f006f,0x13130013,0x63630063, | 266 | 0x81810081, 0x6f6f006f, 0x13130013, 0x63630063, |
267 | 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc, | 267 | 0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc, |
268 | 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4, | 268 | 0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4, |
269 | 0x78780078,0x06060006,0xe7e700e7,0x71710071, | 269 | 0x78780078, 0x06060006, 0xe7e700e7, 0x71710071, |
270 | 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d, | 270 | 0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d, |
271 | 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac, | 271 | 0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac, |
272 | 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1, | 272 | 0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1, |
273 | 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043, | 273 | 0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043, |
274 | 0x15150015,0xadad00ad,0x77770077,0x80800080, | 274 | 0x15150015, 0xadad00ad, 0x77770077, 0x80800080, |
275 | 0x82820082,0xecec00ec,0x27270027,0xe5e500e5, | 275 | 0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5, |
276 | 0x85850085,0x35350035,0x0c0c000c,0x41410041, | 276 | 0x85850085, 0x35350035, 0x0c0c000c, 0x41410041, |
277 | 0xefef00ef,0x93930093,0x19190019,0x21210021, | 277 | 0xefef00ef, 0x93930093, 0x19190019, 0x21210021, |
278 | 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd, | 278 | 0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd, |
279 | 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce, | 279 | 0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce, |
280 | 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a, | 280 | 0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a, |
281 | 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d, | 281 | 0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d, |
282 | 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d, | 282 | 0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d, |
283 | 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d, | 283 | 0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d, |
284 | 0x12120012,0x20200020,0xb1b100b1,0x99990099, | 284 | 0x12120012, 0x20200020, 0xb1b100b1, 0x99990099, |
285 | 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005, | 285 | 0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005, |
286 | 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7, | 286 | 0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7, |
287 | 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c, | 287 | 0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c, |
288 | 0x0f0f000f,0x16160016,0x18180018,0x22220022, | 288 | 0x0f0f000f, 0x16160016, 0x18180018, 0x22220022, |
289 | 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091, | 289 | 0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091, |
290 | 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050, | 290 | 0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050, |
291 | 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097, | 291 | 0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097, |
292 | 0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2, | 292 | 0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2, |
293 | 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db, | 293 | 0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db, |
294 | 0x03030003,0xdada00da,0x3f3f003f,0x94940094, | 294 | 0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094, |
295 | 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033, | 295 | 0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033, |
296 | 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2, | 296 | 0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2, |
297 | 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b, | 297 | 0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b, |
298 | 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e, | 298 | 0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e, |
299 | 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e, | 299 | 0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e, |
300 | 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059, | 300 | 0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059, |
301 | 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba, | 301 | 0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba, |
302 | 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa, | 302 | 0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa, |
303 | 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a, | 303 | 0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a, |
304 | 0x49490049,0x68680068,0x38380038,0xa4a400a4, | 304 | 0x49490049, 0x68680068, 0x38380038, 0xa4a400a4, |
305 | 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1, | 305 | 0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1, |
306 | 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e, | 306 | 0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e, |
307 | }; | 307 | }; |
308 | 308 | ||
309 | 309 | ||
@@ -344,7 +344,7 @@ static const u32 camellia_sp4404[256] = { | |||
344 | lr = (lr << bits) + (rl >> (32 - bits)); \ | 344 | lr = (lr << bits) + (rl >> (32 - bits)); \ |
345 | rl = (rl << bits) + (rr >> (32 - bits)); \ | 345 | rl = (rl << bits) + (rr >> (32 - bits)); \ |
346 | rr = (rr << bits) + (w0 >> (32 - bits)); \ | 346 | rr = (rr << bits) + (w0 >> (32 - bits)); \ |
347 | } while(0) | 347 | } while (0) |
348 | 348 | ||
349 | #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ | 349 | #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ |
350 | do { \ | 350 | do { \ |
@@ -354,7 +354,7 @@ static const u32 camellia_sp4404[256] = { | |||
354 | lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ | 354 | lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ |
355 | rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ | 355 | rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ |
356 | rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ | 356 | rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ |
357 | } while(0) | 357 | } while (0) |
358 | 358 | ||
359 | #define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ | 359 | #define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ |
360 | do { \ | 360 | do { \ |
@@ -373,7 +373,7 @@ static const u32 camellia_sp4404[256] = { | |||
373 | yl ^= yr; \ | 373 | yl ^= yr; \ |
374 | yr = ror32(yr, 8); \ | 374 | yr = ror32(yr, 8); \ |
375 | yr ^= yl; \ | 375 | yr ^= yl; \ |
376 | } while(0) | 376 | } while (0) |
377 | 377 | ||
378 | #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) | 378 | #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) |
379 | #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) | 379 | #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) |
@@ -835,7 +835,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) | |||
835 | static void camellia_setup192(const unsigned char *key, u32 *subkey) | 835 | static void camellia_setup192(const unsigned char *key, u32 *subkey) |
836 | { | 836 | { |
837 | unsigned char kk[32]; | 837 | unsigned char kk[32]; |
838 | u32 krll, krlr, krrl,krrr; | 838 | u32 krll, krlr, krrl, krrr; |
839 | 839 | ||
840 | memcpy(kk, key, 24); | 840 | memcpy(kk, key, 24); |
841 | memcpy((unsigned char *)&krll, key+16, 4); | 841 | memcpy((unsigned char *)&krll, key+16, 4); |
@@ -865,7 +865,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) | |||
865 | t1 |= lr; \ | 865 | t1 |= lr; \ |
866 | ll ^= t1; \ | 866 | ll ^= t1; \ |
867 | rr ^= rol32(t3, 1); \ | 867 | rr ^= rol32(t3, 1); \ |
868 | } while(0) | 868 | } while (0) |
869 | 869 | ||
870 | #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ | 870 | #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ |
871 | do { \ | 871 | do { \ |
@@ -881,12 +881,12 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) | |||
881 | ir ^= il ^ kr; \ | 881 | ir ^= il ^ kr; \ |
882 | yl ^= ir; \ | 882 | yl ^= ir; \ |
883 | yr ^= ror32(il, 8) ^ ir; \ | 883 | yr ^= ror32(il, 8) ^ ir; \ |
884 | } while(0) | 884 | } while (0) |
885 | 885 | ||
886 | /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ | 886 | /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ |
887 | static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) | 887 | static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) |
888 | { | 888 | { |
889 | u32 il,ir,t0,t1; /* temporary variables */ | 889 | u32 il, ir, t0, t1; /* temporary variables */ |
890 | 890 | ||
891 | /* pre whitening but absorb kw2 */ | 891 | /* pre whitening but absorb kw2 */ |
892 | io[0] ^= SUBKEY_L(0); | 892 | io[0] ^= SUBKEY_L(0); |
@@ -894,30 +894,30 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) | |||
894 | 894 | ||
895 | /* main iteration */ | 895 | /* main iteration */ |
896 | #define ROUNDS(i) do { \ | 896 | #define ROUNDS(i) do { \ |
897 | CAMELLIA_ROUNDSM(io[0],io[1], \ | 897 | CAMELLIA_ROUNDSM(io[0], io[1], \ |
898 | SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ | 898 | SUBKEY_L(i + 2), SUBKEY_R(i + 2), \ |
899 | io[2],io[3],il,ir); \ | 899 | io[2], io[3], il, ir); \ |
900 | CAMELLIA_ROUNDSM(io[2],io[3], \ | 900 | CAMELLIA_ROUNDSM(io[2], io[3], \ |
901 | SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ | 901 | SUBKEY_L(i + 3), SUBKEY_R(i + 3), \ |
902 | io[0],io[1],il,ir); \ | 902 | io[0], io[1], il, ir); \ |
903 | CAMELLIA_ROUNDSM(io[0],io[1], \ | 903 | CAMELLIA_ROUNDSM(io[0], io[1], \ |
904 | SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ | 904 | SUBKEY_L(i + 4), SUBKEY_R(i + 4), \ |
905 | io[2],io[3],il,ir); \ | 905 | io[2], io[3], il, ir); \ |
906 | CAMELLIA_ROUNDSM(io[2],io[3], \ | 906 | CAMELLIA_ROUNDSM(io[2], io[3], \ |
907 | SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ | 907 | SUBKEY_L(i + 5), SUBKEY_R(i + 5), \ |
908 | io[0],io[1],il,ir); \ | 908 | io[0], io[1], il, ir); \ |
909 | CAMELLIA_ROUNDSM(io[0],io[1], \ | 909 | CAMELLIA_ROUNDSM(io[0], io[1], \ |
910 | SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ | 910 | SUBKEY_L(i + 6), SUBKEY_R(i + 6), \ |
911 | io[2],io[3],il,ir); \ | 911 | io[2], io[3], il, ir); \ |
912 | CAMELLIA_ROUNDSM(io[2],io[3], \ | 912 | CAMELLIA_ROUNDSM(io[2], io[3], \ |
913 | SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ | 913 | SUBKEY_L(i + 7), SUBKEY_R(i + 7), \ |
914 | io[0],io[1],il,ir); \ | 914 | io[0], io[1], il, ir); \ |
915 | } while (0) | 915 | } while (0) |
916 | #define FLS(i) do { \ | 916 | #define FLS(i) do { \ |
917 | CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ | 917 | CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ |
918 | SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ | 918 | SUBKEY_L(i + 0), SUBKEY_R(i + 0), \ |
919 | SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ | 919 | SUBKEY_L(i + 1), SUBKEY_R(i + 1), \ |
920 | t0,t1,il,ir); \ | 920 | t0, t1, il, ir); \ |
921 | } while (0) | 921 | } while (0) |
922 | 922 | ||
923 | ROUNDS(0); | 923 | ROUNDS(0); |
@@ -941,7 +941,7 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) | |||
941 | 941 | ||
942 | static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) | 942 | static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) |
943 | { | 943 | { |
944 | u32 il,ir,t0,t1; /* temporary variables */ | 944 | u32 il, ir, t0, t1; /* temporary variables */ |
945 | 945 | ||
946 | /* pre whitening but absorb kw2 */ | 946 | /* pre whitening but absorb kw2 */ |
947 | io[0] ^= SUBKEY_L(i); | 947 | io[0] ^= SUBKEY_L(i); |
@@ -949,30 +949,30 @@ static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) | |||
949 | 949 | ||
950 | /* main iteration */ | 950 | /* main iteration */ |
951 | #define ROUNDS(i) do { \ | 951 | #define ROUNDS(i) do { \ |
952 | CAMELLIA_ROUNDSM(io[0],io[1], \ | 952 | CAMELLIA_ROUNDSM(io[0], io[1], \ |
953 | SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ | 953 | SUBKEY_L(i + 7), SUBKEY_R(i + 7), \ |
954 | io[2],io[3],il,ir); \ | 954 | io[2], io[3], il, ir); \ |
955 | CAMELLIA_ROUNDSM(io[2],io[3], \ | 955 | CAMELLIA_ROUNDSM(io[2], io[3], \ |
956 | SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ | 956 | SUBKEY_L(i + 6), SUBKEY_R(i + 6), \ |
957 | io[0],io[1],il,ir); \ | 957 | io[0], io[1], il, ir); \ |
958 | CAMELLIA_ROUNDSM(io[0],io[1], \ | 958 | CAMELLIA_ROUNDSM(io[0], io[1], \ |
959 | SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ | 959 | SUBKEY_L(i + 5), SUBKEY_R(i + 5), \ |
960 | io[2],io[3],il,ir); \ | 960 | io[2], io[3], il, ir); \ |
961 | CAMELLIA_ROUNDSM(io[2],io[3], \ | 961 | CAMELLIA_ROUNDSM(io[2], io[3], \ |
962 | SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ | 962 | SUBKEY_L(i + 4), SUBKEY_R(i + 4), \ |
963 | io[0],io[1],il,ir); \ | 963 | io[0], io[1], il, ir); \ |
964 | CAMELLIA_ROUNDSM(io[0],io[1], \ | 964 | CAMELLIA_ROUNDSM(io[0], io[1], \ |
965 | SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ | 965 | SUBKEY_L(i + 3), SUBKEY_R(i + 3), \ |
966 | io[2],io[3],il,ir); \ | 966 | io[2], io[3], il, ir); \ |
967 | CAMELLIA_ROUNDSM(io[2],io[3], \ | 967 | CAMELLIA_ROUNDSM(io[2], io[3], \ |
968 | SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ | 968 | SUBKEY_L(i + 2), SUBKEY_R(i + 2), \ |
969 | io[0],io[1],il,ir); \ | 969 | io[0], io[1], il, ir); \ |
970 | } while (0) | 970 | } while (0) |
971 | #define FLS(i) do { \ | 971 | #define FLS(i) do { \ |
972 | CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ | 972 | CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ |
973 | SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ | 973 | SUBKEY_L(i + 1), SUBKEY_R(i + 1), \ |
974 | SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ | 974 | SUBKEY_L(i + 0), SUBKEY_R(i + 0), \ |
975 | t0,t1,il,ir); \ | 975 | t0, t1, il, ir); \ |
976 | } while (0) | 976 | } while (0) |
977 | 977 | ||
978 | if (i == 32) { | 978 | if (i == 32) { |
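[Note] The camellia.c hunks above are whitespace-only cleanups: spaces after commas and "} while (0)" instead of "} while(0)"; the round macros themselves are unchanged. The do { ... } while (0) wrapper being touched is the standard idiom for making a multi-statement macro behave as one statement. A minimal standalone illustration (not from the kernel tree; SWAP_U32 is a made-up example macro):

#include <stdio.h>

/* Wrapping the body in do { ... } while (0) lets the macro be used as a
 * single statement, including under an un-braced if/else, and still
 * requires the trailing semicolon at the call site. */
#define SWAP_U32(a, b)				\
	do {					\
		unsigned int tmp_ = (a);	\
		(a) = (b);			\
		(b) = tmp_;			\
	} while (0)

int main(void)
{
	unsigned int x = 1, y = 2;

	if (x < y)
		SWAP_U32(x, y);		/* expands to exactly one statement */
	else
		printf("already ordered\n");

	printf("x=%u y=%u\n", x, y);
	return 0;
}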
diff --git a/crypto/cast5.c b/crypto/cast5.c index 8cbe28fa0e0c..a1d2294b50ad 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c | |||
@@ -569,12 +569,12 @@ static const u32 sb8[256] = { | |||
569 | 0xeaee6801, 0x8db2a283, 0xea8bf59e | 569 | 0xeaee6801, 0x8db2a283, 0xea8bf59e |
570 | }; | 570 | }; |
571 | 571 | ||
572 | #define F1(D,m,r) ( (I = ((m) + (D))), (I=rol32(I,(r))), \ | 572 | #define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \ |
573 | (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) ) | 573 | (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) |
574 | #define F2(D,m,r) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \ | 574 | #define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ |
575 | (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) ) | 575 | (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) |
576 | #define F3(D,m,r) ( (I = ((m) - (D))), (I=rol32(I,(r))), \ | 576 | #define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \ |
577 | (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) | 577 | (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) |
578 | 578 | ||
579 | 579 | ||
580 | static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) | 580 | static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) |
@@ -694,7 +694,7 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) | |||
694 | dst[1] = cpu_to_be32(l); | 694 | dst[1] = cpu_to_be32(l); |
695 | } | 695 | } |
696 | 696 | ||
697 | static void key_schedule(u32 * x, u32 * z, u32 * k) | 697 | static void key_schedule(u32 *x, u32 *z, u32 *k) |
698 | { | 698 | { |
699 | 699 | ||
700 | #define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) | 700 | #define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) |
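[Note] The cast5.c hunk above only reformats the F1/F2/F3 round macros; their behaviour is unchanged. As a sketch of what F1 computes, here is a hypothetical function-form equivalent (this is not what the patch does — it is only meant to make the data flow explicit). It assumes the four 256-entry S-boxes s1..s4 from cast5.c/cast6.c are passed in by the caller, and rol32() is redefined locally so the snippet builds outside the kernel tree:

#include <stdint.h>

/* Local 32-bit rotate-left; the masking keeps the shifts defined when r is 0. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}

/* Hypothetical function form of the F1 macro above: rotate (m + D) left by r,
 * then mix the four bytes of the result through the S-boxes. */
static inline uint32_t cast_f1(uint32_t d, uint32_t m, uint8_t r,
			       const uint32_t s1[256], const uint32_t s2[256],
			       const uint32_t s3[256], const uint32_t s4[256])
{
	uint32_t i = rol32(m + d, r);

	return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff]) -
		s3[(i >> 8) & 0xff]) + s4[i & 0xff];
}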
diff --git a/crypto/cast6.c b/crypto/cast6.c index 007d02beed67..e0c15a6c7c34 100644 --- a/crypto/cast6.c +++ b/crypto/cast6.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * under the terms of GNU General Public License as published by the Free | 11 | * under the terms of GNU General Public License as published by the Free |
12 | * Software Foundation; either version 2 of the License, or (at your option) | 12 | * Software Foundation; either version 2 of the License, or (at your option) |
13 | * any later version. | 13 | * any later version. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | 17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA |
@@ -35,12 +35,12 @@ struct cast6_ctx { | |||
35 | u8 Kr[12][4]; | 35 | u8 Kr[12][4]; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #define F1(D,r,m) ( (I = ((m) + (D))), (I=rol32(I,(r))), \ | 38 | #define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \ |
39 | (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) ) | 39 | (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) |
40 | #define F2(D,r,m) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \ | 40 | #define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ |
41 | (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) ) | 41 | (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) |
42 | #define F3(D,r,m) ( (I = ((m) - (D))), (I=rol32(I,(r))), \ | 42 | #define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \ |
43 | (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) | 43 | (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) |
44 | 44 | ||
45 | static const u32 s1[256] = { | 45 | static const u32 s1[256] = { |
46 | 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, | 46 | 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, |
@@ -312,7 +312,7 @@ static const u32 s4[256] = { | |||
312 | 312 | ||
313 | static const u32 Tm[24][8] = { | 313 | static const u32 Tm[24][8] = { |
314 | { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d, | 314 | { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d, |
315 | 0x84c413be, 0xf39dff5f, 0x6277eb00 } , | 315 | 0x84c413be, 0xf39dff5f, 0x6277eb00 } , |
316 | { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525, | 316 | { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525, |
317 | 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } , | 317 | 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } , |
318 | { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d, | 318 | { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d, |
@@ -369,7 +369,8 @@ static const u8 Tr[4][8] = { | |||
369 | }; | 369 | }; |
370 | 370 | ||
371 | /* forward octave */ | 371 | /* forward octave */ |
372 | static void W(u32 *key, unsigned int i) { | 372 | static void W(u32 *key, unsigned int i) |
373 | { | ||
373 | u32 I; | 374 | u32 I; |
374 | key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); | 375 | key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); |
375 | key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); | 376 | key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); |
@@ -377,7 +378,7 @@ static void W(u32 *key, unsigned int i) { | |||
377 | key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]); | 378 | key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]); |
378 | key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]); | 379 | key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]); |
379 | key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]); | 380 | key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]); |
380 | key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); | 381 | key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); |
381 | key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); | 382 | key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); |
382 | } | 383 | } |
383 | 384 | ||
@@ -393,11 +394,11 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, | |||
393 | if (key_len % 4 != 0) { | 394 | if (key_len % 4 != 0) { |
394 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | 395 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
395 | return -EINVAL; | 396 | return -EINVAL; |
396 | } | 397 | } |
398 | |||
399 | memset(p_key, 0, 32); | ||
400 | memcpy(p_key, in_key, key_len); | ||
397 | 401 | ||
398 | memset (p_key, 0, 32); | ||
399 | memcpy (p_key, in_key, key_len); | ||
400 | |||
401 | key[0] = be32_to_cpu(p_key[0]); /* A */ | 402 | key[0] = be32_to_cpu(p_key[0]); /* A */ |
402 | key[1] = be32_to_cpu(p_key[1]); /* B */ | 403 | key[1] = be32_to_cpu(p_key[1]); /* B */ |
403 | key[2] = be32_to_cpu(p_key[2]); /* C */ | 404 | key[2] = be32_to_cpu(p_key[2]); /* C */ |
@@ -406,18 +407,16 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, | |||
406 | key[5] = be32_to_cpu(p_key[5]); /* F */ | 407 | key[5] = be32_to_cpu(p_key[5]); /* F */ |
407 | key[6] = be32_to_cpu(p_key[6]); /* G */ | 408 | key[6] = be32_to_cpu(p_key[6]); /* G */ |
408 | key[7] = be32_to_cpu(p_key[7]); /* H */ | 409 | key[7] = be32_to_cpu(p_key[7]); /* H */ |
409 | |||
410 | |||
411 | 410 | ||
412 | for (i = 0; i < 12; i++) { | 411 | for (i = 0; i < 12; i++) { |
413 | W (key, 2 * i); | 412 | W(key, 2 * i); |
414 | W (key, 2 * i + 1); | 413 | W(key, 2 * i + 1); |
415 | 414 | ||
416 | c->Kr[i][0] = key[0] & 0x1f; | 415 | c->Kr[i][0] = key[0] & 0x1f; |
417 | c->Kr[i][1] = key[2] & 0x1f; | 416 | c->Kr[i][1] = key[2] & 0x1f; |
418 | c->Kr[i][2] = key[4] & 0x1f; | 417 | c->Kr[i][2] = key[4] & 0x1f; |
419 | c->Kr[i][3] = key[6] & 0x1f; | 418 | c->Kr[i][3] = key[6] & 0x1f; |
420 | 419 | ||
421 | c->Km[i][0] = key[7]; | 420 | c->Km[i][0] = key[7]; |
422 | c->Km[i][1] = key[5]; | 421 | c->Km[i][1] = key[5]; |
423 | c->Km[i][2] = key[3]; | 422 | c->Km[i][2] = key[3]; |
@@ -428,21 +427,23 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, | |||
428 | } | 427 | } |
429 | 428 | ||
430 | /*forward quad round*/ | 429 | /*forward quad round*/ |
431 | static void Q (u32 * block, u8 * Kr, u32 * Km) { | 430 | static void Q(u32 *block, u8 *Kr, u32 *Km) |
431 | { | ||
432 | u32 I; | 432 | u32 I; |
433 | block[2] ^= F1(block[3], Kr[0], Km[0]); | 433 | block[2] ^= F1(block[3], Kr[0], Km[0]); |
434 | block[1] ^= F2(block[2], Kr[1], Km[1]); | 434 | block[1] ^= F2(block[2], Kr[1], Km[1]); |
435 | block[0] ^= F3(block[1], Kr[2], Km[2]); | 435 | block[0] ^= F3(block[1], Kr[2], Km[2]); |
436 | block[3] ^= F1(block[0], Kr[3], Km[3]); | 436 | block[3] ^= F1(block[0], Kr[3], Km[3]); |
437 | } | 437 | } |
438 | 438 | ||
439 | /*reverse quad round*/ | 439 | /*reverse quad round*/ |
440 | static void QBAR (u32 * block, u8 * Kr, u32 * Km) { | 440 | static void QBAR(u32 *block, u8 *Kr, u32 *Km) |
441 | { | ||
441 | u32 I; | 442 | u32 I; |
442 | block[3] ^= F1(block[0], Kr[3], Km[3]); | 443 | block[3] ^= F1(block[0], Kr[3], Km[3]); |
443 | block[0] ^= F3(block[1], Kr[2], Km[2]); | 444 | block[0] ^= F3(block[1], Kr[2], Km[2]); |
444 | block[1] ^= F2(block[2], Kr[1], Km[1]); | 445 | block[1] ^= F2(block[2], Kr[1], Km[1]); |
445 | block[2] ^= F1(block[3], Kr[0], Km[0]); | 446 | block[2] ^= F1(block[3], Kr[0], Km[0]); |
446 | } | 447 | } |
447 | 448 | ||
448 | static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) | 449 | static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) |
@@ -451,64 +452,65 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) | |||
451 | const __be32 *src = (const __be32 *)inbuf; | 452 | const __be32 *src = (const __be32 *)inbuf; |
452 | __be32 *dst = (__be32 *)outbuf; | 453 | __be32 *dst = (__be32 *)outbuf; |
453 | u32 block[4]; | 454 | u32 block[4]; |
454 | u32 * Km; | 455 | u32 *Km; |
455 | u8 * Kr; | 456 | u8 *Kr; |
456 | 457 | ||
457 | block[0] = be32_to_cpu(src[0]); | 458 | block[0] = be32_to_cpu(src[0]); |
458 | block[1] = be32_to_cpu(src[1]); | 459 | block[1] = be32_to_cpu(src[1]); |
459 | block[2] = be32_to_cpu(src[2]); | 460 | block[2] = be32_to_cpu(src[2]); |
460 | block[3] = be32_to_cpu(src[3]); | 461 | block[3] = be32_to_cpu(src[3]); |
461 | 462 | ||
462 | Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); | 463 | Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km); |
463 | Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); | 464 | Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); |
464 | Km = c->Km[2]; Kr = c->Kr[2]; Q (block, Kr, Km); | 465 | Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km); |
465 | Km = c->Km[3]; Kr = c->Kr[3]; Q (block, Kr, Km); | 466 | Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km); |
466 | Km = c->Km[4]; Kr = c->Kr[4]; Q (block, Kr, Km); | 467 | Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km); |
467 | Km = c->Km[5]; Kr = c->Kr[5]; Q (block, Kr, Km); | 468 | Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km); |
468 | Km = c->Km[6]; Kr = c->Kr[6]; QBAR (block, Kr, Km); | 469 | Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km); |
469 | Km = c->Km[7]; Kr = c->Kr[7]; QBAR (block, Kr, Km); | 470 | Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km); |
470 | Km = c->Km[8]; Kr = c->Kr[8]; QBAR (block, Kr, Km); | 471 | Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km); |
471 | Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); | 472 | Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km); |
472 | Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); | 473 | Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); |
473 | Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); | 474 | Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); |
474 | 475 | ||
475 | dst[0] = cpu_to_be32(block[0]); | 476 | dst[0] = cpu_to_be32(block[0]); |
476 | dst[1] = cpu_to_be32(block[1]); | 477 | dst[1] = cpu_to_be32(block[1]); |
477 | dst[2] = cpu_to_be32(block[2]); | 478 | dst[2] = cpu_to_be32(block[2]); |
478 | dst[3] = cpu_to_be32(block[3]); | 479 | dst[3] = cpu_to_be32(block[3]); |
479 | } | 480 | } |
480 | 481 | ||
481 | static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { | 482 | static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) |
482 | struct cast6_ctx * c = crypto_tfm_ctx(tfm); | 483 | { |
484 | struct cast6_ctx *c = crypto_tfm_ctx(tfm); | ||
483 | const __be32 *src = (const __be32 *)inbuf; | 485 | const __be32 *src = (const __be32 *)inbuf; |
484 | __be32 *dst = (__be32 *)outbuf; | 486 | __be32 *dst = (__be32 *)outbuf; |
485 | u32 block[4]; | 487 | u32 block[4]; |
486 | u32 * Km; | 488 | u32 *Km; |
487 | u8 * Kr; | 489 | u8 *Kr; |
488 | 490 | ||
489 | block[0] = be32_to_cpu(src[0]); | 491 | block[0] = be32_to_cpu(src[0]); |
490 | block[1] = be32_to_cpu(src[1]); | 492 | block[1] = be32_to_cpu(src[1]); |
491 | block[2] = be32_to_cpu(src[2]); | 493 | block[2] = be32_to_cpu(src[2]); |
492 | block[3] = be32_to_cpu(src[3]); | 494 | block[3] = be32_to_cpu(src[3]); |
493 | 495 | ||
494 | Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); | 496 | Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); |
495 | Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); | 497 | Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); |
496 | Km = c->Km[9]; Kr = c->Kr[9]; Q (block, Kr, Km); | 498 | Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km); |
497 | Km = c->Km[8]; Kr = c->Kr[8]; Q (block, Kr, Km); | 499 | Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km); |
498 | Km = c->Km[7]; Kr = c->Kr[7]; Q (block, Kr, Km); | 500 | Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km); |
499 | Km = c->Km[6]; Kr = c->Kr[6]; Q (block, Kr, Km); | 501 | Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km); |
500 | Km = c->Km[5]; Kr = c->Kr[5]; QBAR (block, Kr, Km); | 502 | Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km); |
501 | Km = c->Km[4]; Kr = c->Kr[4]; QBAR (block, Kr, Km); | 503 | Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km); |
502 | Km = c->Km[3]; Kr = c->Kr[3]; QBAR (block, Kr, Km); | 504 | Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km); |
503 | Km = c->Km[2]; Kr = c->Kr[2]; QBAR (block, Kr, Km); | 505 | Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km); |
504 | Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); | 506 | Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); |
505 | Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); | 507 | Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); |
506 | 508 | ||
507 | dst[0] = cpu_to_be32(block[0]); | 509 | dst[0] = cpu_to_be32(block[0]); |
508 | dst[1] = cpu_to_be32(block[1]); | 510 | dst[1] = cpu_to_be32(block[1]); |
509 | dst[2] = cpu_to_be32(block[2]); | 511 | dst[2] = cpu_to_be32(block[2]); |
510 | dst[3] = cpu_to_be32(block[3]); | 512 | dst[3] = cpu_to_be32(block[3]); |
511 | } | 513 | } |
512 | 514 | ||
513 | static struct crypto_alg alg = { | 515 | static struct crypto_alg alg = { |
514 | .cra_name = "cast6", | 516 | .cra_name = "cast6", |
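[Note] The reformatted Q() and QBAR() above are exact inverses: every line XORs one block word with a keyed function of another word, and QBAR replays the same XORs in reverse order, which is what lets cast6_decrypt() simply walk the key schedule backwards. A toy demonstration of that property, using an arbitrary stand-in mixing function instead of the real F1/F2/F3 and S-boxes (the inverse property depends only on XOR and statement ordering, not on the round function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for F1/F2/F3; any deterministic function works here. */
static uint32_t toyF(uint32_t d, uint32_t k)
{
	return (d * 2654435761u) ^ k;
}

static void Q(uint32_t *b, const uint32_t *k)
{
	b[2] ^= toyF(b[3], k[0]);
	b[1] ^= toyF(b[2], k[1]);
	b[0] ^= toyF(b[1], k[2]);
	b[3] ^= toyF(b[0], k[3]);
}

static void QBAR(uint32_t *b, const uint32_t *k)
{
	b[3] ^= toyF(b[0], k[3]);
	b[0] ^= toyF(b[1], k[2]);
	b[1] ^= toyF(b[2], k[1]);
	b[2] ^= toyF(b[3], k[0]);
}

int main(void)
{
	uint32_t block[4] = { 1, 2, 3, 4 }, copy[4];
	const uint32_t key[4] = { 0xdeadbeef, 0xcafebabe, 0x01234567, 0x89abcdef };

	memcpy(copy, block, sizeof(copy));
	Q(block, key);		/* one "forward quad round" */
	QBAR(block, key);	/* the matching "reverse quad round" */
	printf("round-trip %s\n",
	       memcmp(block, copy, sizeof(copy)) == 0 ? "ok" : "BROKEN");
	return 0;
}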
diff --git a/crypto/cipher.c b/crypto/cipher.c index 9a1a7316eeac..39541e0e537d 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 11 | * Software Foundation; either version 2 of the License, or (at your option) |
12 | * any later version. | 12 | * any later version. |
13 | * | 13 | * |
14 | */ | 14 | */ |
diff --git a/crypto/compress.c b/crypto/compress.c index 1ee357085d3a..c33f0763a956 100644 --- a/crypto/compress.c +++ b/crypto/compress.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | 9 | * under the terms of the GNU General Public License as published by the Free |
10 | * Software Foundation; either version 2 of the License, or (at your option) | 10 | * Software Foundation; either version 2 of the License, or (at your option) |
11 | * any later version. | 11 | * any later version. |
12 | * | 12 | * |
13 | */ | 13 | */ |
@@ -39,7 +39,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm) | |||
39 | 39 | ||
40 | ops->cot_compress = crypto_compress; | 40 | ops->cot_compress = crypto_compress; |
41 | ops->cot_decompress = crypto_decompress; | 41 | ops->cot_decompress = crypto_decompress; |
42 | 42 | ||
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
45 | 45 | ||
diff --git a/crypto/crc32c.c b/crypto/crc32c.c index 973bc2cfab2e..de9e55c29794 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * CRC32C chksum | 4 | * CRC32C chksum |
@@ -30,7 +30,7 @@ | |||
30 | * | 30 | * |
31 | * This program is free software; you can redistribute it and/or modify it | 31 | * This program is free software; you can redistribute it and/or modify it |
32 | * under the terms of the GNU General Public License as published by the Free | 32 | * under the terms of the GNU General Public License as published by the Free |
33 | * Software Foundation; either version 2 of the License, or (at your option) | 33 | * Software Foundation; either version 2 of the License, or (at your option) |
34 | * any later version. | 34 | * any later version. |
35 | * | 35 | * |
36 | */ | 36 | */ |
@@ -142,7 +142,7 @@ static u32 crc32c(u32 crc, const u8 *data, unsigned int length) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Steps through buffer one byte at at time, calculates reflected | 145 | * Steps through buffer one byte at at time, calculates reflected |
146 | * crc using table. | 146 | * crc using table. |
147 | */ | 147 | */ |
148 | 148 | ||
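[Note] The crc32c.c hunks are whitespace-only; the comment shown above describes the byte-at-a-time, table-driven, reflected CRC that the file implements. A self-contained sketch of that general approach (this is not the kernel implementation; 0x82F63B78 is the reflected CRC32C polynomial and 0xE3069283 is the commonly quoted check value for "123456789"):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t crc32c_table[256];

/* Build the byte-indexed lookup table for the reflected polynomial. */
static void crc32c_init_table(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t crc = i;

		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
		crc32c_table[i] = crc;
	}
}

/* Steps through the buffer one byte at a time, using the table. */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *data, size_t len)
{
	while (len--)
		crc = crc32c_table[(crc ^ *data++) & 0xff] ^ (crc >> 8);
	return crc;
}

int main(void)
{
	crc32c_init_table();
	/* Standard check value: CRC32C("123456789") == 0xE3069283. */
	uint32_t crc = ~crc32c_update(~0u, (const uint8_t *)"123456789", 9);

	printf("crc32c = 0x%08X\n", crc);
	return 0;
}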
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ae5fa99d5d36..e46d21ae26bc 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -3,6 +3,13 @@ | |||
3 | * | 3 | * |
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | * | 5 | * |
6 | * Added AEAD support to cryptd. | ||
7 | * Authors: Tadeusz Struk (tadeusz.struk@intel.com) | ||
8 | * Adrian Hoban <adrian.hoban@intel.com> | ||
9 | * Gabriele Paoloni <gabriele.paoloni@intel.com> | ||
10 | * Aidan O'Mahony (aidan.o.mahony@intel.com) | ||
11 | * Copyright (c) 2010, Intel Corporation. | ||
12 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | 13 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 14 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 15 | * Software Foundation; either version 2 of the License, or (at your option) |
@@ -12,6 +19,7 @@ | |||
12 | 19 | ||
13 | #include <crypto/algapi.h> | 20 | #include <crypto/algapi.h> |
14 | #include <crypto/internal/hash.h> | 21 | #include <crypto/internal/hash.h> |
22 | #include <crypto/internal/aead.h> | ||
15 | #include <crypto/cryptd.h> | 23 | #include <crypto/cryptd.h> |
16 | #include <crypto/crypto_wq.h> | 24 | #include <crypto/crypto_wq.h> |
17 | #include <linux/err.h> | 25 | #include <linux/err.h> |
@@ -31,7 +39,7 @@ struct cryptd_cpu_queue { | |||
31 | }; | 39 | }; |
32 | 40 | ||
33 | struct cryptd_queue { | 41 | struct cryptd_queue { |
34 | struct cryptd_cpu_queue *cpu_queue; | 42 | struct cryptd_cpu_queue __percpu *cpu_queue; |
35 | }; | 43 | }; |
36 | 44 | ||
37 | struct cryptd_instance_ctx { | 45 | struct cryptd_instance_ctx { |
@@ -39,6 +47,16 @@ struct cryptd_instance_ctx { | |||
39 | struct cryptd_queue *queue; | 47 | struct cryptd_queue *queue; |
40 | }; | 48 | }; |
41 | 49 | ||
50 | struct hashd_instance_ctx { | ||
51 | struct crypto_shash_spawn spawn; | ||
52 | struct cryptd_queue *queue; | ||
53 | }; | ||
54 | |||
55 | struct aead_instance_ctx { | ||
56 | struct crypto_aead_spawn aead_spawn; | ||
57 | struct cryptd_queue *queue; | ||
58 | }; | ||
59 | |||
42 | struct cryptd_blkcipher_ctx { | 60 | struct cryptd_blkcipher_ctx { |
43 | struct crypto_blkcipher *child; | 61 | struct crypto_blkcipher *child; |
44 | }; | 62 | }; |
@@ -48,11 +66,20 @@ struct cryptd_blkcipher_request_ctx { | |||
48 | }; | 66 | }; |
49 | 67 | ||
50 | struct cryptd_hash_ctx { | 68 | struct cryptd_hash_ctx { |
51 | struct crypto_hash *child; | 69 | struct crypto_shash *child; |
52 | }; | 70 | }; |
53 | 71 | ||
54 | struct cryptd_hash_request_ctx { | 72 | struct cryptd_hash_request_ctx { |
55 | crypto_completion_t complete; | 73 | crypto_completion_t complete; |
74 | struct shash_desc desc; | ||
75 | }; | ||
76 | |||
77 | struct cryptd_aead_ctx { | ||
78 | struct crypto_aead *child; | ||
79 | }; | ||
80 | |||
81 | struct cryptd_aead_request_ctx { | ||
82 | crypto_completion_t complete; | ||
56 | }; | 83 | }; |
57 | 84 | ||
58 | static void cryptd_queue_worker(struct work_struct *work); | 85 | static void cryptd_queue_worker(struct work_struct *work); |
@@ -93,7 +120,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, | |||
93 | struct cryptd_cpu_queue *cpu_queue; | 120 | struct cryptd_cpu_queue *cpu_queue; |
94 | 121 | ||
95 | cpu = get_cpu(); | 122 | cpu = get_cpu(); |
96 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 123 | cpu_queue = this_cpu_ptr(queue->cpu_queue); |
97 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 124 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
98 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 125 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
99 | put_cpu(); | 126 | put_cpu(); |
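[Note] The hunk above swaps per_cpu_ptr(queue->cpu_queue, cpu) for this_cpu_ptr(queue->cpu_queue): get_cpu() has already disabled preemption and pinned the task to a CPU, so the local CPU's queue can be addressed directly instead of recomputing it from the cpu number. A rough user-space analogy of "pick the queue that belongs to the CPU I am running on" (not kernel code; assumes a Linux host with sched_getcpu(), and there is no user-space equivalent of the preemption-disable that get_cpu() provides):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NQUEUES 64	/* assume at most 64 CPUs for the sketch */

struct toy_queue {
	int pending;	/* stands in for struct crypto_queue */
};

static struct toy_queue queues[NQUEUES];

/* Analogue of cryptd_enqueue_request(): queue one request on the CPU-local
 * queue, then the real code kicks a work item on that same CPU. */
static int toy_enqueue(void)
{
	int cpu = sched_getcpu();
	struct toy_queue *q;

	if (cpu < 0)
		cpu = 0;	/* sched_getcpu() can fail; fall back to slot 0 */
	q = &queues[cpu % NQUEUES];
	q->pending++;
	printf("queued on cpu %d, %d pending\n", cpu, q->pending);
	return 0;
}

int main(void)
{
	return toy_enqueue();
}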
@@ -249,32 +276,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | |||
249 | crypto_free_blkcipher(ctx->child); | 276 | crypto_free_blkcipher(ctx->child); |
250 | } | 277 | } |
251 | 278 | ||
252 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | 279 | static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
253 | struct cryptd_queue *queue) | 280 | unsigned int tail) |
254 | { | 281 | { |
282 | char *p; | ||
255 | struct crypto_instance *inst; | 283 | struct crypto_instance *inst; |
256 | struct cryptd_instance_ctx *ctx; | ||
257 | int err; | 284 | int err; |
258 | 285 | ||
259 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 286 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); |
260 | if (!inst) { | 287 | if (!p) |
261 | inst = ERR_PTR(-ENOMEM); | 288 | return ERR_PTR(-ENOMEM); |
262 | goto out; | 289 | |
263 | } | 290 | inst = (void *)(p + head); |
264 | 291 | ||
265 | err = -ENAMETOOLONG; | 292 | err = -ENAMETOOLONG; |
266 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 293 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
267 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 294 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
268 | goto out_free_inst; | 295 | goto out_free_inst; |
269 | 296 | ||
270 | ctx = crypto_instance_ctx(inst); | ||
271 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
272 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
273 | if (err) | ||
274 | goto out_free_inst; | ||
275 | |||
276 | ctx->queue = queue; | ||
277 | |||
278 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 297 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
279 | 298 | ||
280 | inst->alg.cra_priority = alg->cra_priority + 50; | 299 | inst->alg.cra_priority = alg->cra_priority + 50; |
@@ -282,29 +301,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | |||
282 | inst->alg.cra_alignmask = alg->cra_alignmask; | 301 | inst->alg.cra_alignmask = alg->cra_alignmask; |
283 | 302 | ||
284 | out: | 303 | out: |
285 | return inst; | 304 | return p; |
286 | 305 | ||
287 | out_free_inst: | 306 | out_free_inst: |
288 | kfree(inst); | 307 | kfree(p); |
289 | inst = ERR_PTR(err); | 308 | p = ERR_PTR(err); |
290 | goto out; | 309 | goto out; |
291 | } | 310 | } |
292 | 311 | ||
293 | static struct crypto_instance *cryptd_alloc_blkcipher( | 312 | static int cryptd_create_blkcipher(struct crypto_template *tmpl, |
294 | struct rtattr **tb, struct cryptd_queue *queue) | 313 | struct rtattr **tb, |
314 | struct cryptd_queue *queue) | ||
295 | { | 315 | { |
316 | struct cryptd_instance_ctx *ctx; | ||
296 | struct crypto_instance *inst; | 317 | struct crypto_instance *inst; |
297 | struct crypto_alg *alg; | 318 | struct crypto_alg *alg; |
319 | int err; | ||
298 | 320 | ||
299 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | 321 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, |
300 | CRYPTO_ALG_TYPE_MASK); | 322 | CRYPTO_ALG_TYPE_MASK); |
301 | if (IS_ERR(alg)) | 323 | if (IS_ERR(alg)) |
302 | return ERR_CAST(alg); | 324 | return PTR_ERR(alg); |
303 | 325 | ||
304 | inst = cryptd_alloc_instance(alg, queue); | 326 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); |
327 | err = PTR_ERR(inst); | ||
305 | if (IS_ERR(inst)) | 328 | if (IS_ERR(inst)) |
306 | goto out_put_alg; | 329 | goto out_put_alg; |
307 | 330 | ||
331 | ctx = crypto_instance_ctx(inst); | ||
332 | ctx->queue = queue; | ||
333 | |||
334 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
335 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
336 | if (err) | ||
337 | goto out_free_inst; | ||
338 | |||
308 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | 339 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
309 | inst->alg.cra_type = &crypto_ablkcipher_type; | 340 | inst->alg.cra_type = &crypto_ablkcipher_type; |
310 | 341 | ||
@@ -323,26 +354,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher( | |||
323 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; | 354 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; |
324 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; | 355 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; |
325 | 356 | ||
357 | err = crypto_register_instance(tmpl, inst); | ||
358 | if (err) { | ||
359 | crypto_drop_spawn(&ctx->spawn); | ||
360 | out_free_inst: | ||
361 | kfree(inst); | ||
362 | } | ||
363 | |||
326 | out_put_alg: | 364 | out_put_alg: |
327 | crypto_mod_put(alg); | 365 | crypto_mod_put(alg); |
328 | return inst; | 366 | return err; |
329 | } | 367 | } |
330 | 368 | ||
331 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 369 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
332 | { | 370 | { |
333 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 371 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
334 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 372 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
335 | struct crypto_spawn *spawn = &ictx->spawn; | 373 | struct crypto_shash_spawn *spawn = &ictx->spawn; |
336 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 374 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
337 | struct crypto_hash *cipher; | 375 | struct crypto_shash *hash; |
338 | 376 | ||
339 | cipher = crypto_spawn_hash(spawn); | 377 | hash = crypto_spawn_shash(spawn); |
340 | if (IS_ERR(cipher)) | 378 | if (IS_ERR(hash)) |
341 | return PTR_ERR(cipher); | 379 | return PTR_ERR(hash); |
342 | 380 | ||
343 | ctx->child = cipher; | 381 | ctx->child = hash; |
344 | tfm->crt_ahash.reqsize = | 382 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
345 | sizeof(struct cryptd_hash_request_ctx); | 383 | sizeof(struct cryptd_hash_request_ctx) + |
384 | crypto_shash_descsize(hash)); | ||
346 | return 0; | 385 | return 0; |
347 | } | 386 | } |
348 | 387 | ||
@@ -350,22 +389,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | |||
350 | { | 389 | { |
351 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 390 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
352 | 391 | ||
353 | crypto_free_hash(ctx->child); | 392 | crypto_free_shash(ctx->child); |
354 | } | 393 | } |
355 | 394 | ||
356 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | 395 | static int cryptd_hash_setkey(struct crypto_ahash *parent, |
357 | const u8 *key, unsigned int keylen) | 396 | const u8 *key, unsigned int keylen) |
358 | { | 397 | { |
359 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 398 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
360 | struct crypto_hash *child = ctx->child; | 399 | struct crypto_shash *child = ctx->child; |
361 | int err; | 400 | int err; |
362 | 401 | ||
363 | crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 402 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
364 | crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & | 403 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
365 | CRYPTO_TFM_REQ_MASK); | 404 | CRYPTO_TFM_REQ_MASK); |
366 | err = crypto_hash_setkey(child, key, keylen); | 405 | err = crypto_shash_setkey(child, key, keylen); |
367 | crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & | 406 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
368 | CRYPTO_TFM_RES_MASK); | 407 | CRYPTO_TFM_RES_MASK); |
369 | return err; | 408 | return err; |
370 | } | 409 | } |
371 | 410 | ||
@@ -385,21 +424,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req, | |||
385 | 424 | ||
386 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) | 425 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) |
387 | { | 426 | { |
388 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 427 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
389 | struct crypto_hash *child = ctx->child; | 428 | struct crypto_shash *child = ctx->child; |
390 | struct ahash_request *req = ahash_request_cast(req_async); | 429 | struct ahash_request *req = ahash_request_cast(req_async); |
391 | struct cryptd_hash_request_ctx *rctx; | 430 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
392 | struct hash_desc desc; | 431 | struct shash_desc *desc = &rctx->desc; |
393 | |||
394 | rctx = ahash_request_ctx(req); | ||
395 | 432 | ||
396 | if (unlikely(err == -EINPROGRESS)) | 433 | if (unlikely(err == -EINPROGRESS)) |
397 | goto out; | 434 | goto out; |
398 | 435 | ||
399 | desc.tfm = child; | 436 | desc->tfm = child; |
400 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 437 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
401 | 438 | ||
402 | err = crypto_hash_crt(child)->init(&desc); | 439 | err = crypto_shash_init(desc); |
403 | 440 | ||
404 | req->base.complete = rctx->complete; | 441 | req->base.complete = rctx->complete; |
405 | 442 | ||
@@ -416,23 +453,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req) | |||
416 | 453 | ||
417 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) | 454 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) |
418 | { | 455 | { |
419 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 456 | struct ahash_request *req = ahash_request_cast(req_async); |
420 | struct crypto_hash *child = ctx->child; | ||
421 | struct ahash_request *req = ahash_request_cast(req_async); | ||
422 | struct cryptd_hash_request_ctx *rctx; | 457 | struct cryptd_hash_request_ctx *rctx; |
423 | struct hash_desc desc; | ||
424 | 458 | ||
425 | rctx = ahash_request_ctx(req); | 459 | rctx = ahash_request_ctx(req); |
426 | 460 | ||
427 | if (unlikely(err == -EINPROGRESS)) | 461 | if (unlikely(err == -EINPROGRESS)) |
428 | goto out; | 462 | goto out; |
429 | 463 | ||
430 | desc.tfm = child; | 464 | err = shash_ahash_update(req, &rctx->desc); |
431 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
432 | |||
433 | err = crypto_hash_crt(child)->update(&desc, | ||
434 | req->src, | ||
435 | req->nbytes); | ||
436 | 465 | ||
437 | req->base.complete = rctx->complete; | 466 | req->base.complete = rctx->complete; |
438 | 467 | ||
@@ -449,21 +478,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req) | |||
449 | 478 | ||
450 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) | 479 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) |
451 | { | 480 | { |
452 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 481 | struct ahash_request *req = ahash_request_cast(req_async); |
453 | struct crypto_hash *child = ctx->child; | 482 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
454 | struct ahash_request *req = ahash_request_cast(req_async); | ||
455 | struct cryptd_hash_request_ctx *rctx; | ||
456 | struct hash_desc desc; | ||
457 | |||
458 | rctx = ahash_request_ctx(req); | ||
459 | 483 | ||
460 | if (unlikely(err == -EINPROGRESS)) | 484 | if (unlikely(err == -EINPROGRESS)) |
461 | goto out; | 485 | goto out; |
462 | 486 | ||
463 | desc.tfm = child; | 487 | err = crypto_shash_final(&rctx->desc, req->result); |
464 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
465 | |||
466 | err = crypto_hash_crt(child)->final(&desc, req->result); | ||
467 | 488 | ||
468 | req->base.complete = rctx->complete; | 489 | req->base.complete = rctx->complete; |
469 | 490 | ||
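[Note] In the converted hash path above, the synchronous state no longer lives in an on-stack struct hash_desc: a struct shash_desc sits at the end of cryptd_hash_request_ctx, and the request size is grown by crypto_shash_descsize() so the per-algorithm state survives across the queued init/update/final/finup calls. A standalone sketch of that "descriptor plus trailing state" layout (hypothetical fake_* names and an illustrative state size, not the kernel structures):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct fake_desc {
	void *tfm;		/* stands in for shash_desc::tfm */
	unsigned char ctx[];	/* per-algorithm state, like shash_desc::__ctx */
};

/* Embedding a struct that ends in a flexible array member as the last field
 * is a GCC extension that the kernel relies on for exactly this pattern. */
struct fake_request_ctx {
	void (*complete)(void);	/* stands in for crypto_completion_t */
	struct fake_desc desc;	/* must be last; state bytes follow in memory */
};

int main(void)
{
	size_t descsize = 96;	/* purely illustrative per-algorithm state size */
	size_t reqsize = sizeof(struct fake_request_ctx) + descsize;
	struct fake_request_ctx *rctx = malloc(reqsize);

	if (!rctx)
		return 1;
	memset(rctx->desc.ctx, 0, descsize);	/* hash state persists here */
	printf("request context: %zu bytes total, %zu of hash state\n",
	       reqsize, descsize);
	free(rctx);
	return 0;
}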
@@ -478,26 +499,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req) | |||
478 | return cryptd_hash_enqueue(req, cryptd_hash_final); | 499 | return cryptd_hash_enqueue(req, cryptd_hash_final); |
479 | } | 500 | } |
480 | 501 | ||
481 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | 502 | static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) |
482 | { | 503 | { |
483 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 504 | struct ahash_request *req = ahash_request_cast(req_async); |
484 | struct crypto_hash *child = ctx->child; | 505 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
485 | struct ahash_request *req = ahash_request_cast(req_async); | ||
486 | struct cryptd_hash_request_ctx *rctx; | ||
487 | struct hash_desc desc; | ||
488 | 506 | ||
489 | rctx = ahash_request_ctx(req); | 507 | if (unlikely(err == -EINPROGRESS)) |
508 | goto out; | ||
509 | |||
510 | err = shash_ahash_finup(req, &rctx->desc); | ||
511 | |||
512 | req->base.complete = rctx->complete; | ||
513 | |||
514 | out: | ||
515 | local_bh_disable(); | ||
516 | rctx->complete(&req->base, err); | ||
517 | local_bh_enable(); | ||
518 | } | ||
519 | |||
520 | static int cryptd_hash_finup_enqueue(struct ahash_request *req) | ||
521 | { | ||
522 | return cryptd_hash_enqueue(req, cryptd_hash_finup); | ||
523 | } | ||
524 | |||
525 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | ||
526 | { | ||
527 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | ||
528 | struct crypto_shash *child = ctx->child; | ||
529 | struct ahash_request *req = ahash_request_cast(req_async); | ||
530 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
531 | struct shash_desc *desc = &rctx->desc; | ||
490 | 532 | ||
491 | if (unlikely(err == -EINPROGRESS)) | 533 | if (unlikely(err == -EINPROGRESS)) |
492 | goto out; | 534 | goto out; |
493 | 535 | ||
494 | desc.tfm = child; | 536 | desc->tfm = child; |
495 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 537 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
496 | 538 | ||
497 | err = crypto_hash_crt(child)->digest(&desc, | 539 | err = shash_ahash_digest(req, desc); |
498 | req->src, | ||
499 | req->nbytes, | ||
500 | req->result); | ||
501 | 540 | ||
502 | req->base.complete = rctx->complete; | 541 | req->base.complete = rctx->complete; |
503 | 542 | ||
@@ -512,72 +551,261 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req) | |||
512 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | 551 | return cryptd_hash_enqueue(req, cryptd_hash_digest); |
513 | } | 552 | } |
514 | 553 | ||
515 | static struct crypto_instance *cryptd_alloc_hash( | 554 | static int cryptd_hash_export(struct ahash_request *req, void *out) |
516 | struct rtattr **tb, struct cryptd_queue *queue) | ||
517 | { | 555 | { |
518 | struct crypto_instance *inst; | 556 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
557 | |||
558 | return crypto_shash_export(&rctx->desc, out); | ||
559 | } | ||
560 | |||
561 | static int cryptd_hash_import(struct ahash_request *req, const void *in) | ||
562 | { | ||
563 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
564 | |||
565 | return crypto_shash_import(&rctx->desc, in); | ||
566 | } | ||
567 | |||
568 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | ||
569 | struct cryptd_queue *queue) | ||
570 | { | ||
571 | struct hashd_instance_ctx *ctx; | ||
572 | struct ahash_instance *inst; | ||
573 | struct shash_alg *salg; | ||
519 | struct crypto_alg *alg; | 574 | struct crypto_alg *alg; |
575 | int err; | ||
520 | 576 | ||
521 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 577 | salg = shash_attr_alg(tb[1], 0, 0); |
522 | CRYPTO_ALG_TYPE_HASH_MASK); | 578 | if (IS_ERR(salg)) |
523 | if (IS_ERR(alg)) | 579 | return PTR_ERR(salg); |
524 | return ERR_PTR(PTR_ERR(alg)); | ||
525 | 580 | ||
526 | inst = cryptd_alloc_instance(alg, queue); | 581 | alg = &salg->base; |
582 | inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), | ||
583 | sizeof(*ctx)); | ||
584 | err = PTR_ERR(inst); | ||
527 | if (IS_ERR(inst)) | 585 | if (IS_ERR(inst)) |
528 | goto out_put_alg; | 586 | goto out_put_alg; |
529 | 587 | ||
530 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; | 588 | ctx = ahash_instance_ctx(inst); |
531 | inst->alg.cra_type = &crypto_ahash_type; | 589 | ctx->queue = queue; |
590 | |||
591 | err = crypto_init_shash_spawn(&ctx->spawn, salg, | ||
592 | ahash_crypto_instance(inst)); | ||
593 | if (err) | ||
594 | goto out_free_inst; | ||
595 | |||
596 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; | ||
597 | |||
598 | inst->alg.halg.digestsize = salg->digestsize; | ||
599 | inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); | ||
600 | |||
601 | inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; | ||
602 | inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; | ||
603 | |||
604 | inst->alg.init = cryptd_hash_init_enqueue; | ||
605 | inst->alg.update = cryptd_hash_update_enqueue; | ||
606 | inst->alg.final = cryptd_hash_final_enqueue; | ||
607 | inst->alg.finup = cryptd_hash_finup_enqueue; | ||
608 | inst->alg.export = cryptd_hash_export; | ||
609 | inst->alg.import = cryptd_hash_import; | ||
610 | inst->alg.setkey = cryptd_hash_setkey; | ||
611 | inst->alg.digest = cryptd_hash_digest_enqueue; | ||
612 | |||
613 | err = ahash_register_instance(tmpl, inst); | ||
614 | if (err) { | ||
615 | crypto_drop_shash(&ctx->spawn); | ||
616 | out_free_inst: | ||
617 | kfree(inst); | ||
618 | } | ||
619 | |||
620 | out_put_alg: | ||
621 | crypto_mod_put(alg); | ||
622 | return err; | ||
623 | } | ||
624 | |||
625 | static void cryptd_aead_crypt(struct aead_request *req, | ||
626 | struct crypto_aead *child, | ||
627 | int err, | ||
628 | int (*crypt)(struct aead_request *req)) | ||
629 | { | ||
630 | struct cryptd_aead_request_ctx *rctx; | ||
631 | rctx = aead_request_ctx(req); | ||
632 | |||
633 | if (unlikely(err == -EINPROGRESS)) | ||
634 | goto out; | ||
635 | aead_request_set_tfm(req, child); | ||
636 | err = crypt( req ); | ||
637 | req->base.complete = rctx->complete; | ||
638 | out: | ||
639 | local_bh_disable(); | ||
640 | rctx->complete(&req->base, err); | ||
641 | local_bh_enable(); | ||
642 | } | ||
643 | |||
644 | static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) | ||
645 | { | ||
646 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | ||
647 | struct crypto_aead *child = ctx->child; | ||
648 | struct aead_request *req; | ||
649 | |||
650 | req = container_of(areq, struct aead_request, base); | ||
651 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt); | ||
652 | } | ||
653 | |||
654 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | ||
655 | { | ||
656 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | ||
657 | struct crypto_aead *child = ctx->child; | ||
658 | struct aead_request *req; | ||
659 | |||
660 | req = container_of(areq, struct aead_request, base); | ||
661 | cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt); | ||
662 | } | ||
663 | |||
664 | static int cryptd_aead_enqueue(struct aead_request *req, | ||
665 | crypto_completion_t complete) | ||
666 | { | ||
667 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); | ||
668 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
669 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); | ||
670 | |||
671 | rctx->complete = req->base.complete; | ||
672 | req->base.complete = complete; | ||
673 | return cryptd_enqueue_request(queue, &req->base); | ||
674 | } | ||
675 | |||
676 | static int cryptd_aead_encrypt_enqueue(struct aead_request *req) | ||
677 | { | ||
678 | return cryptd_aead_enqueue(req, cryptd_aead_encrypt); | ||
679 | } | ||
680 | |||
681 | static int cryptd_aead_decrypt_enqueue(struct aead_request *req) | ||
682 | { | ||
683 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt); | ||
684 | } | ||
685 | |||
686 | static int cryptd_aead_init_tfm(struct crypto_tfm *tfm) | ||
687 | { | ||
688 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
689 | struct aead_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
690 | struct crypto_aead_spawn *spawn = &ictx->aead_spawn; | ||
691 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
692 | struct crypto_aead *cipher; | ||
693 | |||
694 | cipher = crypto_spawn_aead(spawn); | ||
695 | if (IS_ERR(cipher)) | ||
696 | return PTR_ERR(cipher); | ||
697 | |||
698 | crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
699 | ctx->child = cipher; | ||
700 | tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx); | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm) | ||
705 | { | ||
706 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
707 | crypto_free_aead(ctx->child); | ||
708 | } | ||
709 | |||
710 | static int cryptd_create_aead(struct crypto_template *tmpl, | ||
711 | struct rtattr **tb, | ||
712 | struct cryptd_queue *queue) | ||
713 | { | ||
714 | struct aead_instance_ctx *ctx; | ||
715 | struct crypto_instance *inst; | ||
716 | struct crypto_alg *alg; | ||
717 | int err; | ||
718 | |||
719 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD, | ||
720 | CRYPTO_ALG_TYPE_MASK); | ||
721 | if (IS_ERR(alg)) | ||
722 | return PTR_ERR(alg); | ||
532 | 723 | ||
533 | inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; | 724 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); |
534 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); | 725 | err = PTR_ERR(inst); |
726 | if (IS_ERR(inst)) | ||
727 | goto out_put_alg; | ||
535 | 728 | ||
536 | inst->alg.cra_init = cryptd_hash_init_tfm; | 729 | ctx = crypto_instance_ctx(inst); |
537 | inst->alg.cra_exit = cryptd_hash_exit_tfm; | 730 | ctx->queue = queue; |
538 | 731 | ||
539 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; | 732 | err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst, |
540 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; | 733 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); |
541 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; | 734 | if (err) |
542 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; | 735 | goto out_free_inst; |
543 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; | ||
544 | 736 | ||
737 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
738 | inst->alg.cra_type = alg->cra_type; | ||
739 | inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx); | ||
740 | inst->alg.cra_init = cryptd_aead_init_tfm; | ||
741 | inst->alg.cra_exit = cryptd_aead_exit_tfm; | ||
742 | inst->alg.cra_aead.setkey = alg->cra_aead.setkey; | ||
743 | inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; | ||
744 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | ||
745 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | ||
746 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | ||
747 | inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue; | ||
748 | inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue; | ||
749 | inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt; | ||
750 | inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt; | ||
751 | |||
752 | err = crypto_register_instance(tmpl, inst); | ||
753 | if (err) { | ||
754 | crypto_drop_spawn(&ctx->aead_spawn.base); | ||
755 | out_free_inst: | ||
756 | kfree(inst); | ||
757 | } | ||
545 | out_put_alg: | 758 | out_put_alg: |
546 | crypto_mod_put(alg); | 759 | crypto_mod_put(alg); |
547 | return inst; | 760 | return err; |
548 | } | 761 | } |
549 | 762 | ||
550 | static struct cryptd_queue queue; | 763 | static struct cryptd_queue queue; |
551 | 764 | ||
552 | static struct crypto_instance *cryptd_alloc(struct rtattr **tb) | 765 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
553 | { | 766 | { |
554 | struct crypto_attr_type *algt; | 767 | struct crypto_attr_type *algt; |
555 | 768 | ||
556 | algt = crypto_get_attr_type(tb); | 769 | algt = crypto_get_attr_type(tb); |
557 | if (IS_ERR(algt)) | 770 | if (IS_ERR(algt)) |
558 | return ERR_CAST(algt); | 771 | return PTR_ERR(algt); |
559 | 772 | ||
560 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 773 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
561 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 774 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
562 | return cryptd_alloc_blkcipher(tb, &queue); | 775 | return cryptd_create_blkcipher(tmpl, tb, &queue); |
563 | case CRYPTO_ALG_TYPE_DIGEST: | 776 | case CRYPTO_ALG_TYPE_DIGEST: |
564 | return cryptd_alloc_hash(tb, &queue); | 777 | return cryptd_create_hash(tmpl, tb, &queue); |
778 | case CRYPTO_ALG_TYPE_AEAD: | ||
779 | return cryptd_create_aead(tmpl, tb, &queue); | ||
565 | } | 780 | } |
566 | 781 | ||
567 | return ERR_PTR(-EINVAL); | 782 | return -EINVAL; |
568 | } | 783 | } |
569 | 784 | ||
570 | static void cryptd_free(struct crypto_instance *inst) | 785 | static void cryptd_free(struct crypto_instance *inst) |
571 | { | 786 | { |
572 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 787 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
788 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); | ||
789 | struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst); | ||
573 | 790 | ||
574 | crypto_drop_spawn(&ctx->spawn); | 791 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { |
575 | kfree(inst); | 792 | case CRYPTO_ALG_TYPE_AHASH: |
793 | crypto_drop_shash(&hctx->spawn); | ||
794 | kfree(ahash_instance(inst)); | ||
795 | return; | ||
796 | case CRYPTO_ALG_TYPE_AEAD: | ||
797 | crypto_drop_spawn(&aead_ctx->aead_spawn.base); | ||
798 | kfree(inst); | ||
799 | return; | ||
800 | default: | ||
801 | crypto_drop_spawn(&ctx->spawn); | ||
802 | kfree(inst); | ||
803 | } | ||
576 | } | 804 | } |
577 | 805 | ||
578 | static struct crypto_template cryptd_tmpl = { | 806 | static struct crypto_template cryptd_tmpl = { |
579 | .name = "cryptd", | 807 | .name = "cryptd", |
580 | .alloc = cryptd_alloc, | 808 | .create = cryptd_create, |
581 | .free = cryptd_free, | 809 | .free = cryptd_free, |
582 | .module = THIS_MODULE, | 810 | .module = THIS_MODULE, |
583 | }; | 811 | }; |
@@ -620,6 +848,82 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | |||
620 | } | 848 | } |
621 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 849 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
622 | 850 | ||
851 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | ||
852 | u32 type, u32 mask) | ||
853 | { | ||
854 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
855 | struct crypto_ahash *tfm; | ||
856 | |||
857 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
858 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
859 | return ERR_PTR(-EINVAL); | ||
860 | tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); | ||
861 | if (IS_ERR(tfm)) | ||
862 | return ERR_CAST(tfm); | ||
863 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
864 | crypto_free_ahash(tfm); | ||
865 | return ERR_PTR(-EINVAL); | ||
866 | } | ||
867 | |||
868 | return __cryptd_ahash_cast(tfm); | ||
869 | } | ||
870 | EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); | ||
871 | |||
872 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) | ||
873 | { | ||
874 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | ||
875 | |||
876 | return ctx->child; | ||
877 | } | ||
878 | EXPORT_SYMBOL_GPL(cryptd_ahash_child); | ||
879 | |||
880 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req) | ||
881 | { | ||
882 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
883 | return &rctx->desc; | ||
884 | } | ||
885 | EXPORT_SYMBOL_GPL(cryptd_shash_desc); | ||
886 | |||
887 | void cryptd_free_ahash(struct cryptd_ahash *tfm) | ||
888 | { | ||
889 | crypto_free_ahash(&tfm->base); | ||
890 | } | ||
891 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); | ||
892 | |||
893 | struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, | ||
894 | u32 type, u32 mask) | ||
895 | { | ||
896 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
897 | struct crypto_aead *tfm; | ||
898 | |||
899 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
900 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
901 | return ERR_PTR(-EINVAL); | ||
902 | tfm = crypto_alloc_aead(cryptd_alg_name, type, mask); | ||
903 | if (IS_ERR(tfm)) | ||
904 | return ERR_CAST(tfm); | ||
905 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
906 | crypto_free_aead(tfm); | ||
907 | return ERR_PTR(-EINVAL); | ||
908 | } | ||
909 | return __cryptd_aead_cast(tfm); | ||
910 | } | ||
911 | EXPORT_SYMBOL_GPL(cryptd_alloc_aead); | ||
912 | |||
913 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) | ||
914 | { | ||
915 | struct cryptd_aead_ctx *ctx; | ||
916 | ctx = crypto_aead_ctx(&tfm->base); | ||
917 | return ctx->child; | ||
918 | } | ||
919 | EXPORT_SYMBOL_GPL(cryptd_aead_child); | ||
920 | |||
921 | void cryptd_free_aead(struct cryptd_aead *tfm) | ||
922 | { | ||
923 | crypto_free_aead(&tfm->base); | ||
924 | } | ||
925 | EXPORT_SYMBOL_GPL(cryptd_free_aead); | ||
926 | |||
623 | static int __init cryptd_init(void) | 927 | static int __init cryptd_init(void) |
624 | { | 928 | { |
625 | int err; | 929 | int err; |
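The cryptd_alloc_ahash()/cryptd_ahash_child()/cryptd_shash_desc()/cryptd_free_ahash() exports added above let another module push a synchronous shash onto the cryptd workqueue and drive it through the asynchronous ahash interface. The following is a minimal sketch of such a caller, not part of this patch: the algorithm name "sha1", the zero type/mask arguments and the example_* names are assumptions made purely for illustration.

#include <crypto/cryptd.h>
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

struct example_wait {
	struct completion completion;
	int err;
};

static void example_done(struct crypto_async_request *req, int err)
{
	struct example_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;
	wait->err = err;
	complete(&wait->completion);
}

/* Hash @len bytes described by @sg into @out via "cryptd(sha1)". */
static int example_cryptd_digest(struct scatterlist *sg, unsigned int len,
				 u8 *out)
{
	struct cryptd_ahash *ctfm;
	struct ahash_request *req;
	struct example_wait wait;
	int err;

	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	req = ahash_request_alloc(&ctfm->base, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&wait.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);		/* request is queued to cryptd */
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.completion);
		err = wait.err;
	}

	ahash_request_free(req);
out_free_tfm:
	cryptd_free_ahash(ctfm);
	return err;
}

A caller that knows it may sleep can instead reach the wrapped algorithm directly through cryptd_ahash_child() and cryptd_shash_desc() and skip the round trip through the workqueue.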
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index cb71c9122bc0..07a8a96d46fc 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c | |||
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * Null algorithms, aka Much Ado About Nothing. | 4 | * Null algorithms, aka Much Ado About Nothing. |
5 | * | 5 | * |
6 | * These are needed for IPsec, and may be useful in general for | 6 | * These are needed for IPsec, and may be useful in general for |
7 | * testing & debugging. | 7 | * testing & debugging. |
8 | * | 8 | * |
9 | * The null cipher is compliant with RFC2410. | 9 | * The null cipher is compliant with RFC2410. |
10 | * | 10 | * |
11 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | 11 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> |
@@ -163,7 +163,7 @@ MODULE_ALIAS("cipher_null"); | |||
163 | static int __init crypto_null_mod_init(void) | 163 | static int __init crypto_null_mod_init(void) |
164 | { | 164 | { |
165 | int ret = 0; | 165 | int ret = 0; |
166 | 166 | ||
167 | ret = crypto_register_alg(&cipher_null); | 167 | ret = crypto_register_alg(&cipher_null); |
168 | if (ret < 0) | 168 | if (ret < 0) |
169 | goto out; | 169 | goto out; |
@@ -180,7 +180,7 @@ static int __init crypto_null_mod_init(void) | |||
180 | if (ret < 0) | 180 | if (ret < 0) |
181 | goto out_unregister_digest; | 181 | goto out_unregister_digest; |
182 | 182 | ||
183 | out: | 183 | out: |
184 | return ret; | 184 | return ret; |
185 | 185 | ||
186 | out_unregister_digest: | 186 | out_unregister_digest: |
diff --git a/crypto/ctr.c b/crypto/ctr.c index 2d7425f0e7b8..4ca7222cfeb6 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c | |||
@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) | |||
185 | alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, | 185 | alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, |
186 | CRYPTO_ALG_TYPE_MASK); | 186 | CRYPTO_ALG_TYPE_MASK); |
187 | if (IS_ERR(alg)) | 187 | if (IS_ERR(alg)) |
188 | return ERR_PTR(PTR_ERR(alg)); | 188 | return ERR_CAST(alg); |
189 | 189 | ||
190 | /* Block size must be >= 4 bytes. */ | 190 | /* Block size must be >= 4 bytes. */ |
191 | err = -EINVAL; | 191 | err = -EINVAL; |
@@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) | |||
219 | inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; | 219 | inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; |
220 | inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; | 220 | inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; |
221 | 221 | ||
222 | inst->alg.cra_blkcipher.geniv = "chainiv"; | ||
223 | |||
222 | out: | 224 | out: |
223 | crypto_mod_put(alg); | 225 | crypto_mod_put(alg); |
224 | return inst; | 226 | return inst; |
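The geniv line added above only selects the default IV generator used when "ctr" is instantiated through the IV-generating interface; a caller that uses the raw blkcipher still supplies the initial counter block itself. A minimal sketch of that direct use follows; the algorithm name "ctr(aes)", the counter value and the example_* name are assumptions for illustration only.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Encrypt @len bytes of @buf in place with AES in counter mode. */
static int example_ctr_encrypt(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 ctrblk[16];
	int err;

	tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	/* Caller-chosen initial counter block (nonce || counter). */
	memset(ctrblk, 0, sizeof(ctrblk));
	ctrblk[15] = 1;
	crypto_blkcipher_set_iv(tfm, ctrblk, sizeof(ctrblk));

	sg_init_one(&sg, buf, len);
	desc.tfm = tfm;
	desc.flags = 0;
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}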
diff --git a/crypto/deflate.c b/crypto/deflate.c index 9128da44e953..463dc859aa05 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c | |||
@@ -1,14 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * Cryptographic API. | 2 | * Cryptographic API. |
3 | * | 3 | * |
4 | * Deflate algorithm (RFC 1951), implemented here primarily for use | 4 | * Deflate algorithm (RFC 1951), implemented here primarily for use |
5 | * by IPCOMP (RFC 3173 & RFC 2394). | 5 | * by IPCOMP (RFC 3173 & RFC 2394). |
6 | * | 6 | * |
7 | * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> | 7 | * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 10 | * under the terms of the GNU General Public License as published by the Free |
11 | * Software Foundation; either version 2 of the License, or (at your option) | 11 | * Software Foundation; either version 2 of the License, or (at your option) |
12 | * any later version. | 12 | * any later version. |
13 | * | 13 | * |
14 | * FIXME: deflate transforms will require up to a total of about 436k of kernel | 14 | * FIXME: deflate transforms will require up to a total of about 436k of kernel |
@@ -49,7 +49,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx) | |||
49 | struct z_stream_s *stream = &ctx->comp_stream; | 49 | struct z_stream_s *stream = &ctx->comp_stream; |
50 | 50 | ||
51 | stream->workspace = vmalloc(zlib_deflate_workspacesize()); | 51 | stream->workspace = vmalloc(zlib_deflate_workspacesize()); |
52 | if (!stream->workspace ) { | 52 | if (!stream->workspace) { |
53 | ret = -ENOMEM; | 53 | ret = -ENOMEM; |
54 | goto out; | 54 | goto out; |
55 | } | 55 | } |
@@ -61,7 +61,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx) | |||
61 | ret = -EINVAL; | 61 | ret = -EINVAL; |
62 | goto out_free; | 62 | goto out_free; |
63 | } | 63 | } |
64 | out: | 64 | out: |
65 | return ret; | 65 | return ret; |
66 | out_free: | 66 | out_free: |
67 | vfree(stream->workspace); | 67 | vfree(stream->workspace); |
@@ -74,7 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx) | |||
74 | struct z_stream_s *stream = &ctx->decomp_stream; | 74 | struct z_stream_s *stream = &ctx->decomp_stream; |
75 | 75 | ||
76 | stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); | 76 | stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); |
77 | if (!stream->workspace ) { | 77 | if (!stream->workspace) { |
78 | ret = -ENOMEM; | 78 | ret = -ENOMEM; |
79 | goto out; | 79 | goto out; |
80 | } | 80 | } |
@@ -106,7 +106,7 @@ static int deflate_init(struct crypto_tfm *tfm) | |||
106 | { | 106 | { |
107 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); | 107 | struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); |
108 | int ret; | 108 | int ret; |
109 | 109 | ||
110 | ret = deflate_comp_init(ctx); | 110 | ret = deflate_comp_init(ctx); |
111 | if (ret) | 111 | if (ret) |
112 | goto out; | 112 | goto out; |
@@ -153,11 +153,11 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, | |||
153 | out: | 153 | out: |
154 | return ret; | 154 | return ret; |
155 | } | 155 | } |
156 | 156 | ||
157 | static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, | 157 | static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, |
158 | unsigned int slen, u8 *dst, unsigned int *dlen) | 158 | unsigned int slen, u8 *dst, unsigned int *dlen) |
159 | { | 159 | { |
160 | 160 | ||
161 | int ret = 0; | 161 | int ret = 0; |
162 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); | 162 | struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); |
163 | struct z_stream_s *stream = &dctx->decomp_stream; | 163 | struct z_stream_s *stream = &dctx->decomp_stream; |
@@ -182,7 +182,7 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, | |||
182 | if (ret == Z_OK && !stream->avail_in && stream->avail_out) { | 182 | if (ret == Z_OK && !stream->avail_in && stream->avail_out) { |
183 | u8 zerostuff = 0; | 183 | u8 zerostuff = 0; |
184 | stream->next_in = &zerostuff; | 184 | stream->next_in = &zerostuff; |
185 | stream->avail_in = 1; | 185 | stream->avail_in = 1; |
186 | ret = zlib_inflate(stream, Z_FINISH); | 186 | ret = zlib_inflate(stream, Z_FINISH); |
187 | } | 187 | } |
188 | if (ret != Z_STREAM_END) { | 188 | if (ret != Z_STREAM_END) { |
diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 5bd3ee345a64..873818d48e86 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c | |||
@@ -614,7 +614,7 @@ static const u32 S8[64] = { | |||
614 | #define T3(x) pt[2 * (x) + 2] | 614 | #define T3(x) pt[2 * (x) + 2] |
615 | #define T4(x) pt[2 * (x) + 3] | 615 | #define T4(x) pt[2 * (x) + 3] |
616 | 616 | ||
617 | #define PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) | 617 | #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) |
618 | 618 | ||
619 | /* | 619 | /* |
620 | * Encryption key expansion | 620 | * Encryption key expansion |
@@ -639,22 +639,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k) | |||
639 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; | 639 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; |
640 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; | 640 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; |
641 | 641 | ||
642 | pe[15 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; | 642 | pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; |
643 | pe[14 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 643 | pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
644 | pe[13 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 644 | pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
645 | pe[12 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 645 | pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
646 | pe[11 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 646 | pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
647 | pe[10 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 647 | pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
648 | pe[ 9 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 648 | pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
649 | pe[ 8 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; | 649 | pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; |
650 | pe[ 7 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 650 | pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
651 | pe[ 6 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 651 | pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
652 | pe[ 5 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 652 | pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
653 | pe[ 4 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 653 | pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
654 | pe[ 3 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 654 | pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
655 | pe[ 2 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 655 | pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
656 | pe[ 1 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; | 656 | pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; |
657 | pe[ 0 * 2 + 0] = PC2(b, c, d, a); | 657 | pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); |
658 | 658 | ||
659 | /* Check if first half is weak */ | 659 | /* Check if first half is weak */ |
660 | w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); | 660 | w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); |
@@ -670,22 +670,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k) | |||
670 | /* Check if second half is weak */ | 670 | /* Check if second half is weak */ |
671 | w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); | 671 | w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); |
672 | 672 | ||
673 | pe[15 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; | 673 | pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; |
674 | pe[14 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 674 | pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
675 | pe[13 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 675 | pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
676 | pe[12 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 676 | pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
677 | pe[11 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 677 | pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
678 | pe[10 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 678 | pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
679 | pe[ 9 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 679 | pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
680 | pe[ 8 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; | 680 | pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; |
681 | pe[ 7 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 681 | pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
682 | pe[ 6 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 682 | pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
683 | pe[ 5 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 683 | pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
684 | pe[ 4 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 684 | pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
685 | pe[ 3 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 685 | pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
686 | pe[ 2 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 686 | pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
687 | pe[ 1 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; | 687 | pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; |
688 | pe[ 0 * 2 + 1] = PC2(b, c, d, a); | 688 | pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); |
689 | 689 | ||
690 | /* Fixup: 2413 5768 -> 1357 2468 */ | 690 | /* Fixup: 2413 5768 -> 1357 2468 */ |
691 | for (d = 0; d < 16; ++d) { | 691 | for (d = 0; d < 16; ++d) { |
@@ -722,22 +722,22 @@ static void dkey(u32 *pe, const u8 *k) | |||
722 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; | 722 | b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; |
723 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; | 723 | a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; |
724 | 724 | ||
725 | pe[ 0 * 2] = PC2(a, b, c, d); d = rs[d]; | 725 | pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; |
726 | pe[ 1 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 726 | pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
727 | pe[ 2 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 727 | pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
728 | pe[ 3 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 728 | pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
729 | pe[ 4 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 729 | pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
730 | pe[ 5 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 730 | pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
731 | pe[ 6 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 731 | pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
732 | pe[ 7 * 2] = PC2(d, a, b, c); c = rs[c]; | 732 | pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; |
733 | pe[ 8 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 733 | pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
734 | pe[ 9 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 734 | pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
735 | pe[10 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 735 | pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
736 | pe[11 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 736 | pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
737 | pe[12 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 737 | pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
738 | pe[13 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 738 | pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
739 | pe[14 * 2] = PC2(c, d, a, b); b = rs[b]; | 739 | pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; |
740 | pe[15 * 2] = PC2(b, c, d, a); | 740 | pe[15 * 2] = DES_PC2(b, c, d, a); |
741 | 741 | ||
742 | /* Skip to next table set */ | 742 | /* Skip to next table set */ |
743 | pt += 512; | 743 | pt += 512; |
@@ -747,22 +747,22 @@ static void dkey(u32 *pe, const u8 *k) | |||
747 | b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; | 747 | b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; |
748 | a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; | 748 | a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; |
749 | 749 | ||
750 | pe[ 0 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; | 750 | pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; |
751 | pe[ 1 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 751 | pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
752 | pe[ 2 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 752 | pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
753 | pe[ 3 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 753 | pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
754 | pe[ 4 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 754 | pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
755 | pe[ 5 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; | 755 | pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; |
756 | pe[ 6 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; | 756 | pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; |
757 | pe[ 7 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; | 757 | pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; |
758 | pe[ 8 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 758 | pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
759 | pe[ 9 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 759 | pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
760 | pe[10 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 760 | pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
761 | pe[11 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 761 | pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
762 | pe[12 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; | 762 | pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; |
763 | pe[13 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; | 763 | pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; |
764 | pe[14 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; | 764 | pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; |
765 | pe[15 * 2 + 1] = PC2(b, c, d, a); | 765 | pe[15 * 2 + 1] = DES_PC2(b, c, d, a); |
766 | 766 | ||
767 | /* Fixup: 2413 5768 -> 1357 2468 */ | 767 | /* Fixup: 2413 5768 -> 1357 2468 */ |
768 | for (d = 0; d < 16; ++d) { | 768 | for (d = 0; d < 16; ++d) { |
@@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
869 | 869 | ||
870 | if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || | 870 | if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || |
871 | !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && | 871 | !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && |
872 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) | 872 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { |
873 | { | ||
874 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | 873 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; |
875 | return -EINVAL; | 874 | return -EINVAL; |
876 | } | 875 | } |
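The condition reformatted in the hunk above is the 3DES degenerate-key check: a key whose first two or last two DES components are identical collapses to single DES and is rejected when the caller asks for weak-key checking. A minimal sketch of how a caller would trip it; the key bytes and the example_* name are assumptions for illustration only.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_reject_degenerate_3des(void)
{
	struct crypto_cipher *tfm;
	u8 key[24];
	int err;

	tfm = crypto_alloc_cipher("des3_ede", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* K1 == K2: effectively single DES, so a degenerate 3DES key. */
	memcpy(key, "\x01\x23\x45\x67\x89\xab\xcd\xef", 8);
	memcpy(key + 8, key, 8);
	memcpy(key + 16, "\xfe\xdc\xba\x98\x76\x54\x32\x10", 8);

	crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	/* Expect -EINVAL here, with CRYPTO_TFM_RES_WEAK_KEY set. */

	crypto_free_cipher(tfm);
	return err;
}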
diff --git a/crypto/digest.c b/crypto/digest.c deleted file mode 100644 index 5d3f1303da98..000000000000 --- a/crypto/digest.c +++ /dev/null | |||
@@ -1,240 +0,0 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Digest operations. | ||
5 | * | ||
6 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <crypto/internal/hash.h> | ||
16 | #include <crypto/scatterwalk.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/hardirq.h> | ||
20 | #include <linux/highmem.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/scatterlist.h> | ||
24 | |||
25 | #include "internal.h" | ||
26 | |||
27 | static int init(struct hash_desc *desc) | ||
28 | { | ||
29 | struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); | ||
30 | |||
31 | tfm->__crt_alg->cra_digest.dia_init(tfm); | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | static int update2(struct hash_desc *desc, | ||
36 | struct scatterlist *sg, unsigned int nbytes) | ||
37 | { | ||
38 | struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); | ||
39 | unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); | ||
40 | |||
41 | if (!nbytes) | ||
42 | return 0; | ||
43 | |||
44 | for (;;) { | ||
45 | struct page *pg = sg_page(sg); | ||
46 | unsigned int offset = sg->offset; | ||
47 | unsigned int l = sg->length; | ||
48 | |||
49 | if (unlikely(l > nbytes)) | ||
50 | l = nbytes; | ||
51 | nbytes -= l; | ||
52 | |||
53 | do { | ||
54 | unsigned int bytes_from_page = min(l, ((unsigned int) | ||
55 | (PAGE_SIZE)) - | ||
56 | offset); | ||
57 | char *src = crypto_kmap(pg, 0); | ||
58 | char *p = src + offset; | ||
59 | |||
60 | if (unlikely(offset & alignmask)) { | ||
61 | unsigned int bytes = | ||
62 | alignmask + 1 - (offset & alignmask); | ||
63 | bytes = min(bytes, bytes_from_page); | ||
64 | tfm->__crt_alg->cra_digest.dia_update(tfm, p, | ||
65 | bytes); | ||
66 | p += bytes; | ||
67 | bytes_from_page -= bytes; | ||
68 | l -= bytes; | ||
69 | } | ||
70 | tfm->__crt_alg->cra_digest.dia_update(tfm, p, | ||
71 | bytes_from_page); | ||
72 | crypto_kunmap(src, 0); | ||
73 | crypto_yield(desc->flags); | ||
74 | offset = 0; | ||
75 | pg++; | ||
76 | l -= bytes_from_page; | ||
77 | } while (l > 0); | ||
78 | |||
79 | if (!nbytes) | ||
80 | break; | ||
81 | sg = scatterwalk_sg_next(sg); | ||
82 | } | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int update(struct hash_desc *desc, | ||
88 | struct scatterlist *sg, unsigned int nbytes) | ||
89 | { | ||
90 | if (WARN_ON_ONCE(in_irq())) | ||
91 | return -EDEADLK; | ||
92 | return update2(desc, sg, nbytes); | ||
93 | } | ||
94 | |||
95 | static int final(struct hash_desc *desc, u8 *out) | ||
96 | { | ||
97 | struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); | ||
98 | unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); | ||
99 | struct digest_alg *digest = &tfm->__crt_alg->cra_digest; | ||
100 | |||
101 | if (unlikely((unsigned long)out & alignmask)) { | ||
102 | unsigned long align = alignmask + 1; | ||
103 | unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); | ||
104 | u8 *dst = (u8 *)ALIGN(addr, align) + | ||
105 | ALIGN(tfm->__crt_alg->cra_ctxsize, align); | ||
106 | |||
107 | digest->dia_final(tfm, dst); | ||
108 | memcpy(out, dst, digest->dia_digestsize); | ||
109 | } else | ||
110 | digest->dia_final(tfm, out); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen) | ||
116 | { | ||
117 | crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK); | ||
118 | return -ENOSYS; | ||
119 | } | ||
120 | |||
121 | static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen) | ||
122 | { | ||
123 | struct crypto_tfm *tfm = crypto_hash_tfm(hash); | ||
124 | |||
125 | crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK); | ||
126 | return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen); | ||
127 | } | ||
128 | |||
129 | static int digest(struct hash_desc *desc, | ||
130 | struct scatterlist *sg, unsigned int nbytes, u8 *out) | ||
131 | { | ||
132 | if (WARN_ON_ONCE(in_irq())) | ||
133 | return -EDEADLK; | ||
134 | |||
135 | init(desc); | ||
136 | update2(desc, sg, nbytes); | ||
137 | return final(desc, out); | ||
138 | } | ||
139 | |||
140 | int crypto_init_digest_ops(struct crypto_tfm *tfm) | ||
141 | { | ||
142 | struct hash_tfm *ops = &tfm->crt_hash; | ||
143 | struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; | ||
144 | |||
145 | if (dalg->dia_digestsize > PAGE_SIZE / 8) | ||
146 | return -EINVAL; | ||
147 | |||
148 | ops->init = init; | ||
149 | ops->update = update; | ||
150 | ops->final = final; | ||
151 | ops->digest = digest; | ||
152 | ops->setkey = dalg->dia_setkey ? setkey : nosetkey; | ||
153 | ops->digestsize = dalg->dia_digestsize; | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | void crypto_exit_digest_ops(struct crypto_tfm *tfm) | ||
159 | { | ||
160 | } | ||
161 | |||
162 | static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key, | ||
163 | unsigned int keylen) | ||
164 | { | ||
165 | crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK); | ||
166 | return -ENOSYS; | ||
167 | } | ||
168 | |||
169 | static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key, | ||
170 | unsigned int keylen) | ||
171 | { | ||
172 | struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async); | ||
173 | struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; | ||
174 | |||
175 | crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK); | ||
176 | return dalg->dia_setkey(tfm, key, keylen); | ||
177 | } | ||
178 | |||
179 | static int digest_async_init(struct ahash_request *req) | ||
180 | { | ||
181 | struct crypto_tfm *tfm = req->base.tfm; | ||
182 | struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; | ||
183 | |||
184 | dalg->dia_init(tfm); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static int digest_async_update(struct ahash_request *req) | ||
189 | { | ||
190 | struct crypto_tfm *tfm = req->base.tfm; | ||
191 | struct hash_desc desc = { | ||
192 | .tfm = __crypto_hash_cast(tfm), | ||
193 | .flags = req->base.flags, | ||
194 | }; | ||
195 | |||
196 | update(&desc, req->src, req->nbytes); | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static int digest_async_final(struct ahash_request *req) | ||
201 | { | ||
202 | struct crypto_tfm *tfm = req->base.tfm; | ||
203 | struct hash_desc desc = { | ||
204 | .tfm = __crypto_hash_cast(tfm), | ||
205 | .flags = req->base.flags, | ||
206 | }; | ||
207 | |||
208 | final(&desc, req->result); | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int digest_async_digest(struct ahash_request *req) | ||
213 | { | ||
214 | struct crypto_tfm *tfm = req->base.tfm; | ||
215 | struct hash_desc desc = { | ||
216 | .tfm = __crypto_hash_cast(tfm), | ||
217 | .flags = req->base.flags, | ||
218 | }; | ||
219 | |||
220 | return digest(&desc, req->src, req->nbytes, req->result); | ||
221 | } | ||
222 | |||
223 | int crypto_init_digest_ops_async(struct crypto_tfm *tfm) | ||
224 | { | ||
225 | struct ahash_tfm *crt = &tfm->crt_ahash; | ||
226 | struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; | ||
227 | |||
228 | if (dalg->dia_digestsize > PAGE_SIZE / 8) | ||
229 | return -EINVAL; | ||
230 | |||
231 | crt->init = digest_async_init; | ||
232 | crt->update = digest_async_update; | ||
233 | crt->final = digest_async_final; | ||
234 | crt->digest = digest_async_digest; | ||
235 | crt->setkey = dalg->dia_setkey ? digest_async_setkey : | ||
236 | digest_async_nosetkey; | ||
237 | crt->digestsize = dalg->dia_digestsize; | ||
238 | |||
239 | return 0; | ||
240 | } | ||
diff --git a/crypto/ecb.c b/crypto/ecb.c index a46838e98a71..935cfef4aa84 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c | |||
@@ -55,7 +55,7 @@ static int crypto_ecb_crypt(struct blkcipher_desc *desc, | |||
55 | 55 | ||
56 | do { | 56 | do { |
57 | fn(crypto_cipher_tfm(tfm), wdst, wsrc); | 57 | fn(crypto_cipher_tfm(tfm), wdst, wsrc); |
58 | 58 | ||
59 | wsrc += bsize; | 59 | wsrc += bsize; |
60 | wdst += bsize; | 60 | wdst += bsize; |
61 | } while ((nbytes -= bsize) >= bsize); | 61 | } while ((nbytes -= bsize) >= bsize); |
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index b82d61f4e26c..c33107e340b6 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c | |||
@@ -60,13 +60,13 @@ do { \ | |||
60 | u32 t = lo & ((1 << n) - 1); \ | 60 | u32 t = lo & ((1 << n) - 1); \ |
61 | lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \ | 61 | lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \ |
62 | hi = (hi >> n) | (t << (24-n)); \ | 62 | hi = (hi >> n) | (t << (24-n)); \ |
63 | } while(0) | 63 | } while (0) |
64 | 64 | ||
65 | /* Rotate right one 64 bit number as a 56 bit number */ | 65 | /* Rotate right one 64 bit number as a 56 bit number */ |
66 | #define ror56_64(k, n) \ | 66 | #define ror56_64(k, n) \ |
67 | do { \ | 67 | do { \ |
68 | k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ | 68 | k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ |
69 | } while(0) | 69 | } while (0) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Sboxes for Feistel network derived from | 72 | * Sboxes for Feistel network derived from |
@@ -228,7 +228,7 @@ do { \ | |||
228 | union lc4 { __be32 l; u8 c[4]; } u; \ | 228 | union lc4 { __be32 l; u8 c[4]; } u; \ |
229 | u.l = sched ^ R; \ | 229 | u.l = sched ^ R; \ |
230 | L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \ | 230 | L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \ |
231 | } while(0) | 231 | } while (0) |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * encryptor | 234 | * encryptor |
diff --git a/crypto/gcm.c b/crypto/gcm.c index e70afd0c73dd..2f5fbba6576c 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -11,7 +11,10 @@ | |||
11 | #include <crypto/gf128mul.h> | 11 | #include <crypto/gf128mul.h> |
12 | #include <crypto/internal/aead.h> | 12 | #include <crypto/internal/aead.h> |
13 | #include <crypto/internal/skcipher.h> | 13 | #include <crypto/internal/skcipher.h> |
14 | #include <crypto/internal/hash.h> | ||
14 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
16 | #include <crypto/hash.h> | ||
17 | #include "internal.h" | ||
15 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
16 | #include <linux/err.h> | 19 | #include <linux/err.h> |
17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
@@ -21,11 +24,12 @@ | |||
21 | 24 | ||
22 | struct gcm_instance_ctx { | 25 | struct gcm_instance_ctx { |
23 | struct crypto_skcipher_spawn ctr; | 26 | struct crypto_skcipher_spawn ctr; |
27 | struct crypto_ahash_spawn ghash; | ||
24 | }; | 28 | }; |
25 | 29 | ||
26 | struct crypto_gcm_ctx { | 30 | struct crypto_gcm_ctx { |
27 | struct crypto_ablkcipher *ctr; | 31 | struct crypto_ablkcipher *ctr; |
28 | struct gf128mul_4k *gf128; | 32 | struct crypto_ahash *ghash; |
29 | }; | 33 | }; |
30 | 34 | ||
31 | struct crypto_rfc4106_ctx { | 35 | struct crypto_rfc4106_ctx { |
@@ -33,11 +37,23 @@ struct crypto_rfc4106_ctx { | |||
33 | u8 nonce[4]; | 37 | u8 nonce[4]; |
34 | }; | 38 | }; |
35 | 39 | ||
40 | struct crypto_rfc4543_ctx { | ||
41 | struct crypto_aead *child; | ||
42 | u8 nonce[4]; | ||
43 | }; | ||
44 | |||
45 | struct crypto_rfc4543_req_ctx { | ||
46 | u8 auth_tag[16]; | ||
47 | struct scatterlist cipher[1]; | ||
48 | struct scatterlist payload[2]; | ||
49 | struct scatterlist assoc[2]; | ||
50 | struct aead_request subreq; | ||
51 | }; | ||
52 | |||
36 | struct crypto_gcm_ghash_ctx { | 53 | struct crypto_gcm_ghash_ctx { |
37 | u32 bytes; | 54 | unsigned int cryptlen; |
38 | u32 flags; | 55 | struct scatterlist *src; |
39 | struct gf128mul_4k *gf128; | 56 | void (*complete)(struct aead_request *req, int err); |
40 | u8 buffer[16]; | ||
41 | }; | 57 | }; |
42 | 58 | ||
43 | struct crypto_gcm_req_priv_ctx { | 59 | struct crypto_gcm_req_priv_ctx { |
@@ -45,8 +61,11 @@ struct crypto_gcm_req_priv_ctx { | |||
45 | u8 iauth_tag[16]; | 61 | u8 iauth_tag[16]; |
46 | struct scatterlist src[2]; | 62 | struct scatterlist src[2]; |
47 | struct scatterlist dst[2]; | 63 | struct scatterlist dst[2]; |
48 | struct crypto_gcm_ghash_ctx ghash; | 64 | struct crypto_gcm_ghash_ctx ghash_ctx; |
49 | struct ablkcipher_request abreq; | 65 | union { |
66 | struct ahash_request ahreq; | ||
67 | struct ablkcipher_request abreq; | ||
68 | } u; | ||
50 | }; | 69 | }; |
51 | 70 | ||
52 | struct crypto_gcm_setkey_result { | 71 | struct crypto_gcm_setkey_result { |
@@ -54,6 +73,8 @@ struct crypto_gcm_setkey_result { | |||
54 | struct completion completion; | 73 | struct completion completion; |
55 | }; | 74 | }; |
56 | 75 | ||
76 | static void *gcm_zeroes; | ||
77 | |||
57 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | 78 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( |
58 | struct aead_request *req) | 79 | struct aead_request *req) |
59 | { | 80 | { |
@@ -62,113 +83,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | |||
62 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | 83 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); |
63 | } | 84 | } |
64 | 85 | ||
65 | static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, | ||
66 | struct gf128mul_4k *gf128) | ||
67 | { | ||
68 | ctx->bytes = 0; | ||
69 | ctx->flags = flags; | ||
70 | ctx->gf128 = gf128; | ||
71 | memset(ctx->buffer, 0, 16); | ||
72 | } | ||
73 | |||
74 | static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx, | ||
75 | const u8 *src, unsigned int srclen) | ||
76 | { | ||
77 | u8 *dst = ctx->buffer; | ||
78 | |||
79 | if (ctx->bytes) { | ||
80 | int n = min(srclen, ctx->bytes); | ||
81 | u8 *pos = dst + (16 - ctx->bytes); | ||
82 | |||
83 | ctx->bytes -= n; | ||
84 | srclen -= n; | ||
85 | |||
86 | while (n--) | ||
87 | *pos++ ^= *src++; | ||
88 | |||
89 | if (!ctx->bytes) | ||
90 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
91 | } | ||
92 | |||
93 | while (srclen >= 16) { | ||
94 | crypto_xor(dst, src, 16); | ||
95 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
96 | src += 16; | ||
97 | srclen -= 16; | ||
98 | } | ||
99 | |||
100 | if (srclen) { | ||
101 | ctx->bytes = 16 - srclen; | ||
102 | while (srclen--) | ||
103 | *dst++ ^= *src++; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, | ||
108 | struct scatterlist *sg, int len) | ||
109 | { | ||
110 | struct scatter_walk walk; | ||
111 | u8 *src; | ||
112 | int n; | ||
113 | |||
114 | if (!len) | ||
115 | return; | ||
116 | |||
117 | scatterwalk_start(&walk, sg); | ||
118 | |||
119 | while (len) { | ||
120 | n = scatterwalk_clamp(&walk, len); | ||
121 | |||
122 | if (!n) { | ||
123 | scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); | ||
124 | n = scatterwalk_clamp(&walk, len); | ||
125 | } | ||
126 | |||
127 | src = scatterwalk_map(&walk, 0); | ||
128 | |||
129 | crypto_gcm_ghash_update(ctx, src, n); | ||
130 | len -= n; | ||
131 | |||
132 | scatterwalk_unmap(src, 0); | ||
133 | scatterwalk_advance(&walk, n); | ||
134 | scatterwalk_done(&walk, 0, len); | ||
135 | if (len) | ||
136 | crypto_yield(ctx->flags); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx) | ||
141 | { | ||
142 | u8 *dst = ctx->buffer; | ||
143 | |||
144 | if (ctx->bytes) { | ||
145 | u8 *tmp = dst + (16 - ctx->bytes); | ||
146 | |||
147 | while (ctx->bytes--) | ||
148 | *tmp++ ^= 0; | ||
149 | |||
150 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
151 | } | ||
152 | |||
153 | ctx->bytes = 0; | ||
154 | } | ||
155 | |||
156 | static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, | ||
157 | unsigned int authlen, | ||
158 | unsigned int cryptlen, u8 *dst) | ||
159 | { | ||
160 | u8 *buf = ctx->buffer; | ||
161 | u128 lengths; | ||
162 | |||
163 | lengths.a = cpu_to_be64(authlen * 8); | ||
164 | lengths.b = cpu_to_be64(cryptlen * 8); | ||
165 | |||
166 | crypto_gcm_ghash_flush(ctx); | ||
167 | crypto_xor(buf, (u8 *)&lengths, 16); | ||
168 | gf128mul_4k_lle((be128 *)buf, ctx->gf128); | ||
169 | crypto_xor(dst, buf, 16); | ||
170 | } | ||
171 | |||
172 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) | 86 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) |
173 | { | 87 | { |
174 | struct crypto_gcm_setkey_result *result = req->data; | 88 | struct crypto_gcm_setkey_result *result = req->data; |
@@ -184,6 +98,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
184 | unsigned int keylen) | 98 | unsigned int keylen) |
185 | { | 99 | { |
186 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | 100 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); |
101 | struct crypto_ahash *ghash = ctx->ghash; | ||
187 | struct crypto_ablkcipher *ctr = ctx->ctr; | 102 | struct crypto_ablkcipher *ctr = ctx->ctr; |
188 | struct { | 103 | struct { |
189 | be128 hash; | 104 | be128 hash; |
@@ -233,13 +148,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
233 | if (err) | 148 | if (err) |
234 | goto out; | 149 | goto out; |
235 | 150 | ||
236 | if (ctx->gf128 != NULL) | 151 | crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK); |
237 | gf128mul_free_4k(ctx->gf128); | 152 | crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & |
238 | 153 | CRYPTO_TFM_REQ_MASK); | |
239 | ctx->gf128 = gf128mul_init_4k_lle(&data->hash); | 154 | err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); |
240 | 155 | crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) & | |
241 | if (ctx->gf128 == NULL) | 156 | CRYPTO_TFM_RES_MASK); |
242 | err = -ENOMEM; | ||
243 | 157 | ||
244 | out: | 158 | out: |
245 | kfree(data); | 159 | kfree(data); |
@@ -272,8 +186,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | |||
272 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 186 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
273 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | 187 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); |
274 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 188 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
275 | u32 flags = req->base.tfm->crt_flags; | ||
276 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | ||
277 | struct scatterlist *dst; | 189 | struct scatterlist *dst; |
278 | __be32 counter = cpu_to_be32(1); | 190 | __be32 counter = cpu_to_be32(1); |
279 | 191 | ||
@@ -296,35 +208,329 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | |||
296 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, | 208 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, |
297 | cryptlen + sizeof(pctx->auth_tag), | 209 | cryptlen + sizeof(pctx->auth_tag), |
298 | req->iv); | 210 | req->iv); |
211 | } | ||
299 | 212 | ||
300 | crypto_gcm_ghash_init(ghash, flags, ctx->gf128); | 213 | static inline unsigned int gcm_remain(unsigned int len) |
214 | { | ||
215 | len &= 0xfU; | ||
216 | return len ? 16 - len : 0; | ||
217 | } | ||
218 | |||
219 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); | ||
220 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err); | ||
221 | |||
222 | static int gcm_hash_update(struct aead_request *req, | ||
223 | struct crypto_gcm_req_priv_ctx *pctx, | ||
224 | crypto_completion_t complete, | ||
225 | struct scatterlist *src, | ||
226 | unsigned int len) | ||
227 | { | ||
228 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
301 | 229 | ||
302 | crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); | 230 | ahash_request_set_callback(ahreq, aead_request_flags(req), |
303 | crypto_gcm_ghash_flush(ghash); | 231 | complete, req); |
232 | ahash_request_set_crypt(ahreq, src, NULL, len); | ||
233 | |||
234 | return crypto_ahash_update(ahreq); | ||
304 | } | 235 | } |
305 | 236 | ||
306 | static int crypto_gcm_hash(struct aead_request *req) | 237 | static int gcm_hash_remain(struct aead_request *req, |
238 | struct crypto_gcm_req_priv_ctx *pctx, | ||
239 | unsigned int remain, | ||
240 | crypto_completion_t complete) | ||
241 | { | ||
242 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
243 | |||
244 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
245 | complete, req); | ||
246 | sg_init_one(pctx->src, gcm_zeroes, remain); | ||
247 | ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); | ||
248 | |||
249 | return crypto_ahash_update(ahreq); | ||
250 | } | ||
251 | |||
252 | static int gcm_hash_len(struct aead_request *req, | ||
253 | struct crypto_gcm_req_priv_ctx *pctx) | ||
254 | { | ||
255 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
256 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
257 | u128 lengths; | ||
258 | |||
259 | lengths.a = cpu_to_be64(req->assoclen * 8); | ||
260 | lengths.b = cpu_to_be64(gctx->cryptlen * 8); | ||
261 | memcpy(pctx->iauth_tag, &lengths, 16); | ||
262 | sg_init_one(pctx->src, pctx->iauth_tag, 16); | ||
263 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
264 | gcm_hash_len_done, req); | ||
265 | ahash_request_set_crypt(ahreq, pctx->src, | ||
266 | NULL, sizeof(lengths)); | ||
267 | |||
268 | return crypto_ahash_update(ahreq); | ||
269 | } | ||
270 | |||
271 | static int gcm_hash_final(struct aead_request *req, | ||
272 | struct crypto_gcm_req_priv_ctx *pctx) | ||
273 | { | ||
274 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
275 | |||
276 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
277 | gcm_hash_final_done, req); | ||
278 | ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0); | ||
279 | |||
280 | return crypto_ahash_final(ahreq); | ||
281 | } | ||
282 | |||
283 | static void __gcm_hash_final_done(struct aead_request *req, int err) | ||
307 | { | 284 | { |
308 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
309 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 285 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
310 | u8 *auth_tag = pctx->auth_tag; | 286 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
311 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | 287 | |
288 | if (!err) | ||
289 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
290 | |||
291 | gctx->complete(req, err); | ||
292 | } | ||
312 | 293 | ||
313 | crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); | 294 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err) |
314 | crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, | 295 | { |
315 | auth_tag); | 296 | struct aead_request *req = areq->data; |
297 | |||
298 | __gcm_hash_final_done(req, err); | ||
299 | } | ||
300 | |||
301 | static void __gcm_hash_len_done(struct aead_request *req, int err) | ||
302 | { | ||
303 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
304 | |||
305 | if (!err) { | ||
306 | err = gcm_hash_final(req, pctx); | ||
307 | if (err == -EINPROGRESS || err == -EBUSY) | ||
308 | return; | ||
309 | } | ||
310 | |||
311 | __gcm_hash_final_done(req, err); | ||
312 | } | ||
313 | |||
314 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err) | ||
315 | { | ||
316 | struct aead_request *req = areq->data; | ||
317 | |||
318 | __gcm_hash_len_done(req, err); | ||
319 | } | ||
320 | |||
321 | static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err) | ||
322 | { | ||
323 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
324 | |||
325 | if (!err) { | ||
326 | err = gcm_hash_len(req, pctx); | ||
327 | if (err == -EINPROGRESS || err == -EBUSY) | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | __gcm_hash_len_done(req, err); | ||
332 | } | ||
333 | |||
334 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, | ||
335 | int err) | ||
336 | { | ||
337 | struct aead_request *req = areq->data; | ||
338 | |||
339 | __gcm_hash_crypt_remain_done(req, err); | ||
340 | } | ||
341 | |||
342 | static void __gcm_hash_crypt_done(struct aead_request *req, int err) | ||
343 | { | ||
344 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
345 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
346 | unsigned int remain; | ||
347 | |||
348 | if (!err) { | ||
349 | remain = gcm_remain(gctx->cryptlen); | ||
350 | BUG_ON(!remain); | ||
351 | err = gcm_hash_remain(req, pctx, remain, | ||
352 | gcm_hash_crypt_remain_done); | ||
353 | if (err == -EINPROGRESS || err == -EBUSY) | ||
354 | return; | ||
355 | } | ||
356 | |||
357 | __gcm_hash_crypt_remain_done(req, err); | ||
358 | } | ||
359 | |||
360 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err) | ||
361 | { | ||
362 | struct aead_request *req = areq->data; | ||
363 | |||
364 | __gcm_hash_crypt_done(req, err); | ||
365 | } | ||
366 | |||
367 | static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err) | ||
368 | { | ||
369 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
370 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
371 | crypto_completion_t complete; | ||
372 | unsigned int remain = 0; | ||
373 | |||
374 | if (!err && gctx->cryptlen) { | ||
375 | remain = gcm_remain(gctx->cryptlen); | ||
376 | complete = remain ? gcm_hash_crypt_done : | ||
377 | gcm_hash_crypt_remain_done; | ||
378 | err = gcm_hash_update(req, pctx, complete, | ||
379 | gctx->src, gctx->cryptlen); | ||
380 | if (err == -EINPROGRESS || err == -EBUSY) | ||
381 | return; | ||
382 | } | ||
383 | |||
384 | if (remain) | ||
385 | __gcm_hash_crypt_done(req, err); | ||
386 | else | ||
387 | __gcm_hash_crypt_remain_done(req, err); | ||
388 | } | ||
389 | |||
390 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, | ||
391 | int err) | ||
392 | { | ||
393 | struct aead_request *req = areq->data; | ||
394 | |||
395 | __gcm_hash_assoc_remain_done(req, err); | ||
396 | } | ||
397 | |||
398 | static void __gcm_hash_assoc_done(struct aead_request *req, int err) | ||
399 | { | ||
400 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
401 | unsigned int remain; | ||
402 | |||
403 | if (!err) { | ||
404 | remain = gcm_remain(req->assoclen); | ||
405 | BUG_ON(!remain); | ||
406 | err = gcm_hash_remain(req, pctx, remain, | ||
407 | gcm_hash_assoc_remain_done); | ||
408 | if (err == -EINPROGRESS || err == -EBUSY) | ||
409 | return; | ||
410 | } | ||
411 | |||
412 | __gcm_hash_assoc_remain_done(req, err); | ||
413 | } | ||
414 | |||
415 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err) | ||
416 | { | ||
417 | struct aead_request *req = areq->data; | ||
418 | |||
419 | __gcm_hash_assoc_done(req, err); | ||
420 | } | ||
421 | |||
422 | static void __gcm_hash_init_done(struct aead_request *req, int err) | ||
423 | { | ||
424 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
425 | crypto_completion_t complete; | ||
426 | unsigned int remain = 0; | ||
427 | |||
428 | if (!err && req->assoclen) { | ||
429 | remain = gcm_remain(req->assoclen); | ||
430 | complete = remain ? gcm_hash_assoc_done : | ||
431 | gcm_hash_assoc_remain_done; | ||
432 | err = gcm_hash_update(req, pctx, complete, | ||
433 | req->assoc, req->assoclen); | ||
434 | if (err == -EINPROGRESS || err == -EBUSY) | ||
435 | return; | ||
436 | } | ||
437 | |||
438 | if (remain) | ||
439 | __gcm_hash_assoc_done(req, err); | ||
440 | else | ||
441 | __gcm_hash_assoc_remain_done(req, err); | ||
442 | } | ||
443 | |||
444 | static void gcm_hash_init_done(struct crypto_async_request *areq, int err) | ||
445 | { | ||
446 | struct aead_request *req = areq->data; | ||
447 | |||
448 | __gcm_hash_init_done(req, err); | ||
449 | } | ||
450 | |||
451 | static int gcm_hash(struct aead_request *req, | ||
452 | struct crypto_gcm_req_priv_ctx *pctx) | ||
453 | { | ||
454 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
455 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
456 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
457 | unsigned int remain; | ||
458 | crypto_completion_t complete; | ||
459 | int err; | ||
460 | |||
461 | ahash_request_set_tfm(ahreq, ctx->ghash); | ||
462 | |||
463 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
464 | gcm_hash_init_done, req); | ||
465 | err = crypto_ahash_init(ahreq); | ||
466 | if (err) | ||
467 | return err; | ||
468 | remain = gcm_remain(req->assoclen); | ||
469 | complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; | ||
470 | err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen); | ||
471 | if (err) | ||
472 | return err; | ||
473 | if (remain) { | ||
474 | err = gcm_hash_remain(req, pctx, remain, | ||
475 | gcm_hash_assoc_remain_done); | ||
476 | if (err) | ||
477 | return err; | ||
478 | } | ||
479 | remain = gcm_remain(gctx->cryptlen); | ||
480 | complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done; | ||
481 | err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen); | ||
482 | if (err) | ||
483 | return err; | ||
484 | if (remain) { | ||
485 | err = gcm_hash_remain(req, pctx, remain, | ||
486 | gcm_hash_crypt_remain_done); | ||
487 | if (err) | ||
488 | return err; | ||
489 | } | ||
490 | err = gcm_hash_len(req, pctx); | ||
491 | if (err) | ||
492 | return err; | ||
493 | err = gcm_hash_final(req, pctx); | ||
494 | if (err) | ||
495 | return err; | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static void gcm_enc_copy_hash(struct aead_request *req, | ||
501 | struct crypto_gcm_req_priv_ctx *pctx) | ||
502 | { | ||
503 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
504 | u8 *auth_tag = pctx->auth_tag; | ||
316 | 505 | ||
317 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, | 506 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, |
318 | crypto_aead_authsize(aead), 1); | 507 | crypto_aead_authsize(aead), 1); |
319 | return 0; | ||
320 | } | 508 | } |
321 | 509 | ||
322 | static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) | 510 | static void gcm_enc_hash_done(struct aead_request *req, int err) |
323 | { | 511 | { |
324 | struct aead_request *req = areq->data; | 512 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
325 | 513 | ||
326 | if (!err) | 514 | if (!err) |
327 | err = crypto_gcm_hash(req); | 515 | gcm_enc_copy_hash(req, pctx); |
516 | |||
517 | aead_request_complete(req, err); | ||
518 | } | ||
519 | |||
520 | static void gcm_encrypt_done(struct crypto_async_request *areq, int err) | ||
521 | { | ||
522 | struct aead_request *req = areq->data; | ||
523 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
524 | |||
525 | if (!err) { | ||
526 | err = gcm_hash(req, pctx); | ||
527 | if (err == -EINPROGRESS || err == -EBUSY) | ||
528 | return; | ||
529 | else if (!err) { | ||
530 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
531 | gcm_enc_copy_hash(req, pctx); | ||
532 | } | ||
533 | } | ||
328 | 534 | ||
329 | aead_request_complete(req, err); | 535 | aead_request_complete(req, err); |
330 | } | 536 | } |
@@ -332,43 +538,73 @@ static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) | |||
332 | static int crypto_gcm_encrypt(struct aead_request *req) | 538 | static int crypto_gcm_encrypt(struct aead_request *req) |
333 | { | 539 | { |
334 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 540 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
335 | struct ablkcipher_request *abreq = &pctx->abreq; | 541 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
542 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
336 | int err; | 543 | int err; |
337 | 544 | ||
338 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); | 545 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); |
339 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 546 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
340 | crypto_gcm_encrypt_done, req); | 547 | gcm_encrypt_done, req); |
548 | |||
549 | gctx->src = req->dst; | ||
550 | gctx->cryptlen = req->cryptlen; | ||
551 | gctx->complete = gcm_enc_hash_done; | ||
341 | 552 | ||
342 | err = crypto_ablkcipher_encrypt(abreq); | 553 | err = crypto_ablkcipher_encrypt(abreq); |
343 | if (err) | 554 | if (err) |
344 | return err; | 555 | return err; |
345 | 556 | ||
346 | return crypto_gcm_hash(req); | 557 | err = gcm_hash(req, pctx); |
558 | if (err) | ||
559 | return err; | ||
560 | |||
561 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
562 | gcm_enc_copy_hash(req, pctx); | ||
563 | |||
564 | return 0; | ||
347 | } | 565 | } |
348 | 566 | ||
349 | static int crypto_gcm_verify(struct aead_request *req) | 567 | static int crypto_gcm_verify(struct aead_request *req, |
568 | struct crypto_gcm_req_priv_ctx *pctx) | ||
350 | { | 569 | { |
351 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 570 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
352 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
353 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | ||
354 | u8 *auth_tag = pctx->auth_tag; | 571 | u8 *auth_tag = pctx->auth_tag; |
355 | u8 *iauth_tag = pctx->iauth_tag; | 572 | u8 *iauth_tag = pctx->iauth_tag; |
356 | unsigned int authsize = crypto_aead_authsize(aead); | 573 | unsigned int authsize = crypto_aead_authsize(aead); |
357 | unsigned int cryptlen = req->cryptlen - authsize; | 574 | unsigned int cryptlen = req->cryptlen - authsize; |
358 | 575 | ||
359 | crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); | 576 | crypto_xor(auth_tag, iauth_tag, 16); |
360 | |||
361 | authsize = crypto_aead_authsize(aead); | ||
362 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); | 577 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); |
363 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; | 578 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; |
364 | } | 579 | } |
365 | 580 | ||
366 | static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) | 581 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) |
367 | { | 582 | { |
368 | struct aead_request *req = areq->data; | 583 | struct aead_request *req = areq->data; |
584 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
369 | 585 | ||
370 | if (!err) | 586 | if (!err) |
371 | err = crypto_gcm_verify(req); | 587 | err = crypto_gcm_verify(req, pctx); |
588 | |||
589 | aead_request_complete(req, err); | ||
590 | } | ||
591 | |||
592 | static void gcm_dec_hash_done(struct aead_request *req, int err) | ||
593 | { | ||
594 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
595 | struct ablkcipher_request *abreq = &pctx->u.abreq; | ||
596 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
597 | |||
598 | if (!err) { | ||
599 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
600 | gcm_decrypt_done, req); | ||
601 | crypto_gcm_init_crypt(abreq, req, gctx->cryptlen); | ||
602 | err = crypto_ablkcipher_decrypt(abreq); | ||
603 | if (err == -EINPROGRESS || err == -EBUSY) | ||
604 | return; | ||
605 | else if (!err) | ||
606 | err = crypto_gcm_verify(req, pctx); | ||
607 | } | ||
372 | 608 | ||
373 | aead_request_complete(req, err); | 609 | aead_request_complete(req, err); |
374 | } | 610 | } |
@@ -377,27 +613,32 @@ static int crypto_gcm_decrypt(struct aead_request *req) | |||
377 | { | 613 | { |
378 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 614 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
379 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 615 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
380 | struct ablkcipher_request *abreq = &pctx->abreq; | 616 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
381 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | 617 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
382 | unsigned int cryptlen = req->cryptlen; | ||
383 | unsigned int authsize = crypto_aead_authsize(aead); | 618 | unsigned int authsize = crypto_aead_authsize(aead); |
619 | unsigned int cryptlen = req->cryptlen; | ||
384 | int err; | 620 | int err; |
385 | 621 | ||
386 | if (cryptlen < authsize) | 622 | if (cryptlen < authsize) |
387 | return -EINVAL; | 623 | return -EINVAL; |
388 | cryptlen -= authsize; | 624 | cryptlen -= authsize; |
389 | 625 | ||
390 | crypto_gcm_init_crypt(abreq, req, cryptlen); | 626 | gctx->src = req->src; |
391 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 627 | gctx->cryptlen = cryptlen; |
392 | crypto_gcm_decrypt_done, req); | 628 | gctx->complete = gcm_dec_hash_done; |
393 | 629 | ||
394 | crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); | 630 | err = gcm_hash(req, pctx); |
631 | if (err) | ||
632 | return err; | ||
395 | 633 | ||
634 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
635 | gcm_decrypt_done, req); | ||
636 | crypto_gcm_init_crypt(abreq, req, cryptlen); | ||
396 | err = crypto_ablkcipher_decrypt(abreq); | 637 | err = crypto_ablkcipher_decrypt(abreq); |
397 | if (err) | 638 | if (err) |
398 | return err; | 639 | return err; |
399 | 640 | ||
400 | return crypto_gcm_verify(req); | 641 | return crypto_gcm_verify(req, pctx); |
401 | } | 642 | } |
402 | 643 | ||
403 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | 644 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) |
@@ -406,43 +647,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | |||
406 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); | 647 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); |
407 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 648 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
408 | struct crypto_ablkcipher *ctr; | 649 | struct crypto_ablkcipher *ctr; |
650 | struct crypto_ahash *ghash; | ||
409 | unsigned long align; | 651 | unsigned long align; |
410 | int err; | 652 | int err; |
411 | 653 | ||
654 | ghash = crypto_spawn_ahash(&ictx->ghash); | ||
655 | if (IS_ERR(ghash)) | ||
656 | return PTR_ERR(ghash); | ||
657 | |||
412 | ctr = crypto_spawn_skcipher(&ictx->ctr); | 658 | ctr = crypto_spawn_skcipher(&ictx->ctr); |
413 | err = PTR_ERR(ctr); | 659 | err = PTR_ERR(ctr); |
414 | if (IS_ERR(ctr)) | 660 | if (IS_ERR(ctr)) |
415 | return err; | 661 | goto err_free_hash; |
416 | 662 | ||
417 | ctx->ctr = ctr; | 663 | ctx->ctr = ctr; |
418 | ctx->gf128 = NULL; | 664 | ctx->ghash = ghash; |
419 | 665 | ||
420 | align = crypto_tfm_alg_alignmask(tfm); | 666 | align = crypto_tfm_alg_alignmask(tfm); |
421 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 667 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
422 | tfm->crt_aead.reqsize = align + | 668 | tfm->crt_aead.reqsize = align + |
423 | sizeof(struct crypto_gcm_req_priv_ctx) + | 669 | offsetof(struct crypto_gcm_req_priv_ctx, u) + |
424 | crypto_ablkcipher_reqsize(ctr); | 670 | max(sizeof(struct ablkcipher_request) + |
671 | crypto_ablkcipher_reqsize(ctr), | ||
672 | sizeof(struct ahash_request) + | ||
673 | crypto_ahash_reqsize(ghash)); | ||
425 | 674 | ||
426 | return 0; | 675 | return 0; |
676 | |||
677 | err_free_hash: | ||
678 | crypto_free_ahash(ghash); | ||
679 | return err; | ||
427 | } | 680 | } |
428 | 681 | ||
429 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) | 682 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) |
430 | { | 683 | { |
431 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 684 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
432 | 685 | ||
433 | if (ctx->gf128 != NULL) | 686 | crypto_free_ahash(ctx->ghash); |
434 | gf128mul_free_4k(ctx->gf128); | ||
435 | |||
436 | crypto_free_ablkcipher(ctx->ctr); | 687 | crypto_free_ablkcipher(ctx->ctr); |
437 | } | 688 | } |
438 | 689 | ||
439 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | 690 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, |
440 | const char *full_name, | 691 | const char *full_name, |
441 | const char *ctr_name) | 692 | const char *ctr_name, |
693 | const char *ghash_name) | ||
442 | { | 694 | { |
443 | struct crypto_attr_type *algt; | 695 | struct crypto_attr_type *algt; |
444 | struct crypto_instance *inst; | 696 | struct crypto_instance *inst; |
445 | struct crypto_alg *ctr; | 697 | struct crypto_alg *ctr; |
698 | struct crypto_alg *ghash_alg; | ||
699 | struct ahash_alg *ghash_ahash_alg; | ||
446 | struct gcm_instance_ctx *ctx; | 700 | struct gcm_instance_ctx *ctx; |
447 | int err; | 701 | int err; |
448 | 702 | ||
@@ -454,17 +708,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
454 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 708 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
455 | return ERR_PTR(-EINVAL); | 709 | return ERR_PTR(-EINVAL); |
456 | 710 | ||
711 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, | ||
712 | CRYPTO_ALG_TYPE_HASH, | ||
713 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
714 | err = PTR_ERR(ghash_alg); | ||
715 | if (IS_ERR(ghash_alg)) | ||
716 | return ERR_PTR(err); | ||
717 | |||
718 | err = -ENOMEM; | ||
457 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 719 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
458 | if (!inst) | 720 | if (!inst) |
459 | return ERR_PTR(-ENOMEM); | 721 | goto out_put_ghash; |
460 | 722 | ||
461 | ctx = crypto_instance_ctx(inst); | 723 | ctx = crypto_instance_ctx(inst); |
724 | ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); | ||
725 | err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, | ||
726 | inst); | ||
727 | if (err) | ||
728 | goto err_free_inst; | ||
729 | |||
462 | crypto_set_skcipher_spawn(&ctx->ctr, inst); | 730 | crypto_set_skcipher_spawn(&ctx->ctr, inst); |
463 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, | 731 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
464 | crypto_requires_sync(algt->type, | 732 | crypto_requires_sync(algt->type, |
465 | algt->mask)); | 733 | algt->mask)); |
466 | if (err) | 734 | if (err) |
467 | goto err_free_inst; | 735 | goto err_drop_ghash; |
468 | 736 | ||
469 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); | 737 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); |
470 | 738 | ||
@@ -479,7 +747,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
479 | 747 | ||
480 | err = -ENAMETOOLONG; | 748 | err = -ENAMETOOLONG; |
481 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 749 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
482 | "gcm_base(%s)", ctr->cra_driver_name) >= | 750 | "gcm_base(%s,%s)", ctr->cra_driver_name, |
751 | ghash_alg->cra_driver_name) >= | ||
483 | CRYPTO_MAX_ALG_NAME) | 752 | CRYPTO_MAX_ALG_NAME) |
484 | goto out_put_ctr; | 753 | goto out_put_ctr; |
485 | 754 | ||
@@ -502,12 +771,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
502 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; | 771 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; |
503 | 772 | ||
504 | out: | 773 | out: |
774 | crypto_mod_put(ghash_alg); | ||
505 | return inst; | 775 | return inst; |
506 | 776 | ||
507 | out_put_ctr: | 777 | out_put_ctr: |
508 | crypto_drop_skcipher(&ctx->ctr); | 778 | crypto_drop_skcipher(&ctx->ctr); |
779 | err_drop_ghash: | ||
780 | crypto_drop_ahash(&ctx->ghash); | ||
509 | err_free_inst: | 781 | err_free_inst: |
510 | kfree(inst); | 782 | kfree(inst); |
783 | out_put_ghash: | ||
511 | inst = ERR_PTR(err); | 784 | inst = ERR_PTR(err); |
512 | goto out; | 785 | goto out; |
513 | } | 786 | } |
@@ -532,7 +805,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) | |||
532 | CRYPTO_MAX_ALG_NAME) | 805 | CRYPTO_MAX_ALG_NAME) |
533 | return ERR_PTR(-ENAMETOOLONG); | 806 | return ERR_PTR(-ENAMETOOLONG); |
534 | 807 | ||
535 | return crypto_gcm_alloc_common(tb, full_name, ctr_name); | 808 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); |
536 | } | 809 | } |
537 | 810 | ||
538 | static void crypto_gcm_free(struct crypto_instance *inst) | 811 | static void crypto_gcm_free(struct crypto_instance *inst) |
@@ -540,6 +813,7 @@ static void crypto_gcm_free(struct crypto_instance *inst) | |||
540 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); | 813 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); |
541 | 814 | ||
542 | crypto_drop_skcipher(&ctx->ctr); | 815 | crypto_drop_skcipher(&ctx->ctr); |
816 | crypto_drop_ahash(&ctx->ghash); | ||
543 | kfree(inst); | 817 | kfree(inst); |
544 | } | 818 | } |
545 | 819 | ||
@@ -554,6 +828,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
554 | { | 828 | { |
555 | int err; | 829 | int err; |
556 | const char *ctr_name; | 830 | const char *ctr_name; |
831 | const char *ghash_name; | ||
557 | char full_name[CRYPTO_MAX_ALG_NAME]; | 832 | char full_name[CRYPTO_MAX_ALG_NAME]; |
558 | 833 | ||
559 | ctr_name = crypto_attr_alg_name(tb[1]); | 834 | ctr_name = crypto_attr_alg_name(tb[1]); |
@@ -561,11 +836,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
561 | if (IS_ERR(ctr_name)) | 836 | if (IS_ERR(ctr_name)) |
562 | return ERR_PTR(err); | 837 | return ERR_PTR(err); |
563 | 838 | ||
564 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", | 839 | ghash_name = crypto_attr_alg_name(tb[2]); |
565 | ctr_name) >= CRYPTO_MAX_ALG_NAME) | 840 | err = PTR_ERR(ghash_name); |
841 | if (IS_ERR(ghash_name)) | ||
842 | return ERR_PTR(err); | ||
843 | |||
844 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", | ||
845 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) | ||
566 | return ERR_PTR(-ENAMETOOLONG); | 846 | return ERR_PTR(-ENAMETOOLONG); |
567 | 847 | ||
568 | return crypto_gcm_alloc_common(tb, full_name, ctr_name); | 848 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); |
569 | } | 849 | } |
570 | 850 | ||
571 | static struct crypto_template crypto_gcm_base_tmpl = { | 851 | static struct crypto_template crypto_gcm_base_tmpl = { |
@@ -780,10 +1060,280 @@ static struct crypto_template crypto_rfc4106_tmpl = { | |||
780 | .module = THIS_MODULE, | 1060 | .module = THIS_MODULE, |
781 | }; | 1061 | }; |
782 | 1062 | ||
1063 | static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx( | ||
1064 | struct aead_request *req) | ||
1065 | { | ||
1066 | unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); | ||
1067 | |||
1068 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | ||
1069 | } | ||
1070 | |||
1071 | static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, | ||
1072 | unsigned int keylen) | ||
1073 | { | ||
1074 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); | ||
1075 | struct crypto_aead *child = ctx->child; | ||
1076 | int err; | ||
1077 | |||
1078 | if (keylen < 4) | ||
1079 | return -EINVAL; | ||
1080 | |||
1081 | keylen -= 4; | ||
1082 | memcpy(ctx->nonce, key + keylen, 4); | ||
1083 | |||
1084 | crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); | ||
1085 | crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & | ||
1086 | CRYPTO_TFM_REQ_MASK); | ||
1087 | err = crypto_aead_setkey(child, key, keylen); | ||
1088 | crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & | ||
1089 | CRYPTO_TFM_RES_MASK); | ||
1090 | |||
1091 | return err; | ||
1092 | } | ||
1093 | |||
1094 | static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, | ||
1095 | unsigned int authsize) | ||
1096 | { | ||
1097 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); | ||
1098 | |||
1099 | if (authsize != 16) | ||
1100 | return -EINVAL; | ||
1101 | |||
1102 | return crypto_aead_setauthsize(ctx->child, authsize); | ||
1103 | } | ||
1104 | |||
1105 | /* this is the same as crypto_authenc_chain */ | ||
1106 | static void crypto_rfc4543_chain(struct scatterlist *head, | ||
1107 | struct scatterlist *sg, int chain) | ||
1108 | { | ||
1109 | if (chain) { | ||
1110 | head->length += sg->length; | ||
1111 | sg = scatterwalk_sg_next(sg); | ||
1112 | } | ||
1113 | |||
1114 | if (sg) | ||
1115 | scatterwalk_sg_chain(head, 2, sg); | ||
1116 | else | ||
1117 | sg_mark_end(head); | ||
1118 | } | ||
1119 | |||
1120 | static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, | ||
1121 | int enc) | ||
1122 | { | ||
1123 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1124 | struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); | ||
1125 | struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); | ||
1126 | struct aead_request *subreq = &rctx->subreq; | ||
1127 | struct scatterlist *dst = req->dst; | ||
1128 | struct scatterlist *cipher = rctx->cipher; | ||
1129 | struct scatterlist *payload = rctx->payload; | ||
1130 | struct scatterlist *assoc = rctx->assoc; | ||
1131 | unsigned int authsize = crypto_aead_authsize(aead); | ||
1132 | unsigned int assoclen = req->assoclen; | ||
1133 | struct page *dstp; | ||
1134 | u8 *vdst; | ||
1135 | u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), | ||
1136 | crypto_aead_alignmask(ctx->child) + 1); | ||
1137 | |||
1138 | memcpy(iv, ctx->nonce, 4); | ||
1139 | memcpy(iv + 4, req->iv, 8); | ||
1140 | |||
1141 | /* construct cipher/plaintext */ | ||
1142 | if (enc) | ||
1143 | memset(rctx->auth_tag, 0, authsize); | ||
1144 | else | ||
1145 | scatterwalk_map_and_copy(rctx->auth_tag, dst, | ||
1146 | req->cryptlen - authsize, | ||
1147 | authsize, 0); | ||
1148 | |||
1149 | sg_init_one(cipher, rctx->auth_tag, authsize); | ||
1150 | |||
1151 | /* construct the aad */ | ||
1152 | dstp = sg_page(dst); | ||
1153 | vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; | ||
1154 | |||
1155 | sg_init_table(payload, 2); | ||
1156 | sg_set_buf(payload, req->iv, 8); | ||
1157 | crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8); | ||
1158 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); | ||
1159 | |||
1160 | sg_init_table(assoc, 2); | ||
1161 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, | ||
1162 | req->assoc->offset); | ||
1163 | crypto_rfc4543_chain(assoc, payload, 0); | ||
1164 | |||
1165 | aead_request_set_tfm(subreq, ctx->child); | ||
1166 | aead_request_set_callback(subreq, req->base.flags, req->base.complete, | ||
1167 | req->base.data); | ||
1168 | aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv); | ||
1169 | aead_request_set_assoc(subreq, assoc, assoclen); | ||
1170 | |||
1171 | return subreq; | ||
1172 | } | ||
1173 | |||
1174 | static int crypto_rfc4543_encrypt(struct aead_request *req) | ||
1175 | { | ||
1176 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1177 | struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); | ||
1178 | struct aead_request *subreq; | ||
1179 | int err; | ||
1180 | |||
1181 | subreq = crypto_rfc4543_crypt(req, 1); | ||
1182 | err = crypto_aead_encrypt(subreq); | ||
1183 | if (err) | ||
1184 | return err; | ||
1185 | |||
1186 | scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen, | ||
1187 | crypto_aead_authsize(aead), 1); | ||
1188 | |||
1189 | return 0; | ||
1190 | } | ||
1191 | |||
1192 | static int crypto_rfc4543_decrypt(struct aead_request *req) | ||
1193 | { | ||
1194 | req = crypto_rfc4543_crypt(req, 0); | ||
1195 | |||
1196 | return crypto_aead_decrypt(req); | ||
1197 | } | ||
1198 | |||
1199 | static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) | ||
1200 | { | ||
1201 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | ||
1202 | struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst); | ||
1203 | struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1204 | struct crypto_aead *aead; | ||
1205 | unsigned long align; | ||
1206 | |||
1207 | aead = crypto_spawn_aead(spawn); | ||
1208 | if (IS_ERR(aead)) | ||
1209 | return PTR_ERR(aead); | ||
1210 | |||
1211 | ctx->child = aead; | ||
1212 | |||
1213 | align = crypto_aead_alignmask(aead); | ||
1214 | align &= ~(crypto_tfm_ctx_alignment() - 1); | ||
1215 | tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) + | ||
1216 | ALIGN(crypto_aead_reqsize(aead), | ||
1217 | crypto_tfm_ctx_alignment()) + | ||
1218 | align + 16; | ||
1219 | |||
1220 | return 0; | ||
1221 | } | ||
1222 | |||
1223 | static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm) | ||
1224 | { | ||
1225 | struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); | ||
1226 | |||
1227 | crypto_free_aead(ctx->child); | ||
1228 | } | ||
1229 | |||
1230 | static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) | ||
1231 | { | ||
1232 | struct crypto_attr_type *algt; | ||
1233 | struct crypto_instance *inst; | ||
1234 | struct crypto_aead_spawn *spawn; | ||
1235 | struct crypto_alg *alg; | ||
1236 | const char *ccm_name; | ||
1237 | int err; | ||
1238 | |||
1239 | algt = crypto_get_attr_type(tb); | ||
1240 | err = PTR_ERR(algt); | ||
1241 | if (IS_ERR(algt)) | ||
1242 | return ERR_PTR(err); | ||
1243 | |||
1244 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | ||
1245 | return ERR_PTR(-EINVAL); | ||
1246 | |||
1247 | ccm_name = crypto_attr_alg_name(tb[1]); | ||
1248 | err = PTR_ERR(ccm_name); | ||
1249 | if (IS_ERR(ccm_name)) | ||
1250 | return ERR_PTR(err); | ||
1251 | |||
1252 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | ||
1253 | if (!inst) | ||
1254 | return ERR_PTR(-ENOMEM); | ||
1255 | |||
1256 | spawn = crypto_instance_ctx(inst); | ||
1257 | crypto_set_aead_spawn(spawn, inst); | ||
1258 | err = crypto_grab_aead(spawn, ccm_name, 0, | ||
1259 | crypto_requires_sync(algt->type, algt->mask)); | ||
1260 | if (err) | ||
1261 | goto out_free_inst; | ||
1262 | |||
1263 | alg = crypto_aead_spawn_alg(spawn); | ||
1264 | |||
1265 | err = -EINVAL; | ||
1266 | |||
1267 | /* We only support 16-byte blocks. */ | ||
1268 | if (alg->cra_aead.ivsize != 16) | ||
1269 | goto out_drop_alg; | ||
1270 | |||
1271 | /* Not a stream cipher? */ | ||
1272 | if (alg->cra_blocksize != 1) | ||
1273 | goto out_drop_alg; | ||
1274 | |||
1275 | err = -ENAMETOOLONG; | ||
1276 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | ||
1277 | "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || | ||
1278 | snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
1279 | "rfc4543(%s)", alg->cra_driver_name) >= | ||
1280 | CRYPTO_MAX_ALG_NAME) | ||
1281 | goto out_drop_alg; | ||
1282 | |||
1283 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | ||
1284 | inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; | ||
1285 | inst->alg.cra_priority = alg->cra_priority; | ||
1286 | inst->alg.cra_blocksize = 1; | ||
1287 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
1288 | inst->alg.cra_type = &crypto_nivaead_type; | ||
1289 | |||
1290 | inst->alg.cra_aead.ivsize = 8; | ||
1291 | inst->alg.cra_aead.maxauthsize = 16; | ||
1292 | |||
1293 | inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); | ||
1294 | |||
1295 | inst->alg.cra_init = crypto_rfc4543_init_tfm; | ||
1296 | inst->alg.cra_exit = crypto_rfc4543_exit_tfm; | ||
1297 | |||
1298 | inst->alg.cra_aead.setkey = crypto_rfc4543_setkey; | ||
1299 | inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize; | ||
1300 | inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt; | ||
1301 | inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt; | ||
1302 | |||
1303 | inst->alg.cra_aead.geniv = "seqiv"; | ||
1304 | |||
1305 | out: | ||
1306 | return inst; | ||
1307 | |||
1308 | out_drop_alg: | ||
1309 | crypto_drop_aead(spawn); | ||
1310 | out_free_inst: | ||
1311 | kfree(inst); | ||
1312 | inst = ERR_PTR(err); | ||
1313 | goto out; | ||
1314 | } | ||
1315 | |||
1316 | static void crypto_rfc4543_free(struct crypto_instance *inst) | ||
1317 | { | ||
1318 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
1319 | kfree(inst); | ||
1320 | } | ||
1321 | |||
1322 | static struct crypto_template crypto_rfc4543_tmpl = { | ||
1323 | .name = "rfc4543", | ||
1324 | .alloc = crypto_rfc4543_alloc, | ||
1325 | .free = crypto_rfc4543_free, | ||
1326 | .module = THIS_MODULE, | ||
1327 | }; | ||
1328 | |||
783 | static int __init crypto_gcm_module_init(void) | 1329 | static int __init crypto_gcm_module_init(void) |
784 | { | 1330 | { |
785 | int err; | 1331 | int err; |
786 | 1332 | ||
1333 | gcm_zeroes = kzalloc(16, GFP_KERNEL); | ||
1334 | if (!gcm_zeroes) | ||
1335 | return -ENOMEM; | ||
1336 | |||
787 | err = crypto_register_template(&crypto_gcm_base_tmpl); | 1337 | err = crypto_register_template(&crypto_gcm_base_tmpl); |
788 | if (err) | 1338 | if (err) |
789 | goto out; | 1339 | goto out; |
@@ -796,18 +1346,27 @@ static int __init crypto_gcm_module_init(void) | |||
796 | if (err) | 1346 | if (err) |
797 | goto out_undo_gcm; | 1347 | goto out_undo_gcm; |
798 | 1348 | ||
799 | out: | 1349 | err = crypto_register_template(&crypto_rfc4543_tmpl); |
800 | return err; | 1350 | if (err) |
1351 | goto out_undo_rfc4106; | ||
801 | 1352 | ||
1353 | return 0; | ||
1354 | |||
1355 | out_undo_rfc4106: | ||
1356 | crypto_unregister_template(&crypto_rfc4106_tmpl); | ||
802 | out_undo_gcm: | 1357 | out_undo_gcm: |
803 | crypto_unregister_template(&crypto_gcm_tmpl); | 1358 | crypto_unregister_template(&crypto_gcm_tmpl); |
804 | out_undo_base: | 1359 | out_undo_base: |
805 | crypto_unregister_template(&crypto_gcm_base_tmpl); | 1360 | crypto_unregister_template(&crypto_gcm_base_tmpl); |
806 | goto out; | 1361 | out: |
1362 | kfree(gcm_zeroes); | ||
1363 | return err; | ||
807 | } | 1364 | } |
808 | 1365 | ||
809 | static void __exit crypto_gcm_module_exit(void) | 1366 | static void __exit crypto_gcm_module_exit(void) |
810 | { | 1367 | { |
1368 | kfree(gcm_zeroes); | ||
1369 | crypto_unregister_template(&crypto_rfc4543_tmpl); | ||
811 | crypto_unregister_template(&crypto_rfc4106_tmpl); | 1370 | crypto_unregister_template(&crypto_rfc4106_tmpl); |
812 | crypto_unregister_template(&crypto_gcm_tmpl); | 1371 | crypto_unregister_template(&crypto_gcm_tmpl); |
813 | crypto_unregister_template(&crypto_gcm_base_tmpl); | 1372 | crypto_unregister_template(&crypto_gcm_base_tmpl); |
@@ -821,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode"); | |||
821 | MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); | 1380 | MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); |
822 | MODULE_ALIAS("gcm_base"); | 1381 | MODULE_ALIAS("gcm_base"); |
823 | MODULE_ALIAS("rfc4106"); | 1382 | MODULE_ALIAS("rfc4106"); |
1383 | MODULE_ALIAS("rfc4543"); | ||
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c new file mode 100644 index 000000000000..be4425616931 --- /dev/null +++ b/crypto/ghash-generic.c | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * GHASH: digest algorithm for GCM (Galois/Counter Mode). | ||
3 | * | ||
4 | * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> | ||
5 | * Copyright (c) 2009 Intel Corp. | ||
6 | * Author: Huang Ying <ying.huang@intel.com> | ||
7 | * | ||
8 | * The algorithm implementation is copied from gcm.c. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published | ||
12 | * by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <crypto/algapi.h> | ||
16 | #include <crypto/gf128mul.h> | ||
17 | #include <crypto/internal/hash.h> | ||
18 | #include <linux/crypto.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | |||
23 | #define GHASH_BLOCK_SIZE 16 | ||
24 | #define GHASH_DIGEST_SIZE 16 | ||
25 | |||
26 | struct ghash_ctx { | ||
27 | struct gf128mul_4k *gf128; | ||
28 | }; | ||
29 | |||
30 | struct ghash_desc_ctx { | ||
31 | u8 buffer[GHASH_BLOCK_SIZE]; | ||
32 | u32 bytes; | ||
33 | }; | ||
34 | |||
35 | static int ghash_init(struct shash_desc *desc) | ||
36 | { | ||
37 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
38 | |||
39 | memset(dctx, 0, sizeof(*dctx)); | ||
40 | |||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | static int ghash_setkey(struct crypto_shash *tfm, | ||
45 | const u8 *key, unsigned int keylen) | ||
46 | { | ||
47 | struct ghash_ctx *ctx = crypto_shash_ctx(tfm); | ||
48 | |||
49 | if (keylen != GHASH_BLOCK_SIZE) { | ||
50 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
51 | return -EINVAL; | ||
52 | } | ||
53 | |||
54 | if (ctx->gf128) | ||
55 | gf128mul_free_4k(ctx->gf128); | ||
56 | ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); | ||
57 | if (!ctx->gf128) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static int ghash_update(struct shash_desc *desc, | ||
64 | const u8 *src, unsigned int srclen) | ||
65 | { | ||
66 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
67 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
68 | u8 *dst = dctx->buffer; | ||
69 | |||
70 | if (dctx->bytes) { | ||
71 | int n = min(srclen, dctx->bytes); | ||
72 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | ||
73 | |||
74 | dctx->bytes -= n; | ||
75 | srclen -= n; | ||
76 | |||
77 | while (n--) | ||
78 | *pos++ ^= *src++; | ||
79 | |||
80 | if (!dctx->bytes) | ||
81 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
82 | } | ||
83 | |||
84 | while (srclen >= GHASH_BLOCK_SIZE) { | ||
85 | crypto_xor(dst, src, GHASH_BLOCK_SIZE); | ||
86 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
87 | src += GHASH_BLOCK_SIZE; | ||
88 | srclen -= GHASH_BLOCK_SIZE; | ||
89 | } | ||
90 | |||
91 | if (srclen) { | ||
92 | dctx->bytes = GHASH_BLOCK_SIZE - srclen; | ||
93 | while (srclen--) | ||
94 | *dst++ ^= *src++; | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) | ||
101 | { | ||
102 | u8 *dst = dctx->buffer; | ||
103 | |||
104 | if (dctx->bytes) { | ||
105 | u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | ||
106 | |||
107 | while (dctx->bytes--) | ||
108 | *tmp++ ^= 0; | ||
109 | |||
110 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
111 | } | ||
112 | |||
113 | dctx->bytes = 0; | ||
114 | } | ||
115 | |||
116 | static int ghash_final(struct shash_desc *desc, u8 *dst) | ||
117 | { | ||
118 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
119 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
120 | u8 *buf = dctx->buffer; | ||
121 | |||
122 | ghash_flush(ctx, dctx); | ||
123 | memcpy(dst, buf, GHASH_BLOCK_SIZE); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static void ghash_exit_tfm(struct crypto_tfm *tfm) | ||
129 | { | ||
130 | struct ghash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
131 | if (ctx->gf128) | ||
132 | gf128mul_free_4k(ctx->gf128); | ||
133 | } | ||
134 | |||
135 | static struct shash_alg ghash_alg = { | ||
136 | .digestsize = GHASH_DIGEST_SIZE, | ||
137 | .init = ghash_init, | ||
138 | .update = ghash_update, | ||
139 | .final = ghash_final, | ||
140 | .setkey = ghash_setkey, | ||
141 | .descsize = sizeof(struct ghash_desc_ctx), | ||
142 | .base = { | ||
143 | .cra_name = "ghash", | ||
144 | .cra_driver_name = "ghash-generic", | ||
145 | .cra_priority = 100, | ||
146 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
147 | .cra_blocksize = GHASH_BLOCK_SIZE, | ||
148 | .cra_ctxsize = sizeof(struct ghash_ctx), | ||
149 | .cra_module = THIS_MODULE, | ||
150 | .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list), | ||
151 | .cra_exit = ghash_exit_tfm, | ||
152 | }, | ||
153 | }; | ||
154 | |||
155 | static int __init ghash_mod_init(void) | ||
156 | { | ||
157 | return crypto_register_shash(&ghash_alg); | ||
158 | } | ||
159 | |||
160 | static void __exit ghash_mod_exit(void) | ||
161 | { | ||
162 | crypto_unregister_shash(&ghash_alg); | ||
163 | } | ||
164 | |||
165 | module_init(ghash_mod_init); | ||
166 | module_exit(ghash_mod_exit); | ||
167 | |||
168 | MODULE_LICENSE("GPL"); | ||
169 | MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); | ||
170 | MODULE_ALIAS("ghash"); | ||
diff --git a/crypto/hash.c b/crypto/hash.c deleted file mode 100644 index cb86b19fd105..000000000000 --- a/crypto/hash.c +++ /dev/null | |||
@@ -1,183 +0,0 @@ | |||
1 | /* | ||
2 | * Cryptographic Hash operations. | ||
3 | * | ||
4 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the Free | ||
8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
9 | * any later version. | ||
10 | */ | ||
11 | |||
12 | #include <crypto/internal/hash.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | |||
19 | #include "internal.h" | ||
20 | |||
21 | static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type, | ||
22 | u32 mask) | ||
23 | { | ||
24 | return alg->cra_ctxsize; | ||
25 | } | ||
26 | |||
27 | static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key, | ||
28 | unsigned int keylen) | ||
29 | { | ||
30 | struct crypto_tfm *tfm = crypto_hash_tfm(crt); | ||
31 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
32 | unsigned long alignmask = crypto_hash_alignmask(crt); | ||
33 | int ret; | ||
34 | u8 *buffer, *alignbuffer; | ||
35 | unsigned long absize; | ||
36 | |||
37 | absize = keylen + alignmask; | ||
38 | buffer = kmalloc(absize, GFP_ATOMIC); | ||
39 | if (!buffer) | ||
40 | return -ENOMEM; | ||
41 | |||
42 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | ||
43 | memcpy(alignbuffer, key, keylen); | ||
44 | ret = alg->setkey(crt, alignbuffer, keylen); | ||
45 | memset(alignbuffer, 0, keylen); | ||
46 | kfree(buffer); | ||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | static int hash_setkey(struct crypto_hash *crt, const u8 *key, | ||
51 | unsigned int keylen) | ||
52 | { | ||
53 | struct crypto_tfm *tfm = crypto_hash_tfm(crt); | ||
54 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
55 | unsigned long alignmask = crypto_hash_alignmask(crt); | ||
56 | |||
57 | if ((unsigned long)key & alignmask) | ||
58 | return hash_setkey_unaligned(crt, key, keylen); | ||
59 | |||
60 | return alg->setkey(crt, key, keylen); | ||
61 | } | ||
62 | |||
63 | static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key, | ||
64 | unsigned int keylen) | ||
65 | { | ||
66 | struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async); | ||
67 | struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm); | ||
68 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
69 | |||
70 | return alg->setkey(tfm_hash, key, keylen); | ||
71 | } | ||
72 | |||
73 | static int hash_async_init(struct ahash_request *req) | ||
74 | { | ||
75 | struct crypto_tfm *tfm = req->base.tfm; | ||
76 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
77 | struct hash_desc desc = { | ||
78 | .tfm = __crypto_hash_cast(tfm), | ||
79 | .flags = req->base.flags, | ||
80 | }; | ||
81 | |||
82 | return alg->init(&desc); | ||
83 | } | ||
84 | |||
85 | static int hash_async_update(struct ahash_request *req) | ||
86 | { | ||
87 | struct crypto_tfm *tfm = req->base.tfm; | ||
88 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
89 | struct hash_desc desc = { | ||
90 | .tfm = __crypto_hash_cast(tfm), | ||
91 | .flags = req->base.flags, | ||
92 | }; | ||
93 | |||
94 | return alg->update(&desc, req->src, req->nbytes); | ||
95 | } | ||
96 | |||
97 | static int hash_async_final(struct ahash_request *req) | ||
98 | { | ||
99 | struct crypto_tfm *tfm = req->base.tfm; | ||
100 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
101 | struct hash_desc desc = { | ||
102 | .tfm = __crypto_hash_cast(tfm), | ||
103 | .flags = req->base.flags, | ||
104 | }; | ||
105 | |||
106 | return alg->final(&desc, req->result); | ||
107 | } | ||
108 | |||
109 | static int hash_async_digest(struct ahash_request *req) | ||
110 | { | ||
111 | struct crypto_tfm *tfm = req->base.tfm; | ||
112 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
113 | struct hash_desc desc = { | ||
114 | .tfm = __crypto_hash_cast(tfm), | ||
115 | .flags = req->base.flags, | ||
116 | }; | ||
117 | |||
118 | return alg->digest(&desc, req->src, req->nbytes, req->result); | ||
119 | } | ||
120 | |||
121 | static int crypto_init_hash_ops_async(struct crypto_tfm *tfm) | ||
122 | { | ||
123 | struct ahash_tfm *crt = &tfm->crt_ahash; | ||
124 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
125 | |||
126 | crt->init = hash_async_init; | ||
127 | crt->update = hash_async_update; | ||
128 | crt->final = hash_async_final; | ||
129 | crt->digest = hash_async_digest; | ||
130 | crt->setkey = hash_async_setkey; | ||
131 | crt->digestsize = alg->digestsize; | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm) | ||
137 | { | ||
138 | struct hash_tfm *crt = &tfm->crt_hash; | ||
139 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
140 | |||
141 | crt->init = alg->init; | ||
142 | crt->update = alg->update; | ||
143 | crt->final = alg->final; | ||
144 | crt->digest = alg->digest; | ||
145 | crt->setkey = hash_setkey; | ||
146 | crt->digestsize = alg->digestsize; | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | ||
152 | { | ||
153 | struct hash_alg *alg = &tfm->__crt_alg->cra_hash; | ||
154 | |||
155 | if (alg->digestsize > PAGE_SIZE / 8) | ||
156 | return -EINVAL; | ||
157 | |||
158 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK) | ||
159 | return crypto_init_hash_ops_async(tfm); | ||
160 | else | ||
161 | return crypto_init_hash_ops_sync(tfm); | ||
162 | } | ||
163 | |||
164 | static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) | ||
165 | __attribute__ ((unused)); | ||
166 | static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) | ||
167 | { | ||
168 | seq_printf(m, "type : hash\n"); | ||
169 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
170 | seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); | ||
171 | } | ||
172 | |||
173 | const struct crypto_type crypto_hash_type = { | ||
174 | .ctxsize = crypto_hash_ctxsize, | ||
175 | .init = crypto_init_hash_ops, | ||
176 | #ifdef CONFIG_PROC_FS | ||
177 | .show = crypto_hash_show, | ||
178 | #endif | ||
179 | }; | ||
180 | EXPORT_SYMBOL_GPL(crypto_hash_type); | ||
181 | |||
182 | MODULE_LICENSE("GPL"); | ||
183 | MODULE_DESCRIPTION("Generic cryptographic hash type"); | ||
diff --git a/crypto/hmac.c b/crypto/hmac.c index 0ad39c374963..8d9544cf8169 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c | |||
@@ -23,11 +23,10 @@ | |||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
26 | #include <linux/slab.h> | ||
27 | #include <linux/string.h> | 26 | #include <linux/string.h> |
28 | 27 | ||
29 | struct hmac_ctx { | 28 | struct hmac_ctx { |
30 | struct crypto_hash *child; | 29 | struct crypto_shash *hash; |
31 | }; | 30 | }; |
32 | 31 | ||
33 | static inline void *align_ptr(void *p, unsigned int align) | 32 | static inline void *align_ptr(void *p, unsigned int align) |
@@ -35,65 +34,45 @@ static inline void *align_ptr(void *p, unsigned int align) | |||
35 | return (void *)ALIGN((unsigned long)p, align); | 34 | return (void *)ALIGN((unsigned long)p, align); |
36 | } | 35 | } |
37 | 36 | ||
38 | static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) | 37 | static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) |
39 | { | 38 | { |
40 | return align_ptr(crypto_hash_ctx_aligned(tfm) + | 39 | return align_ptr(crypto_shash_ctx_aligned(tfm) + |
41 | crypto_hash_blocksize(tfm) * 2 + | 40 | crypto_shash_statesize(tfm) * 2, |
42 | crypto_hash_digestsize(tfm), sizeof(void *)); | 41 | crypto_tfm_ctx_alignment()); |
43 | } | 42 | } |
44 | 43 | ||
45 | static int hmac_setkey(struct crypto_hash *parent, | 44 | static int hmac_setkey(struct crypto_shash *parent, |
46 | const u8 *inkey, unsigned int keylen) | 45 | const u8 *inkey, unsigned int keylen) |
47 | { | 46 | { |
48 | int bs = crypto_hash_blocksize(parent); | 47 | int bs = crypto_shash_blocksize(parent); |
49 | int ds = crypto_hash_digestsize(parent); | 48 | int ds = crypto_shash_digestsize(parent); |
50 | char *ipad = crypto_hash_ctx_aligned(parent); | 49 | int ss = crypto_shash_statesize(parent); |
51 | char *opad = ipad + bs; | 50 | char *ipad = crypto_shash_ctx_aligned(parent); |
52 | char *digest = opad + bs; | 51 | char *opad = ipad + ss; |
53 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | 52 | struct hmac_ctx *ctx = align_ptr(opad + ss, |
54 | struct crypto_hash *tfm = ctx->child; | 53 | crypto_tfm_ctx_alignment()); |
54 | struct crypto_shash *hash = ctx->hash; | ||
55 | struct { | ||
56 | struct shash_desc shash; | ||
57 | char ctx[crypto_shash_descsize(hash)]; | ||
58 | } desc; | ||
55 | unsigned int i; | 59 | unsigned int i; |
56 | 60 | ||
61 | desc.shash.tfm = hash; | ||
62 | desc.shash.flags = crypto_shash_get_flags(parent) & | ||
63 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
64 | |||
57 | if (keylen > bs) { | 65 | if (keylen > bs) { |
58 | struct hash_desc desc; | ||
59 | struct scatterlist tmp; | ||
60 | int tmplen; | ||
61 | int err; | 66 | int err; |
62 | 67 | ||
63 | desc.tfm = tfm; | 68 | err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad); |
64 | desc.flags = crypto_hash_get_flags(parent); | ||
65 | desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; | ||
66 | |||
67 | err = crypto_hash_init(&desc); | ||
68 | if (err) | 69 | if (err) |
69 | return err; | 70 | return err; |
70 | 71 | ||
71 | tmplen = bs * 2 + ds; | ||
72 | sg_init_one(&tmp, ipad, tmplen); | ||
73 | |||
74 | for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) { | ||
75 | memcpy(ipad, inkey, tmplen); | ||
76 | err = crypto_hash_update(&desc, &tmp, tmplen); | ||
77 | if (err) | ||
78 | return err; | ||
79 | } | ||
80 | |||
81 | if (keylen) { | ||
82 | memcpy(ipad, inkey, keylen); | ||
83 | err = crypto_hash_update(&desc, &tmp, keylen); | ||
84 | if (err) | ||
85 | return err; | ||
86 | } | ||
87 | |||
88 | err = crypto_hash_final(&desc, digest); | ||
89 | if (err) | ||
90 | return err; | ||
91 | |||
92 | inkey = digest; | ||
93 | keylen = ds; | 72 | keylen = ds; |
94 | } | 73 | } else |
74 | memcpy(ipad, inkey, keylen); | ||
95 | 75 | ||
96 | memcpy(ipad, inkey, keylen); | ||
97 | memset(ipad + keylen, 0, bs - keylen); | 76 | memset(ipad + keylen, 0, bs - keylen); |
98 | memcpy(opad, ipad, bs); | 77 | memcpy(opad, ipad, bs); |
99 | 78 | ||
@@ -102,184 +81,178 @@ static int hmac_setkey(struct crypto_hash *parent, | |||
102 | opad[i] ^= 0x5c; | 81 | opad[i] ^= 0x5c; |
103 | } | 82 | } |
104 | 83 | ||
105 | return 0; | 84 | return crypto_shash_init(&desc.shash) ?: |
85 | crypto_shash_update(&desc.shash, ipad, bs) ?: | ||
86 | crypto_shash_export(&desc.shash, ipad) ?: | ||
87 | crypto_shash_init(&desc.shash) ?: | ||
88 | crypto_shash_update(&desc.shash, opad, bs) ?: | ||
89 | crypto_shash_export(&desc.shash, opad); | ||
106 | } | 90 | } |
107 | 91 | ||
108 | static int hmac_init(struct hash_desc *pdesc) | 92 | static int hmac_export(struct shash_desc *pdesc, void *out) |
109 | { | 93 | { |
110 | struct crypto_hash *parent = pdesc->tfm; | 94 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
111 | int bs = crypto_hash_blocksize(parent); | ||
112 | int ds = crypto_hash_digestsize(parent); | ||
113 | char *ipad = crypto_hash_ctx_aligned(parent); | ||
114 | struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *)); | ||
115 | struct hash_desc desc; | ||
116 | struct scatterlist tmp; | ||
117 | int err; | ||
118 | 95 | ||
119 | desc.tfm = ctx->child; | 96 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
120 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
121 | sg_init_one(&tmp, ipad, bs); | ||
122 | 97 | ||
123 | err = crypto_hash_init(&desc); | 98 | return crypto_shash_export(desc, out); |
124 | if (unlikely(err)) | ||
125 | return err; | ||
126 | |||
127 | return crypto_hash_update(&desc, &tmp, bs); | ||
128 | } | 99 | } |
129 | 100 | ||
130 | static int hmac_update(struct hash_desc *pdesc, | 101 | static int hmac_import(struct shash_desc *pdesc, const void *in) |
131 | struct scatterlist *sg, unsigned int nbytes) | ||
132 | { | 102 | { |
103 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
133 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); | 104 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); |
134 | struct hash_desc desc; | ||
135 | 105 | ||
136 | desc.tfm = ctx->child; | 106 | desc->tfm = ctx->hash; |
137 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 107 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
138 | 108 | ||
139 | return crypto_hash_update(&desc, sg, nbytes); | 109 | return crypto_shash_import(desc, in); |
140 | } | 110 | } |
141 | 111 | ||
142 | static int hmac_final(struct hash_desc *pdesc, u8 *out) | 112 | static int hmac_init(struct shash_desc *pdesc) |
143 | { | 113 | { |
144 | struct crypto_hash *parent = pdesc->tfm; | 114 | return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); |
145 | int bs = crypto_hash_blocksize(parent); | 115 | } |
146 | int ds = crypto_hash_digestsize(parent); | ||
147 | char *opad = crypto_hash_ctx_aligned(parent) + bs; | ||
148 | char *digest = opad + bs; | ||
149 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | ||
150 | struct hash_desc desc; | ||
151 | struct scatterlist tmp; | ||
152 | int err; | ||
153 | 116 | ||
154 | desc.tfm = ctx->child; | 117 | static int hmac_update(struct shash_desc *pdesc, |
155 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 118 | const u8 *data, unsigned int nbytes) |
156 | sg_init_one(&tmp, opad, bs + ds); | 119 | { |
120 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
157 | 121 | ||
158 | err = crypto_hash_final(&desc, digest); | 122 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
159 | if (unlikely(err)) | ||
160 | return err; | ||
161 | 123 | ||
162 | return crypto_hash_digest(&desc, &tmp, bs + ds, out); | 124 | return crypto_shash_update(desc, data, nbytes); |
163 | } | 125 | } |
164 | 126 | ||
165 | static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, | 127 | static int hmac_final(struct shash_desc *pdesc, u8 *out) |
166 | unsigned int nbytes, u8 *out) | ||
167 | { | 128 | { |
168 | struct crypto_hash *parent = pdesc->tfm; | 129 | struct crypto_shash *parent = pdesc->tfm; |
169 | int bs = crypto_hash_blocksize(parent); | 130 | int ds = crypto_shash_digestsize(parent); |
170 | int ds = crypto_hash_digestsize(parent); | 131 | int ss = crypto_shash_statesize(parent); |
171 | char *ipad = crypto_hash_ctx_aligned(parent); | 132 | char *opad = crypto_shash_ctx_aligned(parent) + ss; |
172 | char *opad = ipad + bs; | 133 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
173 | char *digest = opad + bs; | ||
174 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | ||
175 | struct hash_desc desc; | ||
176 | struct scatterlist sg1[2]; | ||
177 | struct scatterlist sg2[1]; | ||
178 | int err; | ||
179 | 134 | ||
180 | desc.tfm = ctx->child; | 135 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
181 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
182 | 136 | ||
183 | sg_init_table(sg1, 2); | 137 | return crypto_shash_final(desc, out) ?: |
184 | sg_set_buf(sg1, ipad, bs); | 138 | crypto_shash_import(desc, opad) ?: |
185 | scatterwalk_sg_chain(sg1, 2, sg); | 139 | crypto_shash_finup(desc, out, ds, out); |
140 | } | ||
186 | 141 | ||
187 | sg_init_table(sg2, 1); | 142 | static int hmac_finup(struct shash_desc *pdesc, const u8 *data, |
188 | sg_set_buf(sg2, opad, bs + ds); | 143 | unsigned int nbytes, u8 *out) |
144 | { | ||
189 | 145 | ||
190 | err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); | 146 | struct crypto_shash *parent = pdesc->tfm; |
191 | if (unlikely(err)) | 147 | int ds = crypto_shash_digestsize(parent); |
192 | return err; | 148 | int ss = crypto_shash_statesize(parent); |
149 | char *opad = crypto_shash_ctx_aligned(parent) + ss; | ||
150 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
193 | 151 | ||
194 | return crypto_hash_digest(&desc, sg2, bs + ds, out); | 152 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
153 | |||
154 | return crypto_shash_finup(desc, data, nbytes, out) ?: | ||
155 | crypto_shash_import(desc, opad) ?: | ||
156 | crypto_shash_finup(desc, out, ds, out); | ||
195 | } | 157 | } |
196 | 158 | ||
197 | static int hmac_init_tfm(struct crypto_tfm *tfm) | 159 | static int hmac_init_tfm(struct crypto_tfm *tfm) |
198 | { | 160 | { |
199 | struct crypto_hash *hash; | 161 | struct crypto_shash *parent = __crypto_shash_cast(tfm); |
162 | struct crypto_shash *hash; | ||
200 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 163 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
201 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 164 | struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); |
202 | struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); | 165 | struct hmac_ctx *ctx = hmac_ctx(parent); |
203 | 166 | ||
204 | hash = crypto_spawn_hash(spawn); | 167 | hash = crypto_spawn_shash(spawn); |
205 | if (IS_ERR(hash)) | 168 | if (IS_ERR(hash)) |
206 | return PTR_ERR(hash); | 169 | return PTR_ERR(hash); |
207 | 170 | ||
208 | ctx->child = hash; | 171 | parent->descsize = sizeof(struct shash_desc) + |
172 | crypto_shash_descsize(hash); | ||
173 | |||
174 | ctx->hash = hash; | ||
209 | return 0; | 175 | return 0; |
210 | } | 176 | } |
211 | 177 | ||
212 | static void hmac_exit_tfm(struct crypto_tfm *tfm) | 178 | static void hmac_exit_tfm(struct crypto_tfm *tfm) |
213 | { | 179 | { |
214 | struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); | 180 | struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); |
215 | crypto_free_hash(ctx->child); | 181 | crypto_free_shash(ctx->hash); |
216 | } | 182 | } |
217 | 183 | ||
218 | static void hmac_free(struct crypto_instance *inst) | 184 | static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) |
219 | { | 185 | { |
220 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 186 | struct shash_instance *inst; |
221 | kfree(inst); | ||
222 | } | ||
223 | |||
224 | static struct crypto_instance *hmac_alloc(struct rtattr **tb) | ||
225 | { | ||
226 | struct crypto_instance *inst; | ||
227 | struct crypto_alg *alg; | 187 | struct crypto_alg *alg; |
188 | struct shash_alg *salg; | ||
228 | int err; | 189 | int err; |
229 | int ds; | 190 | int ds; |
191 | int ss; | ||
230 | 192 | ||
231 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); | 193 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
232 | if (err) | 194 | if (err) |
233 | return ERR_PTR(err); | 195 | return err; |
234 | 196 | ||
235 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 197 | salg = shash_attr_alg(tb[1], 0, 0); |
236 | CRYPTO_ALG_TYPE_HASH_MASK); | 198 | if (IS_ERR(salg)) |
237 | if (IS_ERR(alg)) | 199 | return PTR_ERR(salg); |
238 | return ERR_CAST(alg); | 200 | |
239 | 201 | err = -EINVAL; | |
240 | inst = ERR_PTR(-EINVAL); | 202 | ds = salg->digestsize; |
241 | ds = alg->cra_type == &crypto_hash_type ? | 203 | ss = salg->statesize; |
242 | alg->cra_hash.digestsize : | 204 | alg = &salg->base; |
243 | alg->cra_type ? | 205 | if (ds > alg->cra_blocksize || |
244 | __crypto_shash_alg(alg)->digestsize : | 206 | ss < alg->cra_blocksize) |
245 | alg->cra_digest.dia_digestsize; | ||
246 | if (ds > alg->cra_blocksize) | ||
247 | goto out_put_alg; | 207 | goto out_put_alg; |
248 | 208 | ||
249 | inst = crypto_alloc_instance("hmac", alg); | 209 | inst = shash_alloc_instance("hmac", alg); |
210 | err = PTR_ERR(inst); | ||
250 | if (IS_ERR(inst)) | 211 | if (IS_ERR(inst)) |
251 | goto out_put_alg; | 212 | goto out_put_alg; |
252 | 213 | ||
253 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; | 214 | err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, |
254 | inst->alg.cra_priority = alg->cra_priority; | 215 | shash_crypto_instance(inst)); |
255 | inst->alg.cra_blocksize = alg->cra_blocksize; | 216 | if (err) |
256 | inst->alg.cra_alignmask = alg->cra_alignmask; | 217 | goto out_free_inst; |
257 | inst->alg.cra_type = &crypto_hash_type; | 218 | |
258 | 219 | inst->alg.base.cra_priority = alg->cra_priority; | |
259 | inst->alg.cra_hash.digestsize = ds; | 220 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
260 | 221 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | |
261 | inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + | 222 | |
262 | ALIGN(inst->alg.cra_blocksize * 2 + ds, | 223 | ss = ALIGN(ss, alg->cra_alignmask + 1); |
263 | sizeof(void *)); | 224 | inst->alg.digestsize = ds; |
264 | 225 | inst->alg.statesize = ss; | |
265 | inst->alg.cra_init = hmac_init_tfm; | 226 | |
266 | inst->alg.cra_exit = hmac_exit_tfm; | 227 | inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + |
267 | 228 | ALIGN(ss * 2, crypto_tfm_ctx_alignment()); | |
268 | inst->alg.cra_hash.init = hmac_init; | 229 | |
269 | inst->alg.cra_hash.update = hmac_update; | 230 | inst->alg.base.cra_init = hmac_init_tfm; |
270 | inst->alg.cra_hash.final = hmac_final; | 231 | inst->alg.base.cra_exit = hmac_exit_tfm; |
271 | inst->alg.cra_hash.digest = hmac_digest; | 232 | |
272 | inst->alg.cra_hash.setkey = hmac_setkey; | 233 | inst->alg.init = hmac_init; |
234 | inst->alg.update = hmac_update; | ||
235 | inst->alg.final = hmac_final; | ||
236 | inst->alg.finup = hmac_finup; | ||
237 | inst->alg.export = hmac_export; | ||
238 | inst->alg.import = hmac_import; | ||
239 | inst->alg.setkey = hmac_setkey; | ||
240 | |||
241 | err = shash_register_instance(tmpl, inst); | ||
242 | if (err) { | ||
243 | out_free_inst: | ||
244 | shash_free_instance(shash_crypto_instance(inst)); | ||
245 | } | ||
273 | 246 | ||
274 | out_put_alg: | 247 | out_put_alg: |
275 | crypto_mod_put(alg); | 248 | crypto_mod_put(alg); |
276 | return inst; | 249 | return err; |
277 | } | 250 | } |
278 | 251 | ||
279 | static struct crypto_template hmac_tmpl = { | 252 | static struct crypto_template hmac_tmpl = { |
280 | .name = "hmac", | 253 | .name = "hmac", |
281 | .alloc = hmac_alloc, | 254 | .create = hmac_create, |
282 | .free = hmac_free, | 255 | .free = shash_free_instance, |
283 | .module = THIS_MODULE, | 256 | .module = THIS_MODULE, |
284 | }; | 257 | }; |
285 | 258 | ||
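The converted hmac template above is still driven through the generic shash interface, so existing callers do not change. A minimal usage sketch (not part of the patch; the function name and error handling are illustrative only):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative only: compute hmac(sha1) over a flat buffer via the shash API. */
static int hmac_sha1_example(const u8 *key, unsigned int keylen,
                             const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int err;

        tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_shash_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        /* descsize covers the child descriptor embedded by hmac_init_tfm(). */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        desc->tfm = tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        /* init + update + final in one call; finup/export/import are used internally. */
        err = crypto_shash_digest(desc, data, len, out);

        kfree(desc);
out_free_tfm:
        crypto_free_shash(tfm);
        return err;
}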
diff --git a/crypto/internal.h b/crypto/internal.h index 113579a82dff..d4384b08ab29 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published by the Free | 8 | * under the terms of the GNU General Public License as published by the Free |
9 | * Software Foundation; either version 2 of the License, or (at your option) | 9 | * Software Foundation; either version 2 of the License, or (at your option) |
10 | * any later version. | 10 | * any later version. |
11 | * | 11 | * |
12 | */ | 12 | */ |
@@ -25,12 +25,7 @@ | |||
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/rwsem.h> | 26 | #include <linux/rwsem.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | 28 | #include <linux/fips.h> | |
29 | #ifdef CONFIG_CRYPTO_FIPS | ||
30 | extern int fips_enabled; | ||
31 | #else | ||
32 | #define fips_enabled 0 | ||
33 | #endif | ||
34 | 29 | ||
35 | /* Crypto notification events. */ | 30 | /* Crypto notification events. */ |
36 | enum { | 31 | enum { |
@@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void) | |||
65 | { } | 60 | { } |
66 | #endif | 61 | #endif |
67 | 62 | ||
68 | static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg) | ||
69 | { | ||
70 | unsigned int len = alg->cra_ctxsize; | ||
71 | |||
72 | if (alg->cra_alignmask) { | ||
73 | len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); | ||
74 | len += alg->cra_digest.dia_digestsize; | ||
75 | } | ||
76 | |||
77 | return len; | ||
78 | } | ||
79 | |||
80 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) | 63 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) |
81 | { | 64 | { |
82 | return alg->cra_ctxsize; | 65 | return alg->cra_ctxsize; |
@@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); | |||
91 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); | 74 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); |
92 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); | 75 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); |
93 | 76 | ||
94 | int crypto_init_digest_ops(struct crypto_tfm *tfm); | ||
95 | int crypto_init_digest_ops_async(struct crypto_tfm *tfm); | ||
96 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); | 77 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); |
97 | int crypto_init_compress_ops(struct crypto_tfm *tfm); | 78 | int crypto_init_compress_ops(struct crypto_tfm *tfm); |
98 | 79 | ||
99 | void crypto_exit_digest_ops(struct crypto_tfm *tfm); | ||
100 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); | 80 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); |
101 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); | 81 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); |
102 | 82 | ||
@@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, | |||
111 | u32 mask); | 91 | u32 mask); |
112 | void *crypto_create_tfm(struct crypto_alg *alg, | 92 | void *crypto_create_tfm(struct crypto_alg *alg, |
113 | const struct crypto_type *frontend); | 93 | const struct crypto_type *frontend); |
94 | struct crypto_alg *crypto_find_alg(const char *alg_name, | ||
95 | const struct crypto_type *frontend, | ||
96 | u32 type, u32 mask); | ||
114 | void *crypto_alloc_tfm(const char *alg_name, | 97 | void *crypto_alloc_tfm(const char *alg_name, |
115 | const struct crypto_type *frontend, u32 type, u32 mask); | 98 | const struct crypto_type *frontend, u32 type, u32 mask); |
116 | 99 | ||
117 | int crypto_register_instance(struct crypto_template *tmpl, | ||
118 | struct crypto_instance *inst); | ||
119 | |||
120 | int crypto_register_notifier(struct notifier_block *nb); | 100 | int crypto_register_notifier(struct notifier_block *nb); |
121 | int crypto_unregister_notifier(struct notifier_block *nb); | 101 | int crypto_unregister_notifier(struct notifier_block *nb); |
122 | int crypto_probing_notify(unsigned long val, void *v); | 102 | int crypto_probing_notify(unsigned long val, void *v); |
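With the fallback definition removed here, fips_enabled comes from <linux/fips.h>, which still provides a constant 0 when CONFIG_CRYPTO_FIPS is not set, so callers can test it without an #ifdef. A small sketch (hypothetical helper, not from the patch):

#include <linux/fips.h>
#include <linux/types.h>

/* Hypothetical policy helper: only FIPS-approved algorithms pass in FIPS mode. */
static bool example_alg_allowed(bool fips_approved)
{
        return fips_approved || !fips_enabled;
}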
diff --git a/crypto/md5.c b/crypto/md5.c index 83eb52961750..30efc7dad891 100644 --- a/crypto/md5.c +++ b/crypto/md5.c | |||
@@ -16,17 +16,13 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | #include <crypto/internal/hash.h> | 18 | #include <crypto/internal/hash.h> |
19 | #include <crypto/md5.h> | ||
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/string.h> | 22 | #include <linux/string.h> |
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <asm/byteorder.h> | 24 | #include <asm/byteorder.h> |
24 | 25 | ||
25 | #define MD5_DIGEST_SIZE 16 | ||
26 | #define MD5_HMAC_BLOCK_SIZE 64 | ||
27 | #define MD5_BLOCK_WORDS 16 | ||
28 | #define MD5_HASH_WORDS 4 | ||
29 | |||
30 | #define F1(x, y, z) (z ^ (x & (y ^ z))) | 26 | #define F1(x, y, z) (z ^ (x & (y ^ z))) |
31 | #define F2(x, y, z) F1(z, x, y) | 27 | #define F2(x, y, z) F1(z, x, y) |
32 | #define F3(x, y, z) (x ^ y ^ z) | 28 | #define F3(x, y, z) (x ^ y ^ z) |
@@ -35,12 +31,6 @@ | |||
35 | #define MD5STEP(f, w, x, y, z, in, s) \ | 31 | #define MD5STEP(f, w, x, y, z, in, s) \ |
36 | (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) | 32 | (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) |
37 | 33 | ||
38 | struct md5_ctx { | ||
39 | u32 hash[MD5_HASH_WORDS]; | ||
40 | u32 block[MD5_BLOCK_WORDS]; | ||
41 | u64 byte_count; | ||
42 | }; | ||
43 | |||
44 | static void md5_transform(u32 *hash, u32 const *in) | 34 | static void md5_transform(u32 *hash, u32 const *in) |
45 | { | 35 | { |
46 | u32 a, b, c, d; | 36 | u32 a, b, c, d; |
@@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words) | |||
141 | } | 131 | } |
142 | } | 132 | } |
143 | 133 | ||
144 | static inline void md5_transform_helper(struct md5_ctx *ctx) | 134 | static inline void md5_transform_helper(struct md5_state *ctx) |
145 | { | 135 | { |
146 | le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); | 136 | le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); |
147 | md5_transform(ctx->hash, ctx->block); | 137 | md5_transform(ctx->hash, ctx->block); |
@@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx) | |||
149 | 139 | ||
150 | static int md5_init(struct shash_desc *desc) | 140 | static int md5_init(struct shash_desc *desc) |
151 | { | 141 | { |
152 | struct md5_ctx *mctx = shash_desc_ctx(desc); | 142 | struct md5_state *mctx = shash_desc_ctx(desc); |
153 | 143 | ||
154 | mctx->hash[0] = 0x67452301; | 144 | mctx->hash[0] = 0x67452301; |
155 | mctx->hash[1] = 0xefcdab89; | 145 | mctx->hash[1] = 0xefcdab89; |
@@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc) | |||
162 | 152 | ||
163 | static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) | 153 | static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) |
164 | { | 154 | { |
165 | struct md5_ctx *mctx = shash_desc_ctx(desc); | 155 | struct md5_state *mctx = shash_desc_ctx(desc); |
166 | const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); | 156 | const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); |
167 | 157 | ||
168 | mctx->byte_count += len; | 158 | mctx->byte_count += len; |
@@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) | |||
194 | 184 | ||
195 | static int md5_final(struct shash_desc *desc, u8 *out) | 185 | static int md5_final(struct shash_desc *desc, u8 *out) |
196 | { | 186 | { |
197 | struct md5_ctx *mctx = shash_desc_ctx(desc); | 187 | struct md5_state *mctx = shash_desc_ctx(desc); |
198 | const unsigned int offset = mctx->byte_count & 0x3f; | 188 | const unsigned int offset = mctx->byte_count & 0x3f; |
199 | char *p = (char *)mctx->block + offset; | 189 | char *p = (char *)mctx->block + offset; |
200 | int padding = 56 - (offset + 1); | 190 | int padding = 56 - (offset + 1); |
@@ -220,12 +210,31 @@ static int md5_final(struct shash_desc *desc, u8 *out) | |||
220 | return 0; | 210 | return 0; |
221 | } | 211 | } |
222 | 212 | ||
213 | static int md5_export(struct shash_desc *desc, void *out) | ||
214 | { | ||
215 | struct md5_state *ctx = shash_desc_ctx(desc); | ||
216 | |||
217 | memcpy(out, ctx, sizeof(*ctx)); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static int md5_import(struct shash_desc *desc, const void *in) | ||
222 | { | ||
223 | struct md5_state *ctx = shash_desc_ctx(desc); | ||
224 | |||
225 | memcpy(ctx, in, sizeof(*ctx)); | ||
226 | return 0; | ||
227 | } | ||
228 | |||
223 | static struct shash_alg alg = { | 229 | static struct shash_alg alg = { |
224 | .digestsize = MD5_DIGEST_SIZE, | 230 | .digestsize = MD5_DIGEST_SIZE, |
225 | .init = md5_init, | 231 | .init = md5_init, |
226 | .update = md5_update, | 232 | .update = md5_update, |
227 | .final = md5_final, | 233 | .final = md5_final, |
228 | .descsize = sizeof(struct md5_ctx), | 234 | .export = md5_export, |
235 | .import = md5_import, | ||
236 | .descsize = sizeof(struct md5_state), | ||
237 | .statesize = sizeof(struct md5_state), | ||
229 | .base = { | 238 | .base = { |
230 | .cra_name = "md5", | 239 | .cra_name = "md5", |
231 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 240 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
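The new export/import pair simply copies struct md5_state, and statesize advertises how big that snapshot is; hmac uses it to preload the keyed ipad/opad states, and callers can use it to checkpoint a long-running hash. A save/resume sketch (not from the patch; assumes desc already points at an md5 shash descriptor):

#include <crypto/hash.h>
#include <crypto/md5.h>

/* Illustrative only: hash part1, snapshot the state, then resume with part2. */
static int md5_checkpoint_example(struct shash_desc *desc,
                                  const u8 *part1, unsigned int len1,
                                  const u8 *part2, unsigned int len2,
                                  u8 *digest)
{
        u8 state[sizeof(struct md5_state)];     /* == crypto_shash_statesize() here */
        int err;

        err = crypto_shash_init(desc) ?:
              crypto_shash_update(desc, part1, len1) ?:
              crypto_shash_export(desc, state);         /* save the partial state */
        if (err)
                return err;

        /* ...later, possibly in a different descriptor of the same algorithm... */
        return crypto_shash_import(desc, state) ?:
               crypto_shash_finup(desc, part2, len2, digest);
}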
diff --git a/crypto/pcompress.c b/crypto/pcompress.c index bcadc03726b7..f7c4a7d7412e 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c | |||
@@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
36 | return 0; | 36 | return 0; |
37 | } | 37 | } |
38 | 38 | ||
39 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg, | 39 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) |
40 | const struct crypto_type *frontend) | ||
41 | { | 40 | { |
42 | return alg->cra_ctxsize; | 41 | return alg->cra_ctxsize; |
43 | } | 42 | } |
44 | 43 | ||
45 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm, | 44 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) |
46 | const struct crypto_type *frontend) | ||
47 | { | 45 | { |
48 | return 0; | 46 | return 0; |
49 | } | 47 | } |
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c new file mode 100644 index 000000000000..75586f1f86e7 --- /dev/null +++ b/crypto/pcrypt.c | |||
@@ -0,0 +1,566 @@ | |||
1 | /* | ||
2 | * pcrypt - Parallel crypto wrapper. | ||
3 | * | ||
4 | * Copyright (C) 2009 secunet Security Networks AG | ||
5 | * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms and conditions of the GNU General Public License, | ||
9 | * version 2, as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program; if not, write to the Free Software Foundation, Inc., | ||
18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | */ | ||
20 | |||
21 | #include <crypto/algapi.h> | ||
22 | #include <crypto/internal/aead.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/notifier.h> | ||
28 | #include <linux/kobject.h> | ||
29 | #include <linux/cpu.h> | ||
30 | #include <crypto/pcrypt.h> | ||
31 | |||
32 | struct padata_pcrypt { | ||
33 | struct padata_instance *pinst; | ||
34 | struct workqueue_struct *wq; | ||
35 | |||
36 | /* | ||
37 | * Cpumask for callback CPUs. It should be equal | ||
38 | * to the serial cpumask of the corresponding padata instance, | ||
39 | * so it is updated when padata notifies us about a | ||
40 | * serial cpumask change. | ||
41 | * | ||
42 | * cb_cpumask is protected by RCU. This fact prevents us from | ||
43 | * using cpumask_var_t directly because the actual type of | ||
44 | * cpumask_var_t depends on the kernel configuration (particularly | ||
45 | * on the CONFIG_CPUMASK_OFFSTACK macro). Depending on the | ||
46 | * configuration, cpumask_var_t may be either a pointer to the | ||
47 | * struct cpumask or a variable allocated on the stack. Thus we | ||
48 | * cannot safely use cpumask_var_t with RCU operations such as | ||
49 | * rcu_assign_pointer or rcu_dereference, so cpumask_var_t is | ||
50 | * wrapped in struct pcrypt_cpumask, which makes it usable with RCU. | ||
51 | */ | ||
52 | struct pcrypt_cpumask { | ||
53 | cpumask_var_t mask; | ||
54 | } *cb_cpumask; | ||
55 | struct notifier_block nblock; | ||
56 | }; | ||
57 | |||
58 | static struct padata_pcrypt pencrypt; | ||
59 | static struct padata_pcrypt pdecrypt; | ||
60 | static struct kset *pcrypt_kset; | ||
61 | |||
62 | struct pcrypt_instance_ctx { | ||
63 | struct crypto_spawn spawn; | ||
64 | unsigned int tfm_count; | ||
65 | }; | ||
66 | |||
67 | struct pcrypt_aead_ctx { | ||
68 | struct crypto_aead *child; | ||
69 | unsigned int cb_cpu; | ||
70 | }; | ||
71 | |||
72 | static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, | ||
73 | struct padata_pcrypt *pcrypt) | ||
74 | { | ||
75 | unsigned int cpu_index, cpu, i; | ||
76 | struct pcrypt_cpumask *cpumask; | ||
77 | |||
78 | cpu = *cb_cpu; | ||
79 | |||
80 | rcu_read_lock_bh(); | ||
81 | cpumask = rcu_dereference(pcrypt->cb_cpumask); | ||
82 | if (cpumask_test_cpu(cpu, cpumask->mask)) | ||
83 | goto out; | ||
84 | |||
85 | if (!cpumask_weight(cpumask->mask)) | ||
86 | goto out; | ||
87 | |||
88 | cpu_index = cpu % cpumask_weight(cpumask->mask); | ||
89 | |||
90 | cpu = cpumask_first(cpumask->mask); | ||
91 | for (i = 0; i < cpu_index; i++) | ||
92 | cpu = cpumask_next(cpu, cpumask->mask); | ||
93 | |||
94 | *cb_cpu = cpu; | ||
95 | |||
96 | out: | ||
97 | rcu_read_unlock_bh(); | ||
98 | return padata_do_parallel(pcrypt->pinst, padata, cpu); | ||
99 | } | ||
100 | |||
101 | static int pcrypt_aead_setkey(struct crypto_aead *parent, | ||
102 | const u8 *key, unsigned int keylen) | ||
103 | { | ||
104 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); | ||
105 | |||
106 | return crypto_aead_setkey(ctx->child, key, keylen); | ||
107 | } | ||
108 | |||
109 | static int pcrypt_aead_setauthsize(struct crypto_aead *parent, | ||
110 | unsigned int authsize) | ||
111 | { | ||
112 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); | ||
113 | |||
114 | return crypto_aead_setauthsize(ctx->child, authsize); | ||
115 | } | ||
116 | |||
117 | static void pcrypt_aead_serial(struct padata_priv *padata) | ||
118 | { | ||
119 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
120 | struct aead_request *req = pcrypt_request_ctx(preq); | ||
121 | |||
122 | aead_request_complete(req->base.data, padata->info); | ||
123 | } | ||
124 | |||
125 | static void pcrypt_aead_giv_serial(struct padata_priv *padata) | ||
126 | { | ||
127 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
128 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); | ||
129 | |||
130 | aead_request_complete(req->areq.base.data, padata->info); | ||
131 | } | ||
132 | |||
133 | static void pcrypt_aead_done(struct crypto_async_request *areq, int err) | ||
134 | { | ||
135 | struct aead_request *req = areq->data; | ||
136 | struct pcrypt_request *preq = aead_request_ctx(req); | ||
137 | struct padata_priv *padata = pcrypt_request_padata(preq); | ||
138 | |||
139 | padata->info = err; | ||
140 | req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
141 | |||
142 | padata_do_serial(padata); | ||
143 | } | ||
144 | |||
145 | static void pcrypt_aead_enc(struct padata_priv *padata) | ||
146 | { | ||
147 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
148 | struct aead_request *req = pcrypt_request_ctx(preq); | ||
149 | |||
150 | padata->info = crypto_aead_encrypt(req); | ||
151 | |||
152 | if (padata->info == -EINPROGRESS) | ||
153 | return; | ||
154 | |||
155 | padata_do_serial(padata); | ||
156 | } | ||
157 | |||
158 | static int pcrypt_aead_encrypt(struct aead_request *req) | ||
159 | { | ||
160 | int err; | ||
161 | struct pcrypt_request *preq = aead_request_ctx(req); | ||
162 | struct aead_request *creq = pcrypt_request_ctx(preq); | ||
163 | struct padata_priv *padata = pcrypt_request_padata(preq); | ||
164 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
165 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | ||
166 | u32 flags = aead_request_flags(req); | ||
167 | |||
168 | memset(padata, 0, sizeof(struct padata_priv)); | ||
169 | |||
170 | padata->parallel = pcrypt_aead_enc; | ||
171 | padata->serial = pcrypt_aead_serial; | ||
172 | |||
173 | aead_request_set_tfm(creq, ctx->child); | ||
174 | aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | ||
175 | pcrypt_aead_done, req); | ||
176 | aead_request_set_crypt(creq, req->src, req->dst, | ||
177 | req->cryptlen, req->iv); | ||
178 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | ||
179 | |||
180 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); | ||
181 | if (!err) | ||
182 | return -EINPROGRESS; | ||
183 | |||
184 | return err; | ||
185 | } | ||
186 | |||
187 | static void pcrypt_aead_dec(struct padata_priv *padata) | ||
188 | { | ||
189 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
190 | struct aead_request *req = pcrypt_request_ctx(preq); | ||
191 | |||
192 | padata->info = crypto_aead_decrypt(req); | ||
193 | |||
194 | if (padata->info == -EINPROGRESS) | ||
195 | return; | ||
196 | |||
197 | padata_do_serial(padata); | ||
198 | } | ||
199 | |||
200 | static int pcrypt_aead_decrypt(struct aead_request *req) | ||
201 | { | ||
202 | int err; | ||
203 | struct pcrypt_request *preq = aead_request_ctx(req); | ||
204 | struct aead_request *creq = pcrypt_request_ctx(preq); | ||
205 | struct padata_priv *padata = pcrypt_request_padata(preq); | ||
206 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
207 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | ||
208 | u32 flags = aead_request_flags(req); | ||
209 | |||
210 | memset(padata, 0, sizeof(struct padata_priv)); | ||
211 | |||
212 | padata->parallel = pcrypt_aead_dec; | ||
213 | padata->serial = pcrypt_aead_serial; | ||
214 | |||
215 | aead_request_set_tfm(creq, ctx->child); | ||
216 | aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | ||
217 | pcrypt_aead_done, req); | ||
218 | aead_request_set_crypt(creq, req->src, req->dst, | ||
219 | req->cryptlen, req->iv); | ||
220 | aead_request_set_assoc(creq, req->assoc, req->assoclen); | ||
221 | |||
222 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); | ||
223 | if (!err) | ||
224 | return -EINPROGRESS; | ||
225 | |||
226 | return err; | ||
227 | } | ||
228 | |||
229 | static void pcrypt_aead_givenc(struct padata_priv *padata) | ||
230 | { | ||
231 | struct pcrypt_request *preq = pcrypt_padata_request(padata); | ||
232 | struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); | ||
233 | |||
234 | padata->info = crypto_aead_givencrypt(req); | ||
235 | |||
236 | if (padata->info == -EINPROGRESS) | ||
237 | return; | ||
238 | |||
239 | padata_do_serial(padata); | ||
240 | } | ||
241 | |||
242 | static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) | ||
243 | { | ||
244 | int err; | ||
245 | struct aead_request *areq = &req->areq; | ||
246 | struct pcrypt_request *preq = aead_request_ctx(areq); | ||
247 | struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq); | ||
248 | struct padata_priv *padata = pcrypt_request_padata(preq); | ||
249 | struct crypto_aead *aead = aead_givcrypt_reqtfm(req); | ||
250 | struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); | ||
251 | u32 flags = aead_request_flags(areq); | ||
252 | |||
253 | memset(padata, 0, sizeof(struct padata_priv)); | ||
254 | |||
255 | padata->parallel = pcrypt_aead_givenc; | ||
256 | padata->serial = pcrypt_aead_giv_serial; | ||
257 | |||
258 | aead_givcrypt_set_tfm(creq, ctx->child); | ||
259 | aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, | ||
260 | pcrypt_aead_done, areq); | ||
261 | aead_givcrypt_set_crypt(creq, areq->src, areq->dst, | ||
262 | areq->cryptlen, areq->iv); | ||
263 | aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); | ||
264 | aead_givcrypt_set_giv(creq, req->giv, req->seq); | ||
265 | |||
266 | err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); | ||
267 | if (!err) | ||
268 | return -EINPROGRESS; | ||
269 | |||
270 | return err; | ||
271 | } | ||
272 | |||
273 | static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) | ||
274 | { | ||
275 | int cpu, cpu_index; | ||
276 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
277 | struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
278 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
279 | struct crypto_aead *cipher; | ||
280 | |||
281 | ictx->tfm_count++; | ||
282 | |||
283 | cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask); | ||
284 | |||
285 | ctx->cb_cpu = cpumask_first(cpu_active_mask); | ||
286 | for (cpu = 0; cpu < cpu_index; cpu++) | ||
287 | ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask); | ||
288 | |||
289 | cipher = crypto_spawn_aead(crypto_instance_ctx(inst)); | ||
290 | |||
291 | if (IS_ERR(cipher)) | ||
292 | return PTR_ERR(cipher); | ||
293 | |||
294 | ctx->child = cipher; | ||
295 | tfm->crt_aead.reqsize = sizeof(struct pcrypt_request) | ||
296 | + sizeof(struct aead_givcrypt_request) | ||
297 | + crypto_aead_reqsize(cipher); | ||
298 | |||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm) | ||
303 | { | ||
304 | struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); | ||
305 | |||
306 | crypto_free_aead(ctx->child); | ||
307 | } | ||
308 | |||
309 | static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg) | ||
310 | { | ||
311 | struct crypto_instance *inst; | ||
312 | struct pcrypt_instance_ctx *ctx; | ||
313 | int err; | ||
314 | |||
315 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
316 | if (!inst) { | ||
317 | inst = ERR_PTR(-ENOMEM); | ||
318 | goto out; | ||
319 | } | ||
320 | |||
321 | err = -ENAMETOOLONG; | ||
322 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
323 | "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
324 | goto out_free_inst; | ||
325 | |||
326 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | ||
327 | |||
328 | ctx = crypto_instance_ctx(inst); | ||
329 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
330 | CRYPTO_ALG_TYPE_MASK); | ||
331 | if (err) | ||
332 | goto out_free_inst; | ||
333 | |||
334 | inst->alg.cra_priority = alg->cra_priority + 100; | ||
335 | inst->alg.cra_blocksize = alg->cra_blocksize; | ||
336 | inst->alg.cra_alignmask = alg->cra_alignmask; | ||
337 | |||
338 | out: | ||
339 | return inst; | ||
340 | |||
341 | out_free_inst: | ||
342 | kfree(inst); | ||
343 | inst = ERR_PTR(err); | ||
344 | goto out; | ||
345 | } | ||
346 | |||
347 | static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, | ||
348 | u32 type, u32 mask) | ||
349 | { | ||
350 | struct crypto_instance *inst; | ||
351 | struct crypto_alg *alg; | ||
352 | |||
353 | alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); | ||
354 | if (IS_ERR(alg)) | ||
355 | return ERR_CAST(alg); | ||
356 | |||
357 | inst = pcrypt_alloc_instance(alg); | ||
358 | if (IS_ERR(inst)) | ||
359 | goto out_put_alg; | ||
360 | |||
361 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
362 | inst->alg.cra_type = &crypto_aead_type; | ||
363 | |||
364 | inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; | ||
365 | inst->alg.cra_aead.geniv = alg->cra_aead.geniv; | ||
366 | inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; | ||
367 | |||
368 | inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); | ||
369 | |||
370 | inst->alg.cra_init = pcrypt_aead_init_tfm; | ||
371 | inst->alg.cra_exit = pcrypt_aead_exit_tfm; | ||
372 | |||
373 | inst->alg.cra_aead.setkey = pcrypt_aead_setkey; | ||
374 | inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize; | ||
375 | inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt; | ||
376 | inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt; | ||
377 | inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt; | ||
378 | |||
379 | out_put_alg: | ||
380 | crypto_mod_put(alg); | ||
381 | return inst; | ||
382 | } | ||
383 | |||
384 | static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) | ||
385 | { | ||
386 | struct crypto_attr_type *algt; | ||
387 | |||
388 | algt = crypto_get_attr_type(tb); | ||
389 | if (IS_ERR(algt)) | ||
390 | return ERR_CAST(algt); | ||
391 | |||
392 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | ||
393 | case CRYPTO_ALG_TYPE_AEAD: | ||
394 | return pcrypt_alloc_aead(tb, algt->type, algt->mask); | ||
395 | } | ||
396 | |||
397 | return ERR_PTR(-EINVAL); | ||
398 | } | ||
399 | |||
400 | static void pcrypt_free(struct crypto_instance *inst) | ||
401 | { | ||
402 | struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
403 | |||
404 | crypto_drop_spawn(&ctx->spawn); | ||
405 | kfree(inst); | ||
406 | } | ||
407 | |||
408 | static int pcrypt_cpumask_change_notify(struct notifier_block *self, | ||
409 | unsigned long val, void *data) | ||
410 | { | ||
411 | struct padata_pcrypt *pcrypt; | ||
412 | struct pcrypt_cpumask *new_mask, *old_mask; | ||
413 | struct padata_cpumask *cpumask = (struct padata_cpumask *)data; | ||
414 | |||
415 | if (!(val & PADATA_CPU_SERIAL)) | ||
416 | return 0; | ||
417 | |||
418 | pcrypt = container_of(self, struct padata_pcrypt, nblock); | ||
419 | new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); | ||
420 | if (!new_mask) | ||
421 | return -ENOMEM; | ||
422 | if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { | ||
423 | kfree(new_mask); | ||
424 | return -ENOMEM; | ||
425 | } | ||
426 | |||
427 | old_mask = pcrypt->cb_cpumask; | ||
428 | |||
429 | cpumask_copy(new_mask->mask, cpumask->cbcpu); | ||
430 | rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); | ||
431 | synchronize_rcu_bh(); | ||
432 | |||
433 | free_cpumask_var(old_mask->mask); | ||
434 | kfree(old_mask); | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) | ||
439 | { | ||
440 | int ret; | ||
441 | |||
442 | pinst->kobj.kset = pcrypt_kset; | ||
443 | ret = kobject_add(&pinst->kobj, NULL, name); | ||
444 | if (!ret) | ||
445 | kobject_uevent(&pinst->kobj, KOBJ_ADD); | ||
446 | |||
447 | return ret; | ||
448 | } | ||
449 | |||
450 | static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, | ||
451 | const char *name) | ||
452 | { | ||
453 | int ret = -ENOMEM; | ||
454 | struct pcrypt_cpumask *mask; | ||
455 | |||
456 | get_online_cpus(); | ||
457 | |||
458 | pcrypt->wq = create_workqueue(name); | ||
459 | if (!pcrypt->wq) | ||
460 | goto err; | ||
461 | |||
462 | pcrypt->pinst = padata_alloc_possible(pcrypt->wq); | ||
463 | if (!pcrypt->pinst) | ||
464 | goto err_destroy_workqueue; | ||
465 | |||
466 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
467 | if (!mask) | ||
468 | goto err_free_padata; | ||
469 | if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { | ||
470 | kfree(mask); | ||
471 | goto err_free_padata; | ||
472 | } | ||
473 | |||
474 | cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask); | ||
475 | rcu_assign_pointer(pcrypt->cb_cpumask, mask); | ||
476 | |||
477 | pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; | ||
478 | ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); | ||
479 | if (ret) | ||
480 | goto err_free_cpumask; | ||
481 | |||
482 | ret = pcrypt_sysfs_add(pcrypt->pinst, name); | ||
483 | if (ret) | ||
484 | goto err_unregister_notifier; | ||
485 | |||
486 | put_online_cpus(); | ||
487 | |||
488 | return ret; | ||
489 | |||
490 | err_unregister_notifier: | ||
491 | padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); | ||
492 | err_free_cpumask: | ||
493 | free_cpumask_var(mask->mask); | ||
494 | kfree(mask); | ||
495 | err_free_padata: | ||
496 | padata_free(pcrypt->pinst); | ||
497 | err_destroy_workqueue: | ||
498 | destroy_workqueue(pcrypt->wq); | ||
499 | err: | ||
500 | put_online_cpus(); | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) | ||
506 | { | ||
507 | free_cpumask_var(pcrypt->cb_cpumask->mask); | ||
508 | kfree(pcrypt->cb_cpumask); | ||
509 | |||
510 | padata_stop(pcrypt->pinst); | ||
511 | padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); | ||
512 | destroy_workqueue(pcrypt->wq); | ||
513 | padata_free(pcrypt->pinst); | ||
514 | } | ||
515 | |||
516 | static struct crypto_template pcrypt_tmpl = { | ||
517 | .name = "pcrypt", | ||
518 | .alloc = pcrypt_alloc, | ||
519 | .free = pcrypt_free, | ||
520 | .module = THIS_MODULE, | ||
521 | }; | ||
522 | |||
523 | static int __init pcrypt_init(void) | ||
524 | { | ||
525 | int err = -ENOMEM; | ||
526 | |||
527 | pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); | ||
528 | if (!pcrypt_kset) | ||
529 | goto err; | ||
530 | |||
531 | err = pcrypt_init_padata(&pencrypt, "pencrypt"); | ||
532 | if (err) | ||
533 | goto err_unreg_kset; | ||
534 | |||
535 | err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); | ||
536 | if (err) | ||
537 | goto err_deinit_pencrypt; | ||
538 | |||
539 | padata_start(pencrypt.pinst); | ||
540 | padata_start(pdecrypt.pinst); | ||
541 | |||
542 | return crypto_register_template(&pcrypt_tmpl); | ||
543 | |||
544 | err_deinit_pencrypt: | ||
545 | pcrypt_fini_padata(&pencrypt); | ||
546 | err_unreg_kset: | ||
547 | kset_unregister(pcrypt_kset); | ||
548 | err: | ||
549 | return err; | ||
550 | } | ||
551 | |||
552 | static void __exit pcrypt_exit(void) | ||
553 | { | ||
554 | pcrypt_fini_padata(&pencrypt); | ||
555 | pcrypt_fini_padata(&pdecrypt); | ||
556 | |||
557 | kset_unregister(pcrypt_kset); | ||
558 | crypto_unregister_template(&pcrypt_tmpl); | ||
559 | } | ||
560 | |||
561 | module_init(pcrypt_init); | ||
562 | module_exit(pcrypt_exit); | ||
563 | |||
564 | MODULE_LICENSE("GPL"); | ||
565 | MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); | ||
566 | MODULE_DESCRIPTION("Parallel crypto wrapper"); | ||
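For orientation (not part of the patch): the wrapper is instantiated by name like any other template, requests travel the normal asynchronous AEAD path, the crypto work runs on a padata worker CPU, and completion is signalled from the callback CPU chosen in pcrypt_do_parallel(). A sketch with key setup and per-request status plumbing omitted; the inner algorithm name is only an example:

#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static void pcrypt_example_done(struct crypto_async_request *req, int err)
{
        /* Runs on the callback CPU; real code would record err somewhere. */
        complete(req->data);
}

static int pcrypt_example(struct scatterlist *src, struct scatterlist *dst,
                          struct scatterlist *assoc, unsigned int assoclen,
                          unsigned int cryptlen, u8 *iv)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct crypto_aead *tfm;
        struct aead_request *req;
        int err;

        tfm = crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* crypto_aead_setkey()/setauthsize() omitted for brevity. */

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_aead(tfm);
                return -ENOMEM;
        }

        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  pcrypt_example_done, &done);
        aead_request_set_crypt(req, src, dst, cryptlen, iv);
        aead_request_set_assoc(req, assoc, assoclen);

        err = crypto_aead_encrypt(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&done);
                err = 0;        /* status actually arrives via the callback */
        }

        aead_request_free(req);
        crypto_free_aead(tfm);
        return err;
}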
diff --git a/crypto/proc.c b/crypto/proc.c index 5dc07e442fca..58fef67d4f4d 100644 --- a/crypto/proc.c +++ b/crypto/proc.c | |||
@@ -25,28 +25,22 @@ | |||
25 | #ifdef CONFIG_CRYPTO_FIPS | 25 | #ifdef CONFIG_CRYPTO_FIPS |
26 | static struct ctl_table crypto_sysctl_table[] = { | 26 | static struct ctl_table crypto_sysctl_table[] = { |
27 | { | 27 | { |
28 | .ctl_name = CTL_UNNUMBERED, | ||
29 | .procname = "fips_enabled", | 28 | .procname = "fips_enabled", |
30 | .data = &fips_enabled, | 29 | .data = &fips_enabled, |
31 | .maxlen = sizeof(int), | 30 | .maxlen = sizeof(int), |
32 | .mode = 0444, | 31 | .mode = 0444, |
33 | .proc_handler = &proc_dointvec | 32 | .proc_handler = proc_dointvec |
34 | }, | ||
35 | { | ||
36 | .ctl_name = 0, | ||
37 | }, | 33 | }, |
34 | {} | ||
38 | }; | 35 | }; |
39 | 36 | ||
40 | static struct ctl_table crypto_dir_table[] = { | 37 | static struct ctl_table crypto_dir_table[] = { |
41 | { | 38 | { |
42 | .ctl_name = CTL_UNNUMBERED, | ||
43 | .procname = "crypto", | 39 | .procname = "crypto", |
44 | .mode = 0555, | 40 | .mode = 0555, |
45 | .child = crypto_sysctl_table | 41 | .child = crypto_sysctl_table |
46 | }, | 42 | }, |
47 | { | 43 | {} |
48 | .ctl_name = 0, | ||
49 | }, | ||
50 | }; | 44 | }; |
51 | 45 | ||
52 | static struct ctl_table_header *crypto_sysctls; | 46 | static struct ctl_table_header *crypto_sysctls; |
@@ -115,13 +109,6 @@ static int c_show(struct seq_file *m, void *p) | |||
115 | seq_printf(m, "max keysize : %u\n", | 109 | seq_printf(m, "max keysize : %u\n", |
116 | alg->cra_cipher.cia_max_keysize); | 110 | alg->cra_cipher.cia_max_keysize); |
117 | break; | 111 | break; |
118 | |||
119 | case CRYPTO_ALG_TYPE_DIGEST: | ||
120 | seq_printf(m, "type : digest\n"); | ||
121 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | ||
122 | seq_printf(m, "digestsize : %u\n", | ||
123 | alg->cra_digest.dia_digestsize); | ||
124 | break; | ||
125 | case CRYPTO_ALG_TYPE_COMPRESS: | 112 | case CRYPTO_ALG_TYPE_COMPRESS: |
126 | seq_printf(m, "type : compression\n"); | 113 | seq_printf(m, "type : compression\n"); |
127 | break; | 114 | break; |
diff --git a/crypto/rng.c b/crypto/rng.c index 6e94bc735578..f93cb5311182 100644 --- a/crypto/rng.c +++ b/crypto/rng.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/random.h> | 20 | #include <linux/random.h> |
21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
22 | #include <linux/slab.h> | ||
22 | #include <linux/string.h> | 23 | #include <linux/string.h> |
23 | 24 | ||
24 | static DEFINE_MUTEX(crypto_default_rng_lock); | 25 | static DEFINE_MUTEX(crypto_default_rng_lock); |
@@ -123,4 +124,4 @@ void crypto_put_default_rng(void) | |||
123 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); | 124 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); |
124 | 125 | ||
125 | MODULE_LICENSE("GPL"); | 126 | MODULE_LICENSE("GPL"); |
126 | MODULE_DESCRIPTION("Random Number Genertor"); | 127 | MODULE_DESCRIPTION("Random Number Generator"); |
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 3de89a424401..41e529af0773 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c | |||
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, | |||
68 | 68 | ||
69 | void scatterwalk_done(struct scatter_walk *walk, int out, int more) | 69 | void scatterwalk_done(struct scatter_walk *walk, int out, int more) |
70 | { | 70 | { |
71 | if (!offset_in_page(walk->offset) || !more) | 71 | if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) |
72 | scatterwalk_pagedone(walk, out, more); | 72 | scatterwalk_pagedone(walk, out, more); |
73 | } | 73 | } |
74 | EXPORT_SYMBOL_GPL(scatterwalk_done); | 74 | EXPORT_SYMBOL_GPL(scatterwalk_done); |
diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 5a013a8bf87a..4c4491229417 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/slab.h> | ||
23 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
24 | #include <linux/string.h> | 25 | #include <linux/string.h> |
25 | 26 | ||
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 9efef20454cb..0416091bf45a 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c | |||
@@ -25,31 +25,21 @@ | |||
25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
27 | 27 | ||
28 | struct sha1_ctx { | ||
29 | u64 count; | ||
30 | u32 state[5]; | ||
31 | u8 buffer[64]; | ||
32 | }; | ||
33 | |||
34 | static int sha1_init(struct shash_desc *desc) | 28 | static int sha1_init(struct shash_desc *desc) |
35 | { | 29 | { |
36 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 30 | struct sha1_state *sctx = shash_desc_ctx(desc); |
37 | 31 | ||
38 | static const struct sha1_ctx initstate = { | 32 | *sctx = (struct sha1_state){ |
39 | 0, | 33 | .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, |
40 | { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, | ||
41 | { 0, } | ||
42 | }; | 34 | }; |
43 | 35 | ||
44 | *sctx = initstate; | ||
45 | |||
46 | return 0; | 36 | return 0; |
47 | } | 37 | } |
48 | 38 | ||
49 | static int sha1_update(struct shash_desc *desc, const u8 *data, | 39 | static int sha1_update(struct shash_desc *desc, const u8 *data, |
50 | unsigned int len) | 40 | unsigned int len) |
51 | { | 41 | { |
52 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 42 | struct sha1_state *sctx = shash_desc_ctx(desc); |
53 | unsigned int partial, done; | 43 | unsigned int partial, done; |
54 | const u8 *src; | 44 | const u8 *src; |
55 | 45 | ||
@@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, | |||
85 | /* Add padding and return the message digest. */ | 75 | /* Add padding and return the message digest. */ |
86 | static int sha1_final(struct shash_desc *desc, u8 *out) | 76 | static int sha1_final(struct shash_desc *desc, u8 *out) |
87 | { | 77 | { |
88 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 78 | struct sha1_state *sctx = shash_desc_ctx(desc); |
89 | __be32 *dst = (__be32 *)out; | 79 | __be32 *dst = (__be32 *)out; |
90 | u32 i, index, padlen; | 80 | u32 i, index, padlen; |
91 | __be64 bits; | 81 | __be64 bits; |
@@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out) | |||
111 | return 0; | 101 | return 0; |
112 | } | 102 | } |
113 | 103 | ||
104 | static int sha1_export(struct shash_desc *desc, void *out) | ||
105 | { | ||
106 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
107 | |||
108 | memcpy(out, sctx, sizeof(*sctx)); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int sha1_import(struct shash_desc *desc, const void *in) | ||
113 | { | ||
114 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
115 | |||
116 | memcpy(sctx, in, sizeof(*sctx)); | ||
117 | return 0; | ||
118 | } | ||
119 | |||
114 | static struct shash_alg alg = { | 120 | static struct shash_alg alg = { |
115 | .digestsize = SHA1_DIGEST_SIZE, | 121 | .digestsize = SHA1_DIGEST_SIZE, |
116 | .init = sha1_init, | 122 | .init = sha1_init, |
117 | .update = sha1_update, | 123 | .update = sha1_update, |
118 | .final = sha1_final, | 124 | .final = sha1_final, |
119 | .descsize = sizeof(struct sha1_ctx), | 125 | .export = sha1_export, |
126 | .import = sha1_import, | ||
127 | .descsize = sizeof(struct sha1_state), | ||
128 | .statesize = sizeof(struct sha1_state), | ||
120 | .base = { | 129 | .base = { |
121 | .cra_name = "sha1", | 130 | .cra_name = "sha1", |
122 | .cra_driver_name= "sha1-generic", | 131 | .cra_driver_name= "sha1-generic", |
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 6349d8339d37..c48459ebf05b 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c | |||
@@ -25,12 +25,6 @@ | |||
25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
27 | 27 | ||
28 | struct sha256_ctx { | ||
29 | u32 count[2]; | ||
30 | u32 state[8]; | ||
31 | u8 buf[128]; | ||
32 | }; | ||
33 | |||
34 | static inline u32 Ch(u32 x, u32 y, u32 z) | 28 | static inline u32 Ch(u32 x, u32 y, u32 z) |
35 | { | 29 | { |
36 | return z ^ (x & (y ^ z)); | 30 | return z ^ (x & (y ^ z)); |
@@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input) | |||
222 | 216 | ||
223 | static int sha224_init(struct shash_desc *desc) | 217 | static int sha224_init(struct shash_desc *desc) |
224 | { | 218 | { |
225 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 219 | struct sha256_state *sctx = shash_desc_ctx(desc); |
226 | sctx->state[0] = SHA224_H0; | 220 | sctx->state[0] = SHA224_H0; |
227 | sctx->state[1] = SHA224_H1; | 221 | sctx->state[1] = SHA224_H1; |
228 | sctx->state[2] = SHA224_H2; | 222 | sctx->state[2] = SHA224_H2; |
@@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc) | |||
231 | sctx->state[5] = SHA224_H5; | 225 | sctx->state[5] = SHA224_H5; |
232 | sctx->state[6] = SHA224_H6; | 226 | sctx->state[6] = SHA224_H6; |
233 | sctx->state[7] = SHA224_H7; | 227 | sctx->state[7] = SHA224_H7; |
234 | sctx->count[0] = 0; | 228 | sctx->count = 0; |
235 | sctx->count[1] = 0; | ||
236 | 229 | ||
237 | return 0; | 230 | return 0; |
238 | } | 231 | } |
239 | 232 | ||
240 | static int sha256_init(struct shash_desc *desc) | 233 | static int sha256_init(struct shash_desc *desc) |
241 | { | 234 | { |
242 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 235 | struct sha256_state *sctx = shash_desc_ctx(desc); |
243 | sctx->state[0] = SHA256_H0; | 236 | sctx->state[0] = SHA256_H0; |
244 | sctx->state[1] = SHA256_H1; | 237 | sctx->state[1] = SHA256_H1; |
245 | sctx->state[2] = SHA256_H2; | 238 | sctx->state[2] = SHA256_H2; |
@@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc) | |||
248 | sctx->state[5] = SHA256_H5; | 241 | sctx->state[5] = SHA256_H5; |
249 | sctx->state[6] = SHA256_H6; | 242 | sctx->state[6] = SHA256_H6; |
250 | sctx->state[7] = SHA256_H7; | 243 | sctx->state[7] = SHA256_H7; |
251 | sctx->count[0] = sctx->count[1] = 0; | 244 | sctx->count = 0; |
252 | 245 | ||
253 | return 0; | 246 | return 0; |
254 | } | 247 | } |
@@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc) | |||
256 | static int sha256_update(struct shash_desc *desc, const u8 *data, | 249 | static int sha256_update(struct shash_desc *desc, const u8 *data, |
257 | unsigned int len) | 250 | unsigned int len) |
258 | { | 251 | { |
259 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 252 | struct sha256_state *sctx = shash_desc_ctx(desc); |
260 | unsigned int i, index, part_len; | 253 | unsigned int partial, done; |
261 | 254 | const u8 *src; | |
262 | /* Compute number of bytes mod 128 */ | 255 | |
263 | index = (unsigned int)((sctx->count[0] >> 3) & 0x3f); | 256 | partial = sctx->count & 0x3f; |
264 | 257 | sctx->count += len; | |
265 | /* Update number of bits */ | 258 | done = 0; |
266 | if ((sctx->count[0] += (len << 3)) < (len << 3)) { | 259 | src = data; |
267 | sctx->count[1]++; | 260 | |
268 | sctx->count[1] += (len >> 29); | 261 | if ((partial + len) > 63) { |
269 | } | 262 | if (partial) { |
270 | 263 | done = -partial; | |
271 | part_len = 64 - index; | 264 | memcpy(sctx->buf + partial, data, done + 64); |
272 | 265 | src = sctx->buf; | |
273 | /* Transform as many times as possible. */ | 266 | } |
274 | if (len >= part_len) { | 267 | |
275 | memcpy(&sctx->buf[index], data, part_len); | 268 | do { |
276 | sha256_transform(sctx->state, sctx->buf); | 269 | sha256_transform(sctx->state, src); |
277 | 270 | done += 64; | |
278 | for (i = part_len; i + 63 < len; i += 64) | 271 | src = data + done; |
279 | sha256_transform(sctx->state, &data[i]); | 272 | } while (done + 63 < len); |
280 | index = 0; | 273 | |
281 | } else { | 274 | partial = 0; |
282 | i = 0; | ||
283 | } | 275 | } |
284 | 276 | memcpy(sctx->buf + partial, src, len - done); | |
285 | /* Buffer remaining input */ | ||
286 | memcpy(&sctx->buf[index], &data[i], len-i); | ||
287 | 277 | ||
288 | return 0; | 278 | return 0; |
289 | } | 279 | } |
290 | 280 | ||
291 | static int sha256_final(struct shash_desc *desc, u8 *out) | 281 | static int sha256_final(struct shash_desc *desc, u8 *out) |
292 | { | 282 | { |
293 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 283 | struct sha256_state *sctx = shash_desc_ctx(desc); |
294 | __be32 *dst = (__be32 *)out; | 284 | __be32 *dst = (__be32 *)out; |
295 | __be32 bits[2]; | 285 | __be64 bits; |
296 | unsigned int index, pad_len; | 286 | unsigned int index, pad_len; |
297 | int i; | 287 | int i; |
298 | static const u8 padding[64] = { 0x80, }; | 288 | static const u8 padding[64] = { 0x80, }; |
299 | 289 | ||
300 | /* Save number of bits */ | 290 | /* Save number of bits */ |
301 | bits[1] = cpu_to_be32(sctx->count[0]); | 291 | bits = cpu_to_be64(sctx->count << 3); |
302 | bits[0] = cpu_to_be32(sctx->count[1]); | ||
303 | 292 | ||
304 | /* Pad out to 56 mod 64. */ | 293 | /* Pad out to 56 mod 64. */ |
305 | index = (sctx->count[0] >> 3) & 0x3f; | 294 | index = sctx->count & 0x3f; |
306 | pad_len = (index < 56) ? (56 - index) : ((64+56) - index); | 295 | pad_len = (index < 56) ? (56 - index) : ((64+56) - index); |
307 | sha256_update(desc, padding, pad_len); | 296 | sha256_update(desc, padding, pad_len); |
308 | 297 | ||
309 | /* Append length (before padding) */ | 298 | /* Append length (before padding) */ |
310 | sha256_update(desc, (const u8 *)bits, sizeof(bits)); | 299 | sha256_update(desc, (const u8 *)&bits, sizeof(bits)); |
311 | 300 | ||
312 | /* Store state in digest */ | 301 | /* Store state in digest */ |
313 | for (i = 0; i < 8; i++) | 302 | for (i = 0; i < 8; i++) |
@@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash) | |||
331 | return 0; | 320 | return 0; |
332 | } | 321 | } |
333 | 322 | ||
323 | static int sha256_export(struct shash_desc *desc, void *out) | ||
324 | { | ||
325 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
326 | |||
327 | memcpy(out, sctx, sizeof(*sctx)); | ||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | static int sha256_import(struct shash_desc *desc, const void *in) | ||
332 | { | ||
333 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
334 | |||
335 | memcpy(sctx, in, sizeof(*sctx)); | ||
336 | return 0; | ||
337 | } | ||
338 | |||
334 | static struct shash_alg sha256 = { | 339 | static struct shash_alg sha256 = { |
335 | .digestsize = SHA256_DIGEST_SIZE, | 340 | .digestsize = SHA256_DIGEST_SIZE, |
336 | .init = sha256_init, | 341 | .init = sha256_init, |
337 | .update = sha256_update, | 342 | .update = sha256_update, |
338 | .final = sha256_final, | 343 | .final = sha256_final, |
339 | .descsize = sizeof(struct sha256_ctx), | 344 | .export = sha256_export, |
345 | .import = sha256_import, | ||
346 | .descsize = sizeof(struct sha256_state), | ||
347 | .statesize = sizeof(struct sha256_state), | ||
340 | .base = { | 348 | .base = { |
341 | .cra_name = "sha256", | 349 | .cra_name = "sha256", |
342 | .cra_driver_name= "sha256-generic", | 350 | .cra_driver_name= "sha256-generic", |
@@ -351,7 +359,7 @@ static struct shash_alg sha224 = { | |||
351 | .init = sha224_init, | 359 | .init = sha224_init, |
352 | .update = sha256_update, | 360 | .update = sha256_update, |
353 | .final = sha224_final, | 361 | .final = sha224_final, |
354 | .descsize = sizeof(struct sha256_ctx), | 362 | .descsize = sizeof(struct sha256_state), |
355 | .base = { | 363 | .base = { |
356 | .cra_name = "sha224", | 364 | .cra_name = "sha224", |
357 | .cra_driver_name= "sha224-generic", | 365 | .cra_driver_name= "sha224-generic", |
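One line in the rewritten sha256_update() is easy to misread: done starts at -partial, so done + 64 is exactly the number of bytes needed to top up the half-filled block, and data + done afterwards points at the first input byte not yet consumed. The same idiom in isolation (purely illustrative, not from the patch):

#include <linux/string.h>
#include <linux/types.h>

/* With "partial" bytes already buffered, top up one 64-byte block. */
static const u8 *fill_first_block(u8 buf[64], unsigned int partial,
                                  const u8 *data, int *done)
{
        *done = -partial;                        /* e.g. partial == 20 -> done == -20 */
        memcpy(buf + partial, data, *done + 64); /* copies the missing 44 bytes */
        return buf;                              /* first transform reads the buffer */
}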
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 3bea38d12242..9ed9f60316e5 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c | |||
@@ -21,12 +21,6 @@ | |||
21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
22 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
23 | 23 | ||
24 | struct sha512_ctx { | ||
25 | u64 state[8]; | ||
26 | u32 count[4]; | ||
27 | u8 buf[128]; | ||
28 | }; | ||
29 | |||
30 | static DEFINE_PER_CPU(u64[80], msg_schedule); | 24 | static DEFINE_PER_CPU(u64[80], msg_schedule); |
31 | 25 | ||
32 | static inline u64 Ch(u64 x, u64 y, u64 z) | 26 | static inline u64 Ch(u64 x, u64 y, u64 z) |
@@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input) | |||
141 | static int | 135 | static int |
142 | sha512_init(struct shash_desc *desc) | 136 | sha512_init(struct shash_desc *desc) |
143 | { | 137 | { |
144 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 138 | struct sha512_state *sctx = shash_desc_ctx(desc); |
145 | sctx->state[0] = SHA512_H0; | 139 | sctx->state[0] = SHA512_H0; |
146 | sctx->state[1] = SHA512_H1; | 140 | sctx->state[1] = SHA512_H1; |
147 | sctx->state[2] = SHA512_H2; | 141 | sctx->state[2] = SHA512_H2; |
@@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc) | |||
150 | sctx->state[5] = SHA512_H5; | 144 | sctx->state[5] = SHA512_H5; |
151 | sctx->state[6] = SHA512_H6; | 145 | sctx->state[6] = SHA512_H6; |
152 | sctx->state[7] = SHA512_H7; | 146 | sctx->state[7] = SHA512_H7; |
153 | sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; | 147 | sctx->count[0] = sctx->count[1] = 0; |
154 | 148 | ||
155 | return 0; | 149 | return 0; |
156 | } | 150 | } |
@@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc) | |||
158 | static int | 152 | static int |
159 | sha384_init(struct shash_desc *desc) | 153 | sha384_init(struct shash_desc *desc) |
160 | { | 154 | { |
161 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 155 | struct sha512_state *sctx = shash_desc_ctx(desc); |
162 | sctx->state[0] = SHA384_H0; | 156 | sctx->state[0] = SHA384_H0; |
163 | sctx->state[1] = SHA384_H1; | 157 | sctx->state[1] = SHA384_H1; |
164 | sctx->state[2] = SHA384_H2; | 158 | sctx->state[2] = SHA384_H2; |
@@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc) | |||
167 | sctx->state[5] = SHA384_H5; | 161 | sctx->state[5] = SHA384_H5; |
168 | sctx->state[6] = SHA384_H6; | 162 | sctx->state[6] = SHA384_H6; |
169 | sctx->state[7] = SHA384_H7; | 163 | sctx->state[7] = SHA384_H7; |
170 | sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; | 164 | sctx->count[0] = sctx->count[1] = 0; |
171 | 165 | ||
172 | return 0; | 166 | return 0; |
173 | } | 167 | } |
@@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc) | |||
175 | static int | 169 | static int |
176 | sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) | 170 | sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) |
177 | { | 171 | { |
178 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 172 | struct sha512_state *sctx = shash_desc_ctx(desc); |
179 | 173 | ||
180 | unsigned int i, index, part_len; | 174 | unsigned int i, index, part_len; |
181 | 175 | ||
182 | /* Compute number of bytes mod 128 */ | 176 | /* Compute number of bytes mod 128 */ |
183 | index = (unsigned int)((sctx->count[0] >> 3) & 0x7F); | 177 | index = sctx->count[0] & 0x7f; |
184 | 178 | ||
185 | /* Update number of bits */ | 179 | /* Update number of bytes */ |
186 | if ((sctx->count[0] += (len << 3)) < (len << 3)) { | 180 | if (!(sctx->count[0] += len)) |
187 | if ((sctx->count[1] += 1) < 1) | 181 | sctx->count[1]++; |
188 | if ((sctx->count[2] += 1) < 1) | ||
189 | sctx->count[3]++; | ||
190 | sctx->count[1] += (len >> 29); | ||
191 | } | ||
192 | 182 | ||
193 | part_len = 128 - index; | 183 | part_len = 128 - index; |
194 | 184 | ||
@@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) | |||
214 | static int | 204 | static int |
215 | sha512_final(struct shash_desc *desc, u8 *hash) | 205 | sha512_final(struct shash_desc *desc, u8 *hash) |
216 | { | 206 | { |
217 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 207 | struct sha512_state *sctx = shash_desc_ctx(desc); |
218 | static u8 padding[128] = { 0x80, }; | 208 | static u8 padding[128] = { 0x80, }; |
219 | __be64 *dst = (__be64 *)hash; | 209 | __be64 *dst = (__be64 *)hash; |
220 | __be32 bits[4]; | 210 | __be64 bits[2]; |
221 | unsigned int index, pad_len; | 211 | unsigned int index, pad_len; |
222 | int i; | 212 | int i; |
223 | 213 | ||
224 | /* Save number of bits */ | 214 | /* Save number of bits */ |
225 | bits[3] = cpu_to_be32(sctx->count[0]); | 215 | bits[1] = cpu_to_be64(sctx->count[0] << 3); |
226 | bits[2] = cpu_to_be32(sctx->count[1]); | 216 | bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); |
227 | bits[1] = cpu_to_be32(sctx->count[2]); | ||
228 | bits[0] = cpu_to_be32(sctx->count[3]); | ||
229 | 217 | ||
230 | /* Pad out to 112 mod 128. */ | 218 | /* Pad out to 112 mod 128. */ |
231 | index = (sctx->count[0] >> 3) & 0x7f; | 219 | index = sctx->count[0] & 0x7f; |
232 | pad_len = (index < 112) ? (112 - index) : ((128+112) - index); | 220 | pad_len = (index < 112) ? (112 - index) : ((128+112) - index); |
233 | sha512_update(desc, padding, pad_len); | 221 | sha512_update(desc, padding, pad_len); |
234 | 222 | ||
@@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash) | |||
240 | dst[i] = cpu_to_be64(sctx->state[i]); | 228 | dst[i] = cpu_to_be64(sctx->state[i]); |
241 | 229 | ||
242 | /* Zeroize sensitive information. */ | 230 | /* Zeroize sensitive information. */ |
243 | memset(sctx, 0, sizeof(struct sha512_ctx)); | 231 | memset(sctx, 0, sizeof(struct sha512_state)); |
244 | 232 | ||
245 | return 0; | 233 | return 0; |
246 | } | 234 | } |
@@ -262,7 +250,7 @@ static struct shash_alg sha512 = { | |||
262 | .init = sha512_init, | 250 | .init = sha512_init, |
263 | .update = sha512_update, | 251 | .update = sha512_update, |
264 | .final = sha512_final, | 252 | .final = sha512_final, |
265 | .descsize = sizeof(struct sha512_ctx), | 253 | .descsize = sizeof(struct sha512_state), |
266 | .base = { | 254 | .base = { |
267 | .cra_name = "sha512", | 255 | .cra_name = "sha512", |
268 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 256 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
@@ -276,7 +264,7 @@ static struct shash_alg sha384 = { | |||
276 | .init = sha384_init, | 264 | .init = sha384_init, |
277 | .update = sha512_update, | 265 | .update = sha512_update, |
278 | .final = sha384_final, | 266 | .final = sha384_final, |
279 | .descsize = sizeof(struct sha512_ctx), | 267 | .descsize = sizeof(struct sha512_state), |
280 | .base = { | 268 | .base = { |
281 | .cra_name = "sha384", | 269 | .cra_name = "sha384", |
282 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 270 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
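The sha512_generic.c change above replaces the four 32-bit bit counters with two 64-bit byte counters, and the 128-bit message length in bits is only reconstructed inside sha512_final(). A minimal kernel-style sketch of that reconstruction, assuming the same two-word byte count as struct sha512_state (the function and variable names are illustrative, not from the patch):

	/* count[0] holds the low 64 bits of the byte count, count[1] the high 64 bits. */
	static void sha512_bytes_to_bits(const u64 count[2], u64 bits[2])
	{
		bits[1] = count[0] << 3;                      /* low 64 bits of the bit length */
		bits[0] = (count[1] << 3) | (count[0] >> 61); /* top 3 bits carry into the high word */
	}

Multiplying a 128-bit byte count by 8 is a 3-bit left shift, so the three high bits of count[0] move into the low end of the high word; sha512_final() above stores exactly these two words big-endian into the padding block.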
diff --git a/crypto/shash.c b/crypto/shash.c index 2ccc8b0076ce..22fd9433141f 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -22,6 +22,12 @@ | |||
22 | 22 | ||
23 | static const struct crypto_type crypto_shash_type; | 23 | static const struct crypto_type crypto_shash_type; |
24 | 24 | ||
25 | static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, | ||
26 | unsigned int keylen) | ||
27 | { | ||
28 | return -ENOSYS; | ||
29 | } | ||
30 | |||
25 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | 31 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, |
26 | unsigned int keylen) | 32 | unsigned int keylen) |
27 | { | 33 | { |
@@ -31,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
31 | u8 *buffer, *alignbuffer; | 37 | u8 *buffer, *alignbuffer; |
32 | int err; | 38 | int err; |
33 | 39 | ||
34 | absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1)); | 40 | absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); |
35 | buffer = kmalloc(absize, GFP_KERNEL); | 41 | buffer = kmalloc(absize, GFP_KERNEL); |
36 | if (!buffer) | 42 | if (!buffer) |
37 | return -ENOMEM; | 43 | return -ENOMEM; |
@@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
39 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 45 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
40 | memcpy(alignbuffer, key, keylen); | 46 | memcpy(alignbuffer, key, keylen); |
41 | err = shash->setkey(tfm, alignbuffer, keylen); | 47 | err = shash->setkey(tfm, alignbuffer, keylen); |
42 | memset(alignbuffer, 0, keylen); | 48 | kzfree(buffer); |
43 | kfree(buffer); | ||
44 | return err; | 49 | return err; |
45 | } | 50 | } |
46 | 51 | ||
@@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
50 | struct shash_alg *shash = crypto_shash_alg(tfm); | 55 | struct shash_alg *shash = crypto_shash_alg(tfm); |
51 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 56 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
52 | 57 | ||
53 | if (!shash->setkey) | ||
54 | return -ENOSYS; | ||
55 | |||
56 | if ((unsigned long)key & alignmask) | 58 | if ((unsigned long)key & alignmask) |
57 | return shash_setkey_unaligned(tfm, key, keylen); | 59 | return shash_setkey_unaligned(tfm, key, keylen); |
58 | 60 | ||
@@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, | |||
74 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 76 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
75 | unsigned int unaligned_len = alignmask + 1 - | 77 | unsigned int unaligned_len = alignmask + 1 - |
76 | ((unsigned long)data & alignmask); | 78 | ((unsigned long)data & alignmask); |
77 | u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] | 79 | u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)] |
78 | __attribute__ ((aligned)); | 80 | __attribute__ ((aligned)); |
81 | u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); | ||
82 | int err; | ||
79 | 83 | ||
80 | if (unaligned_len > len) | 84 | if (unaligned_len > len) |
81 | unaligned_len = len; | 85 | unaligned_len = len; |
82 | 86 | ||
83 | memcpy(buf, data, unaligned_len); | 87 | memcpy(buf, data, unaligned_len); |
88 | err = shash->update(desc, buf, unaligned_len); | ||
89 | memset(buf, 0, unaligned_len); | ||
84 | 90 | ||
85 | return shash->update(desc, buf, unaligned_len) ?: | 91 | return err ?: |
86 | shash->update(desc, data + unaligned_len, len - unaligned_len); | 92 | shash->update(desc, data + unaligned_len, len - unaligned_len); |
87 | } | 93 | } |
88 | 94 | ||
@@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) | |||
106 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 112 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
107 | struct shash_alg *shash = crypto_shash_alg(tfm); | 113 | struct shash_alg *shash = crypto_shash_alg(tfm); |
108 | unsigned int ds = crypto_shash_digestsize(tfm); | 114 | unsigned int ds = crypto_shash_digestsize(tfm); |
109 | u8 buf[shash_align_buffer_size(ds, alignmask)] | 115 | u8 ubuf[shash_align_buffer_size(ds, alignmask)] |
110 | __attribute__ ((aligned)); | 116 | __attribute__ ((aligned)); |
117 | u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); | ||
111 | int err; | 118 | int err; |
112 | 119 | ||
113 | err = shash->final(desc, buf); | 120 | err = shash->final(desc, buf); |
121 | if (err) | ||
122 | goto out; | ||
123 | |||
114 | memcpy(out, buf, ds); | 124 | memcpy(out, buf, ds); |
125 | |||
126 | out: | ||
127 | memset(buf, 0, ds); | ||
115 | return err; | 128 | return err; |
116 | } | 129 | } |
117 | 130 | ||
@@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, | |||
142 | struct shash_alg *shash = crypto_shash_alg(tfm); | 155 | struct shash_alg *shash = crypto_shash_alg(tfm); |
143 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 156 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
144 | 157 | ||
145 | if (((unsigned long)data | (unsigned long)out) & alignmask || | 158 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
146 | !shash->finup) | ||
147 | return shash_finup_unaligned(desc, data, len, out); | 159 | return shash_finup_unaligned(desc, data, len, out); |
148 | 160 | ||
149 | return shash->finup(desc, data, len, out); | 161 | return shash->finup(desc, data, len, out); |
@@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, | |||
154 | unsigned int len, u8 *out) | 166 | unsigned int len, u8 *out) |
155 | { | 167 | { |
156 | return crypto_shash_init(desc) ?: | 168 | return crypto_shash_init(desc) ?: |
157 | crypto_shash_update(desc, data, len) ?: | 169 | crypto_shash_finup(desc, data, len, out); |
158 | crypto_shash_final(desc, out); | ||
159 | } | 170 | } |
160 | 171 | ||
161 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | 172 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, |
@@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | |||
165 | struct shash_alg *shash = crypto_shash_alg(tfm); | 176 | struct shash_alg *shash = crypto_shash_alg(tfm); |
166 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 177 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
167 | 178 | ||
168 | if (((unsigned long)data | (unsigned long)out) & alignmask || | 179 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
169 | !shash->digest) | ||
170 | return shash_digest_unaligned(desc, data, len, out); | 180 | return shash_digest_unaligned(desc, data, len, out); |
171 | 181 | ||
172 | return shash->digest(desc, data, len, out); | 182 | return shash->digest(desc, data, len, out); |
173 | } | 183 | } |
174 | EXPORT_SYMBOL_GPL(crypto_shash_digest); | 184 | EXPORT_SYMBOL_GPL(crypto_shash_digest); |
175 | 185 | ||
176 | int crypto_shash_import(struct shash_desc *desc, const u8 *in) | 186 | static int shash_default_export(struct shash_desc *desc, void *out) |
177 | { | 187 | { |
178 | struct crypto_shash *tfm = desc->tfm; | 188 | memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); |
179 | struct shash_alg *alg = crypto_shash_alg(tfm); | 189 | return 0; |
180 | 190 | } | |
181 | memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); | ||
182 | |||
183 | if (alg->reinit) | ||
184 | alg->reinit(desc); | ||
185 | 191 | ||
192 | static int shash_default_import(struct shash_desc *desc, const void *in) | ||
193 | { | ||
194 | memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); | ||
186 | return 0; | 195 | return 0; |
187 | } | 196 | } |
188 | EXPORT_SYMBOL_GPL(crypto_shash_import); | ||
189 | 197 | ||
190 | static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, | 198 | static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, |
191 | unsigned int keylen) | 199 | unsigned int keylen) |
@@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req) | |||
206 | return crypto_shash_init(desc); | 214 | return crypto_shash_init(desc); |
207 | } | 215 | } |
208 | 216 | ||
209 | static int shash_async_update(struct ahash_request *req) | 217 | int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) |
210 | { | 218 | { |
211 | struct shash_desc *desc = ahash_request_ctx(req); | ||
212 | struct crypto_hash_walk walk; | 219 | struct crypto_hash_walk walk; |
213 | int nbytes; | 220 | int nbytes; |
214 | 221 | ||
@@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req) | |||
218 | 225 | ||
219 | return nbytes; | 226 | return nbytes; |
220 | } | 227 | } |
228 | EXPORT_SYMBOL_GPL(shash_ahash_update); | ||
229 | |||
230 | static int shash_async_update(struct ahash_request *req) | ||
231 | { | ||
232 | return shash_ahash_update(req, ahash_request_ctx(req)); | ||
233 | } | ||
221 | 234 | ||
222 | static int shash_async_final(struct ahash_request *req) | 235 | static int shash_async_final(struct ahash_request *req) |
223 | { | 236 | { |
224 | return crypto_shash_final(ahash_request_ctx(req), req->result); | 237 | return crypto_shash_final(ahash_request_ctx(req), req->result); |
225 | } | 238 | } |
226 | 239 | ||
227 | static int shash_async_digest(struct ahash_request *req) | 240 | int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) |
241 | { | ||
242 | struct crypto_hash_walk walk; | ||
243 | int nbytes; | ||
244 | |||
245 | nbytes = crypto_hash_walk_first(req, &walk); | ||
246 | if (!nbytes) | ||
247 | return crypto_shash_final(desc, req->result); | ||
248 | |||
249 | do { | ||
250 | nbytes = crypto_hash_walk_last(&walk) ? | ||
251 | crypto_shash_finup(desc, walk.data, nbytes, | ||
252 | req->result) : | ||
253 | crypto_shash_update(desc, walk.data, nbytes); | ||
254 | nbytes = crypto_hash_walk_done(&walk, nbytes); | ||
255 | } while (nbytes > 0); | ||
256 | |||
257 | return nbytes; | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(shash_ahash_finup); | ||
260 | |||
261 | static int shash_async_finup(struct ahash_request *req) | ||
262 | { | ||
263 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); | ||
264 | struct shash_desc *desc = ahash_request_ctx(req); | ||
265 | |||
266 | desc->tfm = *ctx; | ||
267 | desc->flags = req->base.flags; | ||
268 | |||
269 | return shash_ahash_finup(req, desc); | ||
270 | } | ||
271 | |||
272 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) | ||
228 | { | 273 | { |
229 | struct scatterlist *sg = req->src; | 274 | struct scatterlist *sg = req->src; |
230 | unsigned int offset = sg->offset; | 275 | unsigned int offset = sg->offset; |
@@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req) | |||
232 | int err; | 277 | int err; |
233 | 278 | ||
234 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 279 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { |
235 | struct crypto_shash **ctx = | ||
236 | crypto_ahash_ctx(crypto_ahash_reqtfm(req)); | ||
237 | struct shash_desc *desc = ahash_request_ctx(req); | ||
238 | void *data; | 280 | void *data; |
239 | 281 | ||
240 | desc->tfm = *ctx; | ||
241 | desc->flags = req->base.flags; | ||
242 | |||
243 | data = crypto_kmap(sg_page(sg), 0); | 282 | data = crypto_kmap(sg_page(sg), 0); |
244 | err = crypto_shash_digest(desc, data + offset, nbytes, | 283 | err = crypto_shash_digest(desc, data + offset, nbytes, |
245 | req->result); | 284 | req->result); |
246 | crypto_kunmap(data, 0); | 285 | crypto_kunmap(data, 0); |
247 | crypto_yield(desc->flags); | 286 | crypto_yield(desc->flags); |
248 | goto out; | 287 | } else |
249 | } | 288 | err = crypto_shash_init(desc) ?: |
289 | shash_ahash_finup(req, desc); | ||
250 | 290 | ||
251 | err = shash_async_init(req); | 291 | return err; |
252 | if (err) | 292 | } |
253 | goto out; | 293 | EXPORT_SYMBOL_GPL(shash_ahash_digest); |
254 | 294 | ||
255 | err = shash_async_update(req); | 295 | static int shash_async_digest(struct ahash_request *req) |
256 | if (err) | 296 | { |
257 | goto out; | 297 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
298 | struct shash_desc *desc = ahash_request_ctx(req); | ||
258 | 299 | ||
259 | err = shash_async_final(req); | 300 | desc->tfm = *ctx; |
301 | desc->flags = req->base.flags; | ||
260 | 302 | ||
261 | out: | 303 | return shash_ahash_digest(req, desc); |
262 | return err; | 304 | } |
305 | |||
306 | static int shash_async_export(struct ahash_request *req, void *out) | ||
307 | { | ||
308 | return crypto_shash_export(ahash_request_ctx(req), out); | ||
309 | } | ||
310 | |||
311 | static int shash_async_import(struct ahash_request *req, const void *in) | ||
312 | { | ||
313 | return crypto_shash_import(ahash_request_ctx(req), in); | ||
263 | } | 314 | } |
264 | 315 | ||
265 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | 316 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) |
@@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | |||
269 | crypto_free_shash(*ctx); | 320 | crypto_free_shash(*ctx); |
270 | } | 321 | } |
271 | 322 | ||
272 | static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | 323 | int crypto_init_shash_ops_async(struct crypto_tfm *tfm) |
273 | { | 324 | { |
274 | struct crypto_alg *calg = tfm->__crt_alg; | 325 | struct crypto_alg *calg = tfm->__crt_alg; |
275 | struct shash_alg *alg = __crypto_shash_alg(calg); | 326 | struct shash_alg *alg = __crypto_shash_alg(calg); |
276 | struct ahash_tfm *crt = &tfm->crt_ahash; | 327 | struct crypto_ahash *crt = __crypto_ahash_cast(tfm); |
277 | struct crypto_shash **ctx = crypto_tfm_ctx(tfm); | 328 | struct crypto_shash **ctx = crypto_tfm_ctx(tfm); |
278 | struct crypto_shash *shash; | 329 | struct crypto_shash *shash; |
279 | 330 | ||
@@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
291 | 342 | ||
292 | crt->init = shash_async_init; | 343 | crt->init = shash_async_init; |
293 | crt->update = shash_async_update; | 344 | crt->update = shash_async_update; |
294 | crt->final = shash_async_final; | 345 | crt->final = shash_async_final; |
346 | crt->finup = shash_async_finup; | ||
295 | crt->digest = shash_async_digest; | 347 | crt->digest = shash_async_digest; |
296 | crt->setkey = shash_async_setkey; | ||
297 | 348 | ||
298 | crt->digestsize = alg->digestsize; | 349 | if (alg->setkey) |
350 | crt->setkey = shash_async_setkey; | ||
351 | if (alg->export) | ||
352 | crt->export = shash_async_export; | ||
353 | if (alg->import) | ||
354 | crt->import = shash_async_import; | ||
355 | |||
299 | crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); | 356 | crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); |
300 | 357 | ||
301 | return 0; | 358 | return 0; |
@@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
304 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, | 361 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, |
305 | unsigned int keylen) | 362 | unsigned int keylen) |
306 | { | 363 | { |
307 | struct shash_desc *desc = crypto_hash_ctx(tfm); | 364 | struct shash_desc **descp = crypto_hash_ctx(tfm); |
365 | struct shash_desc *desc = *descp; | ||
308 | 366 | ||
309 | return crypto_shash_setkey(desc->tfm, key, keylen); | 367 | return crypto_shash_setkey(desc->tfm, key, keylen); |
310 | } | 368 | } |
311 | 369 | ||
312 | static int shash_compat_init(struct hash_desc *hdesc) | 370 | static int shash_compat_init(struct hash_desc *hdesc) |
313 | { | 371 | { |
314 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 372 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
373 | struct shash_desc *desc = *descp; | ||
315 | 374 | ||
316 | desc->flags = hdesc->flags; | 375 | desc->flags = hdesc->flags; |
317 | 376 | ||
@@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc) | |||
321 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | 380 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, |
322 | unsigned int len) | 381 | unsigned int len) |
323 | { | 382 | { |
324 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 383 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
384 | struct shash_desc *desc = *descp; | ||
325 | struct crypto_hash_walk walk; | 385 | struct crypto_hash_walk walk; |
326 | int nbytes; | 386 | int nbytes; |
327 | 387 | ||
@@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | |||
334 | 394 | ||
335 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) | 395 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) |
336 | { | 396 | { |
337 | return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); | 397 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
398 | |||
399 | return crypto_shash_final(*descp, out); | ||
338 | } | 400 | } |
339 | 401 | ||
340 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | 402 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, |
@@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | |||
344 | int err; | 406 | int err; |
345 | 407 | ||
346 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 408 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { |
347 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 409 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
410 | struct shash_desc *desc = *descp; | ||
348 | void *data; | 411 | void *data; |
349 | 412 | ||
350 | desc->flags = hdesc->flags; | 413 | desc->flags = hdesc->flags; |
@@ -372,9 +435,11 @@ out: | |||
372 | 435 | ||
373 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) | 436 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) |
374 | { | 437 | { |
375 | struct shash_desc *desc= crypto_tfm_ctx(tfm); | 438 | struct shash_desc **descp = crypto_tfm_ctx(tfm); |
439 | struct shash_desc *desc = *descp; | ||
376 | 440 | ||
377 | crypto_free_shash(desc->tfm); | 441 | crypto_free_shash(desc->tfm); |
442 | kzfree(desc); | ||
378 | } | 443 | } |
379 | 444 | ||
380 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | 445 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) |
@@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | |||
382 | struct hash_tfm *crt = &tfm->crt_hash; | 447 | struct hash_tfm *crt = &tfm->crt_hash; |
383 | struct crypto_alg *calg = tfm->__crt_alg; | 448 | struct crypto_alg *calg = tfm->__crt_alg; |
384 | struct shash_alg *alg = __crypto_shash_alg(calg); | 449 | struct shash_alg *alg = __crypto_shash_alg(calg); |
385 | struct shash_desc *desc = crypto_tfm_ctx(tfm); | 450 | struct shash_desc **descp = crypto_tfm_ctx(tfm); |
386 | struct crypto_shash *shash; | 451 | struct crypto_shash *shash; |
452 | struct shash_desc *desc; | ||
387 | 453 | ||
388 | if (!crypto_mod_get(calg)) | 454 | if (!crypto_mod_get(calg)) |
389 | return -EAGAIN; | 455 | return -EAGAIN; |
@@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | |||
394 | return PTR_ERR(shash); | 460 | return PTR_ERR(shash); |
395 | } | 461 | } |
396 | 462 | ||
463 | desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), | ||
464 | GFP_KERNEL); | ||
465 | if (!desc) { | ||
466 | crypto_free_shash(shash); | ||
467 | return -ENOMEM; | ||
468 | } | ||
469 | |||
470 | *descp = desc; | ||
397 | desc->tfm = shash; | 471 | desc->tfm = shash; |
398 | tfm->exit = crypto_exit_shash_ops_compat; | 472 | tfm->exit = crypto_exit_shash_ops_compat; |
399 | 473 | ||
@@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
413 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | 487 | switch (mask & CRYPTO_ALG_TYPE_MASK) { |
414 | case CRYPTO_ALG_TYPE_HASH_MASK: | 488 | case CRYPTO_ALG_TYPE_HASH_MASK: |
415 | return crypto_init_shash_ops_compat(tfm); | 489 | return crypto_init_shash_ops_compat(tfm); |
416 | case CRYPTO_ALG_TYPE_AHASH_MASK: | ||
417 | return crypto_init_shash_ops_async(tfm); | ||
418 | } | 490 | } |
419 | 491 | ||
420 | return -EINVAL; | 492 | return -EINVAL; |
@@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
423 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, | 495 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, |
424 | u32 mask) | 496 | u32 mask) |
425 | { | 497 | { |
426 | struct shash_alg *salg = __crypto_shash_alg(alg); | ||
427 | |||
428 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | 498 | switch (mask & CRYPTO_ALG_TYPE_MASK) { |
429 | case CRYPTO_ALG_TYPE_HASH_MASK: | 499 | case CRYPTO_ALG_TYPE_HASH_MASK: |
430 | return sizeof(struct shash_desc) + salg->descsize; | 500 | return sizeof(struct shash_desc *); |
431 | case CRYPTO_ALG_TYPE_AHASH_MASK: | ||
432 | return sizeof(struct crypto_shash *); | ||
433 | } | 501 | } |
434 | 502 | ||
435 | return 0; | 503 | return 0; |
436 | } | 504 | } |
437 | 505 | ||
438 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm, | 506 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) |
439 | const struct crypto_type *frontend) | ||
440 | { | 507 | { |
508 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | ||
509 | |||
510 | hash->descsize = crypto_shash_alg(hash)->descsize; | ||
441 | return 0; | 511 | return 0; |
442 | } | 512 | } |
443 | 513 | ||
444 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg, | 514 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg) |
445 | const struct crypto_type *frontend) | ||
446 | { | 515 | { |
447 | return alg->cra_ctxsize; | 516 | return alg->cra_ctxsize; |
448 | } | 517 | } |
@@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) | |||
456 | seq_printf(m, "type : shash\n"); | 525 | seq_printf(m, "type : shash\n"); |
457 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | 526 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); |
458 | seq_printf(m, "digestsize : %u\n", salg->digestsize); | 527 | seq_printf(m, "digestsize : %u\n", salg->digestsize); |
459 | seq_printf(m, "descsize : %u\n", salg->descsize); | ||
460 | } | 528 | } |
461 | 529 | ||
462 | static const struct crypto_type crypto_shash_type = { | 530 | static const struct crypto_type crypto_shash_type = { |
@@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, | |||
480 | } | 548 | } |
481 | EXPORT_SYMBOL_GPL(crypto_alloc_shash); | 549 | EXPORT_SYMBOL_GPL(crypto_alloc_shash); |
482 | 550 | ||
483 | int crypto_register_shash(struct shash_alg *alg) | 551 | static int shash_prepare_alg(struct shash_alg *alg) |
484 | { | 552 | { |
485 | struct crypto_alg *base = &alg->base; | 553 | struct crypto_alg *base = &alg->base; |
486 | 554 | ||
487 | if (alg->digestsize > PAGE_SIZE / 8 || | 555 | if (alg->digestsize > PAGE_SIZE / 8 || |
488 | alg->descsize > PAGE_SIZE / 8) | 556 | alg->descsize > PAGE_SIZE / 8 || |
557 | alg->statesize > PAGE_SIZE / 8) | ||
489 | return -EINVAL; | 558 | return -EINVAL; |
490 | 559 | ||
491 | base->cra_type = &crypto_shash_type; | 560 | base->cra_type = &crypto_shash_type; |
492 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | 561 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; |
493 | base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; | 562 | base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; |
494 | 563 | ||
564 | if (!alg->finup) | ||
565 | alg->finup = shash_finup_unaligned; | ||
566 | if (!alg->digest) | ||
567 | alg->digest = shash_digest_unaligned; | ||
568 | if (!alg->export) { | ||
569 | alg->export = shash_default_export; | ||
570 | alg->import = shash_default_import; | ||
571 | alg->statesize = alg->descsize; | ||
572 | } | ||
573 | if (!alg->setkey) | ||
574 | alg->setkey = shash_no_setkey; | ||
575 | |||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | int crypto_register_shash(struct shash_alg *alg) | ||
580 | { | ||
581 | struct crypto_alg *base = &alg->base; | ||
582 | int err; | ||
583 | |||
584 | err = shash_prepare_alg(alg); | ||
585 | if (err) | ||
586 | return err; | ||
587 | |||
495 | return crypto_register_alg(base); | 588 | return crypto_register_alg(base); |
496 | } | 589 | } |
497 | EXPORT_SYMBOL_GPL(crypto_register_shash); | 590 | EXPORT_SYMBOL_GPL(crypto_register_shash); |
@@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg) | |||
502 | } | 595 | } |
503 | EXPORT_SYMBOL_GPL(crypto_unregister_shash); | 596 | EXPORT_SYMBOL_GPL(crypto_unregister_shash); |
504 | 597 | ||
598 | int shash_register_instance(struct crypto_template *tmpl, | ||
599 | struct shash_instance *inst) | ||
600 | { | ||
601 | int err; | ||
602 | |||
603 | err = shash_prepare_alg(&inst->alg); | ||
604 | if (err) | ||
605 | return err; | ||
606 | |||
607 | return crypto_register_instance(tmpl, shash_crypto_instance(inst)); | ||
608 | } | ||
609 | EXPORT_SYMBOL_GPL(shash_register_instance); | ||
610 | |||
611 | void shash_free_instance(struct crypto_instance *inst) | ||
612 | { | ||
613 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
614 | kfree(shash_instance(inst)); | ||
615 | } | ||
616 | EXPORT_SYMBOL_GPL(shash_free_instance); | ||
617 | |||
618 | int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, | ||
619 | struct shash_alg *alg, | ||
620 | struct crypto_instance *inst) | ||
621 | { | ||
622 | return crypto_init_spawn2(&spawn->base, &alg->base, inst, | ||
623 | &crypto_shash_type); | ||
624 | } | ||
625 | EXPORT_SYMBOL_GPL(crypto_init_shash_spawn); | ||
626 | |||
627 | struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | ||
628 | { | ||
629 | struct crypto_alg *alg; | ||
630 | |||
631 | alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask); | ||
632 | return IS_ERR(alg) ? ERR_CAST(alg) : | ||
633 | container_of(alg, struct shash_alg, base); | ||
634 | } | ||
635 | EXPORT_SYMBOL_GPL(shash_attr_alg); | ||
636 | |||
505 | MODULE_LICENSE("GPL"); | 637 | MODULE_LICENSE("GPL"); |
506 | MODULE_DESCRIPTION("Synchronous cryptographic hash type"); | 638 | MODULE_DESCRIPTION("Synchronous cryptographic hash type"); |
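With shash_prepare_alg() now filling in finup, digest, export, import and setkey defaults at registration time, a simple synchronous hash only has to supply init/update/final. A hypothetical registration that relies on those defaults (the algorithm name, state struct and callbacks below are placeholders, not part of this patch):

	static struct shash_alg example_alg = {
		.digestsize	= 32,
		.init		= example_init,
		.update		= example_update,
		.final		= example_final,
		.descsize	= sizeof(struct example_state),
		.base		= {
			.cra_name	= "example",
			.cra_blocksize	= 64,
			.cra_module	= THIS_MODULE,
		}
	};

	/* crypto_register_shash(&example_alg): finup/digest fall back to the
	 * unaligned helpers, export/import default to copying the descriptor
	 * state, and setkey reports -ENOSYS via shash_no_setkey(). */

Note that cra_flags does not need CRYPTO_ALG_TYPE_SHASH here; shash_prepare_alg() sets it before the algorithm is registered.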
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index d59ba5079d14..3ca68f9fc14d 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <crypto/hash.h> | 18 | #include <crypto/hash.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/gfp.h> | ||
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | #include <linux/slab.h> | ||
23 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/moduleparam.h> | 25 | #include <linux/moduleparam.h> |
@@ -45,6 +45,9 @@ | |||
45 | */ | 45 | */ |
46 | static unsigned int sec; | 46 | static unsigned int sec; |
47 | 47 | ||
48 | static char *alg = NULL; | ||
49 | static u32 type; | ||
50 | static u32 mask; | ||
48 | static int mode; | 51 | static int mode; |
49 | static char *tvmem[TVMEMSIZE]; | 52 | static char *tvmem[TVMEMSIZE]; |
50 | 53 | ||
@@ -391,6 +394,17 @@ out: | |||
391 | return 0; | 394 | return 0; |
392 | } | 395 | } |
393 | 396 | ||
397 | static void test_hash_sg_init(struct scatterlist *sg) | ||
398 | { | ||
399 | int i; | ||
400 | |||
401 | sg_init_table(sg, TVMEMSIZE); | ||
402 | for (i = 0; i < TVMEMSIZE; i++) { | ||
403 | sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); | ||
404 | memset(tvmem[i], 0xff, PAGE_SIZE); | ||
405 | } | ||
406 | } | ||
407 | |||
394 | static void test_hash_speed(const char *algo, unsigned int sec, | 408 | static void test_hash_speed(const char *algo, unsigned int sec, |
395 | struct hash_speed *speed) | 409 | struct hash_speed *speed) |
396 | { | 410 | { |
@@ -420,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
420 | goto out; | 434 | goto out; |
421 | } | 435 | } |
422 | 436 | ||
423 | sg_init_table(sg, TVMEMSIZE); | 437 | test_hash_sg_init(sg); |
424 | for (i = 0; i < TVMEMSIZE; i++) { | ||
425 | sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); | ||
426 | memset(tvmem[i], 0xff, PAGE_SIZE); | ||
427 | } | ||
428 | |||
429 | for (i = 0; speed[i].blen != 0; i++) { | 438 | for (i = 0; speed[i].blen != 0; i++) { |
430 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | 439 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { |
431 | printk(KERN_ERR | 440 | printk(KERN_ERR |
@@ -434,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec, | |||
434 | goto out; | 443 | goto out; |
435 | } | 444 | } |
436 | 445 | ||
446 | if (speed[i].klen) | ||
447 | crypto_hash_setkey(tfm, tvmem[0], speed[i].klen); | ||
448 | |||
437 | printk(KERN_INFO "test%3u " | 449 | printk(KERN_INFO "test%3u " |
438 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | 450 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", |
439 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | 451 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); |
@@ -455,6 +467,250 @@ out: | |||
455 | crypto_free_hash(tfm); | 467 | crypto_free_hash(tfm); |
456 | } | 468 | } |
457 | 469 | ||
470 | struct tcrypt_result { | ||
471 | struct completion completion; | ||
472 | int err; | ||
473 | }; | ||
474 | |||
475 | static void tcrypt_complete(struct crypto_async_request *req, int err) | ||
476 | { | ||
477 | struct tcrypt_result *res = req->data; | ||
478 | |||
479 | if (err == -EINPROGRESS) | ||
480 | return; | ||
481 | |||
482 | res->err = err; | ||
483 | complete(&res->completion); | ||
484 | } | ||
485 | |||
486 | static inline int do_one_ahash_op(struct ahash_request *req, int ret) | ||
487 | { | ||
488 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
489 | struct tcrypt_result *tr = req->base.data; | ||
490 | |||
491 | ret = wait_for_completion_interruptible(&tr->completion); | ||
492 | if (!ret) | ||
493 | ret = tr->err; | ||
494 | INIT_COMPLETION(tr->completion); | ||
495 | } | ||
496 | return ret; | ||
497 | } | ||
498 | |||
499 | static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, | ||
500 | char *out, int sec) | ||
501 | { | ||
502 | unsigned long start, end; | ||
503 | int bcount; | ||
504 | int ret; | ||
505 | |||
506 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | ||
507 | time_before(jiffies, end); bcount++) { | ||
508 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | ||
509 | if (ret) | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | printk("%6u opers/sec, %9lu bytes/sec\n", | ||
514 | bcount / sec, ((long)bcount * blen) / sec); | ||
515 | |||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | static int test_ahash_jiffies(struct ahash_request *req, int blen, | ||
520 | int plen, char *out, int sec) | ||
521 | { | ||
522 | unsigned long start, end; | ||
523 | int bcount, pcount; | ||
524 | int ret; | ||
525 | |||
526 | if (plen == blen) | ||
527 | return test_ahash_jiffies_digest(req, blen, out, sec); | ||
528 | |||
529 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | ||
530 | time_before(jiffies, end); bcount++) { | ||
531 | ret = crypto_ahash_init(req); | ||
532 | if (ret) | ||
533 | return ret; | ||
534 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
535 | ret = do_one_ahash_op(req, crypto_ahash_update(req)); | ||
536 | if (ret) | ||
537 | return ret; | ||
538 | } | ||
539 | /* we assume there is enough space in 'out' for the result */ | ||
540 | ret = do_one_ahash_op(req, crypto_ahash_final(req)); | ||
541 | if (ret) | ||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | pr_cont("%6u opers/sec, %9lu bytes/sec\n", | ||
546 | bcount / sec, ((long)bcount * blen) / sec); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int test_ahash_cycles_digest(struct ahash_request *req, int blen, | ||
552 | char *out) | ||
553 | { | ||
554 | unsigned long cycles = 0; | ||
555 | int ret, i; | ||
556 | |||
557 | /* Warm-up run. */ | ||
558 | for (i = 0; i < 4; i++) { | ||
559 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | ||
560 | if (ret) | ||
561 | goto out; | ||
562 | } | ||
563 | |||
564 | /* The real thing. */ | ||
565 | for (i = 0; i < 8; i++) { | ||
566 | cycles_t start, end; | ||
567 | |||
568 | start = get_cycles(); | ||
569 | |||
570 | ret = do_one_ahash_op(req, crypto_ahash_digest(req)); | ||
571 | if (ret) | ||
572 | goto out; | ||
573 | |||
574 | end = get_cycles(); | ||
575 | |||
576 | cycles += end - start; | ||
577 | } | ||
578 | |||
579 | out: | ||
580 | if (ret) | ||
581 | return ret; | ||
582 | |||
583 | pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", | ||
584 | cycles / 8, cycles / (8 * blen)); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | static int test_ahash_cycles(struct ahash_request *req, int blen, | ||
590 | int plen, char *out) | ||
591 | { | ||
592 | unsigned long cycles = 0; | ||
593 | int i, pcount, ret; | ||
594 | |||
595 | if (plen == blen) | ||
596 | return test_ahash_cycles_digest(req, blen, out); | ||
597 | |||
598 | /* Warm-up run. */ | ||
599 | for (i = 0; i < 4; i++) { | ||
600 | ret = crypto_ahash_init(req); | ||
601 | if (ret) | ||
602 | goto out; | ||
603 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
604 | ret = do_one_ahash_op(req, crypto_ahash_update(req)); | ||
605 | if (ret) | ||
606 | goto out; | ||
607 | } | ||
608 | ret = do_one_ahash_op(req, crypto_ahash_final(req)); | ||
609 | if (ret) | ||
610 | goto out; | ||
611 | } | ||
612 | |||
613 | /* The real thing. */ | ||
614 | for (i = 0; i < 8; i++) { | ||
615 | cycles_t start, end; | ||
616 | |||
617 | start = get_cycles(); | ||
618 | |||
619 | ret = crypto_ahash_init(req); | ||
620 | if (ret) | ||
621 | goto out; | ||
622 | for (pcount = 0; pcount < blen; pcount += plen) { | ||
623 | ret = do_one_ahash_op(req, crypto_ahash_update(req)); | ||
624 | if (ret) | ||
625 | goto out; | ||
626 | } | ||
627 | ret = do_one_ahash_op(req, crypto_ahash_final(req)); | ||
628 | if (ret) | ||
629 | goto out; | ||
630 | |||
631 | end = get_cycles(); | ||
632 | |||
633 | cycles += end - start; | ||
634 | } | ||
635 | |||
636 | out: | ||
637 | if (ret) | ||
638 | return ret; | ||
639 | |||
640 | pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", | ||
641 | cycles / 8, cycles / (8 * blen)); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static void test_ahash_speed(const char *algo, unsigned int sec, | ||
647 | struct hash_speed *speed) | ||
648 | { | ||
649 | struct scatterlist sg[TVMEMSIZE]; | ||
650 | struct tcrypt_result tresult; | ||
651 | struct ahash_request *req; | ||
652 | struct crypto_ahash *tfm; | ||
653 | static char output[1024]; | ||
654 | int i, ret; | ||
655 | |||
656 | printk(KERN_INFO "\ntesting speed of async %s\n", algo); | ||
657 | |||
658 | tfm = crypto_alloc_ahash(algo, 0, 0); | ||
659 | if (IS_ERR(tfm)) { | ||
660 | pr_err("failed to load transform for %s: %ld\n", | ||
661 | algo, PTR_ERR(tfm)); | ||
662 | return; | ||
663 | } | ||
664 | |||
665 | if (crypto_ahash_digestsize(tfm) > sizeof(output)) { | ||
666 | pr_err("digestsize(%u) > outputbuffer(%zu)\n", | ||
667 | crypto_ahash_digestsize(tfm), sizeof(output)); | ||
668 | goto out; | ||
669 | } | ||
670 | |||
671 | test_hash_sg_init(sg); | ||
672 | req = ahash_request_alloc(tfm, GFP_KERNEL); | ||
673 | if (!req) { | ||
674 | pr_err("ahash request allocation failure\n"); | ||
675 | goto out; | ||
676 | } | ||
677 | |||
678 | init_completion(&tresult.completion); | ||
679 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
680 | tcrypt_complete, &tresult); | ||
681 | |||
682 | for (i = 0; speed[i].blen != 0; i++) { | ||
683 | if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { | ||
684 | pr_err("template (%u) too big for tvmem (%lu)\n", | ||
685 | speed[i].blen, TVMEMSIZE * PAGE_SIZE); | ||
686 | break; | ||
687 | } | ||
688 | |||
689 | pr_info("test%3u " | ||
690 | "(%5u byte blocks,%5u bytes per update,%4u updates): ", | ||
691 | i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); | ||
692 | |||
693 | ahash_request_set_crypt(req, sg, output, speed[i].plen); | ||
694 | |||
695 | if (sec) | ||
696 | ret = test_ahash_jiffies(req, speed[i].blen, | ||
697 | speed[i].plen, output, sec); | ||
698 | else | ||
699 | ret = test_ahash_cycles(req, speed[i].blen, | ||
700 | speed[i].plen, output); | ||
701 | |||
702 | if (ret) { | ||
703 | pr_err("hashing failed ret=%d\n", ret); | ||
704 | break; | ||
705 | } | ||
706 | } | ||
707 | |||
708 | ahash_request_free(req); | ||
709 | |||
710 | out: | ||
711 | crypto_free_ahash(tfm); | ||
712 | } | ||
713 | |||
458 | static void test_available(void) | 714 | static void test_available(void) |
459 | { | 715 | { |
460 | char **name = check; | 716 | char **name = check; |
@@ -716,6 +972,10 @@ static int do_test(int m) | |||
716 | ret += tcrypt_test("hmac(rmd160)"); | 972 | ret += tcrypt_test("hmac(rmd160)"); |
717 | break; | 973 | break; |
718 | 974 | ||
975 | case 109: | ||
976 | ret += tcrypt_test("vmac(aes)"); | ||
977 | break; | ||
978 | |||
719 | case 150: | 979 | case 150: |
720 | ret += tcrypt_test("ansi_cprng"); | 980 | ret += tcrypt_test("ansi_cprng"); |
721 | break; | 981 | break; |
@@ -874,9 +1134,87 @@ static int do_test(int m) | |||
874 | test_hash_speed("rmd320", sec, generic_hash_speed_template); | 1134 | test_hash_speed("rmd320", sec, generic_hash_speed_template); |
875 | if (mode > 300 && mode < 400) break; | 1135 | if (mode > 300 && mode < 400) break; |
876 | 1136 | ||
1137 | case 318: | ||
1138 | test_hash_speed("ghash-generic", sec, hash_speed_template_16); | ||
1139 | if (mode > 300 && mode < 400) break; | ||
1140 | |||
877 | case 399: | 1141 | case 399: |
878 | break; | 1142 | break; |
879 | 1143 | ||
1144 | case 400: | ||
1145 | /* fall through */ | ||
1146 | |||
1147 | case 401: | ||
1148 | test_ahash_speed("md4", sec, generic_hash_speed_template); | ||
1149 | if (mode > 400 && mode < 500) break; | ||
1150 | |||
1151 | case 402: | ||
1152 | test_ahash_speed("md5", sec, generic_hash_speed_template); | ||
1153 | if (mode > 400 && mode < 500) break; | ||
1154 | |||
1155 | case 403: | ||
1156 | test_ahash_speed("sha1", sec, generic_hash_speed_template); | ||
1157 | if (mode > 400 && mode < 500) break; | ||
1158 | |||
1159 | case 404: | ||
1160 | test_ahash_speed("sha256", sec, generic_hash_speed_template); | ||
1161 | if (mode > 400 && mode < 500) break; | ||
1162 | |||
1163 | case 405: | ||
1164 | test_ahash_speed("sha384", sec, generic_hash_speed_template); | ||
1165 | if (mode > 400 && mode < 500) break; | ||
1166 | |||
1167 | case 406: | ||
1168 | test_ahash_speed("sha512", sec, generic_hash_speed_template); | ||
1169 | if (mode > 400 && mode < 500) break; | ||
1170 | |||
1171 | case 407: | ||
1172 | test_ahash_speed("wp256", sec, generic_hash_speed_template); | ||
1173 | if (mode > 400 && mode < 500) break; | ||
1174 | |||
1175 | case 408: | ||
1176 | test_ahash_speed("wp384", sec, generic_hash_speed_template); | ||
1177 | if (mode > 400 && mode < 500) break; | ||
1178 | |||
1179 | case 409: | ||
1180 | test_ahash_speed("wp512", sec, generic_hash_speed_template); | ||
1181 | if (mode > 400 && mode < 500) break; | ||
1182 | |||
1183 | case 410: | ||
1184 | test_ahash_speed("tgr128", sec, generic_hash_speed_template); | ||
1185 | if (mode > 400 && mode < 500) break; | ||
1186 | |||
1187 | case 411: | ||
1188 | test_ahash_speed("tgr160", sec, generic_hash_speed_template); | ||
1189 | if (mode > 400 && mode < 500) break; | ||
1190 | |||
1191 | case 412: | ||
1192 | test_ahash_speed("tgr192", sec, generic_hash_speed_template); | ||
1193 | if (mode > 400 && mode < 500) break; | ||
1194 | |||
1195 | case 413: | ||
1196 | test_ahash_speed("sha224", sec, generic_hash_speed_template); | ||
1197 | if (mode > 400 && mode < 500) break; | ||
1198 | |||
1199 | case 414: | ||
1200 | test_ahash_speed("rmd128", sec, generic_hash_speed_template); | ||
1201 | if (mode > 400 && mode < 500) break; | ||
1202 | |||
1203 | case 415: | ||
1204 | test_ahash_speed("rmd160", sec, generic_hash_speed_template); | ||
1205 | if (mode > 400 && mode < 500) break; | ||
1206 | |||
1207 | case 416: | ||
1208 | test_ahash_speed("rmd256", sec, generic_hash_speed_template); | ||
1209 | if (mode > 400 && mode < 500) break; | ||
1210 | |||
1211 | case 417: | ||
1212 | test_ahash_speed("rmd320", sec, generic_hash_speed_template); | ||
1213 | if (mode > 400 && mode < 500) break; | ||
1214 | |||
1215 | case 499: | ||
1216 | break; | ||
1217 | |||
880 | case 1000: | 1218 | case 1000: |
881 | test_available(); | 1219 | test_available(); |
882 | break; | 1220 | break; |
@@ -885,6 +1223,12 @@ static int do_test(int m) | |||
885 | return ret; | 1223 | return ret; |
886 | } | 1224 | } |
887 | 1225 | ||
1226 | static int do_alg_test(const char *alg, u32 type, u32 mask) | ||
1227 | { | ||
1228 | return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ? | ||
1229 | 0 : -ENOENT; | ||
1230 | } | ||
1231 | |||
888 | static int __init tcrypt_mod_init(void) | 1232 | static int __init tcrypt_mod_init(void) |
889 | { | 1233 | { |
890 | int err = -ENOMEM; | 1234 | int err = -ENOMEM; |
@@ -896,7 +1240,11 @@ static int __init tcrypt_mod_init(void) | |||
896 | goto err_free_tv; | 1240 | goto err_free_tv; |
897 | } | 1241 | } |
898 | 1242 | ||
899 | err = do_test(mode); | 1243 | if (alg) |
1244 | err = do_alg_test(alg, type, mask); | ||
1245 | else | ||
1246 | err = do_test(mode); | ||
1247 | |||
900 | if (err) { | 1248 | if (err) { |
901 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); | 1249 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); |
902 | goto err_free_tv; | 1250 | goto err_free_tv; |
@@ -928,6 +1276,9 @@ static void __exit tcrypt_mod_fini(void) { } | |||
928 | module_init(tcrypt_mod_init); | 1276 | module_init(tcrypt_mod_init); |
929 | module_exit(tcrypt_mod_fini); | 1277 | module_exit(tcrypt_mod_fini); |
930 | 1278 | ||
1279 | module_param(alg, charp, 0); | ||
1280 | module_param(type, uint, 0); | ||
1281 | module_param(mask, uint, 0); | ||
931 | module_param(mode, int, 0); | 1282 | module_param(mode, int, 0); |
932 | module_param(sec, uint, 0); | 1283 | module_param(sec, uint, 0); |
933 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " | 1284 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " |
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 966bbfaf95b1..10cb925132c9 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h | |||
@@ -25,6 +25,7 @@ struct cipher_speed_template { | |||
25 | struct hash_speed { | 25 | struct hash_speed { |
26 | unsigned int blen; /* buffer length */ | 26 | unsigned int blen; /* buffer length */ |
27 | unsigned int plen; /* per-update length */ | 27 | unsigned int plen; /* per-update length */ |
28 | unsigned int klen; /* key length */ | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | /* | 31 | /* |
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = { | |||
83 | { .blen = 0, .plen = 0, } | 84 | { .blen = 0, .plen = 0, } |
84 | }; | 85 | }; |
85 | 86 | ||
87 | static struct hash_speed hash_speed_template_16[] = { | ||
88 | { .blen = 16, .plen = 16, .klen = 16, }, | ||
89 | { .blen = 64, .plen = 16, .klen = 16, }, | ||
90 | { .blen = 64, .plen = 64, .klen = 16, }, | ||
91 | { .blen = 256, .plen = 16, .klen = 16, }, | ||
92 | { .blen = 256, .plen = 64, .klen = 16, }, | ||
93 | { .blen = 256, .plen = 256, .klen = 16, }, | ||
94 | { .blen = 1024, .plen = 16, .klen = 16, }, | ||
95 | { .blen = 1024, .plen = 256, .klen = 16, }, | ||
96 | { .blen = 1024, .plen = 1024, .klen = 16, }, | ||
97 | { .blen = 2048, .plen = 16, .klen = 16, }, | ||
98 | { .blen = 2048, .plen = 256, .klen = 16, }, | ||
99 | { .blen = 2048, .plen = 1024, .klen = 16, }, | ||
100 | { .blen = 2048, .plen = 2048, .klen = 16, }, | ||
101 | { .blen = 4096, .plen = 16, .klen = 16, }, | ||
102 | { .blen = 4096, .plen = 256, .klen = 16, }, | ||
103 | { .blen = 4096, .plen = 1024, .klen = 16, }, | ||
104 | { .blen = 4096, .plen = 4096, .klen = 16, }, | ||
105 | { .blen = 8192, .plen = 16, .klen = 16, }, | ||
106 | { .blen = 8192, .plen = 256, .klen = 16, }, | ||
107 | { .blen = 8192, .plen = 1024, .klen = 16, }, | ||
108 | { .blen = 8192, .plen = 4096, .klen = 16, }, | ||
109 | { .blen = 8192, .plen = 8192, .klen = 16, }, | ||
110 | |||
111 | /* End marker */ | ||
112 | { .blen = 0, .plen = 0, .klen = 0, } | ||
113 | }; | ||
114 | |||
86 | #endif /* _CRYPTO_TCRYPT_H */ | 115 | #endif /* _CRYPTO_TCRYPT_H */ |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index e9e9d84293b9..fa8c8f78c8d4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -22,6 +22,17 @@ | |||
22 | #include <crypto/rng.h> | 22 | #include <crypto/rng.h> |
23 | 23 | ||
24 | #include "internal.h" | 24 | #include "internal.h" |
25 | |||
26 | #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS | ||
27 | |||
28 | /* a perfect nop */ | ||
29 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | #else | ||
35 | |||
25 | #include "testmgr.h" | 36 | #include "testmgr.h" |
26 | 37 | ||
27 | /* | 38 | /* |
@@ -153,8 +164,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) | |||
153 | free_page((unsigned long)buf[i]); | 164 | free_page((unsigned long)buf[i]); |
154 | } | 165 | } |
155 | 166 | ||
167 | static int do_one_async_hash_op(struct ahash_request *req, | ||
168 | struct tcrypt_result *tr, | ||
169 | int ret) | ||
170 | { | ||
171 | if (ret == -EINPROGRESS || ret == -EBUSY) { | ||
172 | ret = wait_for_completion_interruptible(&tr->completion); | ||
173 | if (!ret) | ||
174 | ret = tr->err; | ||
175 | INIT_COMPLETION(tr->completion); | ||
176 | } | ||
177 | return ret; | ||
178 | } | ||
179 | |||
156 | static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | 180 | static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, |
157 | unsigned int tcount) | 181 | unsigned int tcount, bool use_digest) |
158 | { | 182 | { |
159 | const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); | 183 | const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); |
160 | unsigned int i, j, k, temp; | 184 | unsigned int i, j, k, temp; |
@@ -190,10 +214,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
190 | 214 | ||
191 | hash_buff = xbuf[0]; | 215 | hash_buff = xbuf[0]; |
192 | 216 | ||
193 | ret = -EINVAL; | ||
194 | if (WARN_ON(template[i].psize > PAGE_SIZE)) | ||
195 | goto out; | ||
196 | |||
197 | memcpy(hash_buff, template[i].plaintext, template[i].psize); | 217 | memcpy(hash_buff, template[i].plaintext, template[i].psize); |
198 | sg_init_one(&sg[0], hash_buff, template[i].psize); | 218 | sg_init_one(&sg[0], hash_buff, template[i].psize); |
199 | 219 | ||
@@ -210,23 +230,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
210 | } | 230 | } |
211 | 231 | ||
212 | ahash_request_set_crypt(req, sg, result, template[i].psize); | 232 | ahash_request_set_crypt(req, sg, result, template[i].psize); |
213 | ret = crypto_ahash_digest(req); | 233 | if (use_digest) { |
214 | switch (ret) { | 234 | ret = do_one_async_hash_op(req, &tresult, |
215 | case 0: | 235 | crypto_ahash_digest(req)); |
216 | break; | 236 | if (ret) { |
217 | case -EINPROGRESS: | 237 | pr_err("alg: hash: digest failed on test %d " |
218 | case -EBUSY: | 238 | "for %s: ret=%d\n", j, algo, -ret); |
219 | ret = wait_for_completion_interruptible( | 239 | goto out; |
220 | &tresult.completion); | 240 | } |
221 | if (!ret && !(ret = tresult.err)) { | 241 | } else { |
222 | INIT_COMPLETION(tresult.completion); | 242 | ret = do_one_async_hash_op(req, &tresult, |
223 | break; | 243 | crypto_ahash_init(req)); |
244 | if (ret) { | ||
245 | pr_err("alt: hash: init failed on test %d " | ||
246 | "for %s: ret=%d\n", j, algo, -ret); | ||
247 | goto out; | ||
248 | } | ||
249 | ret = do_one_async_hash_op(req, &tresult, | ||
250 | crypto_ahash_update(req)); | ||
251 | if (ret) { | ||
252 | pr_err("alt: hash: update failed on test %d " | ||
253 | "for %s: ret=%d\n", j, algo, -ret); | ||
254 | goto out; | ||
255 | } | ||
256 | ret = do_one_async_hash_op(req, &tresult, | ||
257 | crypto_ahash_final(req)); | ||
258 | if (ret) { | ||
259 | pr_err("alt: hash: final failed on test %d " | ||
260 | "for %s: ret=%d\n", j, algo, -ret); | ||
261 | goto out; | ||
224 | } | 262 | } |
225 | /* fall through */ | ||
226 | default: | ||
227 | printk(KERN_ERR "alg: hash: digest failed on test %d " | ||
228 | "for %s: ret=%d\n", j, algo, -ret); | ||
229 | goto out; | ||
230 | } | 263 | } |
231 | 264 | ||
232 | if (memcmp(result, template[i].digest, | 265 | if (memcmp(result, template[i].digest, |
@@ -1205,7 +1238,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template, | |||
1205 | unsigned int tcount) | 1238 | unsigned int tcount) |
1206 | { | 1239 | { |
1207 | const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); | 1240 | const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); |
1208 | int err, i, j, seedsize; | 1241 | int err = 0, i, j, seedsize; |
1209 | u8 *seed; | 1242 | u8 *seed; |
1210 | char result[32]; | 1243 | char result[32]; |
1211 | 1244 | ||
@@ -1406,7 +1439,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, | |||
1406 | return PTR_ERR(tfm); | 1439 | return PTR_ERR(tfm); |
1407 | } | 1440 | } |
1408 | 1441 | ||
1409 | err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); | 1442 | err = test_hash(tfm, desc->suite.hash.vecs, |
1443 | desc->suite.hash.count, true); | ||
1444 | if (!err) | ||
1445 | err = test_hash(tfm, desc->suite.hash.vecs, | ||
1446 | desc->suite.hash.count, false); | ||
1410 | 1447 | ||
1411 | crypto_free_ahash(tfm); | 1448 | crypto_free_ahash(tfm); |
1412 | return err; | 1449 | return err; |
@@ -1481,9 +1518,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, | |||
1481 | return err; | 1518 | return err; |
1482 | } | 1519 | } |
1483 | 1520 | ||
1521 | static int alg_test_null(const struct alg_test_desc *desc, | ||
1522 | const char *driver, u32 type, u32 mask) | ||
1523 | { | ||
1524 | return 0; | ||
1525 | } | ||
1526 | |||
1484 | /* Please keep this list sorted by algorithm name. */ | 1527 | /* Please keep this list sorted by algorithm name. */ |
1485 | static const struct alg_test_desc alg_test_descs[] = { | 1528 | static const struct alg_test_desc alg_test_descs[] = { |
1486 | { | 1529 | { |
1530 | .alg = "__driver-cbc-aes-aesni", | ||
1531 | .test = alg_test_null, | ||
1532 | .suite = { | ||
1533 | .cipher = { | ||
1534 | .enc = { | ||
1535 | .vecs = NULL, | ||
1536 | .count = 0 | ||
1537 | }, | ||
1538 | .dec = { | ||
1539 | .vecs = NULL, | ||
1540 | .count = 0 | ||
1541 | } | ||
1542 | } | ||
1543 | } | ||
1544 | }, { | ||
1545 | .alg = "__driver-ecb-aes-aesni", | ||
1546 | .test = alg_test_null, | ||
1547 | .suite = { | ||
1548 | .cipher = { | ||
1549 | .enc = { | ||
1550 | .vecs = NULL, | ||
1551 | .count = 0 | ||
1552 | }, | ||
1553 | .dec = { | ||
1554 | .vecs = NULL, | ||
1555 | .count = 0 | ||
1556 | } | ||
1557 | } | ||
1558 | } | ||
1559 | }, { | ||
1560 | .alg = "__ghash-pclmulqdqni", | ||
1561 | .test = alg_test_null, | ||
1562 | .suite = { | ||
1563 | .hash = { | ||
1564 | .vecs = NULL, | ||
1565 | .count = 0 | ||
1566 | } | ||
1567 | } | ||
1568 | }, { | ||
1487 | .alg = "ansi_cprng", | 1569 | .alg = "ansi_cprng", |
1488 | .test = alg_test_cprng, | 1570 | .test = alg_test_cprng, |
1489 | .fips_allowed = 1, | 1571 | .fips_allowed = 1, |
@@ -1627,6 +1709,30 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1627 | } | 1709 | } |
1628 | } | 1710 | } |
1629 | }, { | 1711 | }, { |
1712 | .alg = "cryptd(__driver-ecb-aes-aesni)", | ||
1713 | .test = alg_test_null, | ||
1714 | .suite = { | ||
1715 | .cipher = { | ||
1716 | .enc = { | ||
1717 | .vecs = NULL, | ||
1718 | .count = 0 | ||
1719 | }, | ||
1720 | .dec = { | ||
1721 | .vecs = NULL, | ||
1722 | .count = 0 | ||
1723 | } | ||
1724 | } | ||
1725 | } | ||
1726 | }, { | ||
1727 | .alg = "cryptd(__ghash-pclmulqdqni)", | ||
1728 | .test = alg_test_null, | ||
1729 | .suite = { | ||
1730 | .hash = { | ||
1731 | .vecs = NULL, | ||
1732 | .count = 0 | ||
1733 | } | ||
1734 | } | ||
1735 | }, { | ||
1630 | .alg = "ctr(aes)", | 1736 | .alg = "ctr(aes)", |
1631 | .test = alg_test_skcipher, | 1737 | .test = alg_test_skcipher, |
1632 | .fips_allowed = 1, | 1738 | .fips_allowed = 1, |
@@ -1673,6 +1779,21 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1673 | } | 1779 | } |
1674 | } | 1780 | } |
1675 | }, { | 1781 | }, { |
1782 | .alg = "ecb(__aes-aesni)", | ||
1783 | .test = alg_test_null, | ||
1784 | .suite = { | ||
1785 | .cipher = { | ||
1786 | .enc = { | ||
1787 | .vecs = NULL, | ||
1788 | .count = 0 | ||
1789 | }, | ||
1790 | .dec = { | ||
1791 | .vecs = NULL, | ||
1792 | .count = 0 | ||
1793 | } | ||
1794 | } | ||
1795 | } | ||
1796 | }, { | ||
1676 | .alg = "ecb(aes)", | 1797 | .alg = "ecb(aes)", |
1677 | .test = alg_test_skcipher, | 1798 | .test = alg_test_skcipher, |
1678 | .fips_allowed = 1, | 1799 | .fips_allowed = 1, |
@@ -1947,6 +2068,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
1947 | } | 2068 | } |
1948 | } | 2069 | } |
1949 | }, { | 2070 | }, { |
2071 | .alg = "ghash", | ||
2072 | .test = alg_test_hash, | ||
2073 | .suite = { | ||
2074 | .hash = { | ||
2075 | .vecs = ghash_tv_template, | ||
2076 | .count = GHASH_TEST_VECTORS | ||
2077 | } | ||
2078 | } | ||
2079 | }, { | ||
1950 | .alg = "hmac(md5)", | 2080 | .alg = "hmac(md5)", |
1951 | .test = alg_test_hash, | 2081 | .test = alg_test_hash, |
1952 | .suite = { | 2082 | .suite = { |
@@ -2252,6 +2382,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2252 | } | 2382 | } |
2253 | } | 2383 | } |
2254 | }, { | 2384 | }, { |
2385 | .alg = "vmac(aes)", | ||
2386 | .test = alg_test_hash, | ||
2387 | .suite = { | ||
2388 | .hash = { | ||
2389 | .vecs = aes_vmac128_tv_template, | ||
2390 | .count = VMAC_AES_TEST_VECTORS | ||
2391 | } | ||
2392 | } | ||
2393 | }, { | ||
2255 | .alg = "wp256", | 2394 | .alg = "wp256", |
2256 | .test = alg_test_hash, | 2395 | .test = alg_test_hash, |
2257 | .suite = { | 2396 | .suite = { |
@@ -2348,6 +2487,7 @@ static int alg_find_test(const char *alg) | |||
2348 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | 2487 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) |
2349 | { | 2488 | { |
2350 | int i; | 2489 | int i; |
2490 | int j; | ||
2351 | int rc; | 2491 | int rc; |
2352 | 2492 | ||
2353 | if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { | 2493 | if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { |
@@ -2369,14 +2509,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
2369 | } | 2509 | } |
2370 | 2510 | ||
2371 | i = alg_find_test(alg); | 2511 | i = alg_find_test(alg); |
2372 | if (i < 0) | 2512 | j = alg_find_test(driver); |
2513 | if (i < 0 && j < 0) | ||
2373 | goto notest; | 2514 | goto notest; |
2374 | 2515 | ||
2375 | if (fips_enabled && !alg_test_descs[i].fips_allowed) | 2516 | if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) || |
2517 | (j >= 0 && !alg_test_descs[j].fips_allowed))) | ||
2376 | goto non_fips_alg; | 2518 | goto non_fips_alg; |
2377 | 2519 | ||
2378 | rc = alg_test_descs[i].test(alg_test_descs + i, driver, | 2520 | rc = 0; |
2379 | type, mask); | 2521 | if (i >= 0) |
2522 | rc |= alg_test_descs[i].test(alg_test_descs + i, driver, | ||
2523 | type, mask); | ||
2524 | if (j >= 0) | ||
2525 | rc |= alg_test_descs[j].test(alg_test_descs + j, driver, | ||
2526 | type, mask); | ||
2527 | |||
2380 | test_done: | 2528 | test_done: |
2381 | if (fips_enabled && rc) | 2529 | if (fips_enabled && rc) |
2382 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); | 2530 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); |
@@ -2393,4 +2541,7 @@ notest: | |||
2393 | non_fips_alg: | 2541 | non_fips_alg: |
2394 | return -EINVAL; | 2542 | return -EINVAL; |
2395 | } | 2543 | } |
2544 | |||
2545 | #endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ | ||
2546 | |||
2396 | EXPORT_SYMBOL_GPL(alg_test); | 2547 | EXPORT_SYMBOL_GPL(alg_test); |
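The two hunks above change alg_test() so that a test descriptor is looked up under both the algorithm name and the driver name, and whichever descriptors exist are run with their results OR-ed together; the new null entry for "ecb(__aes-aesni)" gives such driver-only names a descriptor with empty vectors so the lookup by driver name succeeds. A condensed sketch of the resulting flow (not a verbatim copy of the code above):

	/* Condensed sketch of alg_test()'s new lookup flow; illustrative only. */
	static int run_both_tests(const char *driver, const char *alg,
				  u32 type, u32 mask)
	{
		int i = alg_find_test(alg);	/* e.g. "ecb(aes)" */
		int j = alg_find_test(driver);	/* e.g. "ecb(__aes-aesni)" */
		int rc = 0;

		if (i < 0 && j < 0)
			return 0;	/* neither name has a descriptor */

		if (fips_enabled &&
		    ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
		     (j >= 0 && !alg_test_descs[j].fips_allowed)))
			return -EINVAL;	/* reject non-approved algorithms */

		if (i >= 0)
			rc |= alg_test_descs[i].test(alg_test_descs + i,
						     driver, type, mask);
		if (j >= 0)
			rc |= alg_test_descs[j].test(alg_test_descs + j,
						     driver, type, mask);
		return rc;
	}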
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 69316228fc19..74e35377fd30 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = { | |||
1003 | }, | 1003 | }, |
1004 | }; | 1004 | }; |
1005 | 1005 | ||
1006 | #define GHASH_TEST_VECTORS 1 | ||
1007 | |||
1008 | static struct hash_testvec ghash_tv_template[] = | ||
1009 | { | ||
1010 | { | ||
1011 | |||
1012 | .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61", | ||
1013 | .ksize = 16, | ||
1014 | .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6", | ||
1015 | .psize = 16, | ||
1016 | .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6" | ||
1017 | "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60", | ||
1018 | }, | ||
1019 | }; | ||
1020 | |||
1006 | /* | 1021 | /* |
1007 | * HMAC-MD5 test vectors from RFC2202 | 1022 | * HMAC-MD5 test vectors from RFC2202 |
1008 | * (These need to be fixed to not use strlen). | 1023 | * (These need to be fixed to not use strlen). |
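The new "ghash" descriptor added to alg_test_descs above is exercised by this single vector. As a rough illustration of how such a vector could be checked through the synchronous hash (shash) API — this is not how testmgr itself drives the test, which goes through the ahash interface with scatterlists, and error handling is abbreviated:

	/* Illustrative sketch only; not testmgr code. */
	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int ghash_one_block(const u8 key[16], const u8 in[16], u8 out[16])
	{
		struct crypto_shash *tfm;
		struct shash_desc *desc;
		int err;

		tfm = crypto_alloc_shash("ghash", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_shash_setkey(tfm, key, 16);
		if (!err) {
			desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
				       GFP_KERNEL);
			if (!desc) {
				err = -ENOMEM;
			} else {
				desc->tfm = tfm;
				err = crypto_shash_digest(desc, in, 16, out);
				kfree(desc);
			}
		}
		crypto_free_shash(tfm);
		return err;
	}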
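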
@@ -1654,6 +1669,78 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { | |||
1654 | } | 1669 | } |
1655 | }; | 1670 | }; |
1656 | 1671 | ||
1672 | #define VMAC_AES_TEST_VECTORS 8 | ||
1673 | static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', | ||
1674 | '\x02', '\x03', '\x02', '\x02', | ||
1675 | '\x02', '\x04', '\x01', '\x07', | ||
1676 | '\x04', '\x01', '\x04', '\x03',}; | ||
1677 | static char vmac_string2[128] = {'a', 'b', 'c',}; | ||
1678 | static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', | ||
1679 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1680 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1681 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1682 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1683 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1684 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1685 | 'a', 'b', 'c', 'a', 'b', 'c', | ||
1686 | }; | ||
1687 | |||
1688 | static struct hash_testvec aes_vmac128_tv_template[] = { | ||
1689 | { | ||
1690 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1691 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1692 | .plaintext = NULL, | ||
1693 | .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54", | ||
1694 | .psize = 0, | ||
1695 | .ksize = 16, | ||
1696 | }, { | ||
1697 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1698 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1699 | .plaintext = vmac_string1, | ||
1700 | .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1", | ||
1701 | .psize = 128, | ||
1702 | .ksize = 16, | ||
1703 | }, { | ||
1704 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1705 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1706 | .plaintext = vmac_string2, | ||
1707 | .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d", | ||
1708 | .psize = 128, | ||
1709 | .ksize = 16, | ||
1710 | }, { | ||
1711 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1712 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1713 | .plaintext = vmac_string3, | ||
1714 | .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19", | ||
1715 | .psize = 128, | ||
1716 | .ksize = 16, | ||
1717 | }, { | ||
1718 | .key = "abcdefghijklmnop", | ||
1719 | .plaintext = NULL, | ||
1720 | .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84", | ||
1721 | .psize = 0, | ||
1722 | .ksize = 16, | ||
1723 | }, { | ||
1724 | .key = "abcdefghijklmnop", | ||
1725 | .plaintext = vmac_string1, | ||
1726 | .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2", | ||
1727 | .psize = 128, | ||
1728 | .ksize = 16, | ||
1729 | }, { | ||
1730 | .key = "abcdefghijklmnop", | ||
1731 | .plaintext = vmac_string2, | ||
1732 | .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf", | ||
1733 | .psize = 128, | ||
1734 | .ksize = 16, | ||
1735 | }, { | ||
1736 | .key = "abcdefghijklmnop", | ||
1737 | .plaintext = vmac_string3, | ||
1738 | .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4", | ||
1739 | .psize = 128, | ||
1740 | .ksize = 16, | ||
1741 | }, | ||
1742 | }; | ||
1743 | |||
1657 | /* | 1744 | /* |
1658 | * SHA384 HMAC test vectors from RFC4231 | 1745 | * SHA384 HMAC test vectors from RFC4231 |
1659 | */ | 1746 | */ |
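Note that vmac_string1/2/3 are declared as 128-byte arrays, so everything beyond the initialised prefix is implicitly zero and each vector's .psize = 128 hashes the whole zero-padded buffer; vmac_string2 is therefore "abc" followed by 125 zero bytes, not a 3-byte message. The two keys exercise a binary key and a printable ASCII key ("abcdefghijklmnop"), both 16 bytes long to match the ksize the vmac setkey path below insists on.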
diff --git a/crypto/twofish.c b/crypto/twofish_generic.c index dfcda231f87a..1f07b843e07c 100644 --- a/crypto/twofish.c +++ b/crypto/twofish_generic.c | |||
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini); | |||
212 | 212 | ||
213 | MODULE_LICENSE("GPL"); | 213 | MODULE_LICENSE("GPL"); |
214 | MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); | 214 | MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); |
215 | MODULE_ALIAS("twofish"); | ||
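Renaming crypto/twofish.c to crypto/twofish_generic.c changes the module name, so the new MODULE_ALIAS("twofish") keeps the generic implementation loadable under the algorithm name: when the crypto core cannot find "twofish" it falls back to a module autoload request, and the alias lets that request resolve to the renamed module. A minimal sketch of an assumed caller (not part of this patch):

	#include <linux/crypto.h>

	/* Allocating by algorithm name still autoloads the renamed module. */
	static struct crypto_cipher *get_twofish(void)
	{
		return crypto_alloc_cipher("twofish", 0, 0);
	}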
diff --git a/crypto/vmac.c b/crypto/vmac.c new file mode 100644 index 000000000000..0999274a27ac --- /dev/null +++ b/crypto/vmac.c | |||
@@ -0,0 +1,675 @@ | |||
1 | /* | ||
2 | * Modified to interface to the Linux kernel | ||
3 | * Copyright (c) 2009, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | */ | ||
18 | |||
19 | /* -------------------------------------------------------------------------- | ||
20 | * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. | ||
21 | * This implementation is hereby placed in the public domain. | ||
22 | * The authors offer no warranty. Use at your own risk. | ||
23 | * Please send bug reports to the authors. | ||
24 | * Last modified: 17 APR 08, 1700 PDT | ||
25 | * ----------------------------------------------------------------------- */ | ||
26 | |||
27 | #include <linux/init.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/crypto.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | #include <asm/byteorder.h> | ||
32 | #include <crypto/scatterwalk.h> | ||
33 | #include <crypto/vmac.h> | ||
34 | #include <crypto/internal/hash.h> | ||
35 | |||
36 | /* | ||
37 | * Constants and masks | ||
38 | */ | ||
39 | #define UINT64_C(x) x##ULL | ||
40 | const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ | ||
41 | const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ | ||
42 | const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ | ||
43 | const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ | ||
44 | const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | ||
45 | |||
46 | #define pe64_to_cpup le64_to_cpup /* Prefer little endian */ | ||
47 | |||
48 | #ifdef __LITTLE_ENDIAN | ||
49 | #define INDEX_HIGH 1 | ||
50 | #define INDEX_LOW 0 | ||
51 | #else | ||
52 | #define INDEX_HIGH 0 | ||
53 | #define INDEX_LOW 1 | ||
54 | #endif | ||
55 | |||
56 | /* | ||
57 | * The following routines are used in this implementation. They are | ||
58 | * written via macros to simulate zero-overhead call-by-reference. | ||
59 | * | ||
60 | * MUL64: 64x64->128-bit multiplication | ||
61 | * PMUL64: assumes top bits cleared on inputs | ||
62 | * ADD128: 128x128->128-bit addition | ||
63 | */ | ||
64 | |||
65 | #define ADD128(rh, rl, ih, il) \ | ||
66 | do { \ | ||
67 | u64 _il = (il); \ | ||
68 | (rl) += (_il); \ | ||
69 | if ((rl) < (_il)) \ | ||
70 | (rh)++; \ | ||
71 | (rh) += (ih); \ | ||
72 | } while (0) | ||
73 | |||
74 | #define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) | ||
75 | |||
76 | #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ | ||
77 | do { \ | ||
78 | u64 _i1 = (i1), _i2 = (i2); \ | ||
79 | u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ | ||
80 | rh = MUL32(_i1>>32, _i2>>32); \ | ||
81 | rl = MUL32(_i1, _i2); \ | ||
82 | ADD128(rh, rl, (m >> 32), (m << 32)); \ | ||
83 | } while (0) | ||
84 | |||
85 | #define MUL64(rh, rl, i1, i2) \ | ||
86 | do { \ | ||
87 | u64 _i1 = (i1), _i2 = (i2); \ | ||
88 | u64 m1 = MUL32(_i1, _i2>>32); \ | ||
89 | u64 m2 = MUL32(_i1>>32, _i2); \ | ||
90 | rh = MUL32(_i1>>32, _i2>>32); \ | ||
91 | rl = MUL32(_i1, _i2); \ | ||
92 | ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ | ||
93 | ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ | ||
94 | } while (0) | ||
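These three macros give VHASH the 128-bit arithmetic it needs using only 32-bit multiplies: MUL64 is a full schoolbook 64x64->128 multiply, PMUL64 saves one partial product by relying on the inputs' top bits being clear (as noted above), and ADD128 detects the carry by comparing the low word after the add. A small userspace cross-check of the MUL64 decomposition, assuming a compiler with unsigned __int128 (illustrative only, not part of vmac.c):

	#include <assert.h>
	#include <stdint.h>

	/* Verify the four 32x32 partial products recombine to the full product. */
	static void mul64_by_parts(uint64_t a, uint64_t b)
	{
		uint64_t ah = a >> 32, al = (uint32_t)a;
		uint64_t bh = b >> 32, bl = (uint32_t)b;
		unsigned __int128 r;

		r  = (unsigned __int128)(ah * bh) << 64;	/* high x high */
		r += al * bl;					/* low x low   */
		r += (unsigned __int128)(al * bh) << 32;	/* cross terms, added  */
		r += (unsigned __int128)(ah * bl) << 32;	/* shifted as m1/m2 are */

		assert(r == (unsigned __int128)a * b);
	}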
95 | |||
96 | /* | ||
97 | * For highest performance the L1 NH and L2 polynomial hashes should be | ||
98 | * carefully implemented to take advantage of one's target architecture. | ||
99 | * Here these two hash functions are defined multiple times; once for | ||
100 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once | ||
101 | * for the remaining (32-bit) architectures. | ||
102 | * For each, nh_16 *must* be defined (works on multiples of 16 bytes). | ||
103 | * Optionally, nh_vmac_nhbytes can be defined (for multiples of | ||
104 | * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two | ||
105 | * NH computations at once). | ||
106 | */ | ||
107 | |||
108 | #ifdef CONFIG_64BIT | ||
109 | |||
110 | #define nh_16(mp, kp, nw, rh, rl) \ | ||
111 | do { \ | ||
112 | int i; u64 th, tl; \ | ||
113 | rh = rl = 0; \ | ||
114 | for (i = 0; i < nw; i += 2) { \ | ||
115 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ | ||
116 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
117 | ADD128(rh, rl, th, tl); \ | ||
118 | } \ | ||
119 | } while (0) | ||
120 | |||
121 | #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ | ||
122 | do { \ | ||
123 | int i; u64 th, tl; \ | ||
124 | rh1 = rl1 = rh = rl = 0; \ | ||
125 | for (i = 0; i < nw; i += 2) { \ | ||
126 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ | ||
127 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
128 | ADD128(rh, rl, th, tl); \ | ||
129 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ | ||
130 | pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | ||
131 | ADD128(rh1, rl1, th, tl); \ | ||
132 | } \ | ||
133 | } while (0) | ||
134 | |||
135 | #if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ | ||
136 | #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ | ||
137 | do { \ | ||
138 | int i; u64 th, tl; \ | ||
139 | rh = rl = 0; \ | ||
140 | for (i = 0; i < nw; i += 8) { \ | ||
141 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ | ||
142 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
143 | ADD128(rh, rl, th, tl); \ | ||
144 | MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ | ||
145 | pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | ||
146 | ADD128(rh, rl, th, tl); \ | ||
147 | MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ | ||
148 | pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | ||
149 | ADD128(rh, rl, th, tl); \ | ||
150 | MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ | ||
151 | pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | ||
152 | ADD128(rh, rl, th, tl); \ | ||
153 | } \ | ||
154 | } while (0) | ||
155 | |||
156 | #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ | ||
157 | do { \ | ||
158 | int i; u64 th, tl; \ | ||
159 | rh1 = rl1 = rh = rl = 0; \ | ||
160 | for (i = 0; i < nw; i += 8) { \ | ||
161 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ | ||
162 | pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
163 | ADD128(rh, rl, th, tl); \ | ||
164 | MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ | ||
165 | pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | ||
166 | ADD128(rh1, rl1, th, tl); \ | ||
167 | MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ | ||
168 | pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | ||
169 | ADD128(rh, rl, th, tl); \ | ||
170 | MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ | ||
171 | pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ | ||
172 | ADD128(rh1, rl1, th, tl); \ | ||
173 | MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ | ||
174 | pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | ||
175 | ADD128(rh, rl, th, tl); \ | ||
176 | MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ | ||
177 | pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ | ||
178 | ADD128(rh1, rl1, th, tl); \ | ||
179 | MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ | ||
180 | pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | ||
181 | ADD128(rh, rl, th, tl); \ | ||
182 | MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ | ||
183 | pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ | ||
184 | ADD128(rh1, rl1, th, tl); \ | ||
185 | } \ | ||
186 | } while (0) | ||
187 | #endif | ||
188 | |||
189 | #define poly_step(ah, al, kh, kl, mh, ml) \ | ||
190 | do { \ | ||
191 | u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ | ||
192 | /* compute ab*cd, put bd into result registers */ \ | ||
193 | PMUL64(t3h, t3l, al, kh); \ | ||
194 | PMUL64(t2h, t2l, ah, kl); \ | ||
195 | PMUL64(t1h, t1l, ah, 2*kh); \ | ||
196 | PMUL64(ah, al, al, kl); \ | ||
197 | /* add 2 * ac to result */ \ | ||
198 | ADD128(ah, al, t1h, t1l); \ | ||
199 | /* add together ad + bc */ \ | ||
200 | ADD128(t2h, t2l, t3h, t3l); \ | ||
201 | /* now (ah,al), (t2l,2*t2h) need summing */ \ | ||
202 | /* first add the high registers, carrying into t2h */ \ | ||
203 | ADD128(t2h, ah, z, t2l); \ | ||
204 | /* double t2h and add top bit of ah */ \ | ||
205 | t2h = 2 * t2h + (ah >> 63); \ | ||
206 | ah &= m63; \ | ||
207 | /* now add the low registers */ \ | ||
208 | ADD128(ah, al, mh, ml); \ | ||
209 | ADD128(ah, al, z, t2h); \ | ||
210 | } while (0) | ||
211 | |||
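poly_step above is the inner step of VHASH's second-level polynomial hash: it multiplies the 127-bit accumulator (ah,al) by the polynomial key (kh,kl) and adds the incoming NH output (mh,ml). The reduction appears to be modulo 2^127 - 1: the accumulator is kept below 2^127 via the m63 mask, and the upper half of the double-width product is folded back in doubled (hence the 2*kh and 2*t2h factors), using the identity 2^128 ≡ 2 (mod 2^127 - 1), so no full-width division is ever needed.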
212 | #else /* ! CONFIG_64BIT */ | ||
213 | |||
214 | #ifndef nh_16 | ||
215 | #define nh_16(mp, kp, nw, rh, rl) \ | ||
216 | do { \ | ||
217 | u64 t1, t2, m1, m2, t; \ | ||
218 | int i; \ | ||
219 | rh = rl = t = 0; \ | ||
220 | for (i = 0; i < nw; i += 2) { \ | ||
221 | t1 = pe64_to_cpup(mp+i) + kp[i]; \ | ||
222 | t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ | ||
223 | m2 = MUL32(t1 >> 32, t2); \ | ||
224 | m1 = MUL32(t1, t2 >> 32); \ | ||
225 | ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ | ||
226 | MUL32(t1, t2)); \ | ||
227 | rh += (u64)(u32)(m1 >> 32) \ | ||
228 | + (u32)(m2 >> 32); \ | ||
229 | t += (u64)(u32)m1 + (u32)m2; \ | ||
230 | } \ | ||
231 | ADD128(rh, rl, (t >> 32), (t << 32)); \ | ||
232 | } while (0) | ||
233 | #endif | ||
234 | |||
235 | static void poly_step_func(u64 *ahi, u64 *alo, | ||
236 | const u64 *kh, const u64 *kl, | ||
237 | const u64 *mh, const u64 *ml) | ||
238 | { | ||
239 | #define a0 (*(((u32 *)alo)+INDEX_LOW)) | ||
240 | #define a1 (*(((u32 *)alo)+INDEX_HIGH)) | ||
241 | #define a2 (*(((u32 *)ahi)+INDEX_LOW)) | ||
242 | #define a3 (*(((u32 *)ahi)+INDEX_HIGH)) | ||
243 | #define k0 (*(((u32 *)kl)+INDEX_LOW)) | ||
244 | #define k1 (*(((u32 *)kl)+INDEX_HIGH)) | ||
245 | #define k2 (*(((u32 *)kh)+INDEX_LOW)) | ||
246 | #define k3 (*(((u32 *)kh)+INDEX_HIGH)) | ||
247 | |||
248 | u64 p, q, t; | ||
249 | u32 t2; | ||
250 | |||
251 | p = MUL32(a3, k3); | ||
252 | p += p; | ||
253 | p += *(u64 *)mh; | ||
254 | p += MUL32(a0, k2); | ||
255 | p += MUL32(a1, k1); | ||
256 | p += MUL32(a2, k0); | ||
257 | t = (u32)(p); | ||
258 | p >>= 32; | ||
259 | p += MUL32(a0, k3); | ||
260 | p += MUL32(a1, k2); | ||
261 | p += MUL32(a2, k1); | ||
262 | p += MUL32(a3, k0); | ||
263 | t |= ((u64)((u32)p & 0x7fffffff)) << 32; | ||
264 | p >>= 31; | ||
265 | p += (u64)(((u32 *)ml)[INDEX_LOW]); | ||
266 | p += MUL32(a0, k0); | ||
267 | q = MUL32(a1, k3); | ||
268 | q += MUL32(a2, k2); | ||
269 | q += MUL32(a3, k1); | ||
270 | q += q; | ||
271 | p += q; | ||
272 | t2 = (u32)(p); | ||
273 | p >>= 32; | ||
274 | p += (u64)(((u32 *)ml)[INDEX_HIGH]); | ||
275 | p += MUL32(a0, k1); | ||
276 | p += MUL32(a1, k0); | ||
277 | q = MUL32(a2, k3); | ||
278 | q += MUL32(a3, k2); | ||
279 | q += q; | ||
280 | p += q; | ||
281 | *(u64 *)(alo) = (p << 32) | t2; | ||
282 | p >>= 32; | ||
283 | *(u64 *)(ahi) = p + t; | ||
284 | |||
285 | #undef a0 | ||
286 | #undef a1 | ||
287 | #undef a2 | ||
288 | #undef a3 | ||
289 | #undef k0 | ||
290 | #undef k1 | ||
291 | #undef k2 | ||
292 | #undef k3 | ||
293 | } | ||
294 | |||
295 | #define poly_step(ah, al, kh, kl, mh, ml) \ | ||
296 | poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) | ||
297 | |||
298 | #endif /* end of specialized NH and poly definitions */ | ||
299 | |||
300 | /* At least nh_16 is defined. Define others as needed here. */ | ||
301 | #ifndef nh_16_2 | ||
302 | #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ | ||
303 | do { \ | ||
304 | nh_16(mp, kp, nw, rh, rl); \ | ||
305 | nh_16(mp, ((kp)+2), nw, rh2, rl2); \ | ||
306 | } while (0) | ||
307 | #endif | ||
308 | #ifndef nh_vmac_nhbytes | ||
309 | #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ | ||
310 | nh_16(mp, kp, nw, rh, rl) | ||
311 | #endif | ||
312 | #ifndef nh_vmac_nhbytes_2 | ||
313 | #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ | ||
314 | do { \ | ||
315 | nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ | ||
316 | nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ | ||
317 | } while (0) | ||
318 | #endif | ||
319 | |||
320 | static void vhash_abort(struct vmac_ctx *ctx) | ||
321 | { | ||
322 | ctx->polytmp[0] = ctx->polykey[0] ; | ||
323 | ctx->polytmp[1] = ctx->polykey[1] ; | ||
324 | ctx->first_block_processed = 0; | ||
325 | } | ||
326 | |||
327 | static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) | ||
328 | { | ||
329 | u64 rh, rl, t, z = 0; | ||
330 | |||
331 | /* fully reduce (p1,p2)+(len,0) mod p127 */ | ||
332 | t = p1 >> 63; | ||
333 | p1 &= m63; | ||
334 | ADD128(p1, p2, len, t); | ||
335 | /* At this point, (p1,p2) is at most 2^127+(len<<64) */ | ||
336 | t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); | ||
337 | ADD128(p1, p2, z, t); | ||
338 | p1 &= m63; | ||
339 | |||
340 | /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ | ||
341 | t = p1 + (p2 >> 32); | ||
342 | t += (t >> 32); | ||
343 | t += (u32)t > 0xfffffffeu; | ||
344 | p1 += (t >> 32); | ||
345 | p2 += (p1 << 32); | ||
346 | |||
347 | /* compute (p1+k1)%p64 and (p2+k2)%p64 */ | ||
348 | p1 += k1; | ||
349 | p1 += (0 - (p1 < k1)) & 257; | ||
350 | p2 += k2; | ||
351 | p2 += (0 - (p2 < k2)) & 257; | ||
352 | |||
353 | /* compute (p1+k1)*(p2+k2)%p64 */ | ||
354 | MUL64(rh, rl, p1, p2); | ||
355 | t = rh >> 56; | ||
356 | ADD128(t, rl, z, rh); | ||
357 | rh <<= 8; | ||
358 | ADD128(t, rl, z, rh); | ||
359 | t += t << 8; | ||
360 | rl += t; | ||
361 | rl += (0 - (rl < t)) & 257; | ||
362 | rl += (0 - (rl > p64-1)) & 257; | ||
363 | return rl; | ||
364 | } | ||
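l3hash compresses the 127-bit VHASH state into 64 bits: it reduces modulo the prime p64 = 2^64 - 257 declared at the top of the file, offsets the two halves with the l3key words, and multiplies them mod p64. The shift-by-56/shift-by-8 sequence after MUL64 appears to be an inline reduction exploiting 2^64 ≡ 257 (mod p64), again avoiding a 128-bit division. A userspace reference for that reduction, assuming unsigned __int128 (illustrative, not kernel code):

	#include <stdint.h>

	/* Reduce a 128-bit value modulo p64 = 2^64 - 257. */
	static uint64_t mod_p64(unsigned __int128 x)
	{
		const unsigned __int128 p64 = ((unsigned __int128)1 << 64) - 257;

		return (uint64_t)(x % p64);
	}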
365 | |||
366 | static void vhash_update(const unsigned char *m, | ||
367 | unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */ | ||
368 | struct vmac_ctx *ctx) | ||
369 | { | ||
370 | u64 rh, rl, *mptr; | ||
371 | const u64 *kptr = (u64 *)ctx->nhkey; | ||
372 | int i; | ||
373 | u64 ch, cl; | ||
374 | u64 pkh = ctx->polykey[0]; | ||
375 | u64 pkl = ctx->polykey[1]; | ||
376 | |||
377 | mptr = (u64 *)m; | ||
378 | i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ | ||
379 | |||
380 | ch = ctx->polytmp[0]; | ||
381 | cl = ctx->polytmp[1]; | ||
382 | |||
383 | if (!ctx->first_block_processed) { | ||
384 | ctx->first_block_processed = 1; | ||
385 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
386 | rh &= m62; | ||
387 | ADD128(ch, cl, rh, rl); | ||
388 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
389 | i--; | ||
390 | } | ||
391 | |||
392 | while (i--) { | ||
393 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
394 | rh &= m62; | ||
395 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
396 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
397 | } | ||
398 | |||
399 | ctx->polytmp[0] = ch; | ||
400 | ctx->polytmp[1] = cl; | ||
401 | } | ||
402 | |||
403 | static u64 vhash(unsigned char m[], unsigned int mbytes, | ||
404 | u64 *tagl, struct vmac_ctx *ctx) | ||
405 | { | ||
406 | u64 rh, rl, *mptr; | ||
407 | const u64 *kptr = (u64 *)ctx->nhkey; | ||
408 | int i, remaining; | ||
409 | u64 ch, cl; | ||
410 | u64 pkh = ctx->polykey[0]; | ||
411 | u64 pkl = ctx->polykey[1]; | ||
412 | |||
413 | mptr = (u64 *)m; | ||
414 | i = mbytes / VMAC_NHBYTES; | ||
415 | remaining = mbytes % VMAC_NHBYTES; | ||
416 | |||
417 | if (ctx->first_block_processed) { | ||
418 | ch = ctx->polytmp[0]; | ||
419 | cl = ctx->polytmp[1]; | ||
420 | } else if (i) { | ||
421 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); | ||
422 | ch &= m62; | ||
423 | ADD128(ch, cl, pkh, pkl); | ||
424 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
425 | i--; | ||
426 | } else if (remaining) { | ||
427 | nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); | ||
428 | ch &= m62; | ||
429 | ADD128(ch, cl, pkh, pkl); | ||
430 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
431 | goto do_l3; | ||
432 | } else {/* Empty String */ | ||
433 | ch = pkh; cl = pkl; | ||
434 | goto do_l3; | ||
435 | } | ||
436 | |||
437 | while (i--) { | ||
438 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
439 | rh &= m62; | ||
440 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
441 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
442 | } | ||
443 | if (remaining) { | ||
444 | nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); | ||
445 | rh &= m62; | ||
446 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
447 | } | ||
448 | |||
449 | do_l3: | ||
450 | vhash_abort(ctx); | ||
451 | remaining *= 8; | ||
452 | return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); | ||
453 | } | ||
454 | |||
455 | static u64 vmac(unsigned char m[], unsigned int mbytes, | ||
456 | unsigned char n[16], u64 *tagl, | ||
457 | struct vmac_ctx_t *ctx) | ||
458 | { | ||
459 | u64 *in_n, *out_p; | ||
460 | u64 p, h; | ||
461 | int i; | ||
462 | |||
463 | in_n = ctx->__vmac_ctx.cached_nonce; | ||
464 | out_p = ctx->__vmac_ctx.cached_aes; | ||
465 | |||
466 | i = n[15] & 1; | ||
467 | if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { | ||
468 | in_n[0] = *(u64 *)(n); | ||
469 | in_n[1] = *(u64 *)(n+8); | ||
470 | ((unsigned char *)in_n)[15] &= 0xFE; | ||
471 | crypto_cipher_encrypt_one(ctx->child, | ||
472 | (unsigned char *)out_p, (unsigned char *)in_n); | ||
473 | |||
474 | ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); | ||
475 | } | ||
476 | p = be64_to_cpup(out_p + i); | ||
477 | h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); | ||
478 | return le64_to_cpu(p + h); | ||
479 | } | ||
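In vmac() above, the final tag is the 64-bit sum of the VHASH output and a pad derived from the nonce: the low bit of the last nonce byte is noted, the block is encrypted with the AES child cipher with that bit cleared, and the saved bit then selects which 64-bit half of the ciphertext is added to the hash. The encrypted block is cached in cached_nonce/cached_aes, so back-to-back calls whose nonces match the cached value can skip the block cipher entirely.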
480 | |||
481 | static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) | ||
482 | { | ||
483 | u64 in[2] = {0}, out[2]; | ||
484 | unsigned i; | ||
485 | int err = 0; | ||
486 | |||
487 | err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); | ||
488 | if (err) | ||
489 | return err; | ||
490 | |||
491 | /* Fill nh key */ | ||
492 | ((unsigned char *)in)[0] = 0x80; | ||
493 | for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { | ||
494 | crypto_cipher_encrypt_one(ctx->child, | ||
495 | (unsigned char *)out, (unsigned char *)in); | ||
496 | ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); | ||
497 | ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); | ||
498 | ((unsigned char *)in)[15] += 1; | ||
499 | } | ||
500 | |||
501 | /* Fill poly key */ | ||
502 | ((unsigned char *)in)[0] = 0xC0; | ||
503 | in[1] = 0; | ||
504 | for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { | ||
505 | crypto_cipher_encrypt_one(ctx->child, | ||
506 | (unsigned char *)out, (unsigned char *)in); | ||
507 | ctx->__vmac_ctx.polytmp[i] = | ||
508 | ctx->__vmac_ctx.polykey[i] = | ||
509 | be64_to_cpup(out) & mpoly; | ||
510 | ctx->__vmac_ctx.polytmp[i+1] = | ||
511 | ctx->__vmac_ctx.polykey[i+1] = | ||
512 | be64_to_cpup(out+1) & mpoly; | ||
513 | ((unsigned char *)in)[15] += 1; | ||
514 | } | ||
515 | |||
516 | /* Fill ip key */ | ||
517 | ((unsigned char *)in)[0] = 0xE0; | ||
518 | in[1] = 0; | ||
519 | for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { | ||
520 | do { | ||
521 | crypto_cipher_encrypt_one(ctx->child, | ||
522 | (unsigned char *)out, (unsigned char *)in); | ||
523 | ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); | ||
524 | ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); | ||
525 | ((unsigned char *)in)[15] += 1; | ||
526 | } while (ctx->__vmac_ctx.l3key[i] >= p64 | ||
527 | || ctx->__vmac_ctx.l3key[i+1] >= p64); | ||
528 | } | ||
529 | |||
530 | /* Invalidate nonce/aes cache and reset other elements */ | ||
531 | ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ | ||
532 | ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ | ||
533 | ctx->__vmac_ctx.first_block_processed = 0; | ||
534 | |||
535 | return err; | ||
536 | } | ||
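vmac_set_key derives all VHASH subkeys by running the AES child cipher over counter-style blocks: blocks whose first byte is 0x80, 0xC0 and 0xE0 (with an incrementing last byte) are encrypted to fill the NH key, the polynomial key and the L3 key respectively. The polynomial key words are masked with mpoly so they stay small enough for PMUL64, and the L3 key words are regenerated until they fall below p64, guaranteeing they are valid residues for l3hash.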
537 | |||
538 | static int vmac_setkey(struct crypto_shash *parent, | ||
539 | const u8 *key, unsigned int keylen) | ||
540 | { | ||
541 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
542 | |||
543 | if (keylen != VMAC_KEY_LEN) { | ||
544 | crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
545 | return -EINVAL; | ||
546 | } | ||
547 | |||
548 | return vmac_set_key((u8 *)key, ctx); | ||
549 | } | ||
550 | |||
551 | static int vmac_init(struct shash_desc *pdesc) | ||
552 | { | ||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | static int vmac_update(struct shash_desc *pdesc, const u8 *p, | ||
557 | unsigned int len) | ||
558 | { | ||
559 | struct crypto_shash *parent = pdesc->tfm; | ||
560 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
561 | |||
562 | vhash_update(p, len, &ctx->__vmac_ctx); | ||
563 | |||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | static int vmac_final(struct shash_desc *pdesc, u8 *out) | ||
568 | { | ||
569 | struct crypto_shash *parent = pdesc->tfm; | ||
570 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
571 | vmac_t mac; | ||
572 | u8 nonce[16] = {}; | ||
573 | |||
574 | mac = vmac(NULL, 0, nonce, NULL, ctx); | ||
575 | memcpy(out, &mac, sizeof(vmac_t)); | ||
576 | memset(&mac, 0, sizeof(vmac_t)); | ||
577 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | ||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | static int vmac_init_tfm(struct crypto_tfm *tfm) | ||
582 | { | ||
583 | struct crypto_cipher *cipher; | ||
584 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | ||
585 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | ||
586 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | ||
587 | |||
588 | cipher = crypto_spawn_cipher(spawn); | ||
589 | if (IS_ERR(cipher)) | ||
590 | return PTR_ERR(cipher); | ||
591 | |||
592 | ctx->child = cipher; | ||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static void vmac_exit_tfm(struct crypto_tfm *tfm) | ||
597 | { | ||
598 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | ||
599 | crypto_free_cipher(ctx->child); | ||
600 | } | ||
601 | |||
602 | static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
603 | { | ||
604 | struct shash_instance *inst; | ||
605 | struct crypto_alg *alg; | ||
606 | int err; | ||
607 | |||
608 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | ||
609 | if (err) | ||
610 | return err; | ||
611 | |||
612 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | ||
613 | CRYPTO_ALG_TYPE_MASK); | ||
614 | if (IS_ERR(alg)) | ||
615 | return PTR_ERR(alg); | ||
616 | |||
617 | inst = shash_alloc_instance("vmac", alg); | ||
618 | err = PTR_ERR(inst); | ||
619 | if (IS_ERR(inst)) | ||
620 | goto out_put_alg; | ||
621 | |||
622 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, | ||
623 | shash_crypto_instance(inst), | ||
624 | CRYPTO_ALG_TYPE_MASK); | ||
625 | if (err) | ||
626 | goto out_free_inst; | ||
627 | |||
628 | inst->alg.base.cra_priority = alg->cra_priority; | ||
629 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | ||
630 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | ||
631 | |||
632 | inst->alg.digestsize = sizeof(vmac_t); | ||
633 | inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); | ||
634 | inst->alg.base.cra_init = vmac_init_tfm; | ||
635 | inst->alg.base.cra_exit = vmac_exit_tfm; | ||
636 | |||
637 | inst->alg.init = vmac_init; | ||
638 | inst->alg.update = vmac_update; | ||
639 | inst->alg.final = vmac_final; | ||
640 | inst->alg.setkey = vmac_setkey; | ||
641 | |||
642 | err = shash_register_instance(tmpl, inst); | ||
643 | if (err) { | ||
644 | out_free_inst: | ||
645 | shash_free_instance(shash_crypto_instance(inst)); | ||
646 | } | ||
647 | |||
648 | out_put_alg: | ||
649 | crypto_mod_put(alg); | ||
650 | return err; | ||
651 | } | ||
652 | |||
653 | static struct crypto_template vmac_tmpl = { | ||
654 | .name = "vmac", | ||
655 | .create = vmac_create, | ||
656 | .free = shash_free_instance, | ||
657 | .module = THIS_MODULE, | ||
658 | }; | ||
659 | |||
660 | static int __init vmac_module_init(void) | ||
661 | { | ||
662 | return crypto_register_template(&vmac_tmpl); | ||
663 | } | ||
664 | |||
665 | static void __exit vmac_module_exit(void) | ||
666 | { | ||
667 | crypto_unregister_template(&vmac_tmpl); | ||
668 | } | ||
669 | |||
670 | module_init(vmac_module_init); | ||
671 | module_exit(vmac_module_exit); | ||
672 | |||
673 | MODULE_LICENSE("GPL"); | ||
674 | MODULE_DESCRIPTION("VMAC hash algorithm"); | ||
675 | |||
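Two practical points about this wrapper are worth noting. First, the template registers under the name "vmac", so instantiating it over AES — crypto_alloc_shash("vmac(aes)", 0, 0) followed by a 16-byte setkey — yields exactly the transform that the aes_vmac128_tv_template vectors above exercise. Second, vmac_final always passes an all-zero 16-byte nonce to vmac(), and the shash interface gives callers no way to supply their own, so every tag produced through this interface uses the same nonce-derived pad; that deviates from the VMAC specification, which expects a nonce that is unique per message, and callers should keep it in mind.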
diff --git a/crypto/xcbc.c b/crypto/xcbc.c index b63b633e549c..bb7b67fba349 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c | |||
@@ -19,211 +19,142 @@ | |||
19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> | 19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <crypto/scatterwalk.h> | 22 | #include <crypto/internal/hash.h> |
23 | #include <linux/crypto.h> | ||
24 | #include <linux/err.h> | 23 | #include <linux/err.h> |
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
27 | #include <linux/mm.h> | ||
28 | #include <linux/rtnetlink.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | 25 | ||
32 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, | 26 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, |
33 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, | 27 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, |
34 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; | 28 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; |
29 | |||
35 | /* | 30 | /* |
36 | * +------------------------ | 31 | * +------------------------ |
37 | * | <parent tfm> | 32 | * | <parent tfm> |
38 | * +------------------------ | 33 | * +------------------------ |
39 | * | crypto_xcbc_ctx | 34 | * | xcbc_tfm_ctx |
40 | * +------------------------ | 35 | * +------------------------ |
41 | * | odds (block size) | 36 | * | consts (block size * 2) |
42 | * +------------------------ | 37 | * +------------------------ |
43 | * | prev (block size) | 38 | */ |
39 | struct xcbc_tfm_ctx { | ||
40 | struct crypto_cipher *child; | ||
41 | u8 ctx[]; | ||
42 | }; | ||
43 | |||
44 | /* | ||
44 | * +------------------------ | 45 | * +------------------------ |
45 | * | key (block size) | 46 | * | <shash desc> |
46 | * +------------------------ | 47 | * +------------------------ |
47 | * | consts (block size * 3) | 48 | * | xcbc_desc_ctx |
49 | * +------------------------ | ||
50 | * | odds (block size) | ||
51 | * +------------------------ | ||
52 | * | prev (block size) | ||
48 | * +------------------------ | 53 | * +------------------------ |
49 | */ | 54 | */ |
50 | struct crypto_xcbc_ctx { | 55 | struct xcbc_desc_ctx { |
51 | struct crypto_cipher *child; | ||
52 | u8 *odds; | ||
53 | u8 *prev; | ||
54 | u8 *key; | ||
55 | u8 *consts; | ||
56 | void (*xor)(u8 *a, const u8 *b, unsigned int bs); | ||
57 | unsigned int keylen; | ||
58 | unsigned int len; | 56 | unsigned int len; |
57 | u8 ctx[]; | ||
59 | }; | 58 | }; |
60 | 59 | ||
61 | static void xor_128(u8 *a, const u8 *b, unsigned int bs) | 60 | static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, |
62 | { | 61 | const u8 *inkey, unsigned int keylen) |
63 | ((u32 *)a)[0] ^= ((u32 *)b)[0]; | ||
64 | ((u32 *)a)[1] ^= ((u32 *)b)[1]; | ||
65 | ((u32 *)a)[2] ^= ((u32 *)b)[2]; | ||
66 | ((u32 *)a)[3] ^= ((u32 *)b)[3]; | ||
67 | } | ||
68 | |||
69 | static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent, | ||
70 | struct crypto_xcbc_ctx *ctx) | ||
71 | { | 62 | { |
72 | int bs = crypto_hash_blocksize(parent); | 63 | unsigned long alignmask = crypto_shash_alignmask(parent); |
64 | struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); | ||
65 | int bs = crypto_shash_blocksize(parent); | ||
66 | u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | ||
73 | int err = 0; | 67 | int err = 0; |
74 | u8 key1[bs]; | 68 | u8 key1[bs]; |
75 | 69 | ||
76 | if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) | 70 | if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) |
77 | return err; | 71 | return err; |
78 | 72 | ||
79 | crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts); | 73 | crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); |
74 | crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); | ||
75 | crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); | ||
80 | 76 | ||
81 | return crypto_cipher_setkey(ctx->child, key1, bs); | 77 | return crypto_cipher_setkey(ctx->child, key1, bs); |
82 | } | ||
83 | |||
84 | static int crypto_xcbc_digest_setkey(struct crypto_hash *parent, | ||
85 | const u8 *inkey, unsigned int keylen) | ||
86 | { | ||
87 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | ||
88 | |||
89 | if (keylen != crypto_cipher_blocksize(ctx->child)) | ||
90 | return -EINVAL; | ||
91 | 78 | ||
92 | ctx->keylen = keylen; | ||
93 | memcpy(ctx->key, inkey, keylen); | ||
94 | ctx->consts = (u8*)ks; | ||
95 | |||
96 | return _crypto_xcbc_digest_setkey(parent, ctx); | ||
97 | } | 79 | } |
98 | 80 | ||
99 | static int crypto_xcbc_digest_init(struct hash_desc *pdesc) | 81 | static int crypto_xcbc_digest_init(struct shash_desc *pdesc) |
100 | { | 82 | { |
101 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm); | 83 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); |
102 | int bs = crypto_hash_blocksize(pdesc->tfm); | 84 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
85 | int bs = crypto_shash_blocksize(pdesc->tfm); | ||
86 | u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; | ||
103 | 87 | ||
104 | ctx->len = 0; | 88 | ctx->len = 0; |
105 | memset(ctx->odds, 0, bs); | 89 | memset(prev, 0, bs); |
106 | memset(ctx->prev, 0, bs); | ||
107 | 90 | ||
108 | return 0; | 91 | return 0; |
109 | } | 92 | } |
110 | 93 | ||
111 | static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, | 94 | static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, |
112 | struct scatterlist *sg, | 95 | unsigned int len) |
113 | unsigned int nbytes) | ||
114 | { | 96 | { |
115 | struct crypto_hash *parent = pdesc->tfm; | 97 | struct crypto_shash *parent = pdesc->tfm; |
116 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | 98 | unsigned long alignmask = crypto_shash_alignmask(parent); |
117 | struct crypto_cipher *tfm = ctx->child; | 99 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); |
118 | int bs = crypto_hash_blocksize(parent); | 100 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
119 | 101 | struct crypto_cipher *tfm = tctx->child; | |
120 | for (;;) { | 102 | int bs = crypto_shash_blocksize(parent); |
121 | struct page *pg = sg_page(sg); | 103 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); |
122 | unsigned int offset = sg->offset; | 104 | u8 *prev = odds + bs; |
123 | unsigned int slen = sg->length; | 105 | |
124 | 106 | /* checking the data can fill the block */ | |
125 | if (unlikely(slen > nbytes)) | 107 | if ((ctx->len + len) <= bs) { |
126 | slen = nbytes; | 108 | memcpy(odds + ctx->len, p, len); |
127 | 109 | ctx->len += len; | |
128 | nbytes -= slen; | 110 | return 0; |
129 | |||
130 | while (slen > 0) { | ||
131 | unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | ||
132 | char *p = crypto_kmap(pg, 0) + offset; | ||
133 | |||
134 | /* checking the data can fill the block */ | ||
135 | if ((ctx->len + len) <= bs) { | ||
136 | memcpy(ctx->odds + ctx->len, p, len); | ||
137 | ctx->len += len; | ||
138 | slen -= len; | ||
139 | |||
140 | /* checking the rest of the page */ | ||
141 | if (len + offset >= PAGE_SIZE) { | ||
142 | offset = 0; | ||
143 | pg++; | ||
144 | } else | ||
145 | offset += len; | ||
146 | |||
147 | crypto_kunmap(p, 0); | ||
148 | crypto_yield(pdesc->flags); | ||
149 | continue; | ||
150 | } | ||
151 | |||
152 | /* filling odds with new data and encrypting it */ | ||
153 | memcpy(ctx->odds + ctx->len, p, bs - ctx->len); | ||
154 | len -= bs - ctx->len; | ||
155 | p += bs - ctx->len; | ||
156 | |||
157 | ctx->xor(ctx->prev, ctx->odds, bs); | ||
158 | crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); | ||
159 | |||
160 | /* clearing the length */ | ||
161 | ctx->len = 0; | ||
162 | |||
163 | /* encrypting the rest of data */ | ||
164 | while (len > bs) { | ||
165 | ctx->xor(ctx->prev, p, bs); | ||
166 | crypto_cipher_encrypt_one(tfm, ctx->prev, | ||
167 | ctx->prev); | ||
168 | p += bs; | ||
169 | len -= bs; | ||
170 | } | ||
171 | |||
172 | /* keeping the surplus of blocksize */ | ||
173 | if (len) { | ||
174 | memcpy(ctx->odds, p, len); | ||
175 | ctx->len = len; | ||
176 | } | ||
177 | crypto_kunmap(p, 0); | ||
178 | crypto_yield(pdesc->flags); | ||
179 | slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | ||
180 | offset = 0; | ||
181 | pg++; | ||
182 | } | ||
183 | |||
184 | if (!nbytes) | ||
185 | break; | ||
186 | sg = scatterwalk_sg_next(sg); | ||
187 | } | 111 | } |
188 | 112 | ||
189 | return 0; | 113 | /* filling odds with new data and encrypting it */ |
190 | } | 114 | memcpy(odds + ctx->len, p, bs - ctx->len); |
115 | len -= bs - ctx->len; | ||
116 | p += bs - ctx->len; | ||
191 | 117 | ||
192 | static int crypto_xcbc_digest_update(struct hash_desc *pdesc, | 118 | crypto_xor(prev, odds, bs); |
193 | struct scatterlist *sg, | 119 | crypto_cipher_encrypt_one(tfm, prev, prev); |
194 | unsigned int nbytes) | ||
195 | { | ||
196 | if (WARN_ON_ONCE(in_irq())) | ||
197 | return -EDEADLK; | ||
198 | return crypto_xcbc_digest_update2(pdesc, sg, nbytes); | ||
199 | } | ||
200 | 120 | ||
201 | static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) | 121 | /* clearing the length */ |
202 | { | 122 | ctx->len = 0; |
203 | struct crypto_hash *parent = pdesc->tfm; | ||
204 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | ||
205 | struct crypto_cipher *tfm = ctx->child; | ||
206 | int bs = crypto_hash_blocksize(parent); | ||
207 | int err = 0; | ||
208 | |||
209 | if (ctx->len == bs) { | ||
210 | u8 key2[bs]; | ||
211 | 123 | ||
212 | if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) | 124 | /* encrypting the rest of data */ |
213 | return err; | 125 | while (len > bs) { |
126 | crypto_xor(prev, p, bs); | ||
127 | crypto_cipher_encrypt_one(tfm, prev, prev); | ||
128 | p += bs; | ||
129 | len -= bs; | ||
130 | } | ||
214 | 131 | ||
215 | crypto_cipher_encrypt_one(tfm, key2, | 132 | /* keeping the surplus of blocksize */ |
216 | (u8 *)(ctx->consts + bs)); | 133 | if (len) { |
134 | memcpy(odds, p, len); | ||
135 | ctx->len = len; | ||
136 | } | ||
217 | 137 | ||
218 | ctx->xor(ctx->prev, ctx->odds, bs); | 138 | return 0; |
219 | ctx->xor(ctx->prev, key2, bs); | 139 | } |
220 | _crypto_xcbc_digest_setkey(parent, ctx); | ||
221 | 140 | ||
222 | crypto_cipher_encrypt_one(tfm, out, ctx->prev); | 141 | static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) |
223 | } else { | 142 | { |
224 | u8 key3[bs]; | 143 | struct crypto_shash *parent = pdesc->tfm; |
144 | unsigned long alignmask = crypto_shash_alignmask(parent); | ||
145 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); | ||
146 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); | ||
147 | struct crypto_cipher *tfm = tctx->child; | ||
148 | int bs = crypto_shash_blocksize(parent); | ||
149 | u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); | ||
150 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | ||
151 | u8 *prev = odds + bs; | ||
152 | unsigned int offset = 0; | ||
153 | |||
154 | if (ctx->len != bs) { | ||
225 | unsigned int rlen; | 155 | unsigned int rlen; |
226 | u8 *p = ctx->odds + ctx->len; | 156 | u8 *p = odds + ctx->len; |
157 | |||
227 | *p = 0x80; | 158 | *p = 0x80; |
228 | p++; | 159 | p++; |
229 | 160 | ||
@@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) | |||
231 | if (rlen) | 162 | if (rlen) |
232 | memset(p, 0, rlen); | 163 | memset(p, 0, rlen); |
233 | 164 | ||
234 | if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) | 165 | offset += bs; |
235 | return err; | ||
236 | |||
237 | crypto_cipher_encrypt_one(tfm, key3, | ||
238 | (u8 *)(ctx->consts + bs * 2)); | ||
239 | |||
240 | ctx->xor(ctx->prev, ctx->odds, bs); | ||
241 | ctx->xor(ctx->prev, key3, bs); | ||
242 | |||
243 | _crypto_xcbc_digest_setkey(parent, ctx); | ||
244 | |||
245 | crypto_cipher_encrypt_one(tfm, out, ctx->prev); | ||
246 | } | 166 | } |
247 | 167 | ||
248 | return 0; | 168 | crypto_xor(prev, odds, bs); |
249 | } | 169 | crypto_xor(prev, consts + offset, bs); |
250 | 170 | ||
251 | static int crypto_xcbc_digest(struct hash_desc *pdesc, | 171 | crypto_cipher_encrypt_one(tfm, out, prev); |
252 | struct scatterlist *sg, unsigned int nbytes, u8 *out) | ||
253 | { | ||
254 | if (WARN_ON_ONCE(in_irq())) | ||
255 | return -EDEADLK; | ||
256 | 172 | ||
257 | crypto_xcbc_digest_init(pdesc); | 173 | return 0; |
258 | crypto_xcbc_digest_update2(pdesc, sg, nbytes); | ||
259 | return crypto_xcbc_digest_final(pdesc, out); | ||
260 | } | 174 | } |
261 | 175 | ||
262 | static int xcbc_init_tfm(struct crypto_tfm *tfm) | 176 | static int xcbc_init_tfm(struct crypto_tfm *tfm) |
@@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) | |||
264 | struct crypto_cipher *cipher; | 178 | struct crypto_cipher *cipher; |
265 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 179 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
266 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 180 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
267 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); | 181 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
268 | int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm)); | ||
269 | 182 | ||
270 | cipher = crypto_spawn_cipher(spawn); | 183 | cipher = crypto_spawn_cipher(spawn); |
271 | if (IS_ERR(cipher)) | 184 | if (IS_ERR(cipher)) |
272 | return PTR_ERR(cipher); | 185 | return PTR_ERR(cipher); |
273 | 186 | ||
274 | switch(bs) { | ||
275 | case 16: | ||
276 | ctx->xor = xor_128; | ||
277 | break; | ||
278 | default: | ||
279 | return -EINVAL; | ||
280 | } | ||
281 | |||
282 | ctx->child = cipher; | 187 | ctx->child = cipher; |
283 | ctx->odds = (u8*)(ctx+1); | ||
284 | ctx->prev = ctx->odds + bs; | ||
285 | ctx->key = ctx->prev + bs; | ||
286 | 188 | ||
287 | return 0; | 189 | return 0; |
288 | }; | 190 | }; |
289 | 191 | ||
290 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) | 192 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) |
291 | { | 193 | { |
292 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); | 194 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
293 | crypto_free_cipher(ctx->child); | 195 | crypto_free_cipher(ctx->child); |
294 | } | 196 | } |
295 | 197 | ||
296 | static struct crypto_instance *xcbc_alloc(struct rtattr **tb) | 198 | static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) |
297 | { | 199 | { |
298 | struct crypto_instance *inst; | 200 | struct shash_instance *inst; |
299 | struct crypto_alg *alg; | 201 | struct crypto_alg *alg; |
202 | unsigned long alignmask; | ||
300 | int err; | 203 | int err; |
301 | 204 | ||
302 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); | 205 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
303 | if (err) | 206 | if (err) |
304 | return ERR_PTR(err); | 207 | return err; |
305 | 208 | ||
306 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 209 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
307 | CRYPTO_ALG_TYPE_MASK); | 210 | CRYPTO_ALG_TYPE_MASK); |
308 | if (IS_ERR(alg)) | 211 | if (IS_ERR(alg)) |
309 | return ERR_CAST(alg); | 212 | return PTR_ERR(alg); |
310 | 213 | ||
311 | switch(alg->cra_blocksize) { | 214 | switch(alg->cra_blocksize) { |
312 | case 16: | 215 | case 16: |
313 | break; | 216 | break; |
314 | default: | 217 | default: |
315 | inst = ERR_PTR(-EINVAL); | ||
316 | goto out_put_alg; | 218 | goto out_put_alg; |
317 | } | 219 | } |
318 | 220 | ||
319 | inst = crypto_alloc_instance("xcbc", alg); | 221 | inst = shash_alloc_instance("xcbc", alg); |
222 | err = PTR_ERR(inst); | ||
320 | if (IS_ERR(inst)) | 223 | if (IS_ERR(inst)) |
321 | goto out_put_alg; | 224 | goto out_put_alg; |
322 | 225 | ||
323 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; | 226 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, |
324 | inst->alg.cra_priority = alg->cra_priority; | 227 | shash_crypto_instance(inst), |
325 | inst->alg.cra_blocksize = alg->cra_blocksize; | 228 | CRYPTO_ALG_TYPE_MASK); |
326 | inst->alg.cra_alignmask = alg->cra_alignmask; | 229 | if (err) |
327 | inst->alg.cra_type = &crypto_hash_type; | 230 | goto out_free_inst; |
328 | 231 | ||
329 | inst->alg.cra_hash.digestsize = alg->cra_blocksize; | 232 | alignmask = alg->cra_alignmask | 3; |
330 | inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + | 233 | inst->alg.base.cra_alignmask = alignmask; |
331 | ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); | 234 | inst->alg.base.cra_priority = alg->cra_priority; |
332 | inst->alg.cra_init = xcbc_init_tfm; | 235 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
333 | inst->alg.cra_exit = xcbc_exit_tfm; | 236 | |
334 | 237 | inst->alg.digestsize = alg->cra_blocksize; | |
335 | inst->alg.cra_hash.init = crypto_xcbc_digest_init; | 238 | inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), |
336 | inst->alg.cra_hash.update = crypto_xcbc_digest_update; | 239 | crypto_tfm_ctx_alignment()) + |
337 | inst->alg.cra_hash.final = crypto_xcbc_digest_final; | 240 | (alignmask & |
338 | inst->alg.cra_hash.digest = crypto_xcbc_digest; | 241 | ~(crypto_tfm_ctx_alignment() - 1)) + |
339 | inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey; | 242 | alg->cra_blocksize * 2; |
243 | |||
244 | inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), | ||
245 | alignmask + 1) + | ||
246 | alg->cra_blocksize * 2; | ||
247 | inst->alg.base.cra_init = xcbc_init_tfm; | ||
248 | inst->alg.base.cra_exit = xcbc_exit_tfm; | ||
249 | |||
250 | inst->alg.init = crypto_xcbc_digest_init; | ||
251 | inst->alg.update = crypto_xcbc_digest_update; | ||
252 | inst->alg.final = crypto_xcbc_digest_final; | ||
253 | inst->alg.setkey = crypto_xcbc_digest_setkey; | ||
254 | |||
255 | err = shash_register_instance(tmpl, inst); | ||
256 | if (err) { | ||
257 | out_free_inst: | ||
258 | shash_free_instance(shash_crypto_instance(inst)); | ||
259 | } | ||
340 | 260 | ||
341 | out_put_alg: | 261 | out_put_alg: |
342 | crypto_mod_put(alg); | 262 | crypto_mod_put(alg); |
343 | return inst; | 263 | return err; |
344 | } | ||
345 | |||
346 | static void xcbc_free(struct crypto_instance *inst) | ||
347 | { | ||
348 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
349 | kfree(inst); | ||
350 | } | 264 | } |
351 | 265 | ||
352 | static struct crypto_template crypto_xcbc_tmpl = { | 266 | static struct crypto_template crypto_xcbc_tmpl = { |
353 | .name = "xcbc", | 267 | .name = "xcbc", |
354 | .alloc = xcbc_alloc, | 268 | .create = xcbc_create, |
355 | .free = xcbc_free, | 269 | .free = shash_free_instance, |
356 | .module = THIS_MODULE, | 270 | .module = THIS_MODULE, |
357 | }; | 271 | }; |
358 | 272 | ||
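The xcbc rewrite follows the same pattern as the vmac template: moving to the shash interface removes the scatterlist walking, the in_irq() guards and the private xor_128() helper, since callers now hand in linear buffers and crypto_xor() does the mixing. State is also split more sensibly: the two derived constants live in the per-tfm context and are computed once at setkey time, while the running state (len, odds, prev) lives in the per-request descriptor. The final-block rule is the usual XCBC-MAC construction (as in RFC 3566), roughly:

	/*
	 * Sketch of the keying and final step implemented above (not new code).
	 *
	 * Derived in crypto_xcbc_digest_setkey() from the user key K:
	 *   K1 = E_K(0x01..01)  -> installed as the cipher key for the CBC pass
	 *   K2 = E_K(0x02..02)  -> consts[0..bs)
	 *   K3 = E_K(0x03..03)  -> consts[bs..2*bs)
	 *
	 * In crypto_xcbc_digest_final():
	 *   full last block:     MAC = E_K1(prev ^ last             ^ K2)
	 *   partial last block:  MAC = E_K1(prev ^ pad(last, 0x80)  ^ K3)
	 *
	 * where `offset` selects K2 or K3 out of consts[].
	 */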
diff --git a/crypto/xor.c b/crypto/xor.c index fc5b836f3430..b75182d8ab14 100644 --- a/crypto/xor.c +++ b/crypto/xor.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #define BH_TRACE 0 | 19 | #define BH_TRACE 0 |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/gfp.h> | ||
21 | #include <linux/raid/xor.h> | 22 | #include <linux/raid/xor.h> |
22 | #include <linux/jiffies.h> | 23 | #include <linux/jiffies.h> |
23 | #include <asm/xor.h> | 24 | #include <asm/xor.h> |
diff --git a/crypto/xts.c b/crypto/xts.c index d87b0f3102c3..555ecaab1e54 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb) | |||
224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 224 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
225 | CRYPTO_ALG_TYPE_MASK); | 225 | CRYPTO_ALG_TYPE_MASK); |
226 | if (IS_ERR(alg)) | 226 | if (IS_ERR(alg)) |
227 | return ERR_PTR(PTR_ERR(alg)); | 227 | return ERR_CAST(alg); |
228 | 228 | ||
229 | inst = crypto_alloc_instance("xts", alg); | 229 | inst = crypto_alloc_instance("xts", alg); |
230 | if (IS_ERR(inst)) | 230 | if (IS_ERR(inst)) |
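The xts.c change is purely cosmetic: ERR_PTR(PTR_ERR(alg)) decodes an error pointer to an errno and immediately re-encodes it, while ERR_CAST() expresses the same conversion as a type-preserving cast. Its definition in include/linux/err.h is essentially the following (paraphrased, minus annotations such as __force; check your tree for the exact form):

	/* Paraphrased from include/linux/err.h, shown for context only. */
	static inline void * __must_check ERR_CAST(const void *ptr)
	{
		/* cast away the const */
		return (void *)ptr;
	}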