author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 12:38:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 12:38:37 -0400
commit		332a3392188e0ad966543c87b8da2b9d246f301d (patch)
tree		ac0d570590bffdd1924426adc5b255857d2f3297
parent		a9c86d42599519f3d83b5f46bdab25046fe47b84 (diff)
parent		81bd5f6c966cf2f137c2759dfc78abdffcff055e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (102 commits)
crypto: sha-s390 - Fix warnings in import function
crypto: vmac - New hash algorithm for intel_txt support
crypto: api - Do not displace newly registered algorithms
crypto: ansi_cprng - Fix module initialization
crypto: xcbc - Fix alignment calculation of xcbc_tfm_ctx
crypto: fips - Depend on ansi_cprng
crypto: blkcipher - Do not use eseqiv on stream ciphers
crypto: ctr - Use chainiv on raw counter mode
Revert crypto: fips - Select CPRNG
crypto: rng - Fix typo
crypto: talitos - add support for 36 bit addressing
crypto: talitos - align locks on cache lines
crypto: talitos - simplify hmac data size calculation
crypto: mv_cesa - Add support for Orion5X crypto engine
crypto: cryptd - Add support to access underlaying shash
crypto: gcm - Use GHASH digest algorithm
crypto: ghash - Add GHASH digest algorithm for GCM
crypto: authenc - Convert to ahash
crypto: api - Fix aligned ctx helper
crypto: hmac - Prehash ipad/opad
...
53 files changed, 4515 insertions, 1512 deletions
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 4aba83b31596..2bc479ab3a66 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
 	const u8 *temp_key = key;
 	u32 *flags = &tfm->crt_flags;
 
-	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
+	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
 	for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
 	    memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-	    DES_KEY_SIZE))) {
-
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+	    DES_KEY_SIZE)) &&
+	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
 	for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index e85ba348722a..f6de7826c979 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc)
 	return 0;
 }
 
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_state *octx = out;
+
+	octx->count = sctx->count;
+	memcpy(octx->state, sctx->state, sizeof(octx->state));
+	memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
+	return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+	const struct sha1_state *ictx = in;
+
+	sctx->count = ictx->count;
+	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+	memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
+	sctx->func = KIMD_SHA_1;
+	return 0;
+}
+
 static struct shash_alg alg = {
 	.digestsize	= SHA1_DIGEST_SIZE,
 	.init		= sha1_init,
 	.update		= s390_sha_update,
 	.final		= s390_sha_final,
+	.export		= sha1_export,
+	.import		= sha1_import,
 	.descsize	= sizeof(struct s390_sha_ctx),
+	.statesize	= sizeof(struct sha1_state),
 	.base		= {
 		.cra_name	= "sha1",
 		.cra_driver_name= "sha1-s390",
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index f9fefc569632..61a7db372121 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc)
 	return 0;
 }
 
+static int sha256_export(struct shash_desc *desc, void *out)
+{
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+	struct sha256_state *octx = out;
+
+	octx->count = sctx->count;
+	memcpy(octx->state, sctx->state, sizeof(octx->state));
+	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+	return 0;
+}
+
+static int sha256_import(struct shash_desc *desc, const void *in)
+{
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+	const struct sha256_state *ictx = in;
+
+	sctx->count = ictx->count;
+	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	sctx->func = KIMD_SHA_256;
+	return 0;
+}
+
 static struct shash_alg alg = {
 	.digestsize	= SHA256_DIGEST_SIZE,
 	.init		= sha256_init,
 	.update		= s390_sha_update,
 	.final		= s390_sha_final,
+	.export		= sha256_export,
+	.import		= sha256_import,
 	.descsize	= sizeof(struct s390_sha_ctx),
+	.statesize	= sizeof(struct sha256_state),
 	.base		= {
 		.cra_name	= "sha256",
 		.cra_driver_name= "sha256-s390",
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 83192bfc8048..4bf73d0dc525 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -13,7 +13,10 @@
  *
  */
 #include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 
 #include "sha.h"
@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc)
 	return 0;
 }
 
+static int sha512_export(struct shash_desc *desc, void *out)
+{
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+	struct sha512_state *octx = out;
+
+	octx->count[0] = sctx->count;
+	octx->count[1] = 0;
+	memcpy(octx->state, sctx->state, sizeof(octx->state));
+	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+	return 0;
+}
+
+static int sha512_import(struct shash_desc *desc, const void *in)
+{
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+	const struct sha512_state *ictx = in;
+
+	if (unlikely(ictx->count[1]))
+		return -ERANGE;
+	sctx->count = ictx->count[0];
+
+	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	sctx->func = KIMD_SHA_512;
+	return 0;
+}
+
 static struct shash_alg sha512_alg = {
 	.digestsize	= SHA512_DIGEST_SIZE,
 	.init		= sha512_init,
 	.update		= s390_sha_update,
 	.final		= s390_sha_final,
+	.export		= sha512_export,
+	.import		= sha512_import,
 	.descsize	= sizeof(struct s390_sha_ctx),
+	.statesize	= sizeof(struct sha512_state),
 	.base		= {
 		.cra_name	= "sha512",
 		.cra_driver_name= "sha512-s390",
@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = {
 	.init		= sha384_init,
 	.update		= s390_sha_update,
 	.final		= s390_sha_final,
+	.export		= sha512_export,
+	.import		= sha512_import,
 	.descsize	= sizeof(struct s390_sha_ctx),
+	.statesize	= sizeof(struct sha512_state),
 	.base		= {
 		.cra_name	= "sha384",
 		.cra_driver_name= "sha384-s390",
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index c580c5ec1cad..d3ec8d588d4b 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -636,7 +636,7 @@ static int __init aesni_init(void)
 	int err;
 
 	if (!cpu_has_aes) {
-		printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
+		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
 	if ((err = crypto_register_alg(&aesni_alg)))
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4dfdd03e708f..26b5dd0cb564 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,11 +23,13 @@ comment "Crypto core or helper"
 
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
+	depends on CRYPTO_ANSI_CPRNG
 	help
 	  This options enables the fips boot option which is
 	  required if you want to system to operate in a FIPS 200
 	  certification. You should say no unless you know what
-	  this is.
+	  this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+	  option is selected
 
 config CRYPTO_ALGAPI
 	tristate
@@ -156,7 +158,7 @@ config CRYPTO_GCM
 	tristate "GCM/GMAC support"
 	select CRYPTO_CTR
 	select CRYPTO_AEAD
-	select CRYPTO_GF128MUL
+	select CRYPTO_GHASH
 	help
 	  Support for Galois/Counter Mode (GCM) and Galois Message
 	  Authentication Code (GMAC). Required for IPSec.
@@ -267,6 +269,18 @@ config CRYPTO_XCBC
 	  http://csrc.nist.gov/encryption/modes/proposedmodes/
 	  xcbc-mac/xcbc-mac-spec.pdf
 
+config CRYPTO_VMAC
+	tristate "VMAC support"
+	depends on EXPERIMENTAL
+	select CRYPTO_HASH
+	select CRYPTO_MANAGER
+	help
+	  VMAC is a message authentication algorithm designed for
+	  very high speed on 64-bit architectures.
+
+	  See also:
+	  <http://fastcrypto.org/vmac>
+
 comment "Digest"
 
 config CRYPTO_CRC32C
@@ -289,6 +303,13 @@ config CRYPTO_CRC32C_INTEL
 	  gain performance compared with software implementation.
 	  Module will be crc32c-intel.
 
+config CRYPTO_GHASH
+	tristate "GHASH digest algorithm"
+	select CRYPTO_SHASH
+	select CRYPTO_GF128MUL
+	help
+	  GHASH is message digest algorithm for GCM (Galois/Counter Mode).
+
 config CRYPTO_MD4
 	tristate "MD4 digest algorithm"
 	select CRYPTO_HASH
@@ -780,13 +801,14 @@ comment "Random Number Generation"
 
 config CRYPTO_ANSI_CPRNG
 	tristate "Pseudo Random Number Generation for Cryptographic modules"
+	default m
 	select CRYPTO_AES
 	select CRYPTO_RNG
-	select CRYPTO_FIPS
 	help
 	  This option enables the generic pseudo random number generator
 	  for cryptographic modules. Uses the Algorithm specified in
-	  ANSI X9.31 A.2.4
+	  ANSI X9.31 A.2.4. Not this option must be enabled if CRYPTO_FIPS
+	  is selected
 
 source "drivers/crypto/Kconfig"
 
diff --git a/crypto/Makefile b/crypto/Makefile
index 673d9f7c1bda..9e8f61908cb5 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o digest.o compress.o
+crypto-objs := api.o cipher.o compress.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
@@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 
-crypto_hash-objs := hash.o
 crypto_hash-objs += ahash.o
 crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
@@ -33,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
 obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_RNG2) += krng.o
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e11ce37c7104..f6f08336df5d 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,6 +14,7 @@
  */
 
 #include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -25,6 +26,8 @@
 
 #include "internal.h"
 
+static const char *skcipher_default_geniv __read_mostly;
+
 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
 {
@@ -180,7 +183,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);
 
 const char *crypto_default_geniv(const struct crypto_alg *alg)
 {
-	return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
+	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+					 alg->cra_ablkcipher.ivsize) !=
+	    alg->cra_blocksize)
+		return "chainiv";
+
+	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+	       "eseqiv" : skcipher_default_geniv;
 }
 
 static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +211,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
 	int err;
 
 	larval = crypto_larval_lookup(alg->cra_driver_name,
+				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
-				      CRYPTO_ALG_TYPE_MASK);
+				      mask | CRYPTO_ALG_TYPE_MASK);
 	err = PTR_ERR(larval);
 	if (IS_ERR(larval))
 		goto out;
@@ -360,3 +371,17 @@ err:
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
+
+static int __init skcipher_module_init(void)
+{
+	skcipher_default_geniv = num_possible_cpus() > 1 ?
+				 "eseqiv" : "chainiv";
+	return 0;
+}
+
+static void skcipher_module_exit(void)
+{
+}
+
+module_init(skcipher_module_init);
+module_exit(skcipher_module_exit);
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index b8b66ec3883b..e78b7ee44a74 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
 	ctx->key_enc[6 * i + 11] = t;	\
 } while (0)
 
-#define loop8(i)	do {		\
+#define loop8tophalf(i)	do {		\
 	t = ror32(t, 8);		\
 	t = ls_box(t) ^ rco_tab[i];	\
 	t ^= ctx->key_enc[8 * i];	\
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
 	ctx->key_enc[8 * i + 10] = t;	\
 	t ^= ctx->key_enc[8 * i + 3];	\
 	ctx->key_enc[8 * i + 11] = t;	\
+} while (0)
+
+#define loop8(i)	do {		\
+	loop8tophalf(i);		\
 	t = ctx->key_enc[8 * i + 4] ^ ls_box(t);	\
 	ctx->key_enc[8 * i + 12] = t;	\
 	t ^= ctx->key_enc[8 * i + 5];	\
@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		ctx->key_enc[5] = le32_to_cpu(key[5]);
 		ctx->key_enc[6] = le32_to_cpu(key[6]);
 		t = ctx->key_enc[7] = le32_to_cpu(key[7]);
-		for (i = 0; i < 7; ++i)
+		for (i = 0; i < 6; ++i)
 			loop8(i);
+		loop8tophalf(i);
 		break;
 	}
 
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f3476374f764..33a4ff45f842 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -24,6 +24,19 @@
 
 #include "internal.h"
 
+struct ahash_request_priv {
+	crypto_completion_t complete;
+	void *data;
+	u8 *result;
+	void *ubuf[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+			    halg);
+}
+
 static int hash_walk_next(struct crypto_hash_walk *walk)
 {
 	unsigned int alignmask = walk->alignmask;
@@ -132,36 +145,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
 static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
 {
-	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
 	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 	int ret;
 	u8 *buffer, *alignbuffer;
 	unsigned long absize;
 
 	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_ATOMIC);
+	buffer = kmalloc(absize, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 	memcpy(alignbuffer, key, keylen);
-	ret = ahash->setkey(tfm, alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
+	ret = tfm->setkey(tfm, alignbuffer, keylen);
+	kzfree(buffer);
 	return ret;
 }
 
-static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
 {
-	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
 	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 
 	if ((unsigned long)key & alignmask)
 		return ahash_setkey_unaligned(tfm, key, keylen);
 
-	return ahash->setkey(tfm, key, keylen);
+	return tfm->setkey(tfm, key, keylen);
 }
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
 static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
@@ -169,44 +180,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
 	return -ENOSYS;
 }
 
-int crypto_ahash_import(struct ahash_request *req, const u8 *in)
+static inline unsigned int ahash_align_buffer_size(unsigned len,
+						   unsigned long mask)
+{
+	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+}
+
+static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+{
+	struct ahash_request_priv *priv = req->priv;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+	kzfree(priv);
+}
+
+static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+{
+	struct ahash_request *areq = req->data;
+	struct ahash_request_priv *priv = areq->priv;
+	crypto_completion_t complete = priv->complete;
+	void *data = priv->data;
+
+	ahash_op_unaligned_finish(areq, err);
+
+	complete(data, err);
+}
+
+static int ahash_op_unaligned(struct ahash_request *req,
+			      int (*op)(struct ahash_request *))
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg = crypto_ahash_alg(tfm);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+	unsigned int ds = crypto_ahash_digestsize(tfm);
+	struct ahash_request_priv *priv;
+	int err;
+
+	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC);
+	if (!priv)
+		return -ENOMEM;
 
-	memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm));
+	priv->result = req->result;
+	priv->complete = req->base.complete;
+	priv->data = req->base.data;
 
-	if (alg->reinit)
-		alg->reinit(req);
+	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+	req->base.complete = ahash_op_unaligned_done;
+	req->base.data = req;
+	req->priv = priv;
 
-	return 0;
+	err = op(req);
+	ahash_op_unaligned_finish(req, err);
+
+	return err;
 }
-EXPORT_SYMBOL_GPL(crypto_ahash_import);
 
-static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
-					 u32 mask)
+static int crypto_ahash_op(struct ahash_request *req,
+			   int (*op)(struct ahash_request *))
 {
-	return alg->cra_ctxsize;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+	if ((unsigned long)req->result & alignmask)
+		return ahash_op_unaligned(req, op);
+
+	return op(req);
 }
 
-static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+int crypto_ahash_final(struct ahash_request *req)
 {
-	struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
-	struct ahash_tfm *crt = &tfm->crt_ahash;
+	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
-	if (alg->digestsize > PAGE_SIZE / 8)
-		return -EINVAL;
+int crypto_ahash_finup(struct ahash_request *req)
+{
+	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+
+int crypto_ahash_digest(struct ahash_request *req)
+{
+	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_digest);
+
+static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+{
+	struct ahash_request_priv *priv = req->priv;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
 
-	crt->init = alg->init;
-	crt->update = alg->update;
-	crt->final = alg->final;
-	crt->digest = alg->digest;
-	crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey;
-	crt->digestsize = alg->digestsize;
+	kzfree(priv);
+}
+
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+{
+	struct ahash_request *areq = req->data;
+	struct ahash_request_priv *priv = areq->priv;
+	crypto_completion_t complete = priv->complete;
+	void *data = priv->data;
+
+	ahash_def_finup_finish2(areq, err);
+
+	complete(data, err);
+}
+
+static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+{
+	if (err)
+		goto out;
+
+	req->base.complete = ahash_def_finup_done2;
+	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_ahash_reqtfm(req)->final(req);
+
+out:
+	ahash_def_finup_finish2(req, err);
+	return err;
+}
+
+static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+{
+	struct ahash_request *areq = req->data;
+	struct ahash_request_priv *priv = areq->priv;
+	crypto_completion_t complete = priv->complete;
+	void *data = priv->data;
+
+	err = ahash_def_finup_finish1(areq, err);
+
+	complete(data, err);
+}
+
+static int ahash_def_finup(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+	unsigned int ds = crypto_ahash_digestsize(tfm);
+	struct ahash_request_priv *priv;
+
+	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->result = req->result;
+	priv->complete = req->base.complete;
+	priv->data = req->base.data;
+
+	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+	req->base.complete = ahash_def_finup_done1;
+	req->base.data = req;
+	req->priv = priv;
+
+	return ahash_def_finup_finish1(req, tfm->update(req));
+}
+
+static int ahash_no_export(struct ahash_request *req, void *out)
+{
+	return -ENOSYS;
+}
+
+static int ahash_no_import(struct ahash_request *req, const void *in)
+{
+	return -ENOSYS;
+}
+
+static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
+	struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+	hash->setkey = ahash_nosetkey;
+	hash->export = ahash_no_export;
+	hash->import = ahash_no_import;
+
+	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+		return crypto_init_shash_ops_async(tfm);
+
+	hash->init = alg->init;
+	hash->update = alg->update;
+	hash->final = alg->final;
+	hash->finup = alg->finup ?: ahash_def_finup;
+	hash->digest = alg->digest;
+
+	if (alg->setkey)
+		hash->setkey = alg->setkey;
+	if (alg->export)
+		hash->export = alg->export;
+	if (alg->import)
+		hash->import = alg->import;
 
 	return 0;
 }
 
+static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_ahash_type)
+		return alg->cra_ctxsize;
+
+	return sizeof(struct crypto_shash *);
+}
+
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
 static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -215,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
 	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
-	seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
+	seq_printf(m, "digestsize : %u\n",
+		   __crypto_hash_alg_common(alg)->digestsize);
 }
 
 const struct crypto_type crypto_ahash_type = {
-	.ctxsize = crypto_ahash_ctxsize,
-	.init = crypto_init_ahash_ops,
+	.extsize = crypto_ahash_extsize,
+	.init_tfm = crypto_ahash_init_tfm,
 #ifdef CONFIG_PROC_FS
 	.show = crypto_ahash_show,
 #endif
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
+	.type = CRYPTO_ALG_TYPE_AHASH,
+	.tfmsize = offsetof(struct crypto_ahash, base),
 };
 EXPORT_SYMBOL_GPL(crypto_ahash_type);
 
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
+
+static int ahash_prepare_alg(struct ahash_alg *alg)
+{
+	struct crypto_alg *base = &alg->halg.base;
+
+	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+	    alg->halg.statesize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	base->cra_type = &crypto_ahash_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+
+	return 0;
+}
+
+int crypto_register_ahash(struct ahash_alg *alg)
+{
+	struct crypto_alg *base = &alg->halg.base;
+	int err;
+
+	err = ahash_prepare_alg(alg);
+	if (err)
+		return err;
+
+	return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_ahash);
+
+int crypto_unregister_ahash(struct ahash_alg *alg)
+{
+	return crypto_unregister_alg(&alg->halg.base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
+
+int ahash_register_instance(struct crypto_template *tmpl,
+			    struct ahash_instance *inst)
+{
+	int err;
+
+	err = ahash_prepare_alg(&inst->alg);
+	if (err)
+		return err;
+
+	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_register_instance);
+
+void ahash_free_instance(struct crypto_instance *inst)
+{
+	crypto_drop_spawn(crypto_instance_ctx(inst));
+	kfree(ahash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_free_instance);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+			    struct hash_alg_common *alg,
+			    struct crypto_instance *inst)
+{
+	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+				  &crypto_ahash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+	struct crypto_alg *alg;
+
+	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
+	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
+}
+EXPORT_SYMBOL_GPL(ahash_attr_alg);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index df0863d56995..f149b1c8b76d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
 	crypto_tmpl_put(tmpl);
 }
 
+static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
+					    struct list_head *stack,
+					    struct list_head *top,
+					    struct list_head *secondary_spawns)
+{
+	struct crypto_spawn *spawn, *n;
+
+	if (list_empty(stack))
+		return NULL;
+
+	spawn = list_first_entry(stack, struct crypto_spawn, list);
+	n = list_entry(spawn->list.next, struct crypto_spawn, list);
+
+	if (spawn->alg && &n->list != stack && !n->alg)
+		n->alg = (n->list.next == stack) ? alg :
+			 &list_entry(n->list.next, struct crypto_spawn,
+				     list)->inst->alg;
+
+	list_move(&spawn->list, secondary_spawns);
+
+	return &n->list == stack ? top : &n->inst->alg.cra_users;
+}
+
 static void crypto_remove_spawn(struct crypto_spawn *spawn,
-				struct list_head *list,
-				struct list_head *secondary_spawns)
+				struct list_head *list)
 {
 	struct crypto_instance *inst = spawn->inst;
 	struct crypto_template *tmpl = inst->tmpl;
 
-	list_del_init(&spawn->list);
-	spawn->alg = NULL;
-
 	if (crypto_is_dead(&inst->alg))
 		return;
 
@@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
 	hlist_del(&inst->list);
 	inst->alg.cra_destroy = crypto_destroy_instance;
 
-	list_splice(&inst->alg.cra_users, secondary_spawns);
+	BUG_ON(!list_empty(&inst->alg.cra_users));
 }
 
-static void crypto_remove_spawns(struct list_head *spawns,
-				 struct list_head *list, u32 new_type)
+static void crypto_remove_spawns(struct crypto_alg *alg,
+				 struct list_head *list,
+				 struct crypto_alg *nalg)
 {
+	u32 new_type = (nalg ?: alg)->cra_flags;
 	struct crypto_spawn *spawn, *n;
 	LIST_HEAD(secondary_spawns);
+	struct list_head *spawns;
+	LIST_HEAD(stack);
+	LIST_HEAD(top);
 
+	spawns = &alg->cra_users;
 	list_for_each_entry_safe(spawn, n, spawns, list) {
 		if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
 			continue;
 
-		crypto_remove_spawn(spawn, list, &secondary_spawns);
+		list_move(&spawn->list, &top);
 	}
 
-	while (!list_empty(&secondary_spawns)) {
-		list_for_each_entry_safe(spawn, n, &secondary_spawns, list)
-			crypto_remove_spawn(spawn, list, &secondary_spawns);
+	spawns = &top;
+	do {
+		while (!list_empty(spawns)) {
+			struct crypto_instance *inst;
+
+			spawn = list_first_entry(spawns, struct crypto_spawn,
+						 list);
+			inst = spawn->inst;
+
+			BUG_ON(&inst->alg == alg);
+
+			list_move(&spawn->list, &stack);
+
+			if (&inst->alg == nalg)
+				break;
+
+			spawn->alg = NULL;
+			spawns = &inst->alg.cra_users;
+		}
+	} while ((spawns = crypto_more_spawns(alg, &stack, &top,
+					      &secondary_spawns)));
+
+	list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
+		if (spawn->alg)
+			list_move(&spawn->list, &spawn->alg->cra_users);
+		else
+			crypto_remove_spawn(spawn, list);
 	}
 }
 
@@ -258,7 +307,7 @@ found:
 		    q->cra_priority > alg->cra_priority)
 			continue;
 
-		crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags);
+		crypto_remove_spawns(q, &list, alg);
 	}
 
 complete:
@@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
 
 	crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
 	list_del_init(&alg->cra_list);
-	crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags);
+	crypto_remove_spawns(alg, list, NULL);
 
 	return 0;
 }
@@ -488,20 +537,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
 }
 EXPORT_SYMBOL_GPL(crypto_init_spawn);
 
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+		       struct crypto_instance *inst,
+		       const struct crypto_type *frontend)
+{
+	int err = -EINVAL;
+
+	if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+		goto out;
+
+	spawn->frontend = frontend;
+	err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_init_spawn2);
+
 void crypto_drop_spawn(struct crypto_spawn *spawn)
 {
+	if (!spawn->alg)
+		return;
+
 	down_write(&crypto_alg_sem);
 	list_del(&spawn->list);
 	up_write(&crypto_alg_sem);
 }
 EXPORT_SYMBOL_GPL(crypto_drop_spawn);
 
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
-				    u32 mask)
+static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
 {
 	struct crypto_alg *alg;
 	struct crypto_alg *alg2;
-	struct crypto_tfm *tfm;
 
 	down_read(&crypto_alg_sem);
 	alg = spawn->alg;
@@ -516,6 +583,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
 		return ERR_PTR(-EAGAIN);
 	}
 
+	return alg;
+}
+
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+				    u32 mask)
+{
+	struct crypto_alg *alg;
+	struct crypto_tfm *tfm;
+
+	alg = crypto_spawn_alg(spawn);
+	if (IS_ERR(alg))
+		return ERR_CAST(alg);
+
 	tfm = ERR_PTR(-EINVAL);
 	if (unlikely((alg->cra_flags ^ type) & mask))
 		goto out_put_alg;
@@ -532,6 +612,27 @@ out_put_alg:
 }
 EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
 
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
+{
+	struct crypto_alg *alg;
+	struct crypto_tfm *tfm;
+
+	alg = crypto_spawn_alg(spawn);
+	if (IS_ERR(alg))
+		return ERR_CAST(alg);
+
+	tfm = crypto_create_tfm(alg, spawn->frontend);
+	if (IS_ERR(tfm))
+		goto out_put_alg;
+
+	return tfm;
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
 int crypto_register_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&crypto_chain, nb);
@@ -595,7 +696,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
 }
 EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
 
-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+				    const struct crypto_type *frontend,
+				    u32 type, u32 mask)
 {
 	const char *name;
 	int err;
@@ -605,9 +708,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
 	if (IS_ERR(name))
 		return ERR_PTR(err);
 
-	return crypto_alg_mod_lookup(name, type, mask);
+	return crypto_find_alg(name, frontend, type, mask);
 }
-EXPORT_SYMBOL_GPL(crypto_attr_alg);
+EXPORT_SYMBOL_GPL(crypto_attr_alg2);
 
 int crypto_attr_u32(struct rtattr *rta, u32 *num)
 {
@@ -627,17 +730,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
 }
 EXPORT_SYMBOL_GPL(crypto_attr_u32);
 
-struct crypto_instance *crypto_alloc_instance(const char *name,
-					      struct crypto_alg *alg)
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+			     unsigned int head)
 {
 	struct crypto_instance *inst;
-	struct crypto_spawn *spawn;
+	char *p;
 	int err;
 
-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
+	p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
+		    GFP_KERNEL);
+	if (!p)
 		return ERR_PTR(-ENOMEM);
 
+	inst = (void *)(p + head);
+
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
 		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -647,6 +753,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
 		     name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_free_inst;
 
+	return p;
+
+err_free_inst:
+	kfree(p);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
+
+struct crypto_instance *crypto_alloc_instance(const char *name,
+					      struct crypto_alg *alg)
+{
+	struct crypto_instance *inst;
+	struct crypto_spawn *spawn;
+	int err;
+
+	inst = crypto_alloc_instance2(name, alg, 0);
+	if (IS_ERR(inst))
+		goto out;
+
 	spawn = crypto_instance_ctx(inst);
 	err = crypto_init_spawn(spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
@@ -658,7 +783,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
 
 err_free_inst:
 	kfree(inst);
-	return ERR_PTR(err);
+	inst = ERR_PTR(err);
+
+out:
+	return inst;
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_instance);
 
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 9908dd830c26..412241ce4cfa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data)
 		goto err;
 
 	do {
+		if (tmpl->create) {
+			err = tmpl->create(tmpl, param->tb);
+			continue;
+		}
+
 		inst = tmpl->alloc(param->tb);
 		if (IS_ERR(inst))
 			err = PTR_ERR(inst);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index d80ed4c1e009..3aa6e3834bfe 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c | |||
@@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx) | |||
187 | /* Our exported functions */ | 187 | /* Our exported functions */ |
188 | static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) | 188 | static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) |
189 | { | 189 | { |
190 | unsigned long flags; | ||
191 | unsigned char *ptr = buf; | 190 | unsigned char *ptr = buf; |
192 | unsigned int byte_count = (unsigned int)nbytes; | 191 | unsigned int byte_count = (unsigned int)nbytes; |
193 | int err; | 192 | int err; |
@@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) | |||
196 | if (nbytes < 0) | 195 | if (nbytes < 0) |
197 | return -EINVAL; | 196 | return -EINVAL; |
198 | 197 | ||
199 | spin_lock_irqsave(&ctx->prng_lock, flags); | 198 | spin_lock_bh(&ctx->prng_lock); |
200 | 199 | ||
201 | err = -EINVAL; | 200 | err = -EINVAL; |
202 | if (ctx->flags & PRNG_NEED_RESET) | 201 | if (ctx->flags & PRNG_NEED_RESET) |
@@ -268,7 +267,7 @@ empty_rbuf: | |||
268 | goto remainder; | 267 | goto remainder; |
269 | 268 | ||
270 | done: | 269 | done: |
271 | spin_unlock_irqrestore(&ctx->prng_lock, flags); | 270 | spin_unlock_bh(&ctx->prng_lock); |
272 | dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", | 271 | dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", |
273 | err, ctx); | 272 | err, ctx); |
274 | return err; | 273 | return err; |
@@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx, | |||
284 | unsigned char *V, unsigned char *DT) | 283 | unsigned char *V, unsigned char *DT) |
285 | { | 284 | { |
286 | int ret; | 285 | int ret; |
287 | int rc = -EINVAL; | ||
288 | unsigned char *prng_key; | 286 | unsigned char *prng_key; |
289 | 287 | ||
290 | spin_lock(&ctx->prng_lock); | 288 | spin_lock_bh(&ctx->prng_lock); |
291 | ctx->flags |= PRNG_NEED_RESET; | 289 | ctx->flags |= PRNG_NEED_RESET; |
292 | 290 | ||
293 | prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; | 291 | prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; |
@@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx, | |||
308 | memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); | 306 | memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); |
309 | memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); | 307 | memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); |
310 | 308 | ||
311 | if (ctx->tfm) | ||
312 | crypto_free_cipher(ctx->tfm); | ||
313 | |||
314 | ctx->tfm = crypto_alloc_cipher("aes", 0, 0); | ||
315 | if (IS_ERR(ctx->tfm)) { | ||
316 | dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", | ||
317 | ctx); | ||
318 | ctx->tfm = NULL; | ||
319 | goto out; | ||
320 | } | ||
321 | |||
322 | ctx->rand_data_valid = DEFAULT_BLK_SZ; | 309 | ctx->rand_data_valid = DEFAULT_BLK_SZ; |
323 | 310 | ||
324 | ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); | 311 | ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); |
325 | if (ret) { | 312 | if (ret) { |
326 | dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", | 313 | dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", |
327 | crypto_cipher_get_flags(ctx->tfm)); | 314 | crypto_cipher_get_flags(ctx->tfm)); |
328 | crypto_free_cipher(ctx->tfm); | ||
329 | goto out; | 315 | goto out; |
330 | } | 316 | } |
331 | 317 | ||
332 | rc = 0; | 318 | ret = 0; |
333 | ctx->flags &= ~PRNG_NEED_RESET; | 319 | ctx->flags &= ~PRNG_NEED_RESET; |
334 | out: | 320 | out: |
335 | spin_unlock(&ctx->prng_lock); | 321 | spin_unlock_bh(&ctx->prng_lock); |
336 | 322 | return ret; | |
337 | return rc; | ||
338 | |||
339 | } | 323 | } |
340 | 324 | ||
341 | static int cprng_init(struct crypto_tfm *tfm) | 325 | static int cprng_init(struct crypto_tfm *tfm) |
@@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm) | |||
343 | struct prng_context *ctx = crypto_tfm_ctx(tfm); | 327 | struct prng_context *ctx = crypto_tfm_ctx(tfm); |
344 | 328 | ||
345 | spin_lock_init(&ctx->prng_lock); | 329 | spin_lock_init(&ctx->prng_lock); |
330 | ctx->tfm = crypto_alloc_cipher("aes", 0, 0); | ||
331 | if (IS_ERR(ctx->tfm)) { | ||
332 | dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", | ||
333 | ctx); | ||
334 | return PTR_ERR(ctx->tfm); | ||
335 | } | ||
346 | 336 | ||
347 | if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) | 337 | if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) |
348 | return -EINVAL; | 338 | return -EINVAL; |
@@ -418,17 +408,10 @@ static struct crypto_alg rng_alg = { | |||
418 | /* Module initalization */ | 408 | /* Module initalization */ |
419 | static int __init prng_mod_init(void) | 409 | static int __init prng_mod_init(void) |
420 | { | 410 | { |
421 | int ret = 0; | ||
422 | |||
423 | if (fips_enabled) | 411 | if (fips_enabled) |
424 | rng_alg.cra_priority += 200; | 412 | rng_alg.cra_priority += 200; |
425 | 413 | ||
426 | ret = crypto_register_alg(&rng_alg); | 414 | return crypto_register_alg(&rng_alg); |
427 | |||
428 | if (ret) | ||
429 | goto out; | ||
430 | out: | ||
431 | return 0; | ||
432 | } | 415 | } |
433 | 416 | ||
434 | static void __exit prng_mod_fini(void) | 417 | static void __exit prng_mod_fini(void) |
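Editor's note, for orientation only (not part of this patch): once prng_mod_init() registers rng_alg above, a kernel consumer reaches the generator through the generic RNG API in <crypto/rng.h>. The sketch below is a hedged illustration of that API; the function name example_use_ansi_cprng() is hypothetical, and a real caller would supply properly sourced key/V/DT seed material rather than get_random_bytes().

/* Illustrative only -- a minimal user of the registered "ansi_cprng"
 * instance via the generic crypto RNG interface.
 */
#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/slab.h>

static int example_use_ansi_cprng(u8 *out, unsigned int len)
{
	struct crypto_rng *rng;
	u8 *seed;
	int seedsize, err;

	rng = crypto_alloc_rng("ansi_cprng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* ansi_cprng expects key material plus V/DT as its seed. */
	seedsize = crypto_rng_seedsize(rng);
	seed = kmalloc(seedsize, GFP_KERNEL);
	if (!seed) {
		err = -ENOMEM;
		goto out_free_rng;
	}
	get_random_bytes(seed, seedsize);	/* illustration only */

	err = crypto_rng_reset(rng, seed, seedsize);
	if (err)
		goto out_free_seed;

	err = crypto_rng_get_bytes(rng, out, len);
	if (err > 0)		/* older API may return the byte count */
		err = 0;

out_free_seed:
	kfree(seed);
out_free_rng:
	crypto_free_rng(rng);
	return err;
}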
diff --git a/crypto/api.c b/crypto/api.c index d5944f92b416..798526d90538 100644 --- a/crypto/api.c +++ b/crypto/api.c | |||
@@ -285,13 +285,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
285 | switch (crypto_tfm_alg_type(tfm)) { | 285 | switch (crypto_tfm_alg_type(tfm)) { |
286 | case CRYPTO_ALG_TYPE_CIPHER: | 286 | case CRYPTO_ALG_TYPE_CIPHER: |
287 | return crypto_init_cipher_ops(tfm); | 287 | return crypto_init_cipher_ops(tfm); |
288 | |||
289 | case CRYPTO_ALG_TYPE_DIGEST: | ||
290 | if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != | ||
291 | CRYPTO_ALG_TYPE_HASH_MASK) | ||
292 | return crypto_init_digest_ops_async(tfm); | ||
293 | else | ||
294 | return crypto_init_digest_ops(tfm); | ||
295 | 288 | ||
296 | case CRYPTO_ALG_TYPE_COMPRESS: | 289 | case CRYPTO_ALG_TYPE_COMPRESS: |
297 | return crypto_init_compress_ops(tfm); | 290 | return crypto_init_compress_ops(tfm); |
@@ -318,11 +311,7 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) | |||
318 | case CRYPTO_ALG_TYPE_CIPHER: | 311 | case CRYPTO_ALG_TYPE_CIPHER: |
319 | crypto_exit_cipher_ops(tfm); | 312 | crypto_exit_cipher_ops(tfm); |
320 | break; | 313 | break; |
321 | 314 | ||
322 | case CRYPTO_ALG_TYPE_DIGEST: | ||
323 | crypto_exit_digest_ops(tfm); | ||
324 | break; | ||
325 | |||
326 | case CRYPTO_ALG_TYPE_COMPRESS: | 315 | case CRYPTO_ALG_TYPE_COMPRESS: |
327 | crypto_exit_compress_ops(tfm); | 316 | crypto_exit_compress_ops(tfm); |
328 | break; | 317 | break; |
@@ -349,11 +338,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) | |||
349 | case CRYPTO_ALG_TYPE_CIPHER: | 338 | case CRYPTO_ALG_TYPE_CIPHER: |
350 | len += crypto_cipher_ctxsize(alg); | 339 | len += crypto_cipher_ctxsize(alg); |
351 | break; | 340 | break; |
352 | 341 | ||
353 | case CRYPTO_ALG_TYPE_DIGEST: | ||
354 | len += crypto_digest_ctxsize(alg); | ||
355 | break; | ||
356 | |||
357 | case CRYPTO_ALG_TYPE_COMPRESS: | 342 | case CRYPTO_ALG_TYPE_COMPRESS: |
358 | len += crypto_compress_ctxsize(alg); | 343 | len += crypto_compress_ctxsize(alg); |
359 | break; | 344 | break; |
@@ -472,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, | |||
472 | int err = -ENOMEM; | 457 | int err = -ENOMEM; |
473 | 458 | ||
474 | tfmsize = frontend->tfmsize; | 459 | tfmsize = frontend->tfmsize; |
475 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); | 460 | total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); |
476 | 461 | ||
477 | mem = kzalloc(total, GFP_KERNEL); | 462 | mem = kzalloc(total, GFP_KERNEL); |
478 | if (mem == NULL) | 463 | if (mem == NULL) |
@@ -481,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, | |||
481 | tfm = (struct crypto_tfm *)(mem + tfmsize); | 466 | tfm = (struct crypto_tfm *)(mem + tfmsize); |
482 | tfm->__crt_alg = alg; | 467 | tfm->__crt_alg = alg; |
483 | 468 | ||
484 | err = frontend->init_tfm(tfm, frontend); | 469 | err = frontend->init_tfm(tfm); |
485 | if (err) | 470 | if (err) |
486 | goto out_free_tfm; | 471 | goto out_free_tfm; |
487 | 472 | ||
@@ -503,6 +488,27 @@ out: | |||
503 | } | 488 | } |
504 | EXPORT_SYMBOL_GPL(crypto_create_tfm); | 489 | EXPORT_SYMBOL_GPL(crypto_create_tfm); |
505 | 490 | ||
491 | struct crypto_alg *crypto_find_alg(const char *alg_name, | ||
492 | const struct crypto_type *frontend, | ||
493 | u32 type, u32 mask) | ||
494 | { | ||
495 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) = | ||
496 | crypto_alg_mod_lookup; | ||
497 | |||
498 | if (frontend) { | ||
499 | type &= frontend->maskclear; | ||
500 | mask &= frontend->maskclear; | ||
501 | type |= frontend->type; | ||
502 | mask |= frontend->maskset; | ||
503 | |||
504 | if (frontend->lookup) | ||
505 | lookup = frontend->lookup; | ||
506 | } | ||
507 | |||
508 | return lookup(alg_name, type, mask); | ||
509 | } | ||
510 | EXPORT_SYMBOL_GPL(crypto_find_alg); | ||
511 | |||
506 | /* | 512 | /* |
507 | * crypto_alloc_tfm - Locate algorithm and allocate transform | 513 | * crypto_alloc_tfm - Locate algorithm and allocate transform |
508 | * @alg_name: Name of algorithm | 514 | * @alg_name: Name of algorithm |
@@ -526,21 +532,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm); | |||
526 | void *crypto_alloc_tfm(const char *alg_name, | 532 | void *crypto_alloc_tfm(const char *alg_name, |
527 | const struct crypto_type *frontend, u32 type, u32 mask) | 533 | const struct crypto_type *frontend, u32 type, u32 mask) |
528 | { | 534 | { |
529 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | ||
530 | void *tfm; | 535 | void *tfm; |
531 | int err; | 536 | int err; |
532 | 537 | ||
533 | type &= frontend->maskclear; | ||
534 | mask &= frontend->maskclear; | ||
535 | type |= frontend->type; | ||
536 | mask |= frontend->maskset; | ||
537 | |||
538 | lookup = frontend->lookup ?: crypto_alg_mod_lookup; | ||
539 | |||
540 | for (;;) { | 538 | for (;;) { |
541 | struct crypto_alg *alg; | 539 | struct crypto_alg *alg; |
542 | 540 | ||
543 | alg = lookup(alg_name, type, mask); | 541 | alg = crypto_find_alg(alg_name, frontend, type, mask); |
544 | if (IS_ERR(alg)) { | 542 | if (IS_ERR(alg)) { |
545 | err = PTR_ERR(alg); | 543 | err = PTR_ERR(alg); |
546 | goto err; | 544 | goto err; |
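Editor's note (not part of this change): the new crypto_find_alg() consolidates the frontend type/mask fixups and the optional lookup override that crypto_alloc_tfm() used to open-code. As a hedged sketch of the intended call pattern, the helper below mirrors the retry-free core of crypto_alloc_tfm() above; example_alloc is an illustrative name, and the snippet assumes the crypto-internal declarations normally pulled in from crypto/internal.h.

/* Illustrative only: how a type frontend could locate an algorithm with
 * crypto_find_alg() and then instantiate a transform for it.
 */
#include <linux/err.h>
#include <linux/crypto.h>
/* plus the internal declarations of crypto_find_alg()/crypto_create_tfm() */

static void *example_alloc(const char *alg_name,
			   const struct crypto_type *frontend,
			   u32 type, u32 mask)
{
	struct crypto_alg *alg;
	void *tfm;

	alg = crypto_find_alg(alg_name, frontend, type, mask);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	tfm = crypto_create_tfm(alg, frontend);
	if (IS_ERR(tfm))
		crypto_mod_put(alg);	/* drop the module ref on failure */

	return tfm;
}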
diff --git a/crypto/authenc.c b/crypto/authenc.c index 5793b64c81a8..4d6f49a5daeb 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
@@ -23,24 +23,36 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | 25 | ||
26 | typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags); | ||
27 | |||
26 | struct authenc_instance_ctx { | 28 | struct authenc_instance_ctx { |
27 | struct crypto_spawn auth; | 29 | struct crypto_ahash_spawn auth; |
28 | struct crypto_skcipher_spawn enc; | 30 | struct crypto_skcipher_spawn enc; |
29 | }; | 31 | }; |
30 | 32 | ||
31 | struct crypto_authenc_ctx { | 33 | struct crypto_authenc_ctx { |
32 | spinlock_t auth_lock; | 34 | unsigned int reqoff; |
33 | struct crypto_hash *auth; | 35 | struct crypto_ahash *auth; |
34 | struct crypto_ablkcipher *enc; | 36 | struct crypto_ablkcipher *enc; |
35 | }; | 37 | }; |
36 | 38 | ||
39 | struct authenc_request_ctx { | ||
40 | unsigned int cryptlen; | ||
41 | struct scatterlist *sg; | ||
42 | struct scatterlist asg[2]; | ||
43 | struct scatterlist cipher[2]; | ||
44 | crypto_completion_t complete; | ||
45 | crypto_completion_t update_complete; | ||
46 | char tail[]; | ||
47 | }; | ||
48 | |||
37 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | 49 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, |
38 | unsigned int keylen) | 50 | unsigned int keylen) |
39 | { | 51 | { |
40 | unsigned int authkeylen; | 52 | unsigned int authkeylen; |
41 | unsigned int enckeylen; | 53 | unsigned int enckeylen; |
42 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 54 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
43 | struct crypto_hash *auth = ctx->auth; | 55 | struct crypto_ahash *auth = ctx->auth; |
44 | struct crypto_ablkcipher *enc = ctx->enc; | 56 | struct crypto_ablkcipher *enc = ctx->enc; |
45 | struct rtattr *rta = (void *)key; | 57 | struct rtattr *rta = (void *)key; |
46 | struct crypto_authenc_key_param *param; | 58 | struct crypto_authenc_key_param *param; |
@@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
64 | 76 | ||
65 | authkeylen = keylen - enckeylen; | 77 | authkeylen = keylen - enckeylen; |
66 | 78 | ||
67 | crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | 79 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
68 | crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) & | 80 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & |
69 | CRYPTO_TFM_REQ_MASK); | 81 | CRYPTO_TFM_REQ_MASK); |
70 | err = crypto_hash_setkey(auth, key, authkeylen); | 82 | err = crypto_ahash_setkey(auth, key, authkeylen); |
71 | crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) & | 83 | crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & |
72 | CRYPTO_TFM_RES_MASK); | 84 | CRYPTO_TFM_RES_MASK); |
73 | 85 | ||
74 | if (err) | 86 | if (err) |
@@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, | |||
103 | sg_mark_end(head); | 115 | sg_mark_end(head); |
104 | } | 116 | } |
105 | 117 | ||
106 | static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, | 118 | static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, |
107 | struct scatterlist *cipher, | 119 | int err) |
108 | unsigned int cryptlen) | 120 | { |
121 | struct aead_request *req = areq->data; | ||
122 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
123 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
124 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
125 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
126 | |||
127 | if (err) | ||
128 | goto out; | ||
129 | |||
130 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
131 | areq_ctx->cryptlen); | ||
132 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
133 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
134 | areq_ctx->complete, req); | ||
135 | |||
136 | err = crypto_ahash_finup(ahreq); | ||
137 | if (err) | ||
138 | goto out; | ||
139 | |||
140 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
141 | areq_ctx->cryptlen, | ||
142 | crypto_aead_authsize(authenc), 1); | ||
143 | |||
144 | out: | ||
145 | aead_request_complete(req, err); | ||
146 | } | ||
147 | |||
148 | static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) | ||
149 | { | ||
150 | struct aead_request *req = areq->data; | ||
151 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
152 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
153 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
154 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
155 | |||
156 | if (err) | ||
157 | goto out; | ||
158 | |||
159 | scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, | ||
160 | areq_ctx->cryptlen, | ||
161 | crypto_aead_authsize(authenc), 1); | ||
162 | |||
163 | out: | ||
164 | aead_request_complete(req, err); | ||
165 | } | ||
166 | |||
167 | static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | ||
168 | int err) | ||
109 | { | 169 | { |
170 | u8 *ihash; | ||
171 | unsigned int authsize; | ||
172 | struct ablkcipher_request *abreq; | ||
173 | struct aead_request *req = areq->data; | ||
110 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 174 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
111 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | 175 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); |
112 | struct crypto_hash *auth = ctx->auth; | 176 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); |
113 | struct hash_desc desc = { | 177 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); |
114 | .tfm = auth, | 178 | |
115 | .flags = aead_request_flags(req) & flags, | 179 | if (err) |
116 | }; | 180 | goto out; |
117 | u8 *hash = aead_request_ctx(req); | 181 | |
182 | ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, | ||
183 | areq_ctx->cryptlen); | ||
184 | ahash_request_set_callback(ahreq, aead_request_flags(req) & | ||
185 | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
186 | areq_ctx->complete, req); | ||
187 | |||
188 | err = crypto_ahash_finup(ahreq); | ||
189 | if (err) | ||
190 | goto out; | ||
191 | |||
192 | authsize = crypto_aead_authsize(authenc); | ||
193 | ihash = ahreq->result + authsize; | ||
194 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
195 | authsize, 0); | ||
196 | |||
197 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; | ||
198 | if (err) | ||
199 | goto out; | ||
200 | |||
201 | abreq = aead_request_ctx(req); | ||
202 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
203 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
204 | req->base.complete, req->base.data); | ||
205 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
206 | req->cryptlen, req->iv); | ||
207 | |||
208 | err = crypto_ablkcipher_decrypt(abreq); | ||
209 | |||
210 | out: | ||
211 | aead_request_complete(req, err); | ||
212 | } | ||
213 | |||
214 | static void authenc_verify_ahash_done(struct crypto_async_request *areq, | ||
215 | int err) | ||
216 | { | ||
217 | u8 *ihash; | ||
218 | unsigned int authsize; | ||
219 | struct ablkcipher_request *abreq; | ||
220 | struct aead_request *req = areq->data; | ||
221 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
222 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
223 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
224 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
225 | |||
226 | if (err) | ||
227 | goto out; | ||
228 | |||
229 | authsize = crypto_aead_authsize(authenc); | ||
230 | ihash = ahreq->result + authsize; | ||
231 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | ||
232 | authsize, 0); | ||
233 | |||
234 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; | ||
235 | if (err) | ||
236 | goto out; | ||
237 | |||
238 | abreq = aead_request_ctx(req); | ||
239 | ablkcipher_request_set_tfm(abreq, ctx->enc); | ||
240 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
241 | req->base.complete, req->base.data); | ||
242 | ablkcipher_request_set_crypt(abreq, req->src, req->dst, | ||
243 | req->cryptlen, req->iv); | ||
244 | |||
245 | err = crypto_ablkcipher_decrypt(abreq); | ||
246 | |||
247 | out: | ||
248 | aead_request_complete(req, err); | ||
249 | } | ||
250 | |||
251 | static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) | ||
252 | { | ||
253 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
254 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
255 | struct crypto_ahash *auth = ctx->auth; | ||
256 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
257 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
258 | u8 *hash = areq_ctx->tail; | ||
118 | int err; | 259 | int err; |
119 | 260 | ||
120 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), | 261 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), |
121 | crypto_hash_alignmask(auth) + 1); | 262 | crypto_ahash_alignmask(auth) + 1); |
263 | |||
264 | ahash_request_set_tfm(ahreq, auth); | ||
122 | 265 | ||
123 | spin_lock_bh(&ctx->auth_lock); | 266 | err = crypto_ahash_init(ahreq); |
124 | err = crypto_hash_init(&desc); | ||
125 | if (err) | 267 | if (err) |
126 | goto auth_unlock; | 268 | return ERR_PTR(err); |
269 | |||
270 | ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); | ||
271 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
272 | areq_ctx->update_complete, req); | ||
127 | 273 | ||
128 | err = crypto_hash_update(&desc, req->assoc, req->assoclen); | 274 | err = crypto_ahash_update(ahreq); |
129 | if (err) | 275 | if (err) |
130 | goto auth_unlock; | 276 | return ERR_PTR(err); |
277 | |||
278 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | ||
279 | areq_ctx->cryptlen); | ||
280 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
281 | areq_ctx->complete, req); | ||
131 | 282 | ||
132 | err = crypto_hash_update(&desc, cipher, cryptlen); | 283 | err = crypto_ahash_finup(ahreq); |
133 | if (err) | 284 | if (err) |
134 | goto auth_unlock; | 285 | return ERR_PTR(err); |
135 | 286 | ||
136 | err = crypto_hash_final(&desc, hash); | 287 | return hash; |
137 | auth_unlock: | 288 | } |
138 | spin_unlock_bh(&ctx->auth_lock); | 289 | |
290 | static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) | ||
291 | { | ||
292 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | ||
293 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
294 | struct crypto_ahash *auth = ctx->auth; | ||
295 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
296 | struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); | ||
297 | u8 *hash = areq_ctx->tail; | ||
298 | int err; | ||
139 | 299 | ||
300 | hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), | ||
301 | crypto_ahash_alignmask(auth) + 1); | ||
302 | |||
303 | ahash_request_set_tfm(ahreq, auth); | ||
304 | ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, | ||
305 | areq_ctx->cryptlen); | ||
306 | ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, | ||
307 | areq_ctx->complete, req); | ||
308 | |||
309 | err = crypto_ahash_digest(ahreq); | ||
140 | if (err) | 310 | if (err) |
141 | return ERR_PTR(err); | 311 | return ERR_PTR(err); |
142 | 312 | ||
@@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
147 | unsigned int flags) | 317 | unsigned int flags) |
148 | { | 318 | { |
149 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 319 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
320 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
150 | struct scatterlist *dst = req->dst; | 321 | struct scatterlist *dst = req->dst; |
151 | struct scatterlist cipher[2]; | 322 | struct scatterlist *assoc = req->assoc; |
152 | struct page *dstp; | 323 | struct scatterlist *cipher = areq_ctx->cipher; |
324 | struct scatterlist *asg = areq_ctx->asg; | ||
153 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 325 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
154 | unsigned int cryptlen; | 326 | unsigned int cryptlen = req->cryptlen; |
327 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
328 | struct page *dstp; | ||
155 | u8 *vdst; | 329 | u8 *vdst; |
156 | u8 *hash; | 330 | u8 *hash; |
157 | 331 | ||
@@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, | |||
163 | sg_set_buf(cipher, iv, ivsize); | 337 | sg_set_buf(cipher, iv, ivsize); |
164 | authenc_chain(cipher, dst, vdst == iv + ivsize); | 338 | authenc_chain(cipher, dst, vdst == iv + ivsize); |
165 | dst = cipher; | 339 | dst = cipher; |
340 | cryptlen += ivsize; | ||
166 | } | 341 | } |
167 | 342 | ||
168 | cryptlen = req->cryptlen + ivsize; | 343 | if (sg_is_last(assoc)) { |
169 | hash = crypto_authenc_hash(req, flags, dst, cryptlen); | 344 | authenc_ahash_fn = crypto_authenc_ahash; |
345 | sg_init_table(asg, 2); | ||
346 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
347 | authenc_chain(asg, dst, 0); | ||
348 | dst = asg; | ||
349 | cryptlen += req->assoclen; | ||
350 | } | ||
351 | |||
352 | areq_ctx->cryptlen = cryptlen; | ||
353 | areq_ctx->sg = dst; | ||
354 | |||
355 | areq_ctx->complete = authenc_geniv_ahash_done; | ||
356 | areq_ctx->update_complete = authenc_geniv_ahash_update_done; | ||
357 | |||
358 | hash = authenc_ahash_fn(req, flags); | ||
170 | if (IS_ERR(hash)) | 359 | if (IS_ERR(hash)) |
171 | return PTR_ERR(hash); | 360 | return PTR_ERR(hash); |
172 | 361 | ||
@@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) | |||
256 | } | 445 | } |
257 | 446 | ||
258 | static int crypto_authenc_verify(struct aead_request *req, | 447 | static int crypto_authenc_verify(struct aead_request *req, |
259 | struct scatterlist *cipher, | 448 | authenc_ahash_t authenc_ahash_fn) |
260 | unsigned int cryptlen) | ||
261 | { | 449 | { |
262 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 450 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
451 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
263 | u8 *ohash; | 452 | u8 *ohash; |
264 | u8 *ihash; | 453 | u8 *ihash; |
265 | unsigned int authsize; | 454 | unsigned int authsize; |
266 | 455 | ||
267 | ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher, | 456 | areq_ctx->complete = authenc_verify_ahash_done; |
268 | cryptlen); | 457 | areq_ctx->update_complete = authenc_verify_ahash_update_done; |
458 | |||
459 | ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); | ||
269 | if (IS_ERR(ohash)) | 460 | if (IS_ERR(ohash)) |
270 | return PTR_ERR(ohash); | 461 | return PTR_ERR(ohash); |
271 | 462 | ||
272 | authsize = crypto_aead_authsize(authenc); | 463 | authsize = crypto_aead_authsize(authenc); |
273 | ihash = ohash + authsize; | 464 | ihash = ohash + authsize; |
274 | scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0); | 465 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
466 | authsize, 0); | ||
275 | return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; | 467 | return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; |
276 | } | 468 | } |
277 | 469 | ||
@@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
279 | unsigned int cryptlen) | 471 | unsigned int cryptlen) |
280 | { | 472 | { |
281 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 473 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
474 | struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); | ||
282 | struct scatterlist *src = req->src; | 475 | struct scatterlist *src = req->src; |
283 | struct scatterlist cipher[2]; | 476 | struct scatterlist *assoc = req->assoc; |
284 | struct page *srcp; | 477 | struct scatterlist *cipher = areq_ctx->cipher; |
478 | struct scatterlist *asg = areq_ctx->asg; | ||
285 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 479 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
480 | authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; | ||
481 | struct page *srcp; | ||
286 | u8 *vsrc; | 482 | u8 *vsrc; |
287 | 483 | ||
288 | srcp = sg_page(src); | 484 | srcp = sg_page(src); |
@@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | |||
293 | sg_set_buf(cipher, iv, ivsize); | 489 | sg_set_buf(cipher, iv, ivsize); |
294 | authenc_chain(cipher, src, vsrc == iv + ivsize); | 490 | authenc_chain(cipher, src, vsrc == iv + ivsize); |
295 | src = cipher; | 491 | src = cipher; |
492 | cryptlen += ivsize; | ||
493 | } | ||
494 | |||
495 | if (sg_is_last(assoc)) { | ||
496 | authenc_ahash_fn = crypto_authenc_ahash; | ||
497 | sg_init_table(asg, 2); | ||
498 | sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); | ||
499 | authenc_chain(asg, src, 0); | ||
500 | src = asg; | ||
501 | cryptlen += req->assoclen; | ||
296 | } | 502 | } |
297 | 503 | ||
298 | return crypto_authenc_verify(req, src, cryptlen + ivsize); | 504 | areq_ctx->cryptlen = cryptlen; |
505 | areq_ctx->sg = src; | ||
506 | |||
507 | return crypto_authenc_verify(req, authenc_ahash_fn); | ||
299 | } | 508 | } |
300 | 509 | ||
301 | static int crypto_authenc_decrypt(struct aead_request *req) | 510 | static int crypto_authenc_decrypt(struct aead_request *req) |
@@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req) | |||
326 | 535 | ||
327 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) | 536 | static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) |
328 | { | 537 | { |
329 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 538 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
330 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); | 539 | struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); |
331 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 540 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
332 | struct crypto_hash *auth; | 541 | struct crypto_ahash *auth; |
333 | struct crypto_ablkcipher *enc; | 542 | struct crypto_ablkcipher *enc; |
334 | int err; | 543 | int err; |
335 | 544 | ||
336 | auth = crypto_spawn_hash(&ictx->auth); | 545 | auth = crypto_spawn_ahash(&ictx->auth); |
337 | if (IS_ERR(auth)) | 546 | if (IS_ERR(auth)) |
338 | return PTR_ERR(auth); | 547 | return PTR_ERR(auth); |
339 | 548 | ||
549 | ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + | ||
550 | crypto_ahash_alignmask(auth), | ||
551 | crypto_ahash_alignmask(auth) + 1); | ||
552 | |||
340 | enc = crypto_spawn_skcipher(&ictx->enc); | 553 | enc = crypto_spawn_skcipher(&ictx->enc); |
341 | err = PTR_ERR(enc); | 554 | err = PTR_ERR(enc); |
342 | if (IS_ERR(enc)) | 555 | if (IS_ERR(enc)) |
343 | goto err_free_hash; | 556 | goto err_free_ahash; |
344 | 557 | ||
345 | ctx->auth = auth; | 558 | ctx->auth = auth; |
346 | ctx->enc = enc; | 559 | ctx->enc = enc; |
560 | |||
347 | tfm->crt_aead.reqsize = max_t(unsigned int, | 561 | tfm->crt_aead.reqsize = max_t(unsigned int, |
348 | (crypto_hash_alignmask(auth) & | 562 | crypto_ahash_reqsize(auth) + ctx->reqoff + |
349 | ~(crypto_tfm_ctx_alignment() - 1)) + | 563 | sizeof(struct authenc_request_ctx) + |
350 | crypto_hash_digestsize(auth) * 2, | 564 | sizeof(struct ahash_request), |
351 | sizeof(struct skcipher_givcrypt_request) + | 565 | sizeof(struct skcipher_givcrypt_request) + |
352 | crypto_ablkcipher_reqsize(enc) + | 566 | crypto_ablkcipher_reqsize(enc) + |
353 | crypto_ablkcipher_ivsize(enc)); | 567 | crypto_ablkcipher_ivsize(enc)); |
354 | |||
355 | spin_lock_init(&ctx->auth_lock); | ||
356 | 568 | ||
357 | return 0; | 569 | return 0; |
358 | 570 | ||
359 | err_free_hash: | 571 | err_free_ahash: |
360 | crypto_free_hash(auth); | 572 | crypto_free_ahash(auth); |
361 | return err; | 573 | return err; |
362 | } | 574 | } |
363 | 575 | ||
@@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) | |||
365 | { | 577 | { |
366 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); | 578 | struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); |
367 | 579 | ||
368 | crypto_free_hash(ctx->auth); | 580 | crypto_free_ahash(ctx->auth); |
369 | crypto_free_ablkcipher(ctx->enc); | 581 | crypto_free_ablkcipher(ctx->enc); |
370 | } | 582 | } |
371 | 583 | ||
@@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
373 | { | 585 | { |
374 | struct crypto_attr_type *algt; | 586 | struct crypto_attr_type *algt; |
375 | struct crypto_instance *inst; | 587 | struct crypto_instance *inst; |
376 | struct crypto_alg *auth; | 588 | struct hash_alg_common *auth; |
589 | struct crypto_alg *auth_base; | ||
377 | struct crypto_alg *enc; | 590 | struct crypto_alg *enc; |
378 | struct authenc_instance_ctx *ctx; | 591 | struct authenc_instance_ctx *ctx; |
379 | const char *enc_name; | 592 | const char *enc_name; |
@@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
387 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 600 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
388 | return ERR_PTR(-EINVAL); | 601 | return ERR_PTR(-EINVAL); |
389 | 602 | ||
390 | auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, | 603 | auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, |
391 | CRYPTO_ALG_TYPE_HASH_MASK); | 604 | CRYPTO_ALG_TYPE_AHASH_MASK); |
392 | if (IS_ERR(auth)) | 605 | if (IS_ERR(auth)) |
393 | return ERR_PTR(PTR_ERR(auth)); | 606 | return ERR_PTR(PTR_ERR(auth)); |
394 | 607 | ||
608 | auth_base = &auth->base; | ||
609 | |||
395 | enc_name = crypto_attr_alg_name(tb[2]); | 610 | enc_name = crypto_attr_alg_name(tb[2]); |
396 | err = PTR_ERR(enc_name); | 611 | err = PTR_ERR(enc_name); |
397 | if (IS_ERR(enc_name)) | 612 | if (IS_ERR(enc_name)) |
@@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
404 | 619 | ||
405 | ctx = crypto_instance_ctx(inst); | 620 | ctx = crypto_instance_ctx(inst); |
406 | 621 | ||
407 | err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); | 622 | err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); |
408 | if (err) | 623 | if (err) |
409 | goto err_free_inst; | 624 | goto err_free_inst; |
410 | 625 | ||
@@ -419,28 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
419 | 634 | ||
420 | err = -ENAMETOOLONG; | 635 | err = -ENAMETOOLONG; |
421 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | 636 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, |
422 | "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= | 637 | "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= |
423 | CRYPTO_MAX_ALG_NAME) | 638 | CRYPTO_MAX_ALG_NAME) |
424 | goto err_drop_enc; | 639 | goto err_drop_enc; |
425 | 640 | ||
426 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 641 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
427 | "authenc(%s,%s)", auth->cra_driver_name, | 642 | "authenc(%s,%s)", auth_base->cra_driver_name, |
428 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 643 | enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
429 | goto err_drop_enc; | 644 | goto err_drop_enc; |
430 | 645 | ||
431 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; | 646 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; |
432 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; | 647 | inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; |
433 | inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; | 648 | inst->alg.cra_priority = enc->cra_priority * |
649 | 10 + auth_base->cra_priority; | ||
434 | inst->alg.cra_blocksize = enc->cra_blocksize; | 650 | inst->alg.cra_blocksize = enc->cra_blocksize; |
435 | inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; | 651 | inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; |
436 | inst->alg.cra_type = &crypto_aead_type; | 652 | inst->alg.cra_type = &crypto_aead_type; |
437 | 653 | ||
438 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; | 654 | inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; |
439 | inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? | 655 | inst->alg.cra_aead.maxauthsize = auth->digestsize; |
440 | auth->cra_hash.digestsize : | ||
441 | auth->cra_type ? | ||
442 | __crypto_shash_alg(auth)->digestsize : | ||
443 | auth->cra_digest.dia_digestsize; | ||
444 | 656 | ||
445 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); | 657 | inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); |
446 | 658 | ||
@@ -453,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) | |||
453 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; | 665 | inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; |
454 | 666 | ||
455 | out: | 667 | out: |
456 | crypto_mod_put(auth); | 668 | crypto_mod_put(auth_base); |
457 | return inst; | 669 | return inst; |
458 | 670 | ||
459 | err_drop_enc: | 671 | err_drop_enc: |
460 | crypto_drop_skcipher(&ctx->enc); | 672 | crypto_drop_skcipher(&ctx->enc); |
461 | err_drop_auth: | 673 | err_drop_auth: |
462 | crypto_drop_spawn(&ctx->auth); | 674 | crypto_drop_ahash(&ctx->auth); |
463 | err_free_inst: | 675 | err_free_inst: |
464 | kfree(inst); | 676 | kfree(inst); |
465 | out_put_auth: | 677 | out_put_auth: |
@@ -472,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst) | |||
472 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); | 684 | struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); |
473 | 685 | ||
474 | crypto_drop_skcipher(&ctx->enc); | 686 | crypto_drop_skcipher(&ctx->enc); |
475 | crypto_drop_spawn(&ctx->auth); | 687 | crypto_drop_ahash(&ctx->auth); |
476 | kfree(inst); | 688 | kfree(inst); |
477 | } | 689 | } |
478 | 690 | ||
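Editor's note (not part of this diff): crypto_authenc_setkey() above still expects the rtattr-wrapped key format, so a caller keying an "authenc(...)" AEAD builds a blob containing a CRYPTO_AUTHENC_KEYA_PARAM attribute (carrying the cipher key length) followed by the authentication key and then the cipher key, exactly as the IPsec code does. A hedged sketch follows; example_authenc_setkey is an illustrative helper name.

/* Illustrative only: construct the authenc key blob and hand it to the
 * AEAD's setkey, which is crypto_authenc_setkey() above.
 */
#include <crypto/authenc.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_authenc_setkey(struct crypto_aead *aead,
				  const u8 *authkey, unsigned int authkeylen,
				  const u8 *enckey, unsigned int enckeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	unsigned int keylen;
	u8 *key, *p;
	int err;

	keylen = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	/* rtattr header announcing the cipher key length */
	rta = (void *)key;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* auth key first, cipher key second, as setkey expects */
	p = key + RTA_SPACE(sizeof(*param));
	memcpy(p, authkey, authkeylen);
	memcpy(p + authkeylen, enckey, enckeylen);

	err = crypto_aead_setkey(aead, key, keylen);

	memset(key, 0, keylen);		/* wipe the key material */
	kfree(key);
	return err;
}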
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ae5fa99d5d36..35335825a4ef 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -39,6 +39,11 @@ struct cryptd_instance_ctx { | |||
39 | struct cryptd_queue *queue; | 39 | struct cryptd_queue *queue; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct hashd_instance_ctx { | ||
43 | struct crypto_shash_spawn spawn; | ||
44 | struct cryptd_queue *queue; | ||
45 | }; | ||
46 | |||
42 | struct cryptd_blkcipher_ctx { | 47 | struct cryptd_blkcipher_ctx { |
43 | struct crypto_blkcipher *child; | 48 | struct crypto_blkcipher *child; |
44 | }; | 49 | }; |
@@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx { | |||
48 | }; | 53 | }; |
49 | 54 | ||
50 | struct cryptd_hash_ctx { | 55 | struct cryptd_hash_ctx { |
51 | struct crypto_hash *child; | 56 | struct crypto_shash *child; |
52 | }; | 57 | }; |
53 | 58 | ||
54 | struct cryptd_hash_request_ctx { | 59 | struct cryptd_hash_request_ctx { |
55 | crypto_completion_t complete; | 60 | crypto_completion_t complete; |
61 | struct shash_desc desc; | ||
56 | }; | 62 | }; |
57 | 63 | ||
58 | static void cryptd_queue_worker(struct work_struct *work); | 64 | static void cryptd_queue_worker(struct work_struct *work); |
@@ -249,32 +255,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | |||
249 | crypto_free_blkcipher(ctx->child); | 255 | crypto_free_blkcipher(ctx->child); |
250 | } | 256 | } |
251 | 257 | ||
252 | static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | 258 | static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
253 | struct cryptd_queue *queue) | 259 | unsigned int tail) |
254 | { | 260 | { |
261 | char *p; | ||
255 | struct crypto_instance *inst; | 262 | struct crypto_instance *inst; |
256 | struct cryptd_instance_ctx *ctx; | ||
257 | int err; | 263 | int err; |
258 | 264 | ||
259 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 265 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); |
260 | if (!inst) { | 266 | if (!p) |
261 | inst = ERR_PTR(-ENOMEM); | 267 | return ERR_PTR(-ENOMEM); |
262 | goto out; | 268 | |
263 | } | 269 | inst = (void *)(p + head); |
264 | 270 | ||
265 | err = -ENAMETOOLONG; | 271 | err = -ENAMETOOLONG; |
266 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 272 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
267 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | 273 | "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) |
268 | goto out_free_inst; | 274 | goto out_free_inst; |
269 | 275 | ||
270 | ctx = crypto_instance_ctx(inst); | ||
271 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
272 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
273 | if (err) | ||
274 | goto out_free_inst; | ||
275 | |||
276 | ctx->queue = queue; | ||
277 | |||
278 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | 276 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); |
279 | 277 | ||
280 | inst->alg.cra_priority = alg->cra_priority + 50; | 278 | inst->alg.cra_priority = alg->cra_priority + 50; |
@@ -282,29 +280,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, | |||
282 | inst->alg.cra_alignmask = alg->cra_alignmask; | 280 | inst->alg.cra_alignmask = alg->cra_alignmask; |
283 | 281 | ||
284 | out: | 282 | out: |
285 | return inst; | 283 | return p; |
286 | 284 | ||
287 | out_free_inst: | 285 | out_free_inst: |
288 | kfree(inst); | 286 | kfree(p); |
289 | inst = ERR_PTR(err); | 287 | p = ERR_PTR(err); |
290 | goto out; | 288 | goto out; |
291 | } | 289 | } |
292 | 290 | ||
293 | static struct crypto_instance *cryptd_alloc_blkcipher( | 291 | static int cryptd_create_blkcipher(struct crypto_template *tmpl, |
294 | struct rtattr **tb, struct cryptd_queue *queue) | 292 | struct rtattr **tb, |
293 | struct cryptd_queue *queue) | ||
295 | { | 294 | { |
295 | struct cryptd_instance_ctx *ctx; | ||
296 | struct crypto_instance *inst; | 296 | struct crypto_instance *inst; |
297 | struct crypto_alg *alg; | 297 | struct crypto_alg *alg; |
298 | int err; | ||
298 | 299 | ||
299 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, | 300 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, |
300 | CRYPTO_ALG_TYPE_MASK); | 301 | CRYPTO_ALG_TYPE_MASK); |
301 | if (IS_ERR(alg)) | 302 | if (IS_ERR(alg)) |
302 | return ERR_CAST(alg); | 303 | return PTR_ERR(alg); |
303 | 304 | ||
304 | inst = cryptd_alloc_instance(alg, queue); | 305 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); |
306 | err = PTR_ERR(inst); | ||
305 | if (IS_ERR(inst)) | 307 | if (IS_ERR(inst)) |
306 | goto out_put_alg; | 308 | goto out_put_alg; |
307 | 309 | ||
310 | ctx = crypto_instance_ctx(inst); | ||
311 | ctx->queue = queue; | ||
312 | |||
313 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | ||
314 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | ||
315 | if (err) | ||
316 | goto out_free_inst; | ||
317 | |||
308 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | 318 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
309 | inst->alg.cra_type = &crypto_ablkcipher_type; | 319 | inst->alg.cra_type = &crypto_ablkcipher_type; |
310 | 320 | ||
@@ -323,26 +333,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher( | |||
323 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; | 333 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; |
324 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; | 334 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; |
325 | 335 | ||
336 | err = crypto_register_instance(tmpl, inst); | ||
337 | if (err) { | ||
338 | crypto_drop_spawn(&ctx->spawn); | ||
339 | out_free_inst: | ||
340 | kfree(inst); | ||
341 | } | ||
342 | |||
326 | out_put_alg: | 343 | out_put_alg: |
327 | crypto_mod_put(alg); | 344 | crypto_mod_put(alg); |
328 | return inst; | 345 | return err; |
329 | } | 346 | } |
330 | 347 | ||
331 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) | 348 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
332 | { | 349 | { |
333 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | 350 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); |
334 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | 351 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
335 | struct crypto_spawn *spawn = &ictx->spawn; | 352 | struct crypto_shash_spawn *spawn = &ictx->spawn; |
336 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 353 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
337 | struct crypto_hash *cipher; | 354 | struct crypto_shash *hash; |
338 | 355 | ||
339 | cipher = crypto_spawn_hash(spawn); | 356 | hash = crypto_spawn_shash(spawn); |
340 | if (IS_ERR(cipher)) | 357 | if (IS_ERR(hash)) |
341 | return PTR_ERR(cipher); | 358 | return PTR_ERR(hash); |
342 | 359 | ||
343 | ctx->child = cipher; | 360 | ctx->child = hash; |
344 | tfm->crt_ahash.reqsize = | 361 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
345 | sizeof(struct cryptd_hash_request_ctx); | 362 | sizeof(struct cryptd_hash_request_ctx) + |
363 | crypto_shash_descsize(hash)); | ||
346 | return 0; | 364 | return 0; |
347 | } | 365 | } |
348 | 366 | ||
@@ -350,22 +368,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | |||
350 | { | 368 | { |
351 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 369 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
352 | 370 | ||
353 | crypto_free_hash(ctx->child); | 371 | crypto_free_shash(ctx->child); |
354 | } | 372 | } |
355 | 373 | ||
356 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | 374 | static int cryptd_hash_setkey(struct crypto_ahash *parent, |
357 | const u8 *key, unsigned int keylen) | 375 | const u8 *key, unsigned int keylen) |
358 | { | 376 | { |
359 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | 377 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); |
360 | struct crypto_hash *child = ctx->child; | 378 | struct crypto_shash *child = ctx->child; |
361 | int err; | 379 | int err; |
362 | 380 | ||
363 | crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); | 381 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
364 | crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & | 382 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & |
365 | CRYPTO_TFM_REQ_MASK); | 383 | CRYPTO_TFM_REQ_MASK); |
366 | err = crypto_hash_setkey(child, key, keylen); | 384 | err = crypto_shash_setkey(child, key, keylen); |
367 | crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & | 385 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & |
368 | CRYPTO_TFM_RES_MASK); | 386 | CRYPTO_TFM_RES_MASK); |
369 | return err; | 387 | return err; |
370 | } | 388 | } |
371 | 389 | ||
@@ -385,21 +403,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req, | |||
385 | 403 | ||
386 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) | 404 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) |
387 | { | 405 | { |
388 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 406 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
389 | struct crypto_hash *child = ctx->child; | 407 | struct crypto_shash *child = ctx->child; |
390 | struct ahash_request *req = ahash_request_cast(req_async); | 408 | struct ahash_request *req = ahash_request_cast(req_async); |
391 | struct cryptd_hash_request_ctx *rctx; | 409 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
392 | struct hash_desc desc; | 410 | struct shash_desc *desc = &rctx->desc; |
393 | |||
394 | rctx = ahash_request_ctx(req); | ||
395 | 411 | ||
396 | if (unlikely(err == -EINPROGRESS)) | 412 | if (unlikely(err == -EINPROGRESS)) |
397 | goto out; | 413 | goto out; |
398 | 414 | ||
399 | desc.tfm = child; | 415 | desc->tfm = child; |
400 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 416 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
401 | 417 | ||
402 | err = crypto_hash_crt(child)->init(&desc); | 418 | err = crypto_shash_init(desc); |
403 | 419 | ||
404 | req->base.complete = rctx->complete; | 420 | req->base.complete = rctx->complete; |
405 | 421 | ||
@@ -416,23 +432,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req) | |||
416 | 432 | ||
417 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) | 433 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) |
418 | { | 434 | { |
419 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 435 | struct ahash_request *req = ahash_request_cast(req_async); |
420 | struct crypto_hash *child = ctx->child; | ||
421 | struct ahash_request *req = ahash_request_cast(req_async); | ||
422 | struct cryptd_hash_request_ctx *rctx; | 436 | struct cryptd_hash_request_ctx *rctx; |
423 | struct hash_desc desc; | ||
424 | 437 | ||
425 | rctx = ahash_request_ctx(req); | 438 | rctx = ahash_request_ctx(req); |
426 | 439 | ||
427 | if (unlikely(err == -EINPROGRESS)) | 440 | if (unlikely(err == -EINPROGRESS)) |
428 | goto out; | 441 | goto out; |
429 | 442 | ||
430 | desc.tfm = child; | 443 | err = shash_ahash_update(req, &rctx->desc); |
431 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
432 | |||
433 | err = crypto_hash_crt(child)->update(&desc, | ||
434 | req->src, | ||
435 | req->nbytes); | ||
436 | 444 | ||
437 | req->base.complete = rctx->complete; | 445 | req->base.complete = rctx->complete; |
438 | 446 | ||
@@ -449,21 +457,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req) | |||
449 | 457 | ||
450 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) | 458 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) |
451 | { | 459 | { |
452 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 460 | struct ahash_request *req = ahash_request_cast(req_async); |
453 | struct crypto_hash *child = ctx->child; | 461 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
454 | struct ahash_request *req = ahash_request_cast(req_async); | ||
455 | struct cryptd_hash_request_ctx *rctx; | ||
456 | struct hash_desc desc; | ||
457 | |||
458 | rctx = ahash_request_ctx(req); | ||
459 | 462 | ||
460 | if (unlikely(err == -EINPROGRESS)) | 463 | if (unlikely(err == -EINPROGRESS)) |
461 | goto out; | 464 | goto out; |
462 | 465 | ||
463 | desc.tfm = child; | 466 | err = crypto_shash_final(&rctx->desc, req->result); |
464 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
465 | |||
466 | err = crypto_hash_crt(child)->final(&desc, req->result); | ||
467 | 467 | ||
468 | req->base.complete = rctx->complete; | 468 | req->base.complete = rctx->complete; |
469 | 469 | ||
@@ -478,26 +478,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req) | |||
478 | return cryptd_hash_enqueue(req, cryptd_hash_final); | 478 | return cryptd_hash_enqueue(req, cryptd_hash_final); |
479 | } | 479 | } |
480 | 480 | ||
481 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | 481 | static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) |
482 | { | 482 | { |
483 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | 483 | struct ahash_request *req = ahash_request_cast(req_async); |
484 | struct crypto_hash *child = ctx->child; | 484 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
485 | struct ahash_request *req = ahash_request_cast(req_async); | ||
486 | struct cryptd_hash_request_ctx *rctx; | ||
487 | struct hash_desc desc; | ||
488 | 485 | ||
489 | rctx = ahash_request_ctx(req); | 486 | if (unlikely(err == -EINPROGRESS)) |
487 | goto out; | ||
488 | |||
489 | err = shash_ahash_finup(req, &rctx->desc); | ||
490 | |||
491 | req->base.complete = rctx->complete; | ||
492 | |||
493 | out: | ||
494 | local_bh_disable(); | ||
495 | rctx->complete(&req->base, err); | ||
496 | local_bh_enable(); | ||
497 | } | ||
498 | |||
499 | static int cryptd_hash_finup_enqueue(struct ahash_request *req) | ||
500 | { | ||
501 | return cryptd_hash_enqueue(req, cryptd_hash_finup); | ||
502 | } | ||
503 | |||
504 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) | ||
505 | { | ||
506 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); | ||
507 | struct crypto_shash *child = ctx->child; | ||
508 | struct ahash_request *req = ahash_request_cast(req_async); | ||
509 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
510 | struct shash_desc *desc = &rctx->desc; | ||
490 | 511 | ||
491 | if (unlikely(err == -EINPROGRESS)) | 512 | if (unlikely(err == -EINPROGRESS)) |
492 | goto out; | 513 | goto out; |
493 | 514 | ||
494 | desc.tfm = child; | 515 | desc->tfm = child; |
495 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 516 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
496 | 517 | ||
497 | err = crypto_hash_crt(child)->digest(&desc, | 518 | err = shash_ahash_digest(req, desc); |
498 | req->src, | ||
499 | req->nbytes, | ||
500 | req->result); | ||
501 | 519 | ||
502 | req->base.complete = rctx->complete; | 520 | req->base.complete = rctx->complete; |
503 | 521 | ||
@@ -512,64 +530,108 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req) | |||
512 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | 530 | return cryptd_hash_enqueue(req, cryptd_hash_digest); |
513 | } | 531 | } |
514 | 532 | ||
515 | static struct crypto_instance *cryptd_alloc_hash( | 533 | static int cryptd_hash_export(struct ahash_request *req, void *out) |
516 | struct rtattr **tb, struct cryptd_queue *queue) | ||
517 | { | 534 | { |
518 | struct crypto_instance *inst; | 535 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); |
536 | |||
537 | return crypto_shash_export(&rctx->desc, out); | ||
538 | } | ||
539 | |||
540 | static int cryptd_hash_import(struct ahash_request *req, const void *in) | ||
541 | { | ||
542 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | ||
543 | |||
544 | return crypto_shash_import(&rctx->desc, in); | ||
545 | } | ||
546 | |||
547 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | ||
548 | struct cryptd_queue *queue) | ||
549 | { | ||
550 | struct hashd_instance_ctx *ctx; | ||
551 | struct ahash_instance *inst; | ||
552 | struct shash_alg *salg; | ||
519 | struct crypto_alg *alg; | 553 | struct crypto_alg *alg; |
554 | int err; | ||
520 | 555 | ||
521 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 556 | salg = shash_attr_alg(tb[1], 0, 0); |
522 | CRYPTO_ALG_TYPE_HASH_MASK); | 557 | if (IS_ERR(salg)) |
523 | if (IS_ERR(alg)) | 558 | return PTR_ERR(salg); |
524 | return ERR_PTR(PTR_ERR(alg)); | ||
525 | 559 | ||
526 | inst = cryptd_alloc_instance(alg, queue); | 560 | alg = &salg->base; |
561 | inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), | ||
562 | sizeof(*ctx)); | ||
563 | err = PTR_ERR(inst); | ||
527 | if (IS_ERR(inst)) | 564 | if (IS_ERR(inst)) |
528 | goto out_put_alg; | 565 | goto out_put_alg; |
529 | 566 | ||
530 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; | 567 | ctx = ahash_instance_ctx(inst); |
531 | inst->alg.cra_type = &crypto_ahash_type; | 568 | ctx->queue = queue; |
532 | 569 | ||
533 | inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; | 570 | err = crypto_init_shash_spawn(&ctx->spawn, salg, |
534 | inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); | 571 | ahash_crypto_instance(inst)); |
572 | if (err) | ||
573 | goto out_free_inst; | ||
535 | 574 | ||
536 | inst->alg.cra_init = cryptd_hash_init_tfm; | 575 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; |
537 | inst->alg.cra_exit = cryptd_hash_exit_tfm; | ||
538 | 576 | ||
539 | inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; | 577 | inst->alg.halg.digestsize = salg->digestsize; |
540 | inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; | 578 | inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
541 | inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; | 579 | |
542 | inst->alg.cra_ahash.setkey = cryptd_hash_setkey; | 580 | inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; |
543 | inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; | 581 | inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; |
582 | |||
583 | inst->alg.init = cryptd_hash_init_enqueue; | ||
584 | inst->alg.update = cryptd_hash_update_enqueue; | ||
585 | inst->alg.final = cryptd_hash_final_enqueue; | ||
586 | inst->alg.finup = cryptd_hash_finup_enqueue; | ||
587 | inst->alg.export = cryptd_hash_export; | ||
588 | inst->alg.import = cryptd_hash_import; | ||
589 | inst->alg.setkey = cryptd_hash_setkey; | ||
590 | inst->alg.digest = cryptd_hash_digest_enqueue; | ||
591 | |||
592 | err = ahash_register_instance(tmpl, inst); | ||
593 | if (err) { | ||
594 | crypto_drop_shash(&ctx->spawn); | ||
595 | out_free_inst: | ||
596 | kfree(inst); | ||
597 | } | ||
544 | 598 | ||
545 | out_put_alg: | 599 | out_put_alg: |
546 | crypto_mod_put(alg); | 600 | crypto_mod_put(alg); |
547 | return inst; | 601 | return err; |
548 | } | 602 | } |
549 | 603 | ||
550 | static struct cryptd_queue queue; | 604 | static struct cryptd_queue queue; |
551 | 605 | ||
552 | static struct crypto_instance *cryptd_alloc(struct rtattr **tb) | 606 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
553 | { | 607 | { |
554 | struct crypto_attr_type *algt; | 608 | struct crypto_attr_type *algt; |
555 | 609 | ||
556 | algt = crypto_get_attr_type(tb); | 610 | algt = crypto_get_attr_type(tb); |
557 | if (IS_ERR(algt)) | 611 | if (IS_ERR(algt)) |
558 | return ERR_CAST(algt); | 612 | return PTR_ERR(algt); |
559 | 613 | ||
560 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 614 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
561 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 615 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
562 | return cryptd_alloc_blkcipher(tb, &queue); | 616 | return cryptd_create_blkcipher(tmpl, tb, &queue); |
563 | case CRYPTO_ALG_TYPE_DIGEST: | 617 | case CRYPTO_ALG_TYPE_DIGEST: |
564 | return cryptd_alloc_hash(tb, &queue); | 618 | return cryptd_create_hash(tmpl, tb, &queue); |
565 | } | 619 | } |
566 | 620 | ||
567 | return ERR_PTR(-EINVAL); | 621 | return -EINVAL; |
568 | } | 622 | } |
569 | 623 | ||
570 | static void cryptd_free(struct crypto_instance *inst) | 624 | static void cryptd_free(struct crypto_instance *inst) |
571 | { | 625 | { |
572 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | 626 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); |
627 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); | ||
628 | |||
629 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { | ||
630 | case CRYPTO_ALG_TYPE_AHASH: | ||
631 | crypto_drop_shash(&hctx->spawn); | ||
632 | kfree(ahash_instance(inst)); | ||
633 | return; | ||
634 | } | ||
573 | 635 | ||
574 | crypto_drop_spawn(&ctx->spawn); | 636 | crypto_drop_spawn(&ctx->spawn); |
575 | kfree(inst); | 637 | kfree(inst); |
@@ -577,7 +639,7 @@ static void cryptd_free(struct crypto_instance *inst) | |||
577 | 639 | ||
578 | static struct crypto_template cryptd_tmpl = { | 640 | static struct crypto_template cryptd_tmpl = { |
579 | .name = "cryptd", | 641 | .name = "cryptd", |
580 | .alloc = cryptd_alloc, | 642 | .create = cryptd_create, |
581 | .free = cryptd_free, | 643 | .free = cryptd_free, |
582 | .module = THIS_MODULE, | 644 | .module = THIS_MODULE, |
583 | }; | 645 | }; |
@@ -620,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) | |||
620 | } | 682 | } |
621 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | 683 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); |
622 | 684 | ||
685 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | ||
686 | u32 type, u32 mask) | ||
687 | { | ||
688 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | ||
689 | struct crypto_ahash *tfm; | ||
690 | |||
691 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | ||
692 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | ||
693 | return ERR_PTR(-EINVAL); | ||
694 | tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); | ||
695 | if (IS_ERR(tfm)) | ||
696 | return ERR_CAST(tfm); | ||
697 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | ||
698 | crypto_free_ahash(tfm); | ||
699 | return ERR_PTR(-EINVAL); | ||
700 | } | ||
701 | |||
702 | return __cryptd_ahash_cast(tfm); | ||
703 | } | ||
704 | EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); | ||
705 | |||
706 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) | ||
707 | { | ||
708 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | ||
709 | |||
710 | return ctx->child; | ||
711 | } | ||
712 | EXPORT_SYMBOL_GPL(cryptd_ahash_child); | ||
713 | |||
714 | void cryptd_free_ahash(struct cryptd_ahash *tfm) | ||
715 | { | ||
716 | crypto_free_ahash(&tfm->base); | ||
717 | } | ||
718 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); | ||
719 | |||
623 | static int __init cryptd_init(void) | 720 | static int __init cryptd_init(void) |
624 | { | 721 | { |
625 | int err; | 722 | int err; |
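Editor's note (not part of this diff): the cryptd_alloc_ahash()/cryptd_ahash_child()/cryptd_free_ahash() helpers exported above give callers an asynchronous, workqueue-backed ahash wrapped around a synchronous shash, plus direct access to that child for synchronous use. A hedged usage sketch, assuming the matching declarations land in <crypto/cryptd.h>; the algorithm name "sha1" and the function example_use_cryptd_sha1() are illustrative.

/* Illustrative only: obtain a cryptd-wrapped ahash and peek at its
 * underlying shash.
 */
#include <crypto/cryptd.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int example_use_cryptd_sha1(void)
{
	struct cryptd_ahash *cryptd_tfm;
	struct crypto_shash *child;

	cryptd_tfm = cryptd_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* Synchronous access to the underlying shash when the caller
	 * can afford to run it directly. */
	child = cryptd_ahash_child(cryptd_tfm);
	pr_info("cryptd child digest size: %u\n",
		crypto_shash_digestsize(child));

	/* &cryptd_tfm->base is an ordinary crypto_ahash; ahash_requests
	 * submitted to it are completed from the cryptd queue. */

	cryptd_free_ahash(cryptd_tfm);
	return 0;
}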
diff --git a/crypto/ctr.c b/crypto/ctr.c index 2d7425f0e7b8..6c3bfabb9d1d 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c | |||
@@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) | |||
219 | inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; | 219 | inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; |
220 | inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; | 220 | inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; |
221 | 221 | ||
222 | inst->alg.cra_blkcipher.geniv = "chainiv"; | ||
223 | |||
222 | out: | 224 | out: |
223 | crypto_mod_put(alg); | 225 | crypto_mod_put(alg); |
224 | return inst; | 226 | return inst; |
diff --git a/crypto/gcm.c b/crypto/gcm.c index e70afd0c73dd..5fc3292483ef 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -11,7 +11,10 @@ | |||
11 | #include <crypto/gf128mul.h> | 11 | #include <crypto/gf128mul.h> |
12 | #include <crypto/internal/aead.h> | 12 | #include <crypto/internal/aead.h> |
13 | #include <crypto/internal/skcipher.h> | 13 | #include <crypto/internal/skcipher.h> |
14 | #include <crypto/internal/hash.h> | ||
14 | #include <crypto/scatterwalk.h> | 15 | #include <crypto/scatterwalk.h> |
16 | #include <crypto/hash.h> | ||
17 | #include "internal.h" | ||
15 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
16 | #include <linux/err.h> | 19 | #include <linux/err.h> |
17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
@@ -21,11 +24,12 @@ | |||
21 | 24 | ||
22 | struct gcm_instance_ctx { | 25 | struct gcm_instance_ctx { |
23 | struct crypto_skcipher_spawn ctr; | 26 | struct crypto_skcipher_spawn ctr; |
27 | struct crypto_ahash_spawn ghash; | ||
24 | }; | 28 | }; |
25 | 29 | ||
26 | struct crypto_gcm_ctx { | 30 | struct crypto_gcm_ctx { |
27 | struct crypto_ablkcipher *ctr; | 31 | struct crypto_ablkcipher *ctr; |
28 | struct gf128mul_4k *gf128; | 32 | struct crypto_ahash *ghash; |
29 | }; | 33 | }; |
30 | 34 | ||
31 | struct crypto_rfc4106_ctx { | 35 | struct crypto_rfc4106_ctx { |
@@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx { | |||
34 | }; | 38 | }; |
35 | 39 | ||
36 | struct crypto_gcm_ghash_ctx { | 40 | struct crypto_gcm_ghash_ctx { |
37 | u32 bytes; | 41 | unsigned int cryptlen; |
38 | u32 flags; | 42 | struct scatterlist *src; |
39 | struct gf128mul_4k *gf128; | 43 | crypto_completion_t complete; |
40 | u8 buffer[16]; | ||
41 | }; | 44 | }; |
42 | 45 | ||
43 | struct crypto_gcm_req_priv_ctx { | 46 | struct crypto_gcm_req_priv_ctx { |
@@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx { | |||
45 | u8 iauth_tag[16]; | 48 | u8 iauth_tag[16]; |
46 | struct scatterlist src[2]; | 49 | struct scatterlist src[2]; |
47 | struct scatterlist dst[2]; | 50 | struct scatterlist dst[2]; |
48 | struct crypto_gcm_ghash_ctx ghash; | 51 | struct crypto_gcm_ghash_ctx ghash_ctx; |
49 | struct ablkcipher_request abreq; | 52 | union { |
53 | struct ahash_request ahreq; | ||
54 | struct ablkcipher_request abreq; | ||
55 | } u; | ||
50 | }; | 56 | }; |
51 | 57 | ||
52 | struct crypto_gcm_setkey_result { | 58 | struct crypto_gcm_setkey_result { |
@@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result { | |||
54 | struct completion completion; | 60 | struct completion completion; |
55 | }; | 61 | }; |
56 | 62 | ||
63 | static void *gcm_zeroes; | ||
64 | |||
57 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | 65 | static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( |
58 | struct aead_request *req) | 66 | struct aead_request *req) |
59 | { | 67 | { |
@@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( | |||
62 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); | 70 | return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); |
63 | } | 71 | } |
64 | 72 | ||
65 | static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, | ||
66 | struct gf128mul_4k *gf128) | ||
67 | { | ||
68 | ctx->bytes = 0; | ||
69 | ctx->flags = flags; | ||
70 | ctx->gf128 = gf128; | ||
71 | memset(ctx->buffer, 0, 16); | ||
72 | } | ||
73 | |||
74 | static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx, | ||
75 | const u8 *src, unsigned int srclen) | ||
76 | { | ||
77 | u8 *dst = ctx->buffer; | ||
78 | |||
79 | if (ctx->bytes) { | ||
80 | int n = min(srclen, ctx->bytes); | ||
81 | u8 *pos = dst + (16 - ctx->bytes); | ||
82 | |||
83 | ctx->bytes -= n; | ||
84 | srclen -= n; | ||
85 | |||
86 | while (n--) | ||
87 | *pos++ ^= *src++; | ||
88 | |||
89 | if (!ctx->bytes) | ||
90 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
91 | } | ||
92 | |||
93 | while (srclen >= 16) { | ||
94 | crypto_xor(dst, src, 16); | ||
95 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
96 | src += 16; | ||
97 | srclen -= 16; | ||
98 | } | ||
99 | |||
100 | if (srclen) { | ||
101 | ctx->bytes = 16 - srclen; | ||
102 | while (srclen--) | ||
103 | *dst++ ^= *src++; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, | ||
108 | struct scatterlist *sg, int len) | ||
109 | { | ||
110 | struct scatter_walk walk; | ||
111 | u8 *src; | ||
112 | int n; | ||
113 | |||
114 | if (!len) | ||
115 | return; | ||
116 | |||
117 | scatterwalk_start(&walk, sg); | ||
118 | |||
119 | while (len) { | ||
120 | n = scatterwalk_clamp(&walk, len); | ||
121 | |||
122 | if (!n) { | ||
123 | scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); | ||
124 | n = scatterwalk_clamp(&walk, len); | ||
125 | } | ||
126 | |||
127 | src = scatterwalk_map(&walk, 0); | ||
128 | |||
129 | crypto_gcm_ghash_update(ctx, src, n); | ||
130 | len -= n; | ||
131 | |||
132 | scatterwalk_unmap(src, 0); | ||
133 | scatterwalk_advance(&walk, n); | ||
134 | scatterwalk_done(&walk, 0, len); | ||
135 | if (len) | ||
136 | crypto_yield(ctx->flags); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx) | ||
141 | { | ||
142 | u8 *dst = ctx->buffer; | ||
143 | |||
144 | if (ctx->bytes) { | ||
145 | u8 *tmp = dst + (16 - ctx->bytes); | ||
146 | |||
147 | while (ctx->bytes--) | ||
148 | *tmp++ ^= 0; | ||
149 | |||
150 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
151 | } | ||
152 | |||
153 | ctx->bytes = 0; | ||
154 | } | ||
155 | |||
156 | static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, | ||
157 | unsigned int authlen, | ||
158 | unsigned int cryptlen, u8 *dst) | ||
159 | { | ||
160 | u8 *buf = ctx->buffer; | ||
161 | u128 lengths; | ||
162 | |||
163 | lengths.a = cpu_to_be64(authlen * 8); | ||
164 | lengths.b = cpu_to_be64(cryptlen * 8); | ||
165 | |||
166 | crypto_gcm_ghash_flush(ctx); | ||
167 | crypto_xor(buf, (u8 *)&lengths, 16); | ||
168 | gf128mul_4k_lle((be128 *)buf, ctx->gf128); | ||
169 | crypto_xor(dst, buf, 16); | ||
170 | } | ||
171 | |||
172 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) | 73 | static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) |
173 | { | 74 | { |
174 | struct crypto_gcm_setkey_result *result = req->data; | 75 | struct crypto_gcm_setkey_result *result = req->data; |
@@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
184 | unsigned int keylen) | 85 | unsigned int keylen) |
185 | { | 86 | { |
186 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | 87 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); |
88 | struct crypto_ahash *ghash = ctx->ghash; | ||
187 | struct crypto_ablkcipher *ctr = ctx->ctr; | 89 | struct crypto_ablkcipher *ctr = ctx->ctr; |
188 | struct { | 90 | struct { |
189 | be128 hash; | 91 | be128 hash; |
@@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, | |||
233 | if (err) | 135 | if (err) |
234 | goto out; | 136 | goto out; |
235 | 137 | ||
236 | if (ctx->gf128 != NULL) | 138 | crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK); |
237 | gf128mul_free_4k(ctx->gf128); | 139 | crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & |
238 | 140 | CRYPTO_TFM_REQ_MASK); | |
239 | ctx->gf128 = gf128mul_init_4k_lle(&data->hash); | 141 | err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); |
240 | 142 | crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) & | |
241 | if (ctx->gf128 == NULL) | 143 | CRYPTO_TFM_RES_MASK); |
242 | err = -ENOMEM; | ||
243 | 144 | ||
244 | out: | 145 | out: |
245 | kfree(data); | 146 | kfree(data); |
@@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | |||
272 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 173 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
273 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); | 174 | struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); |
274 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 175 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
275 | u32 flags = req->base.tfm->crt_flags; | ||
276 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | ||
277 | struct scatterlist *dst; | 176 | struct scatterlist *dst; |
278 | __be32 counter = cpu_to_be32(1); | 177 | __be32 counter = cpu_to_be32(1); |
279 | 178 | ||
@@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, | |||
296 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, | 195 | ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, |
297 | cryptlen + sizeof(pctx->auth_tag), | 196 | cryptlen + sizeof(pctx->auth_tag), |
298 | req->iv); | 197 | req->iv); |
198 | } | ||
199 | |||
200 | static inline unsigned int gcm_remain(unsigned int len) | ||
201 | { | ||
202 | len &= 0xfU; | ||
203 | return len ? 16 - len : 0; | ||
204 | } | ||
205 | |||
206 | static void gcm_hash_len_done(struct crypto_async_request *areq, int err); | ||
207 | static void gcm_hash_final_done(struct crypto_async_request *areq, int err); | ||
299 | 208 | ||
300 | crypto_gcm_ghash_init(ghash, flags, ctx->gf128); | 209 | static int gcm_hash_update(struct aead_request *req, |
210 | struct crypto_gcm_req_priv_ctx *pctx, | ||
211 | crypto_completion_t complete, | ||
212 | struct scatterlist *src, | ||
213 | unsigned int len) | ||
214 | { | ||
215 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
301 | 216 | ||
302 | crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); | 217 | ahash_request_set_callback(ahreq, aead_request_flags(req), |
303 | crypto_gcm_ghash_flush(ghash); | 218 | complete, req); |
219 | ahash_request_set_crypt(ahreq, src, NULL, len); | ||
220 | |||
221 | return crypto_ahash_update(ahreq); | ||
304 | } | 222 | } |
305 | 223 | ||
306 | static int crypto_gcm_hash(struct aead_request *req) | 224 | static int gcm_hash_remain(struct aead_request *req, |
225 | struct crypto_gcm_req_priv_ctx *pctx, | ||
226 | unsigned int remain, | ||
227 | crypto_completion_t complete) | ||
307 | { | 228 | { |
308 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 229 | struct ahash_request *ahreq = &pctx->u.ahreq; |
230 | |||
231 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
232 | complete, req); | ||
233 | sg_init_one(pctx->src, gcm_zeroes, remain); | ||
234 | ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); | ||
235 | |||
236 | return crypto_ahash_update(ahreq); | ||
237 | } | ||
238 | |||
239 | static int gcm_hash_len(struct aead_request *req, | ||
240 | struct crypto_gcm_req_priv_ctx *pctx) | ||
241 | { | ||
242 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
243 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
244 | u128 lengths; | ||
245 | |||
246 | lengths.a = cpu_to_be64(req->assoclen * 8); | ||
247 | lengths.b = cpu_to_be64(gctx->cryptlen * 8); | ||
248 | memcpy(pctx->iauth_tag, &lengths, 16); | ||
249 | sg_init_one(pctx->src, pctx->iauth_tag, 16); | ||
250 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
251 | gcm_hash_len_done, req); | ||
252 | ahash_request_set_crypt(ahreq, pctx->src, | ||
253 | NULL, sizeof(lengths)); | ||
254 | |||
255 | return crypto_ahash_update(ahreq); | ||
256 | } | ||
257 | |||
258 | static int gcm_hash_final(struct aead_request *req, | ||
259 | struct crypto_gcm_req_priv_ctx *pctx) | ||
260 | { | ||
261 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
262 | |||
263 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
264 | gcm_hash_final_done, req); | ||
265 | ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0); | ||
266 | |||
267 | return crypto_ahash_final(ahreq); | ||
268 | } | ||
269 | |||
270 | static void gcm_hash_final_done(struct crypto_async_request *areq, | ||
271 | int err) | ||
272 | { | ||
273 | struct aead_request *req = areq->data; | ||
309 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 274 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
310 | u8 *auth_tag = pctx->auth_tag; | 275 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
311 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | 276 | |
277 | if (!err) | ||
278 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
312 | 279 | ||
313 | crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); | 280 | gctx->complete(areq, err); |
314 | crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, | 281 | } |
315 | auth_tag); | 282 | |
283 | static void gcm_hash_len_done(struct crypto_async_request *areq, | ||
284 | int err) | ||
285 | { | ||
286 | struct aead_request *req = areq->data; | ||
287 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
288 | |||
289 | if (!err) { | ||
290 | err = gcm_hash_final(req, pctx); | ||
291 | if (err == -EINPROGRESS || err == -EBUSY) | ||
292 | return; | ||
293 | } | ||
294 | |||
295 | gcm_hash_final_done(areq, err); | ||
296 | } | ||
297 | |||
298 | static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, | ||
299 | int err) | ||
300 | { | ||
301 | struct aead_request *req = areq->data; | ||
302 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
303 | |||
304 | if (!err) { | ||
305 | err = gcm_hash_len(req, pctx); | ||
306 | if (err == -EINPROGRESS || err == -EBUSY) | ||
307 | return; | ||
308 | } | ||
309 | |||
310 | gcm_hash_len_done(areq, err); | ||
311 | } | ||
312 | |||
313 | static void gcm_hash_crypt_done(struct crypto_async_request *areq, | ||
314 | int err) | ||
315 | { | ||
316 | struct aead_request *req = areq->data; | ||
317 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
318 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
319 | unsigned int remain; | ||
320 | |||
321 | if (!err) { | ||
322 | remain = gcm_remain(gctx->cryptlen); | ||
323 | BUG_ON(!remain); | ||
324 | err = gcm_hash_remain(req, pctx, remain, | ||
325 | gcm_hash_crypt_remain_done); | ||
326 | if (err == -EINPROGRESS || err == -EBUSY) | ||
327 | return; | ||
328 | } | ||
329 | |||
330 | gcm_hash_crypt_remain_done(areq, err); | ||
331 | } | ||
332 | |||
333 | static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, | ||
334 | int err) | ||
335 | { | ||
336 | struct aead_request *req = areq->data; | ||
337 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
338 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
339 | crypto_completion_t complete; | ||
340 | unsigned int remain = 0; | ||
341 | |||
342 | if (!err && gctx->cryptlen) { | ||
343 | remain = gcm_remain(gctx->cryptlen); | ||
344 | complete = remain ? gcm_hash_crypt_done : | ||
345 | gcm_hash_crypt_remain_done; | ||
346 | err = gcm_hash_update(req, pctx, complete, | ||
347 | gctx->src, gctx->cryptlen); | ||
348 | if (err == -EINPROGRESS || err == -EBUSY) | ||
349 | return; | ||
350 | } | ||
351 | |||
352 | if (remain) | ||
353 | gcm_hash_crypt_done(areq, err); | ||
354 | else | ||
355 | gcm_hash_crypt_remain_done(areq, err); | ||
356 | } | ||
357 | |||
358 | static void gcm_hash_assoc_done(struct crypto_async_request *areq, | ||
359 | int err) | ||
360 | { | ||
361 | struct aead_request *req = areq->data; | ||
362 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
363 | unsigned int remain; | ||
364 | |||
365 | if (!err) { | ||
366 | remain = gcm_remain(req->assoclen); | ||
367 | BUG_ON(!remain); | ||
368 | err = gcm_hash_remain(req, pctx, remain, | ||
369 | gcm_hash_assoc_remain_done); | ||
370 | if (err == -EINPROGRESS || err == -EBUSY) | ||
371 | return; | ||
372 | } | ||
373 | |||
374 | gcm_hash_assoc_remain_done(areq, err); | ||
375 | } | ||
376 | |||
377 | static void gcm_hash_init_done(struct crypto_async_request *areq, | ||
378 | int err) | ||
379 | { | ||
380 | struct aead_request *req = areq->data; | ||
381 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
382 | crypto_completion_t complete; | ||
383 | unsigned int remain = 0; | ||
384 | |||
385 | if (!err && req->assoclen) { | ||
386 | remain = gcm_remain(req->assoclen); | ||
387 | complete = remain ? gcm_hash_assoc_done : | ||
388 | gcm_hash_assoc_remain_done; | ||
389 | err = gcm_hash_update(req, pctx, complete, | ||
390 | req->assoc, req->assoclen); | ||
391 | if (err == -EINPROGRESS || err == -EBUSY) | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | if (remain) | ||
396 | gcm_hash_assoc_done(areq, err); | ||
397 | else | ||
398 | gcm_hash_assoc_remain_done(areq, err); | ||
399 | } | ||
400 | |||
401 | static int gcm_hash(struct aead_request *req, | ||
402 | struct crypto_gcm_req_priv_ctx *pctx) | ||
403 | { | ||
404 | struct ahash_request *ahreq = &pctx->u.ahreq; | ||
405 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
406 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
407 | unsigned int remain; | ||
408 | crypto_completion_t complete; | ||
409 | int err; | ||
410 | |||
411 | ahash_request_set_tfm(ahreq, ctx->ghash); | ||
412 | |||
413 | ahash_request_set_callback(ahreq, aead_request_flags(req), | ||
414 | gcm_hash_init_done, req); | ||
415 | err = crypto_ahash_init(ahreq); | ||
416 | if (err) | ||
417 | return err; | ||
418 | remain = gcm_remain(req->assoclen); | ||
419 | complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; | ||
420 | err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen); | ||
421 | if (err) | ||
422 | return err; | ||
423 | if (remain) { | ||
424 | err = gcm_hash_remain(req, pctx, remain, | ||
425 | gcm_hash_assoc_remain_done); | ||
426 | if (err) | ||
427 | return err; | ||
428 | } | ||
429 | remain = gcm_remain(gctx->cryptlen); | ||
430 | complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done; | ||
431 | err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen); | ||
432 | if (err) | ||
433 | return err; | ||
434 | if (remain) { | ||
435 | err = gcm_hash_remain(req, pctx, remain, | ||
436 | gcm_hash_crypt_remain_done); | ||
437 | if (err) | ||
438 | return err; | ||
439 | } | ||
440 | err = gcm_hash_len(req, pctx); | ||
441 | if (err) | ||
442 | return err; | ||
443 | err = gcm_hash_final(req, pctx); | ||
444 | if (err) | ||
445 | return err; | ||
446 | |||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | static void gcm_enc_copy_hash(struct aead_request *req, | ||
451 | struct crypto_gcm_req_priv_ctx *pctx) | ||
452 | { | ||
453 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
454 | u8 *auth_tag = pctx->auth_tag; | ||
316 | 455 | ||
317 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, | 456 | scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, |
318 | crypto_aead_authsize(aead), 1); | 457 | crypto_aead_authsize(aead), 1); |
319 | return 0; | ||
320 | } | 458 | } |
321 | 459 | ||
322 | static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) | 460 | static void gcm_enc_hash_done(struct crypto_async_request *areq, |
461 | int err) | ||
323 | { | 462 | { |
324 | struct aead_request *req = areq->data; | 463 | struct aead_request *req = areq->data; |
464 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
325 | 465 | ||
326 | if (!err) | 466 | if (!err) |
327 | err = crypto_gcm_hash(req); | 467 | gcm_enc_copy_hash(req, pctx); |
328 | 468 | ||
329 | aead_request_complete(req, err); | 469 | aead_request_complete(req, err); |
330 | } | 470 | } |
331 | 471 | ||
472 | static void gcm_encrypt_done(struct crypto_async_request *areq, | ||
473 | int err) | ||
474 | { | ||
475 | struct aead_request *req = areq->data; | ||
476 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
477 | |||
478 | if (!err) { | ||
479 | err = gcm_hash(req, pctx); | ||
480 | if (err == -EINPROGRESS || err == -EBUSY) | ||
481 | return; | ||
482 | } | ||
483 | |||
484 | gcm_enc_hash_done(areq, err); | ||
485 | } | ||
486 | |||
332 | static int crypto_gcm_encrypt(struct aead_request *req) | 487 | static int crypto_gcm_encrypt(struct aead_request *req) |
333 | { | 488 | { |
334 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 489 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
335 | struct ablkcipher_request *abreq = &pctx->abreq; | 490 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
491 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
336 | int err; | 492 | int err; |
337 | 493 | ||
338 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); | 494 | crypto_gcm_init_crypt(abreq, req, req->cryptlen); |
339 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 495 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), |
340 | crypto_gcm_encrypt_done, req); | 496 | gcm_encrypt_done, req); |
497 | |||
498 | gctx->src = req->dst; | ||
499 | gctx->cryptlen = req->cryptlen; | ||
500 | gctx->complete = gcm_enc_hash_done; | ||
341 | 501 | ||
342 | err = crypto_ablkcipher_encrypt(abreq); | 502 | err = crypto_ablkcipher_encrypt(abreq); |
343 | if (err) | 503 | if (err) |
344 | return err; | 504 | return err; |
345 | 505 | ||
346 | return crypto_gcm_hash(req); | 506 | err = gcm_hash(req, pctx); |
507 | if (err) | ||
508 | return err; | ||
509 | |||
510 | crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); | ||
511 | gcm_enc_copy_hash(req, pctx); | ||
512 | |||
513 | return 0; | ||
347 | } | 514 | } |
348 | 515 | ||
349 | static int crypto_gcm_verify(struct aead_request *req) | 516 | static int crypto_gcm_verify(struct aead_request *req, |
517 | struct crypto_gcm_req_priv_ctx *pctx) | ||
350 | { | 518 | { |
351 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 519 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
352 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
353 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | ||
354 | u8 *auth_tag = pctx->auth_tag; | 520 | u8 *auth_tag = pctx->auth_tag; |
355 | u8 *iauth_tag = pctx->iauth_tag; | 521 | u8 *iauth_tag = pctx->iauth_tag; |
356 | unsigned int authsize = crypto_aead_authsize(aead); | 522 | unsigned int authsize = crypto_aead_authsize(aead); |
357 | unsigned int cryptlen = req->cryptlen - authsize; | 523 | unsigned int cryptlen = req->cryptlen - authsize; |
358 | 524 | ||
359 | crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); | 525 | crypto_xor(auth_tag, iauth_tag, 16); |
360 | |||
361 | authsize = crypto_aead_authsize(aead); | ||
362 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); | 526 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); |
363 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; | 527 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; |
364 | } | 528 | } |
365 | 529 | ||
366 | static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) | 530 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) |
367 | { | 531 | { |
368 | struct aead_request *req = areq->data; | 532 | struct aead_request *req = areq->data; |
533 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
369 | 534 | ||
370 | if (!err) | 535 | if (!err) |
371 | err = crypto_gcm_verify(req); | 536 | err = crypto_gcm_verify(req, pctx); |
372 | 537 | ||
373 | aead_request_complete(req, err); | 538 | aead_request_complete(req, err); |
374 | } | 539 | } |
375 | 540 | ||
541 | static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) | ||
542 | { | ||
543 | struct aead_request *req = areq->data; | ||
544 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | ||
545 | struct ablkcipher_request *abreq = &pctx->u.abreq; | ||
546 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; | ||
547 | |||
548 | if (!err) { | ||
549 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
550 | gcm_decrypt_done, req); | ||
551 | crypto_gcm_init_crypt(abreq, req, gctx->cryptlen); | ||
552 | err = crypto_ablkcipher_decrypt(abreq); | ||
553 | if (err == -EINPROGRESS || err == -EBUSY) | ||
554 | return; | ||
555 | } | ||
556 | |||
557 | gcm_decrypt_done(areq, err); | ||
558 | } | ||
559 | |||
376 | static int crypto_gcm_decrypt(struct aead_request *req) | 560 | static int crypto_gcm_decrypt(struct aead_request *req) |
377 | { | 561 | { |
378 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 562 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
379 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); | 563 | struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); |
380 | struct ablkcipher_request *abreq = &pctx->abreq; | 564 | struct ablkcipher_request *abreq = &pctx->u.abreq; |
381 | struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; | 565 | struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; |
382 | unsigned int cryptlen = req->cryptlen; | ||
383 | unsigned int authsize = crypto_aead_authsize(aead); | 566 | unsigned int authsize = crypto_aead_authsize(aead); |
567 | unsigned int cryptlen = req->cryptlen; | ||
384 | int err; | 568 | int err; |
385 | 569 | ||
386 | if (cryptlen < authsize) | 570 | if (cryptlen < authsize) |
387 | return -EINVAL; | 571 | return -EINVAL; |
388 | cryptlen -= authsize; | 572 | cryptlen -= authsize; |
389 | 573 | ||
390 | crypto_gcm_init_crypt(abreq, req, cryptlen); | 574 | gctx->src = req->src; |
391 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | 575 | gctx->cryptlen = cryptlen; |
392 | crypto_gcm_decrypt_done, req); | 576 | gctx->complete = gcm_dec_hash_done; |
393 | 577 | ||
394 | crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); | 578 | err = gcm_hash(req, pctx); |
579 | if (err) | ||
580 | return err; | ||
395 | 581 | ||
582 | ablkcipher_request_set_callback(abreq, aead_request_flags(req), | ||
583 | gcm_decrypt_done, req); | ||
584 | crypto_gcm_init_crypt(abreq, req, cryptlen); | ||
396 | err = crypto_ablkcipher_decrypt(abreq); | 585 | err = crypto_ablkcipher_decrypt(abreq); |
397 | if (err) | 586 | if (err) |
398 | return err; | 587 | return err; |
399 | 588 | ||
400 | return crypto_gcm_verify(req); | 589 | return crypto_gcm_verify(req, pctx); |
401 | } | 590 | } |
402 | 591 | ||
403 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | 592 | static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) |
@@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) | |||
406 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); | 595 | struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); |
407 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 596 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
408 | struct crypto_ablkcipher *ctr; | 597 | struct crypto_ablkcipher *ctr; |
598 | struct crypto_ahash *ghash; | ||
409 | unsigned long align; | 599 | unsigned long align; |
410 | int err; | 600 | int err; |
411 | 601 | ||
602 | ghash = crypto_spawn_ahash(&ictx->ghash); | ||
603 | if (IS_ERR(ghash)) | ||
604 | return PTR_ERR(ghash); | ||
605 | |||
412 | ctr = crypto_spawn_skcipher(&ictx->ctr); | 606 | ctr = crypto_spawn_skcipher(&ictx->ctr); |
413 | err = PTR_ERR(ctr); | 607 | err = PTR_ERR(ctr); |
414 | if (IS_ERR(ctr)) | 608 | if (IS_ERR(ctr)) |
415 | return err; | 609 | goto err_free_hash; |
416 | 610 | ||
417 | ctx->ctr = ctr; | 611 | ctx->ctr = ctr; |
418 | ctx->gf128 = NULL; | 612 | ctx->ghash = ghash; |
419 | 613 | ||
420 | align = crypto_tfm_alg_alignmask(tfm); | 614 | align = crypto_tfm_alg_alignmask(tfm); |
421 | align &= ~(crypto_tfm_ctx_alignment() - 1); | 615 | align &= ~(crypto_tfm_ctx_alignment() - 1); |
422 | tfm->crt_aead.reqsize = align + | 616 | tfm->crt_aead.reqsize = align + |
423 | sizeof(struct crypto_gcm_req_priv_ctx) + | 617 | offsetof(struct crypto_gcm_req_priv_ctx, u) + |
424 | crypto_ablkcipher_reqsize(ctr); | 618 | max(sizeof(struct ablkcipher_request) + |
619 | crypto_ablkcipher_reqsize(ctr), | ||
620 | sizeof(struct ahash_request) + | ||
621 | crypto_ahash_reqsize(ghash)); | ||
425 | 622 | ||
426 | return 0; | 623 | return 0; |
624 | |||
625 | err_free_hash: | ||
626 | crypto_free_ahash(ghash); | ||
627 | return err; | ||
427 | } | 628 | } |
428 | 629 | ||
429 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) | 630 | static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) |
430 | { | 631 | { |
431 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); | 632 | struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); |
432 | 633 | ||
433 | if (ctx->gf128 != NULL) | 634 | crypto_free_ahash(ctx->ghash); |
434 | gf128mul_free_4k(ctx->gf128); | ||
435 | |||
436 | crypto_free_ablkcipher(ctx->ctr); | 635 | crypto_free_ablkcipher(ctx->ctr); |
437 | } | 636 | } |
438 | 637 | ||
439 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | 638 | static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, |
440 | const char *full_name, | 639 | const char *full_name, |
441 | const char *ctr_name) | 640 | const char *ctr_name, |
641 | const char *ghash_name) | ||
442 | { | 642 | { |
443 | struct crypto_attr_type *algt; | 643 | struct crypto_attr_type *algt; |
444 | struct crypto_instance *inst; | 644 | struct crypto_instance *inst; |
445 | struct crypto_alg *ctr; | 645 | struct crypto_alg *ctr; |
646 | struct crypto_alg *ghash_alg; | ||
647 | struct ahash_alg *ghash_ahash_alg; | ||
446 | struct gcm_instance_ctx *ctx; | 648 | struct gcm_instance_ctx *ctx; |
447 | int err; | 649 | int err; |
448 | 650 | ||
@@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
454 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) | 656 | if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) |
455 | return ERR_PTR(-EINVAL); | 657 | return ERR_PTR(-EINVAL); |
456 | 658 | ||
659 | ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, | ||
660 | CRYPTO_ALG_TYPE_HASH, | ||
661 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
662 | err = PTR_ERR(ghash_alg); | ||
663 | if (IS_ERR(ghash_alg)) | ||
664 | return ERR_PTR(err); | ||
665 | |||
666 | err = -ENOMEM; | ||
457 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | 667 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
458 | if (!inst) | 668 | if (!inst) |
459 | return ERR_PTR(-ENOMEM); | 669 | goto out_put_ghash; |
460 | 670 | ||
461 | ctx = crypto_instance_ctx(inst); | 671 | ctx = crypto_instance_ctx(inst); |
672 | ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); | ||
673 | err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, | ||
674 | inst); | ||
675 | if (err) | ||
676 | goto err_free_inst; | ||
677 | |||
462 | crypto_set_skcipher_spawn(&ctx->ctr, inst); | 678 | crypto_set_skcipher_spawn(&ctx->ctr, inst); |
463 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, | 679 | err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, |
464 | crypto_requires_sync(algt->type, | 680 | crypto_requires_sync(algt->type, |
465 | algt->mask)); | 681 | algt->mask)); |
466 | if (err) | 682 | if (err) |
467 | goto err_free_inst; | 683 | goto err_drop_ghash; |
468 | 684 | ||
469 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); | 685 | ctr = crypto_skcipher_spawn_alg(&ctx->ctr); |
470 | 686 | ||
@@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
479 | 695 | ||
480 | err = -ENAMETOOLONG; | 696 | err = -ENAMETOOLONG; |
481 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | 697 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, |
482 | "gcm_base(%s)", ctr->cra_driver_name) >= | 698 | "gcm_base(%s,%s)", ctr->cra_driver_name, |
699 | ghash_alg->cra_driver_name) >= | ||
483 | CRYPTO_MAX_ALG_NAME) | 700 | CRYPTO_MAX_ALG_NAME) |
484 | goto out_put_ctr; | 701 | goto out_put_ctr; |
485 | 702 | ||
@@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, | |||
502 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; | 719 | inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; |
503 | 720 | ||
504 | out: | 721 | out: |
722 | crypto_mod_put(ghash_alg); | ||
505 | return inst; | 723 | return inst; |
506 | 724 | ||
507 | out_put_ctr: | 725 | out_put_ctr: |
508 | crypto_drop_skcipher(&ctx->ctr); | 726 | crypto_drop_skcipher(&ctx->ctr); |
727 | err_drop_ghash: | ||
728 | crypto_drop_ahash(&ctx->ghash); | ||
509 | err_free_inst: | 729 | err_free_inst: |
510 | kfree(inst); | 730 | kfree(inst); |
731 | out_put_ghash: | ||
511 | inst = ERR_PTR(err); | 732 | inst = ERR_PTR(err); |
512 | goto out; | 733 | goto out; |
513 | } | 734 | } |
@@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) | |||
532 | CRYPTO_MAX_ALG_NAME) | 753 | CRYPTO_MAX_ALG_NAME) |
533 | return ERR_PTR(-ENAMETOOLONG); | 754 | return ERR_PTR(-ENAMETOOLONG); |
534 | 755 | ||
535 | return crypto_gcm_alloc_common(tb, full_name, ctr_name); | 756 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); |
536 | } | 757 | } |
537 | 758 | ||
538 | static void crypto_gcm_free(struct crypto_instance *inst) | 759 | static void crypto_gcm_free(struct crypto_instance *inst) |
@@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst) | |||
540 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); | 761 | struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); |
541 | 762 | ||
542 | crypto_drop_skcipher(&ctx->ctr); | 763 | crypto_drop_skcipher(&ctx->ctr); |
764 | crypto_drop_ahash(&ctx->ghash); | ||
543 | kfree(inst); | 765 | kfree(inst); |
544 | } | 766 | } |
545 | 767 | ||
@@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
554 | { | 776 | { |
555 | int err; | 777 | int err; |
556 | const char *ctr_name; | 778 | const char *ctr_name; |
779 | const char *ghash_name; | ||
557 | char full_name[CRYPTO_MAX_ALG_NAME]; | 780 | char full_name[CRYPTO_MAX_ALG_NAME]; |
558 | 781 | ||
559 | ctr_name = crypto_attr_alg_name(tb[1]); | 782 | ctr_name = crypto_attr_alg_name(tb[1]); |
@@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) | |||
561 | if (IS_ERR(ctr_name)) | 784 | if (IS_ERR(ctr_name)) |
562 | return ERR_PTR(err); | 785 | return ERR_PTR(err); |
563 | 786 | ||
564 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", | 787 | ghash_name = crypto_attr_alg_name(tb[2]); |
565 | ctr_name) >= CRYPTO_MAX_ALG_NAME) | 788 | err = PTR_ERR(ghash_name); |
789 | if (IS_ERR(ghash_name)) | ||
790 | return ERR_PTR(err); | ||
791 | |||
792 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", | ||
793 | ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) | ||
566 | return ERR_PTR(-ENAMETOOLONG); | 794 | return ERR_PTR(-ENAMETOOLONG); |
567 | 795 | ||
568 | return crypto_gcm_alloc_common(tb, full_name, ctr_name); | 796 | return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); |
569 | } | 797 | } |
570 | 798 | ||
571 | static struct crypto_template crypto_gcm_base_tmpl = { | 799 | static struct crypto_template crypto_gcm_base_tmpl = { |
@@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void) | |||
784 | { | 1012 | { |
785 | int err; | 1013 | int err; |
786 | 1014 | ||
1015 | gcm_zeroes = kzalloc(16, GFP_KERNEL); | ||
1016 | if (!gcm_zeroes) | ||
1017 | return -ENOMEM; | ||
1018 | |||
787 | err = crypto_register_template(&crypto_gcm_base_tmpl); | 1019 | err = crypto_register_template(&crypto_gcm_base_tmpl); |
788 | if (err) | 1020 | if (err) |
789 | goto out; | 1021 | goto out; |
@@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void) | |||
796 | if (err) | 1028 | if (err) |
797 | goto out_undo_gcm; | 1029 | goto out_undo_gcm; |
798 | 1030 | ||
799 | out: | 1031 | return 0; |
800 | return err; | ||
801 | 1032 | ||
802 | out_undo_gcm: | 1033 | out_undo_gcm: |
803 | crypto_unregister_template(&crypto_gcm_tmpl); | 1034 | crypto_unregister_template(&crypto_gcm_tmpl); |
804 | out_undo_base: | 1035 | out_undo_base: |
805 | crypto_unregister_template(&crypto_gcm_base_tmpl); | 1036 | crypto_unregister_template(&crypto_gcm_base_tmpl); |
806 | goto out; | 1037 | out: |
1038 | kfree(gcm_zeroes); | ||
1039 | return err; | ||
807 | } | 1040 | } |
808 | 1041 | ||
809 | static void __exit crypto_gcm_module_exit(void) | 1042 | static void __exit crypto_gcm_module_exit(void) |
810 | { | 1043 | { |
1044 | kfree(gcm_zeroes); | ||
811 | crypto_unregister_template(&crypto_rfc4106_tmpl); | 1045 | crypto_unregister_template(&crypto_rfc4106_tmpl); |
812 | crypto_unregister_template(&crypto_gcm_tmpl); | 1046 | crypto_unregister_template(&crypto_gcm_tmpl); |
813 | crypto_unregister_template(&crypto_gcm_base_tmpl); | 1047 | crypto_unregister_template(&crypto_gcm_base_tmpl); |
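With this rewrite gcm.c no longer carries its own GF(2^128) multiplication; it drives a spawned "ghash" ahash and names instances gcm_base(<ctr>,<ghash>). The caller-visible AEAD interface is unchanged. A hedged sketch of one-shot encryption with "gcm(aes)" under the 2009-era aead API follows; the buffer layout, sizes and a synchronous implementation are assumptions, not taken from the patch.

/*
 * Hedged sketch (not from this patch): in-place GCM encryption of one
 * scatterlist.  sg must have room for the 16-byte tag appended after
 * plainlen bytes; iv must be crypto_aead_ivsize(aead) bytes long.
 */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int gcm_encrypt_example(const u8 *key, unsigned int keylen, u8 *iv,
			       struct scatterlist *assoc, unsigned int assoclen,
			       struct scatterlist *sg, unsigned int plainlen)
{
	struct crypto_aead *aead;
	struct aead_request *req;
	int err;

	aead = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	err = crypto_aead_setkey(aead, key, keylen) ?:
	      crypto_aead_setauthsize(aead, 16);
	if (err)
		goto out_free_aead;

	req = aead_request_alloc(aead, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_aead;
	}

	/* No callback: a synchronous implementation is assumed here. */
	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_assoc(req, assoc, assoclen);
	aead_request_set_crypt(req, sg, sg, plainlen, iv);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_aead:
	crypto_free_aead(aead);
	return err;
}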
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
new file mode 100644
index 000000000000..be4425616931
--- /dev/null
+++ b/crypto/ghash-generic.c
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * GHASH: digest algorithm for GCM (Galois/Counter Mode). | ||
3 | * | ||
4 | * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> | ||
5 | * Copyright (c) 2009 Intel Corp. | ||
6 | * Author: Huang Ying <ying.huang@intel.com> | ||
7 | * | ||
8 | * The algorithm implementation is copied from gcm.c. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published | ||
12 | * by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <crypto/algapi.h> | ||
16 | #include <crypto/gf128mul.h> | ||
17 | #include <crypto/internal/hash.h> | ||
18 | #include <linux/crypto.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | ||
22 | |||
23 | #define GHASH_BLOCK_SIZE 16 | ||
24 | #define GHASH_DIGEST_SIZE 16 | ||
25 | |||
26 | struct ghash_ctx { | ||
27 | struct gf128mul_4k *gf128; | ||
28 | }; | ||
29 | |||
30 | struct ghash_desc_ctx { | ||
31 | u8 buffer[GHASH_BLOCK_SIZE]; | ||
32 | u32 bytes; | ||
33 | }; | ||
34 | |||
35 | static int ghash_init(struct shash_desc *desc) | ||
36 | { | ||
37 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
38 | |||
39 | memset(dctx, 0, sizeof(*dctx)); | ||
40 | |||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | static int ghash_setkey(struct crypto_shash *tfm, | ||
45 | const u8 *key, unsigned int keylen) | ||
46 | { | ||
47 | struct ghash_ctx *ctx = crypto_shash_ctx(tfm); | ||
48 | |||
49 | if (keylen != GHASH_BLOCK_SIZE) { | ||
50 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
51 | return -EINVAL; | ||
52 | } | ||
53 | |||
54 | if (ctx->gf128) | ||
55 | gf128mul_free_4k(ctx->gf128); | ||
56 | ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); | ||
57 | if (!ctx->gf128) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static int ghash_update(struct shash_desc *desc, | ||
64 | const u8 *src, unsigned int srclen) | ||
65 | { | ||
66 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
67 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
68 | u8 *dst = dctx->buffer; | ||
69 | |||
70 | if (dctx->bytes) { | ||
71 | int n = min(srclen, dctx->bytes); | ||
72 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | ||
73 | |||
74 | dctx->bytes -= n; | ||
75 | srclen -= n; | ||
76 | |||
77 | while (n--) | ||
78 | *pos++ ^= *src++; | ||
79 | |||
80 | if (!dctx->bytes) | ||
81 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
82 | } | ||
83 | |||
84 | while (srclen >= GHASH_BLOCK_SIZE) { | ||
85 | crypto_xor(dst, src, GHASH_BLOCK_SIZE); | ||
86 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
87 | src += GHASH_BLOCK_SIZE; | ||
88 | srclen -= GHASH_BLOCK_SIZE; | ||
89 | } | ||
90 | |||
91 | if (srclen) { | ||
92 | dctx->bytes = GHASH_BLOCK_SIZE - srclen; | ||
93 | while (srclen--) | ||
94 | *dst++ ^= *src++; | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) | ||
101 | { | ||
102 | u8 *dst = dctx->buffer; | ||
103 | |||
104 | if (dctx->bytes) { | ||
105 | u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | ||
106 | |||
107 | while (dctx->bytes--) | ||
108 | *tmp++ ^= 0; | ||
109 | |||
110 | gf128mul_4k_lle((be128 *)dst, ctx->gf128); | ||
111 | } | ||
112 | |||
113 | dctx->bytes = 0; | ||
114 | } | ||
115 | |||
116 | static int ghash_final(struct shash_desc *desc, u8 *dst) | ||
117 | { | ||
118 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | ||
119 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
120 | u8 *buf = dctx->buffer; | ||
121 | |||
122 | ghash_flush(ctx, dctx); | ||
123 | memcpy(dst, buf, GHASH_BLOCK_SIZE); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static void ghash_exit_tfm(struct crypto_tfm *tfm) | ||
129 | { | ||
130 | struct ghash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
131 | if (ctx->gf128) | ||
132 | gf128mul_free_4k(ctx->gf128); | ||
133 | } | ||
134 | |||
135 | static struct shash_alg ghash_alg = { | ||
136 | .digestsize = GHASH_DIGEST_SIZE, | ||
137 | .init = ghash_init, | ||
138 | .update = ghash_update, | ||
139 | .final = ghash_final, | ||
140 | .setkey = ghash_setkey, | ||
141 | .descsize = sizeof(struct ghash_desc_ctx), | ||
142 | .base = { | ||
143 | .cra_name = "ghash", | ||
144 | .cra_driver_name = "ghash-generic", | ||
145 | .cra_priority = 100, | ||
146 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | ||
147 | .cra_blocksize = GHASH_BLOCK_SIZE, | ||
148 | .cra_ctxsize = sizeof(struct ghash_ctx), | ||
149 | .cra_module = THIS_MODULE, | ||
150 | .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list), | ||
151 | .cra_exit = ghash_exit_tfm, | ||
152 | }, | ||
153 | }; | ||
154 | |||
155 | static int __init ghash_mod_init(void) | ||
156 | { | ||
157 | return crypto_register_shash(&ghash_alg); | ||
158 | } | ||
159 | |||
160 | static void __exit ghash_mod_exit(void) | ||
161 | { | ||
162 | crypto_unregister_shash(&ghash_alg); | ||
163 | } | ||
164 | |||
165 | module_init(ghash_mod_init); | ||
166 | module_exit(ghash_mod_exit); | ||
167 | |||
168 | MODULE_LICENSE("GPL"); | ||
169 | MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); | ||
170 | MODULE_ALIAS("ghash"); | ||
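The new "ghash" shash can also be used on its own, outside of gcm.c. A hedged sketch, assuming the caller already holds the 16-byte hash key H = E_K(0^128) that GCM derives from the block cipher key:

/*
 * Hedged sketch (not part of the patch): compute a GHASH value over a
 * flat buffer with the new "ghash" shash.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int ghash_digest_example(const u8 h[16], const u8 *data,
				unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, h, 16);
	if (err)
		goto out_free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	/* A trailing partial block is zero-padded by ghash_final(). */
	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}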
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 0ad39c374963..15c2eb534541 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | 28 | ||
29 | struct hmac_ctx { | 29 | struct hmac_ctx { |
30 | struct crypto_hash *child; | 30 | struct crypto_shash *hash; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static inline void *align_ptr(void *p, unsigned int align) | 33 | static inline void *align_ptr(void *p, unsigned int align) |
@@ -35,65 +35,45 @@ static inline void *align_ptr(void *p, unsigned int align) | |||
35 | return (void *)ALIGN((unsigned long)p, align); | 35 | return (void *)ALIGN((unsigned long)p, align); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) | 38 | static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) |
39 | { | 39 | { |
40 | return align_ptr(crypto_hash_ctx_aligned(tfm) + | 40 | return align_ptr(crypto_shash_ctx_aligned(tfm) + |
41 | crypto_hash_blocksize(tfm) * 2 + | 41 | crypto_shash_statesize(tfm) * 2, |
42 | crypto_hash_digestsize(tfm), sizeof(void *)); | 42 | crypto_tfm_ctx_alignment()); |
43 | } | 43 | } |
44 | 44 | ||
45 | static int hmac_setkey(struct crypto_hash *parent, | 45 | static int hmac_setkey(struct crypto_shash *parent, |
46 | const u8 *inkey, unsigned int keylen) | 46 | const u8 *inkey, unsigned int keylen) |
47 | { | 47 | { |
48 | int bs = crypto_hash_blocksize(parent); | 48 | int bs = crypto_shash_blocksize(parent); |
49 | int ds = crypto_hash_digestsize(parent); | 49 | int ds = crypto_shash_digestsize(parent); |
50 | char *ipad = crypto_hash_ctx_aligned(parent); | 50 | int ss = crypto_shash_statesize(parent); |
51 | char *opad = ipad + bs; | 51 | char *ipad = crypto_shash_ctx_aligned(parent); |
52 | char *digest = opad + bs; | 52 | char *opad = ipad + ss; |
53 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | 53 | struct hmac_ctx *ctx = align_ptr(opad + ss, |
54 | struct crypto_hash *tfm = ctx->child; | 54 | crypto_tfm_ctx_alignment()); |
55 | struct crypto_shash *hash = ctx->hash; | ||
56 | struct { | ||
57 | struct shash_desc shash; | ||
58 | char ctx[crypto_shash_descsize(hash)]; | ||
59 | } desc; | ||
55 | unsigned int i; | 60 | unsigned int i; |
56 | 61 | ||
62 | desc.shash.tfm = hash; | ||
63 | desc.shash.flags = crypto_shash_get_flags(parent) & | ||
64 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
65 | |||
57 | if (keylen > bs) { | 66 | if (keylen > bs) { |
58 | struct hash_desc desc; | ||
59 | struct scatterlist tmp; | ||
60 | int tmplen; | ||
61 | int err; | 67 | int err; |
62 | 68 | ||
63 | desc.tfm = tfm; | 69 | err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad); |
64 | desc.flags = crypto_hash_get_flags(parent); | ||
65 | desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; | ||
66 | |||
67 | err = crypto_hash_init(&desc); | ||
68 | if (err) | 70 | if (err) |
69 | return err; | 71 | return err; |
70 | 72 | ||
71 | tmplen = bs * 2 + ds; | ||
72 | sg_init_one(&tmp, ipad, tmplen); | ||
73 | |||
74 | for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) { | ||
75 | memcpy(ipad, inkey, tmplen); | ||
76 | err = crypto_hash_update(&desc, &tmp, tmplen); | ||
77 | if (err) | ||
78 | return err; | ||
79 | } | ||
80 | |||
81 | if (keylen) { | ||
82 | memcpy(ipad, inkey, keylen); | ||
83 | err = crypto_hash_update(&desc, &tmp, keylen); | ||
84 | if (err) | ||
85 | return err; | ||
86 | } | ||
87 | |||
88 | err = crypto_hash_final(&desc, digest); | ||
89 | if (err) | ||
90 | return err; | ||
91 | |||
92 | inkey = digest; | ||
93 | keylen = ds; | 73 | keylen = ds; |
94 | } | 74 | } else |
75 | memcpy(ipad, inkey, keylen); | ||
95 | 76 | ||
96 | memcpy(ipad, inkey, keylen); | ||
97 | memset(ipad + keylen, 0, bs - keylen); | 77 | memset(ipad + keylen, 0, bs - keylen); |
98 | memcpy(opad, ipad, bs); | 78 | memcpy(opad, ipad, bs); |
99 | 79 | ||
@@ -102,184 +82,178 @@ static int hmac_setkey(struct crypto_hash *parent, | |||
102 | opad[i] ^= 0x5c; | 82 | opad[i] ^= 0x5c; |
103 | } | 83 | } |
104 | 84 | ||
105 | return 0; | 85 | return crypto_shash_init(&desc.shash) ?: |
86 | crypto_shash_update(&desc.shash, ipad, bs) ?: | ||
87 | crypto_shash_export(&desc.shash, ipad) ?: | ||
88 | crypto_shash_init(&desc.shash) ?: | ||
89 | crypto_shash_update(&desc.shash, opad, bs) ?: | ||
90 | crypto_shash_export(&desc.shash, opad); | ||
106 | } | 91 | } |
107 | 92 | ||
108 | static int hmac_init(struct hash_desc *pdesc) | 93 | static int hmac_export(struct shash_desc *pdesc, void *out) |
109 | { | 94 | { |
110 | struct crypto_hash *parent = pdesc->tfm; | 95 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
111 | int bs = crypto_hash_blocksize(parent); | ||
112 | int ds = crypto_hash_digestsize(parent); | ||
113 | char *ipad = crypto_hash_ctx_aligned(parent); | ||
114 | struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *)); | ||
115 | struct hash_desc desc; | ||
116 | struct scatterlist tmp; | ||
117 | int err; | ||
118 | 96 | ||
119 | desc.tfm = ctx->child; | 97 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
120 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
121 | sg_init_one(&tmp, ipad, bs); | ||
122 | 98 | ||
123 | err = crypto_hash_init(&desc); | 99 | return crypto_shash_export(desc, out); |
124 | if (unlikely(err)) | ||
125 | return err; | ||
126 | |||
127 | return crypto_hash_update(&desc, &tmp, bs); | ||
128 | } | 100 | } |
129 | 101 | ||
130 | static int hmac_update(struct hash_desc *pdesc, | 102 | static int hmac_import(struct shash_desc *pdesc, const void *in) |
131 | struct scatterlist *sg, unsigned int nbytes) | ||
132 | { | 103 | { |
104 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
133 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); | 105 | struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); |
134 | struct hash_desc desc; | ||
135 | 106 | ||
136 | desc.tfm = ctx->child; | 107 | desc->tfm = ctx->hash; |
137 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 108 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
138 | 109 | ||
139 | return crypto_hash_update(&desc, sg, nbytes); | 110 | return crypto_shash_import(desc, in); |
140 | } | 111 | } |
141 | 112 | ||
142 | static int hmac_final(struct hash_desc *pdesc, u8 *out) | 113 | static int hmac_init(struct shash_desc *pdesc) |
143 | { | 114 | { |
144 | struct crypto_hash *parent = pdesc->tfm; | 115 | return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); |
145 | int bs = crypto_hash_blocksize(parent); | 116 | } |
146 | int ds = crypto_hash_digestsize(parent); | ||
147 | char *opad = crypto_hash_ctx_aligned(parent) + bs; | ||
148 | char *digest = opad + bs; | ||
149 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | ||
150 | struct hash_desc desc; | ||
151 | struct scatterlist tmp; | ||
152 | int err; | ||
153 | 117 | ||
154 | desc.tfm = ctx->child; | 118 | static int hmac_update(struct shash_desc *pdesc, |
155 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 119 | const u8 *data, unsigned int nbytes) |
156 | sg_init_one(&tmp, opad, bs + ds); | 120 | { |
121 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
157 | 122 | ||
158 | err = crypto_hash_final(&desc, digest); | 123 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
159 | if (unlikely(err)) | ||
160 | return err; | ||
161 | 124 | ||
162 | return crypto_hash_digest(&desc, &tmp, bs + ds, out); | 125 | return crypto_shash_update(desc, data, nbytes); |
163 | } | 126 | } |
164 | 127 | ||
165 | static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, | 128 | static int hmac_final(struct shash_desc *pdesc, u8 *out) |
166 | unsigned int nbytes, u8 *out) | ||
167 | { | 129 | { |
168 | struct crypto_hash *parent = pdesc->tfm; | 130 | struct crypto_shash *parent = pdesc->tfm; |
169 | int bs = crypto_hash_blocksize(parent); | 131 | int ds = crypto_shash_digestsize(parent); |
170 | int ds = crypto_hash_digestsize(parent); | 132 | int ss = crypto_shash_statesize(parent); |
171 | char *ipad = crypto_hash_ctx_aligned(parent); | 133 | char *opad = crypto_shash_ctx_aligned(parent) + ss; |
172 | char *opad = ipad + bs; | 134 | struct shash_desc *desc = shash_desc_ctx(pdesc); |
173 | char *digest = opad + bs; | ||
174 | struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); | ||
175 | struct hash_desc desc; | ||
176 | struct scatterlist sg1[2]; | ||
177 | struct scatterlist sg2[1]; | ||
178 | int err; | ||
179 | 135 | ||
180 | desc.tfm = ctx->child; | 136 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
181 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
182 | 137 | ||
183 | sg_init_table(sg1, 2); | 138 | return crypto_shash_final(desc, out) ?: |
184 | sg_set_buf(sg1, ipad, bs); | 139 | crypto_shash_import(desc, opad) ?: |
185 | scatterwalk_sg_chain(sg1, 2, sg); | 140 | crypto_shash_finup(desc, out, ds, out); |
141 | } | ||
186 | 142 | ||
187 | sg_init_table(sg2, 1); | 143 | static int hmac_finup(struct shash_desc *pdesc, const u8 *data, |
188 | sg_set_buf(sg2, opad, bs + ds); | 144 | unsigned int nbytes, u8 *out) |
145 | { | ||
189 | 146 | ||
190 | err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); | 147 | struct crypto_shash *parent = pdesc->tfm; |
191 | if (unlikely(err)) | 148 | int ds = crypto_shash_digestsize(parent); |
192 | return err; | 149 | int ss = crypto_shash_statesize(parent); |
150 | char *opad = crypto_shash_ctx_aligned(parent) + ss; | ||
151 | struct shash_desc *desc = shash_desc_ctx(pdesc); | ||
193 | 152 | ||
194 | return crypto_hash_digest(&desc, sg2, bs + ds, out); | 153 | desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
154 | |||
155 | return crypto_shash_finup(desc, data, nbytes, out) ?: | ||
156 | crypto_shash_import(desc, opad) ?: | ||
157 | crypto_shash_finup(desc, out, ds, out); | ||
195 | } | 158 | } |
196 | 159 | ||
197 | static int hmac_init_tfm(struct crypto_tfm *tfm) | 160 | static int hmac_init_tfm(struct crypto_tfm *tfm) |
198 | { | 161 | { |
199 | struct crypto_hash *hash; | 162 | struct crypto_shash *parent = __crypto_shash_cast(tfm); |
163 | struct crypto_shash *hash; | ||
200 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 164 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
201 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 165 | struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); |
202 | struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); | 166 | struct hmac_ctx *ctx = hmac_ctx(parent); |
203 | 167 | ||
204 | hash = crypto_spawn_hash(spawn); | 168 | hash = crypto_spawn_shash(spawn); |
205 | if (IS_ERR(hash)) | 169 | if (IS_ERR(hash)) |
206 | return PTR_ERR(hash); | 170 | return PTR_ERR(hash); |
207 | 171 | ||
208 | ctx->child = hash; | 172 | parent->descsize = sizeof(struct shash_desc) + |
173 | crypto_shash_descsize(hash); | ||
174 | |||
175 | ctx->hash = hash; | ||
209 | return 0; | 176 | return 0; |
210 | } | 177 | } |
211 | 178 | ||
212 | static void hmac_exit_tfm(struct crypto_tfm *tfm) | 179 | static void hmac_exit_tfm(struct crypto_tfm *tfm) |
213 | { | 180 | { |
214 | struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); | 181 | struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); |
215 | crypto_free_hash(ctx->child); | 182 | crypto_free_shash(ctx->hash); |
216 | } | 183 | } |
217 | 184 | ||
218 | static void hmac_free(struct crypto_instance *inst) | 185 | static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) |
219 | { | 186 | { |
220 | crypto_drop_spawn(crypto_instance_ctx(inst)); | 187 | struct shash_instance *inst; |
221 | kfree(inst); | ||
222 | } | ||
223 | |||
224 | static struct crypto_instance *hmac_alloc(struct rtattr **tb) | ||
225 | { | ||
226 | struct crypto_instance *inst; | ||
227 | struct crypto_alg *alg; | 188 | struct crypto_alg *alg; |
189 | struct shash_alg *salg; | ||
228 | int err; | 190 | int err; |
229 | int ds; | 191 | int ds; |
192 | int ss; | ||
230 | 193 | ||
231 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); | 194 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
232 | if (err) | 195 | if (err) |
233 | return ERR_PTR(err); | 196 | return err; |
234 | 197 | ||
235 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, | 198 | salg = shash_attr_alg(tb[1], 0, 0); |
236 | CRYPTO_ALG_TYPE_HASH_MASK); | 199 | if (IS_ERR(salg)) |
237 | if (IS_ERR(alg)) | 200 | return PTR_ERR(salg); |
238 | return ERR_CAST(alg); | 201 | |
239 | 202 | err = -EINVAL; | |
240 | inst = ERR_PTR(-EINVAL); | 203 | ds = salg->digestsize; |
241 | ds = alg->cra_type == &crypto_hash_type ? | 204 | ss = salg->statesize; |
242 | alg->cra_hash.digestsize : | 205 | alg = &salg->base; |
243 | alg->cra_type ? | 206 | if (ds > alg->cra_blocksize || |
244 | __crypto_shash_alg(alg)->digestsize : | 207 | ss < alg->cra_blocksize) |
245 | alg->cra_digest.dia_digestsize; | ||
246 | if (ds > alg->cra_blocksize) | ||
247 | goto out_put_alg; | 208 | goto out_put_alg; |
248 | 209 | ||
249 | inst = crypto_alloc_instance("hmac", alg); | 210 | inst = shash_alloc_instance("hmac", alg); |
211 | err = PTR_ERR(inst); | ||
250 | if (IS_ERR(inst)) | 212 | if (IS_ERR(inst)) |
251 | goto out_put_alg; | 213 | goto out_put_alg; |
252 | 214 | ||
253 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; | 215 | err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, |
254 | inst->alg.cra_priority = alg->cra_priority; | 216 | shash_crypto_instance(inst)); |
255 | inst->alg.cra_blocksize = alg->cra_blocksize; | 217 | if (err) |
256 | inst->alg.cra_alignmask = alg->cra_alignmask; | 218 | goto out_free_inst; |
257 | inst->alg.cra_type = &crypto_hash_type; | 219 | |
258 | 220 | inst->alg.base.cra_priority = alg->cra_priority; | |
259 | inst->alg.cra_hash.digestsize = ds; | 221 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
260 | 222 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | |
261 | inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + | 223 | |
262 | ALIGN(inst->alg.cra_blocksize * 2 + ds, | 224 | ss = ALIGN(ss, alg->cra_alignmask + 1); |
263 | sizeof(void *)); | 225 | inst->alg.digestsize = ds; |
264 | 226 | inst->alg.statesize = ss; | |
265 | inst->alg.cra_init = hmac_init_tfm; | 227 | |
266 | inst->alg.cra_exit = hmac_exit_tfm; | 228 | inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + |
267 | 229 | ALIGN(ss * 2, crypto_tfm_ctx_alignment()); | |
268 | inst->alg.cra_hash.init = hmac_init; | 230 | |
269 | inst->alg.cra_hash.update = hmac_update; | 231 | inst->alg.base.cra_init = hmac_init_tfm; |
270 | inst->alg.cra_hash.final = hmac_final; | 232 | inst->alg.base.cra_exit = hmac_exit_tfm; |
271 | inst->alg.cra_hash.digest = hmac_digest; | 233 | |
272 | inst->alg.cra_hash.setkey = hmac_setkey; | 234 | inst->alg.init = hmac_init; |
235 | inst->alg.update = hmac_update; | ||
236 | inst->alg.final = hmac_final; | ||
237 | inst->alg.finup = hmac_finup; | ||
238 | inst->alg.export = hmac_export; | ||
239 | inst->alg.import = hmac_import; | ||
240 | inst->alg.setkey = hmac_setkey; | ||
241 | |||
242 | err = shash_register_instance(tmpl, inst); | ||
243 | if (err) { | ||
244 | out_free_inst: | ||
245 | shash_free_instance(shash_crypto_instance(inst)); | ||
246 | } | ||
273 | 247 | ||
274 | out_put_alg: | 248 | out_put_alg: |
275 | crypto_mod_put(alg); | 249 | crypto_mod_put(alg); |
276 | return inst; | 250 | return err; |
277 | } | 251 | } |
278 | 252 | ||
279 | static struct crypto_template hmac_tmpl = { | 253 | static struct crypto_template hmac_tmpl = { |
280 | .name = "hmac", | 254 | .name = "hmac", |
281 | .alloc = hmac_alloc, | 255 | .create = hmac_create, |
282 | .free = hmac_free, | 256 | .free = shash_free_instance, |
283 | .module = THIS_MODULE, | 257 | .module = THIS_MODULE, |
284 | }; | 258 | }; |
285 | 259 | ||
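With the hmac template converted to shash, a synchronous caller can drive "hmac(sha1)" (or any other registered underlying hash) entirely through the shash interface. The sketch below is illustrative only and not part of this patch set; it assumes the usual <crypto/hash.h> and <linux/slab.h> includes and sizes the descriptor the same way the compat path in shash.c does, via crypto_shash_descsize().

static int hmac_sha1_demo(const u8 *key, unsigned int keylen,
			  const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	/* init + update + final in a single call */
	err = crypto_shash_digest(desc, data, len, out);

	kzfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}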
diff --git a/crypto/internal.h b/crypto/internal.h index 113579a82dff..2d226362e594 100644 --- a/crypto/internal.h +++ b/crypto/internal.h | |||
@@ -25,12 +25,7 @@ | |||
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/rwsem.h> | 26 | #include <linux/rwsem.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | 28 | #include <linux/fips.h> | |
29 | #ifdef CONFIG_CRYPTO_FIPS | ||
30 | extern int fips_enabled; | ||
31 | #else | ||
32 | #define fips_enabled 0 | ||
33 | #endif | ||
34 | 29 | ||
35 | /* Crypto notification events. */ | 30 | /* Crypto notification events. */ |
36 | enum { | 31 | enum { |
@@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void) | |||
65 | { } | 60 | { } |
66 | #endif | 61 | #endif |
67 | 62 | ||
68 | static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg) | ||
69 | { | ||
70 | unsigned int len = alg->cra_ctxsize; | ||
71 | |||
72 | if (alg->cra_alignmask) { | ||
73 | len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); | ||
74 | len += alg->cra_digest.dia_digestsize; | ||
75 | } | ||
76 | |||
77 | return len; | ||
78 | } | ||
79 | |||
80 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) | 63 | static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) |
81 | { | 64 | { |
82 | return alg->cra_ctxsize; | 65 | return alg->cra_ctxsize; |
@@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); | |||
91 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); | 74 | struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); |
92 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); | 75 | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); |
93 | 76 | ||
94 | int crypto_init_digest_ops(struct crypto_tfm *tfm); | ||
95 | int crypto_init_digest_ops_async(struct crypto_tfm *tfm); | ||
96 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); | 77 | int crypto_init_cipher_ops(struct crypto_tfm *tfm); |
97 | int crypto_init_compress_ops(struct crypto_tfm *tfm); | 78 | int crypto_init_compress_ops(struct crypto_tfm *tfm); |
98 | 79 | ||
99 | void crypto_exit_digest_ops(struct crypto_tfm *tfm); | ||
100 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); | 80 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm); |
101 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); | 81 | void crypto_exit_compress_ops(struct crypto_tfm *tfm); |
102 | 82 | ||
@@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, | |||
111 | u32 mask); | 91 | u32 mask); |
112 | void *crypto_create_tfm(struct crypto_alg *alg, | 92 | void *crypto_create_tfm(struct crypto_alg *alg, |
113 | const struct crypto_type *frontend); | 93 | const struct crypto_type *frontend); |
94 | struct crypto_alg *crypto_find_alg(const char *alg_name, | ||
95 | const struct crypto_type *frontend, | ||
96 | u32 type, u32 mask); | ||
114 | void *crypto_alloc_tfm(const char *alg_name, | 97 | void *crypto_alloc_tfm(const char *alg_name, |
115 | const struct crypto_type *frontend, u32 type, u32 mask); | 98 | const struct crypto_type *frontend, u32 type, u32 mask); |
116 | 99 | ||
117 | int crypto_register_instance(struct crypto_template *tmpl, | ||
118 | struct crypto_instance *inst); | ||
119 | |||
120 | int crypto_register_notifier(struct notifier_block *nb); | 100 | int crypto_register_notifier(struct notifier_block *nb); |
121 | int crypto_unregister_notifier(struct notifier_block *nb); | 101 | int crypto_unregister_notifier(struct notifier_block *nb); |
122 | int crypto_probing_notify(unsigned long val, void *v); | 102 | int crypto_probing_notify(unsigned long val, void *v); |
diff --git a/crypto/pcompress.c b/crypto/pcompress.c index bcadc03726b7..f7c4a7d7412e 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c | |||
@@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
36 | return 0; | 36 | return 0; |
37 | } | 37 | } |
38 | 38 | ||
39 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg, | 39 | static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) |
40 | const struct crypto_type *frontend) | ||
41 | { | 40 | { |
42 | return alg->cra_ctxsize; | 41 | return alg->cra_ctxsize; |
43 | } | 42 | } |
44 | 43 | ||
45 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm, | 44 | static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) |
46 | const struct crypto_type *frontend) | ||
47 | { | 45 | { |
48 | return 0; | 46 | return 0; |
49 | } | 47 | } |
diff --git a/crypto/rng.c b/crypto/rng.c index 6e94bc735578..ba05e7380e76 100644 --- a/crypto/rng.c +++ b/crypto/rng.c | |||
@@ -123,4 +123,4 @@ void crypto_put_default_rng(void) | |||
123 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); | 123 | EXPORT_SYMBOL_GPL(crypto_put_default_rng); |
124 | 124 | ||
125 | MODULE_LICENSE("GPL"); | 125 | MODULE_LICENSE("GPL"); |
126 | MODULE_DESCRIPTION("Random Number Genertor"); | 126 | MODULE_DESCRIPTION("Random Number Generator"); |
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 9efef20454cb..0416091bf45a 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c | |||
@@ -25,31 +25,21 @@ | |||
25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
27 | 27 | ||
28 | struct sha1_ctx { | ||
29 | u64 count; | ||
30 | u32 state[5]; | ||
31 | u8 buffer[64]; | ||
32 | }; | ||
33 | |||
34 | static int sha1_init(struct shash_desc *desc) | 28 | static int sha1_init(struct shash_desc *desc) |
35 | { | 29 | { |
36 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 30 | struct sha1_state *sctx = shash_desc_ctx(desc); |
37 | 31 | ||
38 | static const struct sha1_ctx initstate = { | 32 | *sctx = (struct sha1_state){ |
39 | 0, | 33 | .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, |
40 | { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, | ||
41 | { 0, } | ||
42 | }; | 34 | }; |
43 | 35 | ||
44 | *sctx = initstate; | ||
45 | |||
46 | return 0; | 36 | return 0; |
47 | } | 37 | } |
48 | 38 | ||
49 | static int sha1_update(struct shash_desc *desc, const u8 *data, | 39 | static int sha1_update(struct shash_desc *desc, const u8 *data, |
50 | unsigned int len) | 40 | unsigned int len) |
51 | { | 41 | { |
52 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 42 | struct sha1_state *sctx = shash_desc_ctx(desc); |
53 | unsigned int partial, done; | 43 | unsigned int partial, done; |
54 | const u8 *src; | 44 | const u8 *src; |
55 | 45 | ||
@@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, | |||
85 | /* Add padding and return the message digest. */ | 75 | /* Add padding and return the message digest. */ |
86 | static int sha1_final(struct shash_desc *desc, u8 *out) | 76 | static int sha1_final(struct shash_desc *desc, u8 *out) |
87 | { | 77 | { |
88 | struct sha1_ctx *sctx = shash_desc_ctx(desc); | 78 | struct sha1_state *sctx = shash_desc_ctx(desc); |
89 | __be32 *dst = (__be32 *)out; | 79 | __be32 *dst = (__be32 *)out; |
90 | u32 i, index, padlen; | 80 | u32 i, index, padlen; |
91 | __be64 bits; | 81 | __be64 bits; |
@@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out) | |||
111 | return 0; | 101 | return 0; |
112 | } | 102 | } |
113 | 103 | ||
104 | static int sha1_export(struct shash_desc *desc, void *out) | ||
105 | { | ||
106 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
107 | |||
108 | memcpy(out, sctx, sizeof(*sctx)); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int sha1_import(struct shash_desc *desc, const void *in) | ||
113 | { | ||
114 | struct sha1_state *sctx = shash_desc_ctx(desc); | ||
115 | |||
116 | memcpy(sctx, in, sizeof(*sctx)); | ||
117 | return 0; | ||
118 | } | ||
119 | |||
114 | static struct shash_alg alg = { | 120 | static struct shash_alg alg = { |
115 | .digestsize = SHA1_DIGEST_SIZE, | 121 | .digestsize = SHA1_DIGEST_SIZE, |
116 | .init = sha1_init, | 122 | .init = sha1_init, |
117 | .update = sha1_update, | 123 | .update = sha1_update, |
118 | .final = sha1_final, | 124 | .final = sha1_final, |
119 | .descsize = sizeof(struct sha1_ctx), | 125 | .export = sha1_export, |
126 | .import = sha1_import, | ||
127 | .descsize = sizeof(struct sha1_state), | ||
128 | .statesize = sizeof(struct sha1_state), | ||
120 | .base = { | 129 | .base = { |
121 | .cra_name = "sha1", | 130 | .cra_name = "sha1", |
122 | .cra_driver_name= "sha1-generic", | 131 | .cra_driver_name= "sha1-generic", |
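sha1_export() and sha1_import() simply copy struct sha1_state in and out, which is enough to checkpoint a partially hashed stream and resume it later, possibly on a different descriptor. A minimal sketch follows; it is illustrative only and assumes both descriptors are already set up for "sha1" (as in the earlier hmac example) and that struct sha1_state comes from <crypto/sha.h>.

static int sha1_split_demo(struct shash_desc *desc1, struct shash_desc *desc2,
			   u8 *digest)
{
	struct sha1_state state;
	int err;

	err = crypto_shash_init(desc1);
	if (err)
		return err;

	err = crypto_shash_update(desc1, (const u8 *)"a", 1);
	if (err)
		return err;

	/* Freeze the partial state ... */
	err = crypto_shash_export(desc1, &state);
	if (err)
		return err;

	/* ... and finish hashing "abc" on the second descriptor. */
	err = crypto_shash_import(desc2, &state);
	if (err)
		return err;

	return crypto_shash_finup(desc2, (const u8 *)"bc", 2, digest);
}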
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 6349d8339d37..c48459ebf05b 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c | |||
@@ -25,12 +25,6 @@ | |||
25 | #include <crypto/sha.h> | 25 | #include <crypto/sha.h> |
26 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
27 | 27 | ||
28 | struct sha256_ctx { | ||
29 | u32 count[2]; | ||
30 | u32 state[8]; | ||
31 | u8 buf[128]; | ||
32 | }; | ||
33 | |||
34 | static inline u32 Ch(u32 x, u32 y, u32 z) | 28 | static inline u32 Ch(u32 x, u32 y, u32 z) |
35 | { | 29 | { |
36 | return z ^ (x & (y ^ z)); | 30 | return z ^ (x & (y ^ z)); |
@@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input) | |||
222 | 216 | ||
223 | static int sha224_init(struct shash_desc *desc) | 217 | static int sha224_init(struct shash_desc *desc) |
224 | { | 218 | { |
225 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 219 | struct sha256_state *sctx = shash_desc_ctx(desc); |
226 | sctx->state[0] = SHA224_H0; | 220 | sctx->state[0] = SHA224_H0; |
227 | sctx->state[1] = SHA224_H1; | 221 | sctx->state[1] = SHA224_H1; |
228 | sctx->state[2] = SHA224_H2; | 222 | sctx->state[2] = SHA224_H2; |
@@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc) | |||
231 | sctx->state[5] = SHA224_H5; | 225 | sctx->state[5] = SHA224_H5; |
232 | sctx->state[6] = SHA224_H6; | 226 | sctx->state[6] = SHA224_H6; |
233 | sctx->state[7] = SHA224_H7; | 227 | sctx->state[7] = SHA224_H7; |
234 | sctx->count[0] = 0; | 228 | sctx->count = 0; |
235 | sctx->count[1] = 0; | ||
236 | 229 | ||
237 | return 0; | 230 | return 0; |
238 | } | 231 | } |
239 | 232 | ||
240 | static int sha256_init(struct shash_desc *desc) | 233 | static int sha256_init(struct shash_desc *desc) |
241 | { | 234 | { |
242 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 235 | struct sha256_state *sctx = shash_desc_ctx(desc); |
243 | sctx->state[0] = SHA256_H0; | 236 | sctx->state[0] = SHA256_H0; |
244 | sctx->state[1] = SHA256_H1; | 237 | sctx->state[1] = SHA256_H1; |
245 | sctx->state[2] = SHA256_H2; | 238 | sctx->state[2] = SHA256_H2; |
@@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc) | |||
248 | sctx->state[5] = SHA256_H5; | 241 | sctx->state[5] = SHA256_H5; |
249 | sctx->state[6] = SHA256_H6; | 242 | sctx->state[6] = SHA256_H6; |
250 | sctx->state[7] = SHA256_H7; | 243 | sctx->state[7] = SHA256_H7; |
251 | sctx->count[0] = sctx->count[1] = 0; | 244 | sctx->count = 0; |
252 | 245 | ||
253 | return 0; | 246 | return 0; |
254 | } | 247 | } |
@@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc) | |||
256 | static int sha256_update(struct shash_desc *desc, const u8 *data, | 249 | static int sha256_update(struct shash_desc *desc, const u8 *data, |
257 | unsigned int len) | 250 | unsigned int len) |
258 | { | 251 | { |
259 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 252 | struct sha256_state *sctx = shash_desc_ctx(desc); |
260 | unsigned int i, index, part_len; | 253 | unsigned int partial, done; |
261 | 254 | const u8 *src; | |
262 | /* Compute number of bytes mod 128 */ | 255 | |
263 | index = (unsigned int)((sctx->count[0] >> 3) & 0x3f); | 256 | partial = sctx->count & 0x3f; |
264 | 257 | sctx->count += len; | |
265 | /* Update number of bits */ | 258 | done = 0; |
266 | if ((sctx->count[0] += (len << 3)) < (len << 3)) { | 259 | src = data; |
267 | sctx->count[1]++; | 260 | |
268 | sctx->count[1] += (len >> 29); | 261 | if ((partial + len) > 63) { |
269 | } | 262 | if (partial) { |
270 | 263 | done = -partial; | |
271 | part_len = 64 - index; | 264 | memcpy(sctx->buf + partial, data, done + 64); |
272 | 265 | src = sctx->buf; | |
273 | /* Transform as many times as possible. */ | 266 | } |
274 | if (len >= part_len) { | 267 | |
275 | memcpy(&sctx->buf[index], data, part_len); | 268 | do { |
276 | sha256_transform(sctx->state, sctx->buf); | 269 | sha256_transform(sctx->state, src); |
277 | 270 | done += 64; | |
278 | for (i = part_len; i + 63 < len; i += 64) | 271 | src = data + done; |
279 | sha256_transform(sctx->state, &data[i]); | 272 | } while (done + 63 < len); |
280 | index = 0; | 273 | |
281 | } else { | 274 | partial = 0; |
282 | i = 0; | ||
283 | } | 275 | } |
284 | 276 | memcpy(sctx->buf + partial, src, len - done); | |
285 | /* Buffer remaining input */ | ||
286 | memcpy(&sctx->buf[index], &data[i], len-i); | ||
287 | 277 | ||
288 | return 0; | 278 | return 0; |
289 | } | 279 | } |
290 | 280 | ||
291 | static int sha256_final(struct shash_desc *desc, u8 *out) | 281 | static int sha256_final(struct shash_desc *desc, u8 *out) |
292 | { | 282 | { |
293 | struct sha256_ctx *sctx = shash_desc_ctx(desc); | 283 | struct sha256_state *sctx = shash_desc_ctx(desc); |
294 | __be32 *dst = (__be32 *)out; | 284 | __be32 *dst = (__be32 *)out; |
295 | __be32 bits[2]; | 285 | __be64 bits; |
296 | unsigned int index, pad_len; | 286 | unsigned int index, pad_len; |
297 | int i; | 287 | int i; |
298 | static const u8 padding[64] = { 0x80, }; | 288 | static const u8 padding[64] = { 0x80, }; |
299 | 289 | ||
300 | /* Save number of bits */ | 290 | /* Save number of bits */ |
301 | bits[1] = cpu_to_be32(sctx->count[0]); | 291 | bits = cpu_to_be64(sctx->count << 3); |
302 | bits[0] = cpu_to_be32(sctx->count[1]); | ||
303 | 292 | ||
304 | /* Pad out to 56 mod 64. */ | 293 | /* Pad out to 56 mod 64. */ |
305 | index = (sctx->count[0] >> 3) & 0x3f; | 294 | index = sctx->count & 0x3f; |
306 | pad_len = (index < 56) ? (56 - index) : ((64+56) - index); | 295 | pad_len = (index < 56) ? (56 - index) : ((64+56) - index); |
307 | sha256_update(desc, padding, pad_len); | 296 | sha256_update(desc, padding, pad_len); |
308 | 297 | ||
309 | /* Append length (before padding) */ | 298 | /* Append length (before padding) */ |
310 | sha256_update(desc, (const u8 *)bits, sizeof(bits)); | 299 | sha256_update(desc, (const u8 *)&bits, sizeof(bits)); |
311 | 300 | ||
312 | /* Store state in digest */ | 301 | /* Store state in digest */ |
313 | for (i = 0; i < 8; i++) | 302 | for (i = 0; i < 8; i++) |
@@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash) | |||
331 | return 0; | 320 | return 0; |
332 | } | 321 | } |
333 | 322 | ||
323 | static int sha256_export(struct shash_desc *desc, void *out) | ||
324 | { | ||
325 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
326 | |||
327 | memcpy(out, sctx, sizeof(*sctx)); | ||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | static int sha256_import(struct shash_desc *desc, const void *in) | ||
332 | { | ||
333 | struct sha256_state *sctx = shash_desc_ctx(desc); | ||
334 | |||
335 | memcpy(sctx, in, sizeof(*sctx)); | ||
336 | return 0; | ||
337 | } | ||
338 | |||
334 | static struct shash_alg sha256 = { | 339 | static struct shash_alg sha256 = { |
335 | .digestsize = SHA256_DIGEST_SIZE, | 340 | .digestsize = SHA256_DIGEST_SIZE, |
336 | .init = sha256_init, | 341 | .init = sha256_init, |
337 | .update = sha256_update, | 342 | .update = sha256_update, |
338 | .final = sha256_final, | 343 | .final = sha256_final, |
339 | .descsize = sizeof(struct sha256_ctx), | 344 | .export = sha256_export, |
345 | .import = sha256_import, | ||
346 | .descsize = sizeof(struct sha256_state), | ||
347 | .statesize = sizeof(struct sha256_state), | ||
340 | .base = { | 348 | .base = { |
341 | .cra_name = "sha256", | 349 | .cra_name = "sha256", |
342 | .cra_driver_name= "sha256-generic", | 350 | .cra_driver_name= "sha256-generic", |
@@ -351,7 +359,7 @@ static struct shash_alg sha224 = { | |||
351 | .init = sha224_init, | 359 | .init = sha224_init, |
352 | .update = sha256_update, | 360 | .update = sha256_update, |
353 | .final = sha224_final, | 361 | .final = sha224_final, |
354 | .descsize = sizeof(struct sha256_ctx), | 362 | .descsize = sizeof(struct sha256_state), |
355 | .base = { | 363 | .base = { |
356 | .cra_name = "sha224", | 364 | .cra_name = "sha224", |
357 | .cra_driver_name= "sha224-generic", | 365 | .cra_driver_name= "sha224-generic", |
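Replacing the two 32-bit bit counters with one 64-bit byte counter simplifies the bookkeeping: SHA-256's length field is 64 bits of bits, so a byte counter needs at most 61 bits, and the conversion now happens once in sha256_final(). A standalone sketch of that arithmetic (illustrative only, assuming <asm/byteorder.h>):

/* Length/padding bookkeeping as done by the new sha256_final();
 * 'count' is the total number of message bytes hashed so far. */
static void sha256_length_demo(u64 count, __be64 *bits, unsigned int *pad_len)
{
	unsigned int index = count & 0x3f;	/* bytes modulo the block size */

	*bits = cpu_to_be64(count << 3);	/* bytes -> bits, big endian */
	*pad_len = (index < 56) ? (56 - index) : ((64 + 56) - index);
}

For a 3-byte message, index is 3 and pad_len is 53, so 3 + 53 + 8 length bytes complete exactly one 64-byte block.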
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 3bea38d12242..9ed9f60316e5 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c | |||
@@ -21,12 +21,6 @@ | |||
21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
22 | #include <asm/byteorder.h> | 22 | #include <asm/byteorder.h> |
23 | 23 | ||
24 | struct sha512_ctx { | ||
25 | u64 state[8]; | ||
26 | u32 count[4]; | ||
27 | u8 buf[128]; | ||
28 | }; | ||
29 | |||
30 | static DEFINE_PER_CPU(u64[80], msg_schedule); | 24 | static DEFINE_PER_CPU(u64[80], msg_schedule); |
31 | 25 | ||
32 | static inline u64 Ch(u64 x, u64 y, u64 z) | 26 | static inline u64 Ch(u64 x, u64 y, u64 z) |
@@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input) | |||
141 | static int | 135 | static int |
142 | sha512_init(struct shash_desc *desc) | 136 | sha512_init(struct shash_desc *desc) |
143 | { | 137 | { |
144 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 138 | struct sha512_state *sctx = shash_desc_ctx(desc); |
145 | sctx->state[0] = SHA512_H0; | 139 | sctx->state[0] = SHA512_H0; |
146 | sctx->state[1] = SHA512_H1; | 140 | sctx->state[1] = SHA512_H1; |
147 | sctx->state[2] = SHA512_H2; | 141 | sctx->state[2] = SHA512_H2; |
@@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc) | |||
150 | sctx->state[5] = SHA512_H5; | 144 | sctx->state[5] = SHA512_H5; |
151 | sctx->state[6] = SHA512_H6; | 145 | sctx->state[6] = SHA512_H6; |
152 | sctx->state[7] = SHA512_H7; | 146 | sctx->state[7] = SHA512_H7; |
153 | sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; | 147 | sctx->count[0] = sctx->count[1] = 0; |
154 | 148 | ||
155 | return 0; | 149 | return 0; |
156 | } | 150 | } |
@@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc) | |||
158 | static int | 152 | static int |
159 | sha384_init(struct shash_desc *desc) | 153 | sha384_init(struct shash_desc *desc) |
160 | { | 154 | { |
161 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 155 | struct sha512_state *sctx = shash_desc_ctx(desc); |
162 | sctx->state[0] = SHA384_H0; | 156 | sctx->state[0] = SHA384_H0; |
163 | sctx->state[1] = SHA384_H1; | 157 | sctx->state[1] = SHA384_H1; |
164 | sctx->state[2] = SHA384_H2; | 158 | sctx->state[2] = SHA384_H2; |
@@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc) | |||
167 | sctx->state[5] = SHA384_H5; | 161 | sctx->state[5] = SHA384_H5; |
168 | sctx->state[6] = SHA384_H6; | 162 | sctx->state[6] = SHA384_H6; |
169 | sctx->state[7] = SHA384_H7; | 163 | sctx->state[7] = SHA384_H7; |
170 | sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; | 164 | sctx->count[0] = sctx->count[1] = 0; |
171 | 165 | ||
172 | return 0; | 166 | return 0; |
173 | } | 167 | } |
@@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc) | |||
175 | static int | 169 | static int |
176 | sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) | 170 | sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) |
177 | { | 171 | { |
178 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 172 | struct sha512_state *sctx = shash_desc_ctx(desc); |
179 | 173 | ||
180 | unsigned int i, index, part_len; | 174 | unsigned int i, index, part_len; |
181 | 175 | ||
182 | /* Compute number of bytes mod 128 */ | 176 | /* Compute number of bytes mod 128 */ |
183 | index = (unsigned int)((sctx->count[0] >> 3) & 0x7F); | 177 | index = sctx->count[0] & 0x7f; |
184 | 178 | ||
185 | /* Update number of bits */ | 179 | /* Update number of bytes */ |
186 | if ((sctx->count[0] += (len << 3)) < (len << 3)) { | 180 | if (!(sctx->count[0] += len)) |
187 | if ((sctx->count[1] += 1) < 1) | 181 | sctx->count[1]++; |
188 | if ((sctx->count[2] += 1) < 1) | ||
189 | sctx->count[3]++; | ||
190 | sctx->count[1] += (len >> 29); | ||
191 | } | ||
192 | 182 | ||
193 | part_len = 128 - index; | 183 | part_len = 128 - index; |
194 | 184 | ||
@@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) | |||
214 | static int | 204 | static int |
215 | sha512_final(struct shash_desc *desc, u8 *hash) | 205 | sha512_final(struct shash_desc *desc, u8 *hash) |
216 | { | 206 | { |
217 | struct sha512_ctx *sctx = shash_desc_ctx(desc); | 207 | struct sha512_state *sctx = shash_desc_ctx(desc); |
218 | static u8 padding[128] = { 0x80, }; | 208 | static u8 padding[128] = { 0x80, }; |
219 | __be64 *dst = (__be64 *)hash; | 209 | __be64 *dst = (__be64 *)hash; |
220 | __be32 bits[4]; | 210 | __be64 bits[2]; |
221 | unsigned int index, pad_len; | 211 | unsigned int index, pad_len; |
222 | int i; | 212 | int i; |
223 | 213 | ||
224 | /* Save number of bits */ | 214 | /* Save number of bits */ |
225 | bits[3] = cpu_to_be32(sctx->count[0]); | 215 | bits[1] = cpu_to_be64(sctx->count[0] << 3); |
226 | bits[2] = cpu_to_be32(sctx->count[1]); | 216 | bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); |
227 | bits[1] = cpu_to_be32(sctx->count[2]); | ||
228 | bits[0] = cpu_to_be32(sctx->count[3]); | ||
229 | 217 | ||
230 | /* Pad out to 112 mod 128. */ | 218 | /* Pad out to 112 mod 128. */ |
231 | index = (sctx->count[0] >> 3) & 0x7f; | 219 | index = sctx->count[0] & 0x7f; |
232 | pad_len = (index < 112) ? (112 - index) : ((128+112) - index); | 220 | pad_len = (index < 112) ? (112 - index) : ((128+112) - index); |
233 | sha512_update(desc, padding, pad_len); | 221 | sha512_update(desc, padding, pad_len); |
234 | 222 | ||
@@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash) | |||
240 | dst[i] = cpu_to_be64(sctx->state[i]); | 228 | dst[i] = cpu_to_be64(sctx->state[i]); |
241 | 229 | ||
242 | /* Zeroize sensitive information. */ | 230 | /* Zeroize sensitive information. */ |
243 | memset(sctx, 0, sizeof(struct sha512_ctx)); | 231 | memset(sctx, 0, sizeof(struct sha512_state)); |
244 | 232 | ||
245 | return 0; | 233 | return 0; |
246 | } | 234 | } |
@@ -262,7 +250,7 @@ static struct shash_alg sha512 = { | |||
262 | .init = sha512_init, | 250 | .init = sha512_init, |
263 | .update = sha512_update, | 251 | .update = sha512_update, |
264 | .final = sha512_final, | 252 | .final = sha512_final, |
265 | .descsize = sizeof(struct sha512_ctx), | 253 | .descsize = sizeof(struct sha512_state), |
266 | .base = { | 254 | .base = { |
267 | .cra_name = "sha512", | 255 | .cra_name = "sha512", |
268 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 256 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
@@ -276,7 +264,7 @@ static struct shash_alg sha384 = { | |||
276 | .init = sha384_init, | 264 | .init = sha384_init, |
277 | .update = sha512_update, | 265 | .update = sha512_update, |
278 | .final = sha384_final, | 266 | .final = sha384_final, |
279 | .descsize = sizeof(struct sha512_ctx), | 267 | .descsize = sizeof(struct sha512_state), |
280 | .base = { | 268 | .base = { |
281 | .cra_name = "sha384", | 269 | .cra_name = "sha384", |
282 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, | 270 | .cra_flags = CRYPTO_ALG_TYPE_SHASH, |
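sha512_final() now rebuilds the 128-bit bit length from the two 64-bit byte counters: shifting left by three converts bytes to bits, and the three bits shifted out of count[0] are carried into the high word. The same conversion as a standalone sketch (illustrative only, assuming <asm/byteorder.h>):

/* (hi:lo) is a 128-bit byte count; produce the 128-bit big-endian
 * bit count that SHA-512/SHA-384 append after padding. */
static void sha512_bitlen_demo(u64 hi, u64 lo, __be64 bits[2])
{
	bits[1] = cpu_to_be64(lo << 3);			/* low word */
	bits[0] = cpu_to_be64(hi << 3 | lo >> 61);	/* carry top 3 bits */
}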
diff --git a/crypto/shash.c b/crypto/shash.c index 2ccc8b0076ce..91f7b9d83881 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -22,6 +22,12 @@ | |||
22 | 22 | ||
23 | static const struct crypto_type crypto_shash_type; | 23 | static const struct crypto_type crypto_shash_type; |
24 | 24 | ||
25 | static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, | ||
26 | unsigned int keylen) | ||
27 | { | ||
28 | return -ENOSYS; | ||
29 | } | ||
30 | |||
25 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | 31 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, |
26 | unsigned int keylen) | 32 | unsigned int keylen) |
27 | { | 33 | { |
@@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
39 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | 45 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); |
40 | memcpy(alignbuffer, key, keylen); | 46 | memcpy(alignbuffer, key, keylen); |
41 | err = shash->setkey(tfm, alignbuffer, keylen); | 47 | err = shash->setkey(tfm, alignbuffer, keylen); |
42 | memset(alignbuffer, 0, keylen); | 48 | kzfree(buffer); |
43 | kfree(buffer); | ||
44 | return err; | 49 | return err; |
45 | } | 50 | } |
46 | 51 | ||
@@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
50 | struct shash_alg *shash = crypto_shash_alg(tfm); | 55 | struct shash_alg *shash = crypto_shash_alg(tfm); |
51 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 56 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
52 | 57 | ||
53 | if (!shash->setkey) | ||
54 | return -ENOSYS; | ||
55 | |||
56 | if ((unsigned long)key & alignmask) | 58 | if ((unsigned long)key & alignmask) |
57 | return shash_setkey_unaligned(tfm, key, keylen); | 59 | return shash_setkey_unaligned(tfm, key, keylen); |
58 | 60 | ||
@@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, | |||
74 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 76 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
75 | unsigned int unaligned_len = alignmask + 1 - | 77 | unsigned int unaligned_len = alignmask + 1 - |
76 | ((unsigned long)data & alignmask); | 78 | ((unsigned long)data & alignmask); |
77 | u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] | 79 | u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)] |
78 | __attribute__ ((aligned)); | 80 | __attribute__ ((aligned)); |
81 | u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); | ||
82 | int err; | ||
79 | 83 | ||
80 | if (unaligned_len > len) | 84 | if (unaligned_len > len) |
81 | unaligned_len = len; | 85 | unaligned_len = len; |
82 | 86 | ||
83 | memcpy(buf, data, unaligned_len); | 87 | memcpy(buf, data, unaligned_len); |
88 | err = shash->update(desc, buf, unaligned_len); | ||
89 | memset(buf, 0, unaligned_len); | ||
84 | 90 | ||
85 | return shash->update(desc, buf, unaligned_len) ?: | 91 | return err ?: |
86 | shash->update(desc, data + unaligned_len, len - unaligned_len); | 92 | shash->update(desc, data + unaligned_len, len - unaligned_len); |
87 | } | 93 | } |
88 | 94 | ||
@@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) | |||
106 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 112 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
107 | struct shash_alg *shash = crypto_shash_alg(tfm); | 113 | struct shash_alg *shash = crypto_shash_alg(tfm); |
108 | unsigned int ds = crypto_shash_digestsize(tfm); | 114 | unsigned int ds = crypto_shash_digestsize(tfm); |
109 | u8 buf[shash_align_buffer_size(ds, alignmask)] | 115 | u8 ubuf[shash_align_buffer_size(ds, alignmask)] |
110 | __attribute__ ((aligned)); | 116 | __attribute__ ((aligned)); |
117 | u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); | ||
111 | int err; | 118 | int err; |
112 | 119 | ||
113 | err = shash->final(desc, buf); | 120 | err = shash->final(desc, buf); |
121 | if (err) | ||
122 | goto out; | ||
123 | |||
114 | memcpy(out, buf, ds); | 124 | memcpy(out, buf, ds); |
125 | |||
126 | out: | ||
127 | memset(buf, 0, ds); | ||
115 | return err; | 128 | return err; |
116 | } | 129 | } |
117 | 130 | ||
@@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, | |||
142 | struct shash_alg *shash = crypto_shash_alg(tfm); | 155 | struct shash_alg *shash = crypto_shash_alg(tfm); |
143 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 156 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
144 | 157 | ||
145 | if (((unsigned long)data | (unsigned long)out) & alignmask || | 158 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
146 | !shash->finup) | ||
147 | return shash_finup_unaligned(desc, data, len, out); | 159 | return shash_finup_unaligned(desc, data, len, out); |
148 | 160 | ||
149 | return shash->finup(desc, data, len, out); | 161 | return shash->finup(desc, data, len, out); |
@@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, | |||
154 | unsigned int len, u8 *out) | 166 | unsigned int len, u8 *out) |
155 | { | 167 | { |
156 | return crypto_shash_init(desc) ?: | 168 | return crypto_shash_init(desc) ?: |
157 | crypto_shash_update(desc, data, len) ?: | 169 | crypto_shash_finup(desc, data, len, out); |
158 | crypto_shash_final(desc, out); | ||
159 | } | 170 | } |
160 | 171 | ||
161 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | 172 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, |
@@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | |||
165 | struct shash_alg *shash = crypto_shash_alg(tfm); | 176 | struct shash_alg *shash = crypto_shash_alg(tfm); |
166 | unsigned long alignmask = crypto_shash_alignmask(tfm); | 177 | unsigned long alignmask = crypto_shash_alignmask(tfm); |
167 | 178 | ||
168 | if (((unsigned long)data | (unsigned long)out) & alignmask || | 179 | if (((unsigned long)data | (unsigned long)out) & alignmask) |
169 | !shash->digest) | ||
170 | return shash_digest_unaligned(desc, data, len, out); | 180 | return shash_digest_unaligned(desc, data, len, out); |
171 | 181 | ||
172 | return shash->digest(desc, data, len, out); | 182 | return shash->digest(desc, data, len, out); |
173 | } | 183 | } |
174 | EXPORT_SYMBOL_GPL(crypto_shash_digest); | 184 | EXPORT_SYMBOL_GPL(crypto_shash_digest); |
175 | 185 | ||
176 | int crypto_shash_import(struct shash_desc *desc, const u8 *in) | 186 | static int shash_default_export(struct shash_desc *desc, void *out) |
177 | { | 187 | { |
178 | struct crypto_shash *tfm = desc->tfm; | 188 | memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); |
179 | struct shash_alg *alg = crypto_shash_alg(tfm); | 189 | return 0; |
180 | 190 | } | |
181 | memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); | ||
182 | |||
183 | if (alg->reinit) | ||
184 | alg->reinit(desc); | ||
185 | 191 | ||
192 | static int shash_default_import(struct shash_desc *desc, const void *in) | ||
193 | { | ||
194 | memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); | ||
186 | return 0; | 195 | return 0; |
187 | } | 196 | } |
188 | EXPORT_SYMBOL_GPL(crypto_shash_import); | ||
189 | 197 | ||
190 | static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, | 198 | static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, |
191 | unsigned int keylen) | 199 | unsigned int keylen) |
@@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req) | |||
206 | return crypto_shash_init(desc); | 214 | return crypto_shash_init(desc); |
207 | } | 215 | } |
208 | 216 | ||
209 | static int shash_async_update(struct ahash_request *req) | 217 | int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) |
210 | { | 218 | { |
211 | struct shash_desc *desc = ahash_request_ctx(req); | ||
212 | struct crypto_hash_walk walk; | 219 | struct crypto_hash_walk walk; |
213 | int nbytes; | 220 | int nbytes; |
214 | 221 | ||
@@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req) | |||
218 | 225 | ||
219 | return nbytes; | 226 | return nbytes; |
220 | } | 227 | } |
228 | EXPORT_SYMBOL_GPL(shash_ahash_update); | ||
229 | |||
230 | static int shash_async_update(struct ahash_request *req) | ||
231 | { | ||
232 | return shash_ahash_update(req, ahash_request_ctx(req)); | ||
233 | } | ||
221 | 234 | ||
222 | static int shash_async_final(struct ahash_request *req) | 235 | static int shash_async_final(struct ahash_request *req) |
223 | { | 236 | { |
224 | return crypto_shash_final(ahash_request_ctx(req), req->result); | 237 | return crypto_shash_final(ahash_request_ctx(req), req->result); |
225 | } | 238 | } |
226 | 239 | ||
227 | static int shash_async_digest(struct ahash_request *req) | 240 | int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) |
241 | { | ||
242 | struct crypto_hash_walk walk; | ||
243 | int nbytes; | ||
244 | |||
245 | nbytes = crypto_hash_walk_first(req, &walk); | ||
246 | if (!nbytes) | ||
247 | return crypto_shash_final(desc, req->result); | ||
248 | |||
249 | do { | ||
250 | nbytes = crypto_hash_walk_last(&walk) ? | ||
251 | crypto_shash_finup(desc, walk.data, nbytes, | ||
252 | req->result) : | ||
253 | crypto_shash_update(desc, walk.data, nbytes); | ||
254 | nbytes = crypto_hash_walk_done(&walk, nbytes); | ||
255 | } while (nbytes > 0); | ||
256 | |||
257 | return nbytes; | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(shash_ahash_finup); | ||
260 | |||
261 | static int shash_async_finup(struct ahash_request *req) | ||
262 | { | ||
263 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); | ||
264 | struct shash_desc *desc = ahash_request_ctx(req); | ||
265 | |||
266 | desc->tfm = *ctx; | ||
267 | desc->flags = req->base.flags; | ||
268 | |||
269 | return shash_ahash_finup(req, desc); | ||
270 | } | ||
271 | |||
272 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) | ||
228 | { | 273 | { |
229 | struct scatterlist *sg = req->src; | 274 | struct scatterlist *sg = req->src; |
230 | unsigned int offset = sg->offset; | 275 | unsigned int offset = sg->offset; |
@@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req) | |||
232 | int err; | 277 | int err; |
233 | 278 | ||
234 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 279 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { |
235 | struct crypto_shash **ctx = | ||
236 | crypto_ahash_ctx(crypto_ahash_reqtfm(req)); | ||
237 | struct shash_desc *desc = ahash_request_ctx(req); | ||
238 | void *data; | 280 | void *data; |
239 | 281 | ||
240 | desc->tfm = *ctx; | ||
241 | desc->flags = req->base.flags; | ||
242 | |||
243 | data = crypto_kmap(sg_page(sg), 0); | 282 | data = crypto_kmap(sg_page(sg), 0); |
244 | err = crypto_shash_digest(desc, data + offset, nbytes, | 283 | err = crypto_shash_digest(desc, data + offset, nbytes, |
245 | req->result); | 284 | req->result); |
246 | crypto_kunmap(data, 0); | 285 | crypto_kunmap(data, 0); |
247 | crypto_yield(desc->flags); | 286 | crypto_yield(desc->flags); |
248 | goto out; | 287 | } else |
249 | } | 288 | err = crypto_shash_init(desc) ?: |
289 | shash_ahash_finup(req, desc); | ||
250 | 290 | ||
251 | err = shash_async_init(req); | 291 | return err; |
252 | if (err) | 292 | } |
253 | goto out; | 293 | EXPORT_SYMBOL_GPL(shash_ahash_digest); |
254 | 294 | ||
255 | err = shash_async_update(req); | 295 | static int shash_async_digest(struct ahash_request *req) |
256 | if (err) | 296 | { |
257 | goto out; | 297 | struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); |
298 | struct shash_desc *desc = ahash_request_ctx(req); | ||
258 | 299 | ||
259 | err = shash_async_final(req); | 300 | desc->tfm = *ctx; |
301 | desc->flags = req->base.flags; | ||
260 | 302 | ||
261 | out: | 303 | return shash_ahash_digest(req, desc); |
262 | return err; | 304 | } |
305 | |||
306 | static int shash_async_export(struct ahash_request *req, void *out) | ||
307 | { | ||
308 | return crypto_shash_export(ahash_request_ctx(req), out); | ||
309 | } | ||
310 | |||
311 | static int shash_async_import(struct ahash_request *req, const void *in) | ||
312 | { | ||
313 | return crypto_shash_import(ahash_request_ctx(req), in); | ||
263 | } | 314 | } |
264 | 315 | ||
265 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | 316 | static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) |
@@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) | |||
269 | crypto_free_shash(*ctx); | 320 | crypto_free_shash(*ctx); |
270 | } | 321 | } |
271 | 322 | ||
272 | static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | 323 | int crypto_init_shash_ops_async(struct crypto_tfm *tfm) |
273 | { | 324 | { |
274 | struct crypto_alg *calg = tfm->__crt_alg; | 325 | struct crypto_alg *calg = tfm->__crt_alg; |
275 | struct shash_alg *alg = __crypto_shash_alg(calg); | 326 | struct shash_alg *alg = __crypto_shash_alg(calg); |
276 | struct ahash_tfm *crt = &tfm->crt_ahash; | 327 | struct crypto_ahash *crt = __crypto_ahash_cast(tfm); |
277 | struct crypto_shash **ctx = crypto_tfm_ctx(tfm); | 328 | struct crypto_shash **ctx = crypto_tfm_ctx(tfm); |
278 | struct crypto_shash *shash; | 329 | struct crypto_shash *shash; |
279 | 330 | ||
@@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
291 | 342 | ||
292 | crt->init = shash_async_init; | 343 | crt->init = shash_async_init; |
293 | crt->update = shash_async_update; | 344 | crt->update = shash_async_update; |
294 | crt->final = shash_async_final; | 345 | crt->final = shash_async_final; |
346 | crt->finup = shash_async_finup; | ||
295 | crt->digest = shash_async_digest; | 347 | crt->digest = shash_async_digest; |
296 | crt->setkey = shash_async_setkey; | ||
297 | 348 | ||
298 | crt->digestsize = alg->digestsize; | 349 | if (alg->setkey) |
350 | crt->setkey = shash_async_setkey; | ||
351 | if (alg->export) | ||
352 | crt->export = shash_async_export; | ||
353 | if (alg->import) | ||
354 | crt->import = shash_async_import; | ||
355 | |||
299 | crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); | 356 | crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); |
300 | 357 | ||
301 | return 0; | 358 | return 0; |
@@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) | |||
304 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, | 361 | static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, |
305 | unsigned int keylen) | 362 | unsigned int keylen) |
306 | { | 363 | { |
307 | struct shash_desc *desc = crypto_hash_ctx(tfm); | 364 | struct shash_desc **descp = crypto_hash_ctx(tfm); |
365 | struct shash_desc *desc = *descp; | ||
308 | 366 | ||
309 | return crypto_shash_setkey(desc->tfm, key, keylen); | 367 | return crypto_shash_setkey(desc->tfm, key, keylen); |
310 | } | 368 | } |
311 | 369 | ||
312 | static int shash_compat_init(struct hash_desc *hdesc) | 370 | static int shash_compat_init(struct hash_desc *hdesc) |
313 | { | 371 | { |
314 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 372 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
373 | struct shash_desc *desc = *descp; | ||
315 | 374 | ||
316 | desc->flags = hdesc->flags; | 375 | desc->flags = hdesc->flags; |
317 | 376 | ||
@@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc) | |||
321 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | 380 | static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, |
322 | unsigned int len) | 381 | unsigned int len) |
323 | { | 382 | { |
324 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 383 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
384 | struct shash_desc *desc = *descp; | ||
325 | struct crypto_hash_walk walk; | 385 | struct crypto_hash_walk walk; |
326 | int nbytes; | 386 | int nbytes; |
327 | 387 | ||
@@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, | |||
334 | 394 | ||
335 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) | 395 | static int shash_compat_final(struct hash_desc *hdesc, u8 *out) |
336 | { | 396 | { |
337 | return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); | 397 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
398 | |||
399 | return crypto_shash_final(*descp, out); | ||
338 | } | 400 | } |
339 | 401 | ||
340 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | 402 | static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, |
@@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, | |||
344 | int err; | 406 | int err; |
345 | 407 | ||
346 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 408 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { |
347 | struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); | 409 | struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); |
410 | struct shash_desc *desc = *descp; | ||
348 | void *data; | 411 | void *data; |
349 | 412 | ||
350 | desc->flags = hdesc->flags; | 413 | desc->flags = hdesc->flags; |
@@ -372,9 +435,11 @@ out: | |||
372 | 435 | ||
373 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) | 436 | static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) |
374 | { | 437 | { |
375 | struct shash_desc *desc= crypto_tfm_ctx(tfm); | 438 | struct shash_desc **descp = crypto_tfm_ctx(tfm); |
439 | struct shash_desc *desc = *descp; | ||
376 | 440 | ||
377 | crypto_free_shash(desc->tfm); | 441 | crypto_free_shash(desc->tfm); |
442 | kzfree(desc); | ||
378 | } | 443 | } |
379 | 444 | ||
380 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | 445 | static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) |
@@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | |||
382 | struct hash_tfm *crt = &tfm->crt_hash; | 447 | struct hash_tfm *crt = &tfm->crt_hash; |
383 | struct crypto_alg *calg = tfm->__crt_alg; | 448 | struct crypto_alg *calg = tfm->__crt_alg; |
384 | struct shash_alg *alg = __crypto_shash_alg(calg); | 449 | struct shash_alg *alg = __crypto_shash_alg(calg); |
385 | struct shash_desc *desc = crypto_tfm_ctx(tfm); | 450 | struct shash_desc **descp = crypto_tfm_ctx(tfm); |
386 | struct crypto_shash *shash; | 451 | struct crypto_shash *shash; |
452 | struct shash_desc *desc; | ||
387 | 453 | ||
388 | if (!crypto_mod_get(calg)) | 454 | if (!crypto_mod_get(calg)) |
389 | return -EAGAIN; | 455 | return -EAGAIN; |
@@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) | |||
394 | return PTR_ERR(shash); | 460 | return PTR_ERR(shash); |
395 | } | 461 | } |
396 | 462 | ||
463 | desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), | ||
464 | GFP_KERNEL); | ||
465 | if (!desc) { | ||
466 | crypto_free_shash(shash); | ||
467 | return -ENOMEM; | ||
468 | } | ||
469 | |||
470 | *descp = desc; | ||
397 | desc->tfm = shash; | 471 | desc->tfm = shash; |
398 | tfm->exit = crypto_exit_shash_ops_compat; | 472 | tfm->exit = crypto_exit_shash_ops_compat; |
399 | 473 | ||
@@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
413 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | 487 | switch (mask & CRYPTO_ALG_TYPE_MASK) { |
414 | case CRYPTO_ALG_TYPE_HASH_MASK: | 488 | case CRYPTO_ALG_TYPE_HASH_MASK: |
415 | return crypto_init_shash_ops_compat(tfm); | 489 | return crypto_init_shash_ops_compat(tfm); |
416 | case CRYPTO_ALG_TYPE_AHASH_MASK: | ||
417 | return crypto_init_shash_ops_async(tfm); | ||
418 | } | 490 | } |
419 | 491 | ||
420 | return -EINVAL; | 492 | return -EINVAL; |
@@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |||
423 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, | 495 | static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, |
424 | u32 mask) | 496 | u32 mask) |
425 | { | 497 | { |
426 | struct shash_alg *salg = __crypto_shash_alg(alg); | ||
427 | |||
428 | switch (mask & CRYPTO_ALG_TYPE_MASK) { | 498 | switch (mask & CRYPTO_ALG_TYPE_MASK) { |
429 | case CRYPTO_ALG_TYPE_HASH_MASK: | 499 | case CRYPTO_ALG_TYPE_HASH_MASK: |
430 | return sizeof(struct shash_desc) + salg->descsize; | 500 | return sizeof(struct shash_desc *); |
431 | case CRYPTO_ALG_TYPE_AHASH_MASK: | ||
432 | return sizeof(struct crypto_shash *); | ||
433 | } | 501 | } |
434 | 502 | ||
435 | return 0; | 503 | return 0; |
436 | } | 504 | } |
437 | 505 | ||
438 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm, | 506 | static int crypto_shash_init_tfm(struct crypto_tfm *tfm) |
439 | const struct crypto_type *frontend) | ||
440 | { | 507 | { |
508 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | ||
509 | |||
510 | hash->descsize = crypto_shash_alg(hash)->descsize; | ||
441 | return 0; | 511 | return 0; |
442 | } | 512 | } |
443 | 513 | ||
444 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg, | 514 | static unsigned int crypto_shash_extsize(struct crypto_alg *alg) |
445 | const struct crypto_type *frontend) | ||
446 | { | 515 | { |
447 | return alg->cra_ctxsize; | 516 | return alg->cra_ctxsize; |
448 | } | 517 | } |
@@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) | |||
456 | seq_printf(m, "type : shash\n"); | 525 | seq_printf(m, "type : shash\n"); |
457 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | 526 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); |
458 | seq_printf(m, "digestsize : %u\n", salg->digestsize); | 527 | seq_printf(m, "digestsize : %u\n", salg->digestsize); |
459 | seq_printf(m, "descsize : %u\n", salg->descsize); | ||
460 | } | 528 | } |
461 | 529 | ||
462 | static const struct crypto_type crypto_shash_type = { | 530 | static const struct crypto_type crypto_shash_type = { |
@@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, | |||
480 | } | 548 | } |
481 | EXPORT_SYMBOL_GPL(crypto_alloc_shash); | 549 | EXPORT_SYMBOL_GPL(crypto_alloc_shash); |
482 | 550 | ||
483 | int crypto_register_shash(struct shash_alg *alg) | 551 | static int shash_prepare_alg(struct shash_alg *alg) |
484 | { | 552 | { |
485 | struct crypto_alg *base = &alg->base; | 553 | struct crypto_alg *base = &alg->base; |
486 | 554 | ||
487 | if (alg->digestsize > PAGE_SIZE / 8 || | 555 | if (alg->digestsize > PAGE_SIZE / 8 || |
488 | alg->descsize > PAGE_SIZE / 8) | 556 | alg->descsize > PAGE_SIZE / 8 || |
557 | alg->statesize > PAGE_SIZE / 8) | ||
489 | return -EINVAL; | 558 | return -EINVAL; |
490 | 559 | ||
491 | base->cra_type = &crypto_shash_type; | 560 | base->cra_type = &crypto_shash_type; |
492 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; | 561 | base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; |
493 | base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; | 562 | base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; |
494 | 563 | ||
564 | if (!alg->finup) | ||
565 | alg->finup = shash_finup_unaligned; | ||
566 | if (!alg->digest) | ||
567 | alg->digest = shash_digest_unaligned; | ||
568 | if (!alg->export) { | ||
569 | alg->export = shash_default_export; | ||
570 | alg->import = shash_default_import; | ||
571 | alg->statesize = alg->descsize; | ||
572 | } | ||
573 | if (!alg->setkey) | ||
574 | alg->setkey = shash_no_setkey; | ||
575 | |||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | int crypto_register_shash(struct shash_alg *alg) | ||
580 | { | ||
581 | struct crypto_alg *base = &alg->base; | ||
582 | int err; | ||
583 | |||
584 | err = shash_prepare_alg(alg); | ||
585 | if (err) | ||
586 | return err; | ||
587 | |||
495 | return crypto_register_alg(base); | 588 | return crypto_register_alg(base); |
496 | } | 589 | } |
497 | EXPORT_SYMBOL_GPL(crypto_register_shash); | 590 | EXPORT_SYMBOL_GPL(crypto_register_shash); |
@@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg) | |||
502 | } | 595 | } |
503 | EXPORT_SYMBOL_GPL(crypto_unregister_shash); | 596 | EXPORT_SYMBOL_GPL(crypto_unregister_shash); |
504 | 597 | ||
598 | int shash_register_instance(struct crypto_template *tmpl, | ||
599 | struct shash_instance *inst) | ||
600 | { | ||
601 | int err; | ||
602 | |||
603 | err = shash_prepare_alg(&inst->alg); | ||
604 | if (err) | ||
605 | return err; | ||
606 | |||
607 | return crypto_register_instance(tmpl, shash_crypto_instance(inst)); | ||
608 | } | ||
609 | EXPORT_SYMBOL_GPL(shash_register_instance); | ||
610 | |||
611 | void shash_free_instance(struct crypto_instance *inst) | ||
612 | { | ||
613 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
614 | kfree(shash_instance(inst)); | ||
615 | } | ||
616 | EXPORT_SYMBOL_GPL(shash_free_instance); | ||
617 | |||
618 | int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, | ||
619 | struct shash_alg *alg, | ||
620 | struct crypto_instance *inst) | ||
621 | { | ||
622 | return crypto_init_spawn2(&spawn->base, &alg->base, inst, | ||
623 | &crypto_shash_type); | ||
624 | } | ||
625 | EXPORT_SYMBOL_GPL(crypto_init_shash_spawn); | ||
626 | |||
627 | struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask) | ||
628 | { | ||
629 | struct crypto_alg *alg; | ||
630 | |||
631 | alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask); | ||
632 | return IS_ERR(alg) ? ERR_CAST(alg) : | ||
633 | container_of(alg, struct shash_alg, base); | ||
634 | } | ||
635 | EXPORT_SYMBOL_GPL(shash_attr_alg); | ||
636 | |||
505 | MODULE_LICENSE("GPL"); | 637 | MODULE_LICENSE("GPL"); |
506 | MODULE_DESCRIPTION("Synchronous cryptographic hash type"); | 638 | MODULE_DESCRIPTION("Synchronous cryptographic hash type"); |
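shash_prepare_alg() now installs defaults for everything except init/update/final: finup and digest fall back to the unaligned helpers, export/import default to copying the descriptor context (with statesize set to descsize), and a missing setkey becomes shash_no_setkey(). A driver for a simple unkeyed hash therefore only needs the three core callbacks. The registration below is a toy sketch, not from this patch; the "xor32" algorithm and its handlers are invented purely to show the shape, assuming <crypto/internal/hash.h>, <linux/module.h> and <linux/string.h>.

struct xor32_ctx {
	u32 sum;
};

static int xor32_init(struct shash_desc *desc)
{
	struct xor32_ctx *ctx = shash_desc_ctx(desc);

	ctx->sum = 0;
	return 0;
}

static int xor32_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct xor32_ctx *ctx = shash_desc_ctx(desc);

	while (len--)
		ctx->sum = (ctx->sum << 8 | ctx->sum >> 24) ^ *data++;
	return 0;
}

static int xor32_final(struct shash_desc *desc, u8 *out)
{
	struct xor32_ctx *ctx = shash_desc_ctx(desc);

	memcpy(out, &ctx->sum, 4);	/* toy digest, host endianness */
	return 0;
}

static struct shash_alg xor32_alg = {
	.digestsize	= 4,
	.init		= xor32_init,
	.update		= xor32_update,
	.final		= xor32_final,
	.descsize	= sizeof(struct xor32_ctx),
	.base		= {
		.cra_name	 = "xor32",
		.cra_driver_name = "xor32-demo",
		.cra_blocksize	 = 1,
		.cra_module	 = THIS_MODULE,
	}
};

crypto_register_shash(&xor32_alg) would then rely entirely on the new defaults for finup, digest, export, import and setkey.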
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index d59ba5079d14..aa3f84ccc786 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -45,6 +45,9 @@ | |||
45 | */ | 45 | */ |
46 | static unsigned int sec; | 46 | static unsigned int sec; |
47 | 47 | ||
48 | static char *alg = NULL; | ||
49 | static u32 type; | ||
50 | static u32 mask; | ||
48 | static int mode; | 51 | static int mode; |
49 | static char *tvmem[TVMEMSIZE]; | 52 | static char *tvmem[TVMEMSIZE]; |
50 | 53 | ||
@@ -716,6 +719,10 @@ static int do_test(int m) | |||
716 | ret += tcrypt_test("hmac(rmd160)"); | 719 | ret += tcrypt_test("hmac(rmd160)"); |
717 | break; | 720 | break; |
718 | 721 | ||
722 | case 109: | ||
723 | ret += tcrypt_test("vmac(aes)"); | ||
724 | break; | ||
725 | |||
719 | case 150: | 726 | case 150: |
720 | ret += tcrypt_test("ansi_cprng"); | 727 | ret += tcrypt_test("ansi_cprng"); |
721 | break; | 728 | break; |
@@ -885,6 +892,12 @@ static int do_test(int m) | |||
885 | return ret; | 892 | return ret; |
886 | } | 893 | } |
887 | 894 | ||
895 | static int do_alg_test(const char *alg, u32 type, u32 mask) | ||
896 | { | ||
897 | return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ? | ||
898 | 0 : -ENOENT; | ||
899 | } | ||
900 | |||
888 | static int __init tcrypt_mod_init(void) | 901 | static int __init tcrypt_mod_init(void) |
889 | { | 902 | { |
890 | int err = -ENOMEM; | 903 | int err = -ENOMEM; |
@@ -896,7 +909,11 @@ static int __init tcrypt_mod_init(void) | |||
896 | goto err_free_tv; | 909 | goto err_free_tv; |
897 | } | 910 | } |
898 | 911 | ||
899 | err = do_test(mode); | 912 | if (alg) |
913 | err = do_alg_test(alg, type, mask); | ||
914 | else | ||
915 | err = do_test(mode); | ||
916 | |||
900 | if (err) { | 917 | if (err) { |
901 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); | 918 | printk(KERN_ERR "tcrypt: one or more tests failed!\n"); |
902 | goto err_free_tv; | 919 | goto err_free_tv; |
@@ -928,6 +945,9 @@ static void __exit tcrypt_mod_fini(void) { } | |||
928 | module_init(tcrypt_mod_init); | 945 | module_init(tcrypt_mod_init); |
929 | module_exit(tcrypt_mod_fini); | 946 | module_exit(tcrypt_mod_fini); |
930 | 947 | ||
948 | module_param(alg, charp, 0); | ||
949 | module_param(type, uint, 0); | ||
950 | module_param(mask, uint, 0); | ||
931 | module_param(mode, int, 0); | 951 | module_param(mode, int, 0); |
932 | module_param(sec, uint, 0); | 952 | module_param(sec, uint, 0); |
933 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " | 953 | MODULE_PARM_DESC(sec, "Length in seconds of speed tests " |
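The new alg/type/mask parameters let tcrypt request a single algorithm by name instead of running a numbered test mode; do_alg_test() simply asks crypto_has_alg() whether it can be instantiated. For example, assuming the module is built as tcrypt.ko and CONFIG_CRYPTO_MANAGER is enabled, loading it with

	modprobe tcrypt alg='vmac(aes)'

causes vmac(aes) to be instantiated, which in turn runs its testmgr vectors at registration time.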
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index e9e9d84293b9..6d5b746637be 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, | |||
190 | 190 | ||
191 | hash_buff = xbuf[0]; | 191 | hash_buff = xbuf[0]; |
192 | 192 | ||
193 | ret = -EINVAL; | ||
194 | if (WARN_ON(template[i].psize > PAGE_SIZE)) | ||
195 | goto out; | ||
196 | |||
197 | memcpy(hash_buff, template[i].plaintext, template[i].psize); | 193 | memcpy(hash_buff, template[i].plaintext, template[i].psize); |
198 | sg_init_one(&sg[0], hash_buff, template[i].psize); | 194 | sg_init_one(&sg[0], hash_buff, template[i].psize); |
199 | 195 | ||
@@ -2252,6 +2248,15 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2252 | } | 2248 | } |
2253 | } | 2249 | } |
2254 | }, { | 2250 | }, { |
2251 | .alg = "vmac(aes)", | ||
2252 | .test = alg_test_hash, | ||
2253 | .suite = { | ||
2254 | .hash = { | ||
2255 | .vecs = aes_vmac128_tv_template, | ||
2256 | .count = VMAC_AES_TEST_VECTORS | ||
2257 | } | ||
2258 | } | ||
2259 | }, { | ||
2255 | .alg = "wp256", | 2260 | .alg = "wp256", |
2256 | .test = alg_test_hash, | 2261 | .test = alg_test_hash, |
2257 | .suite = { | 2262 | .suite = { |
@@ -2348,6 +2353,7 @@ static int alg_find_test(const char *alg) | |||
2348 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | 2353 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask) |
2349 | { | 2354 | { |
2350 | int i; | 2355 | int i; |
2356 | int j; | ||
2351 | int rc; | 2357 | int rc; |
2352 | 2358 | ||
2353 | if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { | 2359 | if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { |
@@ -2369,14 +2375,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) | |||
2369 | } | 2375 | } |
2370 | 2376 | ||
2371 | i = alg_find_test(alg); | 2377 | i = alg_find_test(alg); |
2372 | if (i < 0) | 2378 | j = alg_find_test(driver); |
2379 | if (i < 0 && j < 0) | ||
2373 | goto notest; | 2380 | goto notest; |
2374 | 2381 | ||
2375 | if (fips_enabled && !alg_test_descs[i].fips_allowed) | 2382 | if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) || |
2383 | (j >= 0 && !alg_test_descs[j].fips_allowed))) | ||
2376 | goto non_fips_alg; | 2384 | goto non_fips_alg; |
2377 | 2385 | ||
2378 | rc = alg_test_descs[i].test(alg_test_descs + i, driver, | 2386 | rc = 0; |
2379 | type, mask); | 2387 | if (i >= 0) |
2388 | rc |= alg_test_descs[i].test(alg_test_descs + i, driver, | ||
2389 | type, mask); | ||
2390 | if (j >= 0) | ||
2391 | rc |= alg_test_descs[j].test(alg_test_descs + j, driver, | ||
2392 | type, mask); | ||
2393 | |||
2380 | test_done: | 2394 | test_done: |
2381 | if (fips_enabled && rc) | 2395 | if (fips_enabled && rc) |
2382 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); | 2396 | panic("%s: %s alg self test failed in fips mode!\n", driver, alg); |
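alg_test() now looks up test descriptors by both the generic algorithm name and the driver string, so a table entry keyed on a specific implementation (if one exists) is exercised in addition to the generic suite, and in FIPS mode both matching entries must be marked fips_allowed before either test runs.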
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 69316228fc19..9963b18983ab 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { | |||
1654 | } | 1654 | } |
1655 | }; | 1655 | }; |
1656 | 1656 | ||
1657 | #define VMAC_AES_TEST_VECTORS 1 | ||
1658 | static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', | ||
1659 | '\x02', '\x03', '\x02', '\x02', | ||
1660 | '\x02', '\x04', '\x01', '\x07', | ||
1661 | '\x04', '\x01', '\x04', '\x03',}; | ||
1662 | static struct hash_testvec aes_vmac128_tv_template[] = { | ||
1663 | { | ||
1664 | .key = "\x00\x01\x02\x03\x04\x05\x06\x07" | ||
1665 | "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", | ||
1666 | .plaintext = vmac_string, | ||
1667 | .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", | ||
1668 | .psize = 128, | ||
1669 | .ksize = 16, | ||
1670 | }, | ||
1671 | }; | ||
1672 | |||
1657 | /* | 1673 | /* |
1658 | * SHA384 HMAC test vectors from RFC4231 | 1674 | * SHA384 HMAC test vectors from RFC4231 |
1659 | */ | 1675 | */ |
diff --git a/crypto/vmac.c b/crypto/vmac.c new file mode 100644 index 000000000000..0a9468e575de --- /dev/null +++ b/crypto/vmac.c | |||
@@ -0,0 +1,678 @@ | |||
1 | /* | ||
2 | * Modified to interface to the Linux kernel | ||
3 | * Copyright (c) 2009, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | */ | ||
18 | |||
19 | /* -------------------------------------------------------------------------- | ||
20 | * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. | ||
21 | * This implementation is hereby placed in the public domain. | ||
22 | * The authors offer no warranty. Use at your own risk. | ||
23 | * Please send bug reports to the authors. | ||
24 | * Last modified: 17 APR 08, 1700 PDT | ||
25 | * ----------------------------------------------------------------------- */ | ||
26 | |||
27 | #include <linux/init.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/crypto.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | #include <asm/byteorder.h> | ||
32 | #include <crypto/scatterwalk.h> | ||
33 | #include <crypto/vmac.h> | ||
34 | #include <crypto/internal/hash.h> | ||
35 | |||
36 | /* | ||
37 | * Constants and masks | ||
38 | */ | ||
39 | #define UINT64_C(x) x##ULL | ||
40 | const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ | ||
41 | const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ | ||
42 | const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ | ||
43 | const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ | ||
44 | const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ | ||
45 | |||
46 | #ifdef __LITTLE_ENDIAN | ||
47 | #define INDEX_HIGH 1 | ||
48 | #define INDEX_LOW 0 | ||
49 | #else | ||
50 | #define INDEX_HIGH 0 | ||
51 | #define INDEX_LOW 1 | ||
52 | #endif | ||
53 | |||
54 | /* | ||
55 | * The following routines are used in this implementation. They are | ||
56 | * written via macros to simulate zero-overhead call-by-reference. | ||
57 | * | ||
58 | * MUL64: 64x64->128-bit multiplication | ||
59 | * PMUL64: assumes top bits cleared on inputs | ||
60 | * ADD128: 128x128->128-bit addition | ||
61 | */ | ||
62 | |||
63 | #define ADD128(rh, rl, ih, il) \ | ||
64 | do { \ | ||
65 | u64 _il = (il); \ | ||
66 | (rl) += (_il); \ | ||
67 | if ((rl) < (_il)) \ | ||
68 | (rh)++; \ | ||
69 | (rh) += (ih); \ | ||
70 | } while (0) | ||
71 | |||
72 | #define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) | ||
73 | |||
74 | #define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ | ||
75 | do { \ | ||
76 | u64 _i1 = (i1), _i2 = (i2); \ | ||
77 | u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ | ||
78 | rh = MUL32(_i1>>32, _i2>>32); \ | ||
79 | rl = MUL32(_i1, _i2); \ | ||
80 | ADD128(rh, rl, (m >> 32), (m << 32)); \ | ||
81 | } while (0) | ||
82 | |||
83 | #define MUL64(rh, rl, i1, i2) \ | ||
84 | do { \ | ||
85 | u64 _i1 = (i1), _i2 = (i2); \ | ||
86 | u64 m1 = MUL32(_i1, _i2>>32); \ | ||
87 | u64 m2 = MUL32(_i1>>32, _i2); \ | ||
88 | rh = MUL32(_i1>>32, _i2>>32); \ | ||
89 | rl = MUL32(_i1, _i2); \ | ||
90 | ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ | ||
91 | ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ | ||
92 | } while (0) | ||
93 | |||
94 | /* | ||
95 | * For highest performance the L1 NH and L2 polynomial hashes should be | ||
96 | * carefully implemented to take advantage of one's target architecture. | ||
97 | * Here these two hash functions are defined multiple times; once for | ||
98 | * 64-bit architectures, once for 32-bit SSE2 architectures, and once | ||
99 | * for the remaining (32-bit) architectures. | ||
100 | * For each, nh_16 *must* be defined (works on multiples of 16 bytes). | ||
101 | * Optionally, nh_vmac_nhbytes can be defined (for multiples of | ||
102 | * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two | ||
103 | * NH computations at once). | ||
104 | */ | ||
105 | |||
106 | #ifdef CONFIG_64BIT | ||
107 | |||
108 | #define nh_16(mp, kp, nw, rh, rl) \ | ||
109 | do { \ | ||
110 | int i; u64 th, tl; \ | ||
111 | rh = rl = 0; \ | ||
112 | for (i = 0; i < nw; i += 2) { \ | ||
113 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
114 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
115 | ADD128(rh, rl, th, tl); \ | ||
116 | } \ | ||
117 | } while (0) | ||
118 | |||
119 | #define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ | ||
120 | do { \ | ||
121 | int i; u64 th, tl; \ | ||
122 | rh1 = rl1 = rh = rl = 0; \ | ||
123 | for (i = 0; i < nw; i += 2) { \ | ||
124 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
125 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
126 | ADD128(rh, rl, th, tl); \ | ||
127 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ | ||
128 | le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | ||
129 | ADD128(rh1, rl1, th, tl); \ | ||
130 | } \ | ||
131 | } while (0) | ||
132 | |||
133 | #if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ | ||
134 | #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ | ||
135 | do { \ | ||
136 | int i; u64 th, tl; \ | ||
137 | rh = rl = 0; \ | ||
138 | for (i = 0; i < nw; i += 8) { \ | ||
139 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
140 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
141 | ADD128(rh, rl, th, tl); \ | ||
142 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ | ||
143 | le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | ||
144 | ADD128(rh, rl, th, tl); \ | ||
145 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ | ||
146 | le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | ||
147 | ADD128(rh, rl, th, tl); \ | ||
148 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ | ||
149 | le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | ||
150 | ADD128(rh, rl, th, tl); \ | ||
151 | } \ | ||
152 | } while (0) | ||
153 | |||
154 | #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ | ||
155 | do { \ | ||
156 | int i; u64 th, tl; \ | ||
157 | rh1 = rl1 = rh = rl = 0; \ | ||
158 | for (i = 0; i < nw; i += 8) { \ | ||
159 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ | ||
160 | le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ | ||
161 | ADD128(rh, rl, th, tl); \ | ||
162 | MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ | ||
163 | le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ | ||
164 | ADD128(rh1, rl1, th, tl); \ | ||
165 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ | ||
166 | le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ | ||
167 | ADD128(rh, rl, th, tl); \ | ||
168 | MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ | ||
169 | le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ | ||
170 | ADD128(rh1, rl1, th, tl); \ | ||
171 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ | ||
172 | le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ | ||
173 | ADD128(rh, rl, th, tl); \ | ||
174 | MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ | ||
175 | le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ | ||
176 | ADD128(rh1, rl1, th, tl); \ | ||
177 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ | ||
178 | le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ | ||
179 | ADD128(rh, rl, th, tl); \ | ||
180 | MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ | ||
181 | le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ | ||
182 | ADD128(rh1, rl1, th, tl); \ | ||
183 | } \ | ||
184 | } while (0) | ||
185 | #endif | ||
186 | |||
187 | #define poly_step(ah, al, kh, kl, mh, ml) \ | ||
188 | do { \ | ||
189 | u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ | ||
190 | /* compute ab*cd, put bd into result registers */ \ | ||
191 | PMUL64(t3h, t3l, al, kh); \ | ||
192 | PMUL64(t2h, t2l, ah, kl); \ | ||
193 | PMUL64(t1h, t1l, ah, 2*kh); \ | ||
194 | PMUL64(ah, al, al, kl); \ | ||
195 | /* add 2 * ac to result */ \ | ||
196 | ADD128(ah, al, t1h, t1l); \ | ||
197 | /* add together ad + bc */ \ | ||
198 | ADD128(t2h, t2l, t3h, t3l); \ | ||
199 | /* now (ah,al), (t2l,2*t2h) need summing */ \ | ||
200 | /* first add the high registers, carrying into t2h */ \ | ||
201 | ADD128(t2h, ah, z, t2l); \ | ||
202 | /* double t2h and add top bit of ah */ \ | ||
203 | t2h = 2 * t2h + (ah >> 63); \ | ||
204 | ah &= m63; \ | ||
205 | /* now add the low registers */ \ | ||
206 | ADD128(ah, al, mh, ml); \ | ||
207 | ADD128(ah, al, z, t2h); \ | ||
208 | } while (0) | ||
209 | |||
210 | #else /* ! CONFIG_64BIT */ | ||
211 | |||
212 | #ifndef nh_16 | ||
213 | #define nh_16(mp, kp, nw, rh, rl) \ | ||
214 | do { \ | ||
215 | u64 t1, t2, m1, m2, t; \ | ||
216 | int i; \ | ||
217 | rh = rl = t = 0; \ | ||
218 | for (i = 0; i < nw; i += 2) { \ | ||
219 | t1 = le64_to_cpup(mp+i) + kp[i]; \ | ||
220 | t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ | ||
221 | m2 = MUL32(t1 >> 32, t2); \ | ||
222 | m1 = MUL32(t1, t2 >> 32); \ | ||
223 | ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ | ||
224 | MUL32(t1, t2)); \ | ||
225 | rh += (u64)(u32)(m1 >> 32) \ | ||
226 | + (u32)(m2 >> 32); \ | ||
227 | t += (u64)(u32)m1 + (u32)m2; \ | ||
228 | } \ | ||
229 | ADD128(rh, rl, (t >> 32), (t << 32)); \ | ||
230 | } while (0) | ||
231 | #endif | ||
232 | |||
233 | static void poly_step_func(u64 *ahi, u64 *alo, | ||
234 | const u64 *kh, const u64 *kl, | ||
235 | const u64 *mh, const u64 *ml) | ||
236 | { | ||
237 | #define a0 (*(((u32 *)alo)+INDEX_LOW)) | ||
238 | #define a1 (*(((u32 *)alo)+INDEX_HIGH)) | ||
239 | #define a2 (*(((u32 *)ahi)+INDEX_LOW)) | ||
240 | #define a3 (*(((u32 *)ahi)+INDEX_HIGH)) | ||
241 | #define k0 (*(((u32 *)kl)+INDEX_LOW)) | ||
242 | #define k1 (*(((u32 *)kl)+INDEX_HIGH)) | ||
243 | #define k2 (*(((u32 *)kh)+INDEX_LOW)) | ||
244 | #define k3 (*(((u32 *)kh)+INDEX_HIGH)) | ||
245 | |||
246 | u64 p, q, t; | ||
247 | u32 t2; | ||
248 | |||
249 | p = MUL32(a3, k3); | ||
250 | p += p; | ||
251 | p += *(u64 *)mh; | ||
252 | p += MUL32(a0, k2); | ||
253 | p += MUL32(a1, k1); | ||
254 | p += MUL32(a2, k0); | ||
255 | t = (u32)(p); | ||
256 | p >>= 32; | ||
257 | p += MUL32(a0, k3); | ||
258 | p += MUL32(a1, k2); | ||
259 | p += MUL32(a2, k1); | ||
260 | p += MUL32(a3, k0); | ||
261 | t |= ((u64)((u32)p & 0x7fffffff)) << 32; | ||
262 | p >>= 31; | ||
263 | p += (u64)(((u32 *)ml)[INDEX_LOW]); | ||
264 | p += MUL32(a0, k0); | ||
265 | q = MUL32(a1, k3); | ||
266 | q += MUL32(a2, k2); | ||
267 | q += MUL32(a3, k1); | ||
268 | q += q; | ||
269 | p += q; | ||
270 | t2 = (u32)(p); | ||
271 | p >>= 32; | ||
272 | p += (u64)(((u32 *)ml)[INDEX_HIGH]); | ||
273 | p += MUL32(a0, k1); | ||
274 | p += MUL32(a1, k0); | ||
275 | q = MUL32(a2, k3); | ||
276 | q += MUL32(a3, k2); | ||
277 | q += q; | ||
278 | p += q; | ||
279 | *(u64 *)(alo) = (p << 32) | t2; | ||
280 | p >>= 32; | ||
281 | *(u64 *)(ahi) = p + t; | ||
282 | |||
283 | #undef a0 | ||
284 | #undef a1 | ||
285 | #undef a2 | ||
286 | #undef a3 | ||
287 | #undef k0 | ||
288 | #undef k1 | ||
289 | #undef k2 | ||
290 | #undef k3 | ||
291 | } | ||
292 | |||
293 | #define poly_step(ah, al, kh, kl, mh, ml) \ | ||
294 | poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) | ||
295 | |||
296 | #endif /* end of specialized NH and poly definitions */ | ||
297 | |||
298 | /* At least nh_16 is defined. Define the others as needed here. */ | ||
299 | #ifndef nh_16_2 | ||
300 | #define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ | ||
301 | do { \ | ||
302 | nh_16(mp, kp, nw, rh, rl); \ | ||
303 | nh_16(mp, ((kp)+2), nw, rh2, rl2); \ | ||
304 | } while (0) | ||
305 | #endif | ||
306 | #ifndef nh_vmac_nhbytes | ||
307 | #define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ | ||
308 | nh_16(mp, kp, nw, rh, rl) | ||
309 | #endif | ||
310 | #ifndef nh_vmac_nhbytes_2 | ||
311 | #define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ | ||
312 | do { \ | ||
313 | nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ | ||
314 | nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ | ||
315 | } while (0) | ||
316 | #endif | ||
317 | |||
318 | static void vhash_abort(struct vmac_ctx *ctx) | ||
319 | { | ||
320 | ctx->polytmp[0] = ctx->polykey[0] ; | ||
321 | ctx->polytmp[1] = ctx->polykey[1] ; | ||
322 | ctx->first_block_processed = 0; | ||
323 | } | ||
324 | |||
325 | static u64 l3hash(u64 p1, u64 p2, | ||
326 | u64 k1, u64 k2, u64 len) | ||
327 | { | ||
328 | u64 rh, rl, t, z = 0; | ||
329 | |||
330 | /* fully reduce (p1,p2)+(len,0) mod p127 */ | ||
331 | t = p1 >> 63; | ||
332 | p1 &= m63; | ||
333 | ADD128(p1, p2, len, t); | ||
334 | /* At this point, (p1,p2) is at most 2^127+(len<<64) */ | ||
335 | t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); | ||
336 | ADD128(p1, p2, z, t); | ||
337 | p1 &= m63; | ||
338 | |||
339 | /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ | ||
340 | t = p1 + (p2 >> 32); | ||
341 | t += (t >> 32); | ||
342 | t += (u32)t > 0xfffffffeu; | ||
343 | p1 += (t >> 32); | ||
344 | p2 += (p1 << 32); | ||
345 | |||
346 | /* compute (p1+k1)%p64 and (p2+k2)%p64 */ | ||
347 | p1 += k1; | ||
348 | p1 += (0 - (p1 < k1)) & 257; | ||
349 | p2 += k2; | ||
350 | p2 += (0 - (p2 < k2)) & 257; | ||
351 | |||
352 | /* compute (p1+k1)*(p2+k2)%p64 */ | ||
353 | MUL64(rh, rl, p1, p2); | ||
354 | t = rh >> 56; | ||
355 | ADD128(t, rl, z, rh); | ||
356 | rh <<= 8; | ||
357 | ADD128(t, rl, z, rh); | ||
358 | t += t << 8; | ||
359 | rl += t; | ||
360 | rl += (0 - (rl < t)) & 257; | ||
361 | rl += (0 - (rl > p64-1)) & 257; | ||
362 | return rl; | ||
363 | } | ||
364 | |||
365 | static void vhash_update(const unsigned char *m, | ||
366 | unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */ | ||
367 | struct vmac_ctx *ctx) | ||
368 | { | ||
369 | u64 rh, rl, *mptr; | ||
370 | const u64 *kptr = (u64 *)ctx->nhkey; | ||
371 | int i; | ||
372 | u64 ch, cl; | ||
373 | u64 pkh = ctx->polykey[0]; | ||
374 | u64 pkl = ctx->polykey[1]; | ||
375 | |||
376 | mptr = (u64 *)m; | ||
377 | i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ | ||
378 | |||
379 | ch = ctx->polytmp[0]; | ||
380 | cl = ctx->polytmp[1]; | ||
381 | |||
382 | if (!ctx->first_block_processed) { | ||
383 | ctx->first_block_processed = 1; | ||
384 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
385 | rh &= m62; | ||
386 | ADD128(ch, cl, rh, rl); | ||
387 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
388 | i--; | ||
389 | } | ||
390 | |||
391 | while (i--) { | ||
392 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
393 | rh &= m62; | ||
394 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
395 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
396 | } | ||
397 | |||
398 | ctx->polytmp[0] = ch; | ||
399 | ctx->polytmp[1] = cl; | ||
400 | } | ||
401 | |||
402 | static u64 vhash(unsigned char m[], unsigned int mbytes, | ||
403 | u64 *tagl, struct vmac_ctx *ctx) | ||
404 | { | ||
405 | u64 rh, rl, *mptr; | ||
406 | const u64 *kptr = (u64 *)ctx->nhkey; | ||
407 | int i, remaining; | ||
408 | u64 ch, cl; | ||
409 | u64 pkh = ctx->polykey[0]; | ||
410 | u64 pkl = ctx->polykey[1]; | ||
411 | |||
412 | mptr = (u64 *)m; | ||
413 | i = mbytes / VMAC_NHBYTES; | ||
414 | remaining = mbytes % VMAC_NHBYTES; | ||
415 | |||
416 | if (ctx->first_block_processed) { | ||
417 | ch = ctx->polytmp[0]; | ||
418 | cl = ctx->polytmp[1]; | ||
419 | } else if (i) { | ||
420 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); | ||
421 | ch &= m62; | ||
422 | ADD128(ch, cl, pkh, pkl); | ||
423 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
424 | i--; | ||
425 | } else if (remaining) { | ||
426 | nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); | ||
427 | ch &= m62; | ||
428 | ADD128(ch, cl, pkh, pkl); | ||
429 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
430 | goto do_l3; | ||
431 | } else {/* Empty String */ | ||
432 | ch = pkh; cl = pkl; | ||
433 | goto do_l3; | ||
434 | } | ||
435 | |||
436 | while (i--) { | ||
437 | nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); | ||
438 | rh &= m62; | ||
439 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
440 | mptr += (VMAC_NHBYTES/sizeof(u64)); | ||
441 | } | ||
442 | if (remaining) { | ||
443 | nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); | ||
444 | rh &= m62; | ||
445 | poly_step(ch, cl, pkh, pkl, rh, rl); | ||
446 | } | ||
447 | |||
448 | do_l3: | ||
449 | vhash_abort(ctx); | ||
450 | remaining *= 8; | ||
451 | return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); | ||
452 | } | ||
453 | |||
454 | static u64 vmac(unsigned char m[], unsigned int mbytes, | ||
455 | unsigned char n[16], u64 *tagl, | ||
456 | struct vmac_ctx_t *ctx) | ||
457 | { | ||
458 | u64 *in_n, *out_p; | ||
459 | u64 p, h; | ||
460 | int i; | ||
461 | |||
462 | in_n = ctx->__vmac_ctx.cached_nonce; | ||
463 | out_p = ctx->__vmac_ctx.cached_aes; | ||
464 | |||
465 | i = n[15] & 1; | ||
466 | if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { | ||
467 | in_n[0] = *(u64 *)(n); | ||
468 | in_n[1] = *(u64 *)(n+8); | ||
469 | ((unsigned char *)in_n)[15] &= 0xFE; | ||
470 | crypto_cipher_encrypt_one(ctx->child, | ||
471 | (unsigned char *)out_p, (unsigned char *)in_n); | ||
472 | |||
473 | ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); | ||
474 | } | ||
475 | p = be64_to_cpup(out_p + i); | ||
476 | h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); | ||
477 | return p + h; | ||
478 | } | ||
479 | |||
480 | static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) | ||
481 | { | ||
482 | u64 in[2] = {0}, out[2]; | ||
483 | unsigned i; | ||
484 | int err = 0; | ||
485 | |||
486 | err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); | ||
487 | if (err) | ||
488 | return err; | ||
489 | |||
490 | /* Fill nh key */ | ||
491 | ((unsigned char *)in)[0] = 0x80; | ||
492 | for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { | ||
493 | crypto_cipher_encrypt_one(ctx->child, | ||
494 | (unsigned char *)out, (unsigned char *)in); | ||
495 | ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); | ||
496 | ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); | ||
497 | ((unsigned char *)in)[15] += 1; | ||
498 | } | ||
499 | |||
500 | /* Fill poly key */ | ||
501 | ((unsigned char *)in)[0] = 0xC0; | ||
502 | in[1] = 0; | ||
503 | for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { | ||
504 | crypto_cipher_encrypt_one(ctx->child, | ||
505 | (unsigned char *)out, (unsigned char *)in); | ||
506 | ctx->__vmac_ctx.polytmp[i] = | ||
507 | ctx->__vmac_ctx.polykey[i] = | ||
508 | be64_to_cpup(out) & mpoly; | ||
509 | ctx->__vmac_ctx.polytmp[i+1] = | ||
510 | ctx->__vmac_ctx.polykey[i+1] = | ||
511 | be64_to_cpup(out+1) & mpoly; | ||
512 | ((unsigned char *)in)[15] += 1; | ||
513 | } | ||
514 | |||
515 | /* Fill ip key */ | ||
516 | ((unsigned char *)in)[0] = 0xE0; | ||
517 | in[1] = 0; | ||
518 | for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { | ||
519 | do { | ||
520 | crypto_cipher_encrypt_one(ctx->child, | ||
521 | (unsigned char *)out, (unsigned char *)in); | ||
522 | ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); | ||
523 | ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); | ||
524 | ((unsigned char *)in)[15] += 1; | ||
525 | } while (ctx->__vmac_ctx.l3key[i] >= p64 | ||
526 | || ctx->__vmac_ctx.l3key[i+1] >= p64); | ||
527 | } | ||
528 | |||
529 | /* Invalidate nonce/aes cache and reset other elements */ | ||
530 | ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ | ||
531 | ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ | ||
532 | ctx->__vmac_ctx.first_block_processed = 0; | ||
533 | |||
534 | return err; | ||
535 | } | ||
536 | |||
537 | static int vmac_setkey(struct crypto_shash *parent, | ||
538 | const u8 *key, unsigned int keylen) | ||
539 | { | ||
540 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
541 | |||
542 | if (keylen != VMAC_KEY_LEN) { | ||
543 | crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | |||
547 | return vmac_set_key((u8 *)key, ctx); | ||
548 | } | ||
549 | |||
550 | static int vmac_init(struct shash_desc *pdesc) | ||
551 | { | ||
552 | struct crypto_shash *parent = pdesc->tfm; | ||
553 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
554 | |||
555 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | ||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | static int vmac_update(struct shash_desc *pdesc, const u8 *p, | ||
560 | unsigned int len) | ||
561 | { | ||
562 | struct crypto_shash *parent = pdesc->tfm; | ||
563 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
564 | |||
565 | vhash_update(p, len, &ctx->__vmac_ctx); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static int vmac_final(struct shash_desc *pdesc, u8 *out) | ||
571 | { | ||
572 | struct crypto_shash *parent = pdesc->tfm; | ||
573 | struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); | ||
574 | vmac_t mac; | ||
575 | u8 nonce[16] = {}; | ||
576 | |||
577 | mac = vmac(NULL, 0, nonce, NULL, ctx); | ||
578 | memcpy(out, &mac, sizeof(vmac_t)); | ||
579 | memset(&mac, 0, sizeof(vmac_t)); | ||
580 | memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); | ||
581 | return 0; | ||
582 | } | ||
583 | |||
584 | static int vmac_init_tfm(struct crypto_tfm *tfm) | ||
585 | { | ||
586 | struct crypto_cipher *cipher; | ||
587 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | ||
588 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | ||
589 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | ||
590 | |||
591 | cipher = crypto_spawn_cipher(spawn); | ||
592 | if (IS_ERR(cipher)) | ||
593 | return PTR_ERR(cipher); | ||
594 | |||
595 | ctx->child = cipher; | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static void vmac_exit_tfm(struct crypto_tfm *tfm) | ||
600 | { | ||
601 | struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); | ||
602 | crypto_free_cipher(ctx->child); | ||
603 | } | ||
604 | |||
605 | static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
606 | { | ||
607 | struct shash_instance *inst; | ||
608 | struct crypto_alg *alg; | ||
609 | int err; | ||
610 | |||
611 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); | ||
612 | if (err) | ||
613 | return err; | ||
614 | |||
615 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | ||
616 | CRYPTO_ALG_TYPE_MASK); | ||
617 | if (IS_ERR(alg)) | ||
618 | return PTR_ERR(alg); | ||
619 | |||
620 | inst = shash_alloc_instance("vmac", alg); | ||
621 | err = PTR_ERR(inst); | ||
622 | if (IS_ERR(inst)) | ||
623 | goto out_put_alg; | ||
624 | |||
625 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, | ||
626 | shash_crypto_instance(inst), | ||
627 | CRYPTO_ALG_TYPE_MASK); | ||
628 | if (err) | ||
629 | goto out_free_inst; | ||
630 | |||
631 | inst->alg.base.cra_priority = alg->cra_priority; | ||
632 | inst->alg.base.cra_blocksize = alg->cra_blocksize; | ||
633 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | ||
634 | |||
635 | inst->alg.digestsize = sizeof(vmac_t); | ||
636 | inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); | ||
637 | inst->alg.base.cra_init = vmac_init_tfm; | ||
638 | inst->alg.base.cra_exit = vmac_exit_tfm; | ||
639 | |||
640 | inst->alg.init = vmac_init; | ||
641 | inst->alg.update = vmac_update; | ||
642 | inst->alg.final = vmac_final; | ||
643 | inst->alg.setkey = vmac_setkey; | ||
644 | |||
645 | err = shash_register_instance(tmpl, inst); | ||
646 | if (err) { | ||
647 | out_free_inst: | ||
648 | shash_free_instance(shash_crypto_instance(inst)); | ||
649 | } | ||
650 | |||
651 | out_put_alg: | ||
652 | crypto_mod_put(alg); | ||
653 | return err; | ||
654 | } | ||
655 | |||
656 | static struct crypto_template vmac_tmpl = { | ||
657 | .name = "vmac", | ||
658 | .create = vmac_create, | ||
659 | .free = shash_free_instance, | ||
660 | .module = THIS_MODULE, | ||
661 | }; | ||
662 | |||
663 | static int __init vmac_module_init(void) | ||
664 | { | ||
665 | return crypto_register_template(&vmac_tmpl); | ||
666 | } | ||
667 | |||
668 | static void __exit vmac_module_exit(void) | ||
669 | { | ||
670 | crypto_unregister_template(&vmac_tmpl); | ||
671 | } | ||
672 | |||
673 | module_init(vmac_module_init); | ||
674 | module_exit(vmac_module_exit); | ||
675 | |||
676 | MODULE_LICENSE("GPL"); | ||
677 | MODULE_DESCRIPTION("VMAC hash algorithm"); | ||
678 | |||
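The new vmac.c above builds its tag in the layers described by the file's own comments: the NH hash compresses the message with 64x64->128-bit multiply-accumulates (MUL64/ADD128), the polynomial hash folds the NH outputs together, l3hash() reduces the result to 64 bits, and vmac() adds that value to one half of AES_K(nonce). As a stand-alone illustration of the multiply building block only (plain userspace C, assuming a compiler that provides unsigned __int128; this sketch is not part of the patch), the code below reproduces the schoolbook 32-bit decomposition that the MUL64/ADD128 macros perform and checks it against the native 128-bit product:

/* Stand-alone illustration of the MUL64/ADD128 idea used by vmac.c.
 * Hypothetical userspace example; requires unsigned __int128
 * (gcc/clang on 64-bit targets). Not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* 64x64 -> 128-bit multiply built from four 32x32 -> 64 multiplies,
 * mirroring the kernel's MUL64()/ADD128() macros. */
static void mul64(uint64_t *rh, uint64_t *rl, uint64_t a, uint64_t b)
{
	uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
	uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
	uint64_t m1 = a_lo * b_hi;		/* cross terms */
	uint64_t m2 = a_hi * b_lo;
	uint64_t lo = a_lo * b_lo;
	uint64_t hi = a_hi * b_hi;
	uint64_t t;

	/* ADD128-style carry propagation for each cross term shifted by 32 */
	t = lo + (m1 << 32);
	hi += (t < lo) + (m1 >> 32);
	lo = t;
	t = lo + (m2 << 32);
	hi += (t < lo) + (m2 >> 32);
	lo = t;

	*rh = hi;
	*rl = lo;
}

int main(void)
{
	uint64_t a = 0xfffffffffffffeffULL;	/* p64 constant from vmac.c */
	uint64_t b = 0x1fffffff1fffffffULL;	/* mpoly constant from vmac.c */
	uint64_t rh, rl;
	unsigned __int128 ref = (unsigned __int128)a * b;

	mul64(&rh, &rl, a, b);
	printf("split:  %016llx%016llx\n",
	       (unsigned long long)rh, (unsigned long long)rl);
	printf("native: %016llx%016llx\n",
	       (unsigned long long)(uint64_t)(ref >> 64),
	       (unsigned long long)(uint64_t)ref);
	return 0;
}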
diff --git a/crypto/xcbc.c b/crypto/xcbc.c index b63b633e549c..bb7b67fba349 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c | |||
@@ -19,211 +19,142 @@ | |||
19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> | 19 | * Kazunori Miyazawa <miyazawa@linux-ipv6.org> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <crypto/scatterwalk.h> | 22 | #include <crypto/internal/hash.h> |
23 | #include <linux/crypto.h> | ||
24 | #include <linux/err.h> | 23 | #include <linux/err.h> |
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
27 | #include <linux/mm.h> | ||
28 | #include <linux/rtnetlink.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | 25 | ||
32 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, | 26 | static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, |
33 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, | 27 | 0x02020202, 0x02020202, 0x02020202, 0x02020202, |
34 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; | 28 | 0x03030303, 0x03030303, 0x03030303, 0x03030303}; |
29 | |||
35 | /* | 30 | /* |
36 | * +------------------------ | 31 | * +------------------------ |
37 | * | <parent tfm> | 32 | * | <parent tfm> |
38 | * +------------------------ | 33 | * +------------------------ |
39 | * | crypto_xcbc_ctx | 34 | * | xcbc_tfm_ctx |
40 | * +------------------------ | 35 | * +------------------------ |
41 | * | odds (block size) | 36 | * | consts (block size * 2) |
42 | * +------------------------ | 37 | * +------------------------ |
43 | * | prev (block size) | 38 | */ |
39 | struct xcbc_tfm_ctx { | ||
40 | struct crypto_cipher *child; | ||
41 | u8 ctx[]; | ||
42 | }; | ||
43 | |||
44 | /* | ||
44 | * +------------------------ | 45 | * +------------------------ |
45 | * | key (block size) | 46 | * | <shash desc> |
46 | * +------------------------ | 47 | * +------------------------ |
47 | * | consts (block size * 3) | 48 | * | xcbc_desc_ctx |
49 | * +------------------------ | ||
50 | * | odds (block size) | ||
51 | * +------------------------ | ||
52 | * | prev (block size) | ||
48 | * +------------------------ | 53 | * +------------------------ |
49 | */ | 54 | */ |
50 | struct crypto_xcbc_ctx { | 55 | struct xcbc_desc_ctx { |
51 | struct crypto_cipher *child; | ||
52 | u8 *odds; | ||
53 | u8 *prev; | ||
54 | u8 *key; | ||
55 | u8 *consts; | ||
56 | void (*xor)(u8 *a, const u8 *b, unsigned int bs); | ||
57 | unsigned int keylen; | ||
58 | unsigned int len; | 56 | unsigned int len; |
57 | u8 ctx[]; | ||
59 | }; | 58 | }; |
60 | 59 | ||
61 | static void xor_128(u8 *a, const u8 *b, unsigned int bs) | 60 | static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, |
62 | { | 61 | const u8 *inkey, unsigned int keylen) |
63 | ((u32 *)a)[0] ^= ((u32 *)b)[0]; | ||
64 | ((u32 *)a)[1] ^= ((u32 *)b)[1]; | ||
65 | ((u32 *)a)[2] ^= ((u32 *)b)[2]; | ||
66 | ((u32 *)a)[3] ^= ((u32 *)b)[3]; | ||
67 | } | ||
68 | |||
69 | static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent, | ||
70 | struct crypto_xcbc_ctx *ctx) | ||
71 | { | 62 | { |
72 | int bs = crypto_hash_blocksize(parent); | 63 | unsigned long alignmask = crypto_shash_alignmask(parent); |
64 | struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); | ||
65 | int bs = crypto_shash_blocksize(parent); | ||
66 | u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | ||
73 | int err = 0; | 67 | int err = 0; |
74 | u8 key1[bs]; | 68 | u8 key1[bs]; |
75 | 69 | ||
76 | if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) | 70 | if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) |
77 | return err; | 71 | return err; |
78 | 72 | ||
79 | crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts); | 73 | crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); |
74 | crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); | ||
75 | crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); | ||
80 | 76 | ||
81 | return crypto_cipher_setkey(ctx->child, key1, bs); | 77 | return crypto_cipher_setkey(ctx->child, key1, bs); |
82 | } | ||
83 | |||
84 | static int crypto_xcbc_digest_setkey(struct crypto_hash *parent, | ||
85 | const u8 *inkey, unsigned int keylen) | ||
86 | { | ||
87 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | ||
88 | |||
89 | if (keylen != crypto_cipher_blocksize(ctx->child)) | ||
90 | return -EINVAL; | ||
91 | 78 | ||
92 | ctx->keylen = keylen; | ||
93 | memcpy(ctx->key, inkey, keylen); | ||
94 | ctx->consts = (u8*)ks; | ||
95 | |||
96 | return _crypto_xcbc_digest_setkey(parent, ctx); | ||
97 | } | 79 | } |
98 | 80 | ||
99 | static int crypto_xcbc_digest_init(struct hash_desc *pdesc) | 81 | static int crypto_xcbc_digest_init(struct shash_desc *pdesc) |
100 | { | 82 | { |
101 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm); | 83 | unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); |
102 | int bs = crypto_hash_blocksize(pdesc->tfm); | 84 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
85 | int bs = crypto_shash_blocksize(pdesc->tfm); | ||
86 | u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; | ||
103 | 87 | ||
104 | ctx->len = 0; | 88 | ctx->len = 0; |
105 | memset(ctx->odds, 0, bs); | 89 | memset(prev, 0, bs); |
106 | memset(ctx->prev, 0, bs); | ||
107 | 90 | ||
108 | return 0; | 91 | return 0; |
109 | } | 92 | } |
110 | 93 | ||
111 | static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, | 94 | static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, |
112 | struct scatterlist *sg, | 95 | unsigned int len) |
113 | unsigned int nbytes) | ||
114 | { | 96 | { |
115 | struct crypto_hash *parent = pdesc->tfm; | 97 | struct crypto_shash *parent = pdesc->tfm; |
116 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | 98 | unsigned long alignmask = crypto_shash_alignmask(parent); |
117 | struct crypto_cipher *tfm = ctx->child; | 99 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); |
118 | int bs = crypto_hash_blocksize(parent); | 100 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); |
119 | 101 | struct crypto_cipher *tfm = tctx->child; | |
120 | for (;;) { | 102 | int bs = crypto_shash_blocksize(parent); |
121 | struct page *pg = sg_page(sg); | 103 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); |
122 | unsigned int offset = sg->offset; | 104 | u8 *prev = odds + bs; |
123 | unsigned int slen = sg->length; | 105 | |
124 | 106 | /* checking the data can fill the block */ | |
125 | if (unlikely(slen > nbytes)) | 107 | if ((ctx->len + len) <= bs) { |
126 | slen = nbytes; | 108 | memcpy(odds + ctx->len, p, len); |
127 | 109 | ctx->len += len; | |
128 | nbytes -= slen; | 110 | return 0; |
129 | |||
130 | while (slen > 0) { | ||
131 | unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | ||
132 | char *p = crypto_kmap(pg, 0) + offset; | ||
133 | |||
134 | /* checking the data can fill the block */ | ||
135 | if ((ctx->len + len) <= bs) { | ||
136 | memcpy(ctx->odds + ctx->len, p, len); | ||
137 | ctx->len += len; | ||
138 | slen -= len; | ||
139 | |||
140 | /* checking the rest of the page */ | ||
141 | if (len + offset >= PAGE_SIZE) { | ||
142 | offset = 0; | ||
143 | pg++; | ||
144 | } else | ||
145 | offset += len; | ||
146 | |||
147 | crypto_kunmap(p, 0); | ||
148 | crypto_yield(pdesc->flags); | ||
149 | continue; | ||
150 | } | ||
151 | |||
152 | /* filling odds with new data and encrypting it */ | ||
153 | memcpy(ctx->odds + ctx->len, p, bs - ctx->len); | ||
154 | len -= bs - ctx->len; | ||
155 | p += bs - ctx->len; | ||
156 | |||
157 | ctx->xor(ctx->prev, ctx->odds, bs); | ||
158 | crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); | ||
159 | |||
160 | /* clearing the length */ | ||
161 | ctx->len = 0; | ||
162 | |||
163 | /* encrypting the rest of data */ | ||
164 | while (len > bs) { | ||
165 | ctx->xor(ctx->prev, p, bs); | ||
166 | crypto_cipher_encrypt_one(tfm, ctx->prev, | ||
167 | ctx->prev); | ||
168 | p += bs; | ||
169 | len -= bs; | ||
170 | } | ||
171 | |||
172 | /* keeping the surplus of blocksize */ | ||
173 | if (len) { | ||
174 | memcpy(ctx->odds, p, len); | ||
175 | ctx->len = len; | ||
176 | } | ||
177 | crypto_kunmap(p, 0); | ||
178 | crypto_yield(pdesc->flags); | ||
179 | slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | ||
180 | offset = 0; | ||
181 | pg++; | ||
182 | } | ||
183 | |||
184 | if (!nbytes) | ||
185 | break; | ||
186 | sg = scatterwalk_sg_next(sg); | ||
187 | } | 111 | } |
188 | 112 | ||
189 | return 0; | 113 | /* filling odds with new data and encrypting it */ |
190 | } | 114 | memcpy(odds + ctx->len, p, bs - ctx->len); |
115 | len -= bs - ctx->len; | ||
116 | p += bs - ctx->len; | ||
191 | 117 | ||
192 | static int crypto_xcbc_digest_update(struct hash_desc *pdesc, | 118 | crypto_xor(prev, odds, bs); |
193 | struct scatterlist *sg, | 119 | crypto_cipher_encrypt_one(tfm, prev, prev); |
194 | unsigned int nbytes) | ||
195 | { | ||
196 | if (WARN_ON_ONCE(in_irq())) | ||
197 | return -EDEADLK; | ||
198 | return crypto_xcbc_digest_update2(pdesc, sg, nbytes); | ||
199 | } | ||
200 | 120 | ||
201 | static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) | 121 | /* clearing the length */ |
202 | { | 122 | ctx->len = 0; |
203 | struct crypto_hash *parent = pdesc->tfm; | ||
204 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); | ||
205 | struct crypto_cipher *tfm = ctx->child; | ||
206 | int bs = crypto_hash_blocksize(parent); | ||
207 | int err = 0; | ||
208 | |||
209 | if (ctx->len == bs) { | ||
210 | u8 key2[bs]; | ||
211 | 123 | ||
212 | if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) | 124 | /* encrypting the rest of data */ |
213 | return err; | 125 | while (len > bs) { |
126 | crypto_xor(prev, p, bs); | ||
127 | crypto_cipher_encrypt_one(tfm, prev, prev); | ||
128 | p += bs; | ||
129 | len -= bs; | ||
130 | } | ||
214 | 131 | ||
215 | crypto_cipher_encrypt_one(tfm, key2, | 132 | /* keeping the surplus of blocksize */ |
216 | (u8 *)(ctx->consts + bs)); | 133 | if (len) { |
134 | memcpy(odds, p, len); | ||
135 | ctx->len = len; | ||
136 | } | ||
217 | 137 | ||
218 | ctx->xor(ctx->prev, ctx->odds, bs); | 138 | return 0; |
219 | ctx->xor(ctx->prev, key2, bs); | 139 | } |
220 | _crypto_xcbc_digest_setkey(parent, ctx); | ||
221 | 140 | ||
222 | crypto_cipher_encrypt_one(tfm, out, ctx->prev); | 141 | static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) |
223 | } else { | 142 | { |
224 | u8 key3[bs]; | 143 | struct crypto_shash *parent = pdesc->tfm; |
144 | unsigned long alignmask = crypto_shash_alignmask(parent); | ||
145 | struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); | ||
146 | struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); | ||
147 | struct crypto_cipher *tfm = tctx->child; | ||
148 | int bs = crypto_shash_blocksize(parent); | ||
149 | u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); | ||
150 | u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); | ||
151 | u8 *prev = odds + bs; | ||
152 | unsigned int offset = 0; | ||
153 | |||
154 | if (ctx->len != bs) { | ||
225 | unsigned int rlen; | 155 | unsigned int rlen; |
226 | u8 *p = ctx->odds + ctx->len; | 156 | u8 *p = odds + ctx->len; |
157 | |||
227 | *p = 0x80; | 158 | *p = 0x80; |
228 | p++; | 159 | p++; |
229 | 160 | ||
@@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) | |||
231 | if (rlen) | 162 | if (rlen) |
232 | memset(p, 0, rlen); | 163 | memset(p, 0, rlen); |
233 | 164 | ||
234 | if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) | 165 | offset += bs; |
235 | return err; | ||
236 | |||
237 | crypto_cipher_encrypt_one(tfm, key3, | ||
238 | (u8 *)(ctx->consts + bs * 2)); | ||
239 | |||
240 | ctx->xor(ctx->prev, ctx->odds, bs); | ||
241 | ctx->xor(ctx->prev, key3, bs); | ||
242 | |||
243 | _crypto_xcbc_digest_setkey(parent, ctx); | ||
244 | |||
245 | crypto_cipher_encrypt_one(tfm, out, ctx->prev); | ||
246 | } | 166 | } |
247 | 167 | ||
248 | return 0; | 168 | crypto_xor(prev, odds, bs); |
249 | } | 169 | crypto_xor(prev, consts + offset, bs); |
250 | 170 | ||
251 | static int crypto_xcbc_digest(struct hash_desc *pdesc, | 171 | crypto_cipher_encrypt_one(tfm, out, prev); |
252 | struct scatterlist *sg, unsigned int nbytes, u8 *out) | ||
253 | { | ||
254 | if (WARN_ON_ONCE(in_irq())) | ||
255 | return -EDEADLK; | ||
256 | 172 | ||
257 | crypto_xcbc_digest_init(pdesc); | 173 | return 0; |
258 | crypto_xcbc_digest_update2(pdesc, sg, nbytes); | ||
259 | return crypto_xcbc_digest_final(pdesc, out); | ||
260 | } | 174 | } |
261 | 175 | ||
262 | static int xcbc_init_tfm(struct crypto_tfm *tfm) | 176 | static int xcbc_init_tfm(struct crypto_tfm *tfm) |
@@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) | |||
264 | struct crypto_cipher *cipher; | 178 | struct crypto_cipher *cipher; |
265 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | 179 | struct crypto_instance *inst = (void *)tfm->__crt_alg; |
266 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); | 180 | struct crypto_spawn *spawn = crypto_instance_ctx(inst); |
267 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); | 181 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
268 | int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm)); | ||
269 | 182 | ||
270 | cipher = crypto_spawn_cipher(spawn); | 183 | cipher = crypto_spawn_cipher(spawn); |
271 | if (IS_ERR(cipher)) | 184 | if (IS_ERR(cipher)) |
272 | return PTR_ERR(cipher); | 185 | return PTR_ERR(cipher); |
273 | 186 | ||
274 | switch(bs) { | ||
275 | case 16: | ||
276 | ctx->xor = xor_128; | ||
277 | break; | ||
278 | default: | ||
279 | return -EINVAL; | ||
280 | } | ||
281 | |||
282 | ctx->child = cipher; | 187 | ctx->child = cipher; |
283 | ctx->odds = (u8*)(ctx+1); | ||
284 | ctx->prev = ctx->odds + bs; | ||
285 | ctx->key = ctx->prev + bs; | ||
286 | 188 | ||
287 | return 0; | 189 | return 0; |
288 | }; | 190 | }; |
289 | 191 | ||
290 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) | 192 | static void xcbc_exit_tfm(struct crypto_tfm *tfm) |
291 | { | 193 | { |
292 | struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); | 194 | struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); |
293 | crypto_free_cipher(ctx->child); | 195 | crypto_free_cipher(ctx->child); |
294 | } | 196 | } |
295 | 197 | ||
296 | static struct crypto_instance *xcbc_alloc(struct rtattr **tb) | 198 | static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) |
297 | { | 199 | { |
298 | struct crypto_instance *inst; | 200 | struct shash_instance *inst; |
299 | struct crypto_alg *alg; | 201 | struct crypto_alg *alg; |
202 | unsigned long alignmask; | ||
300 | int err; | 203 | int err; |
301 | 204 | ||
302 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); | 205 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); |
303 | if (err) | 206 | if (err) |
304 | return ERR_PTR(err); | 207 | return err; |
305 | 208 | ||
306 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, | 209 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, |
307 | CRYPTO_ALG_TYPE_MASK); | 210 | CRYPTO_ALG_TYPE_MASK); |
308 | if (IS_ERR(alg)) | 211 | if (IS_ERR(alg)) |
309 | return ERR_CAST(alg); | 212 | return PTR_ERR(alg); |
310 | 213 | ||
311 | switch(alg->cra_blocksize) { | 214 | switch(alg->cra_blocksize) { |
312 | case 16: | 215 | case 16: |
313 | break; | 216 | break; |
314 | default: | 217 | default: |
315 | inst = ERR_PTR(-EINVAL); | ||
316 | goto out_put_alg; | 218 | goto out_put_alg; |
317 | } | 219 | } |
318 | 220 | ||
319 | inst = crypto_alloc_instance("xcbc", alg); | 221 | inst = shash_alloc_instance("xcbc", alg); |
222 | err = PTR_ERR(inst); | ||
320 | if (IS_ERR(inst)) | 223 | if (IS_ERR(inst)) |
321 | goto out_put_alg; | 224 | goto out_put_alg; |
322 | 225 | ||
323 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; | 226 | err = crypto_init_spawn(shash_instance_ctx(inst), alg, |
324 | inst->alg.cra_priority = alg->cra_priority; | 227 | shash_crypto_instance(inst), |
325 | inst->alg.cra_blocksize = alg->cra_blocksize; | 228 | CRYPTO_ALG_TYPE_MASK); |
326 | inst->alg.cra_alignmask = alg->cra_alignmask; | 229 | if (err) |
327 | inst->alg.cra_type = &crypto_hash_type; | 230 | goto out_free_inst; |
328 | 231 | ||
329 | inst->alg.cra_hash.digestsize = alg->cra_blocksize; | 232 | alignmask = alg->cra_alignmask | 3; |
330 | inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + | 233 | inst->alg.base.cra_alignmask = alignmask; |
331 | ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); | 234 | inst->alg.base.cra_priority = alg->cra_priority; |
332 | inst->alg.cra_init = xcbc_init_tfm; | 235 | inst->alg.base.cra_blocksize = alg->cra_blocksize; |
333 | inst->alg.cra_exit = xcbc_exit_tfm; | 236 | |
334 | 237 | inst->alg.digestsize = alg->cra_blocksize; | |
335 | inst->alg.cra_hash.init = crypto_xcbc_digest_init; | 238 | inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), |
336 | inst->alg.cra_hash.update = crypto_xcbc_digest_update; | 239 | crypto_tfm_ctx_alignment()) + |
337 | inst->alg.cra_hash.final = crypto_xcbc_digest_final; | 240 | (alignmask & |
338 | inst->alg.cra_hash.digest = crypto_xcbc_digest; | 241 | ~(crypto_tfm_ctx_alignment() - 1)) + |
339 | inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey; | 242 | alg->cra_blocksize * 2; |
243 | |||
244 | inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), | ||
245 | alignmask + 1) + | ||
246 | alg->cra_blocksize * 2; | ||
247 | inst->alg.base.cra_init = xcbc_init_tfm; | ||
248 | inst->alg.base.cra_exit = xcbc_exit_tfm; | ||
249 | |||
250 | inst->alg.init = crypto_xcbc_digest_init; | ||
251 | inst->alg.update = crypto_xcbc_digest_update; | ||
252 | inst->alg.final = crypto_xcbc_digest_final; | ||
253 | inst->alg.setkey = crypto_xcbc_digest_setkey; | ||
254 | |||
255 | err = shash_register_instance(tmpl, inst); | ||
256 | if (err) { | ||
257 | out_free_inst: | ||
258 | shash_free_instance(shash_crypto_instance(inst)); | ||
259 | } | ||
340 | 260 | ||
341 | out_put_alg: | 261 | out_put_alg: |
342 | crypto_mod_put(alg); | 262 | crypto_mod_put(alg); |
343 | return inst; | 263 | return err; |
344 | } | ||
345 | |||
346 | static void xcbc_free(struct crypto_instance *inst) | ||
347 | { | ||
348 | crypto_drop_spawn(crypto_instance_ctx(inst)); | ||
349 | kfree(inst); | ||
350 | } | 264 | } |
351 | 265 | ||
352 | static struct crypto_template crypto_xcbc_tmpl = { | 266 | static struct crypto_template crypto_xcbc_tmpl = { |
353 | .name = "xcbc", | 267 | .name = "xcbc", |
354 | .alloc = xcbc_alloc, | 268 | .create = xcbc_create, |
355 | .free = xcbc_free, | 269 | .free = shash_free_instance, |
356 | .module = THIS_MODULE, | 270 | .module = THIS_MODULE, |
357 | }; | 271 | }; |
358 | 272 | ||
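One of the changes folded into the xcbc.c rewrite above (per the merge log, "xcbc - Fix alignment calculation of xcbc_tfm_ctx") is that xcbc_tfm_ctx and xcbc_desc_ctx now end in a flexible ctx[] array whose payload is located at run time with PTR_ALIGN(&ctx->ctx[0], alignmask + 1), so the context sizes must reserve alignment slack on top of the block-sized buffers. The following stand-alone sketch (hypothetical userspace C, with a local stand-in for the kernel's PTR_ALIGN; not kernel code) mimics that layout and verifies that the aligned payload still fits inside the reserved area:

/* Userspace illustration of the aligned ctx[] layout used by xcbc.c. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1)))

struct xcbc_tfm_ctx_demo {
	void *child;		/* stands in for struct crypto_cipher * */
	uint8_t ctx[];		/* consts are placed here, aligned at run time */
};

int main(void)
{
	unsigned long alignmask = 7;	/* e.g. the cipher's alignmask OR'ed with 3 */
	int bs = 16;			/* AES block size */

	/* mirror of: cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
	 *                                alignmask + 1) + blocksize * 2 */
	size_t ctxsize = ((sizeof(struct xcbc_tfm_ctx_demo) + alignmask) & ~alignmask)
			 + (size_t)bs * 2;

	struct xcbc_tfm_ctx_demo *tctx = malloc(ctxsize);
	if (!tctx)
		return 1;

	uint8_t *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);

	printf("ctxsize=%zu, ctx[] at %p, consts aligned to %p\n",
	       ctxsize, (void *)tctx->ctx, (void *)consts);
	printf("aligned payload fits: %s\n",
	       consts + 2 * bs <= (uint8_t *)tctx + ctxsize ? "yes" : "no");

	free(tctx);
	return 0;
}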
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index cd0ba51f7c80..0d8c5788b8e4 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c | |||
@@ -44,8 +44,8 @@ | |||
44 | * want to register another driver on the same PCI id. | 44 | * want to register another driver on the same PCI id. |
45 | */ | 45 | */ |
46 | static const struct pci_device_id pci_tbl[] = { | 46 | static const struct pci_device_id pci_tbl[] = { |
47 | { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 47 | { PCI_VDEVICE(AMD, 0x7443), 0, }, |
48 | { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | 48 | { PCI_VDEVICE(AMD, 0x746b), 0, }, |
49 | { 0, }, /* terminate list */ | 49 | { 0, }, /* terminate list */ |
50 | }; | 50 | }; |
51 | MODULE_DEVICE_TABLE(pci, pci_tbl); | 51 | MODULE_DEVICE_TABLE(pci, pci_tbl); |
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 64d513f68368..4c4d4e140f98 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c | |||
@@ -46,8 +46,7 @@ | |||
46 | * want to register another driver on the same PCI id. | 46 | * want to register another driver on the same PCI id. |
47 | */ | 47 | */ |
48 | static const struct pci_device_id pci_tbl[] = { | 48 | static const struct pci_device_id pci_tbl[] = { |
49 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, | 49 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, }, |
50 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, | ||
51 | { 0, }, /* terminate list */ | 50 | { 0, }, /* terminate list */ |
52 | }; | 51 | }; |
53 | MODULE_DEVICE_TABLE(pci, pci_tbl); | 52 | MODULE_DEVICE_TABLE(pci, pci_tbl); |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 8c7444857a4b..d8a9255e1a3f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -240,6 +240,7 @@ | |||
240 | #include <linux/spinlock.h> | 240 | #include <linux/spinlock.h> |
241 | #include <linux/percpu.h> | 241 | #include <linux/percpu.h> |
242 | #include <linux/cryptohash.h> | 242 | #include <linux/cryptohash.h> |
243 | #include <linux/fips.h> | ||
243 | 244 | ||
244 | #ifdef CONFIG_GENERIC_HARDIRQS | 245 | #ifdef CONFIG_GENERIC_HARDIRQS |
245 | # include <linux/irq.h> | 246 | # include <linux/irq.h> |
@@ -413,6 +414,7 @@ struct entropy_store { | |||
413 | unsigned add_ptr; | 414 | unsigned add_ptr; |
414 | int entropy_count; | 415 | int entropy_count; |
415 | int input_rotate; | 416 | int input_rotate; |
417 | __u8 *last_data; | ||
416 | }; | 418 | }; |
417 | 419 | ||
418 | static __u32 input_pool_data[INPUT_POOL_WORDS]; | 420 | static __u32 input_pool_data[INPUT_POOL_WORDS]; |
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, | |||
852 | { | 854 | { |
853 | ssize_t ret = 0, i; | 855 | ssize_t ret = 0, i; |
854 | __u8 tmp[EXTRACT_SIZE]; | 856 | __u8 tmp[EXTRACT_SIZE]; |
857 | unsigned long flags; | ||
855 | 858 | ||
856 | xfer_secondary_pool(r, nbytes); | 859 | xfer_secondary_pool(r, nbytes); |
857 | nbytes = account(r, nbytes, min, reserved); | 860 | nbytes = account(r, nbytes, min, reserved); |
858 | 861 | ||
859 | while (nbytes) { | 862 | while (nbytes) { |
860 | extract_buf(r, tmp); | 863 | extract_buf(r, tmp); |
864 | |||
865 | if (r->last_data) { | ||
866 | spin_lock_irqsave(&r->lock, flags); | ||
867 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) | ||
868 | panic("Hardware RNG duplicated output!\n"); | ||
869 | memcpy(r->last_data, tmp, EXTRACT_SIZE); | ||
870 | spin_unlock_irqrestore(&r->lock, flags); | ||
871 | } | ||
861 | i = min_t(int, nbytes, EXTRACT_SIZE); | 872 | i = min_t(int, nbytes, EXTRACT_SIZE); |
862 | memcpy(buf, tmp, i); | 873 | memcpy(buf, tmp, i); |
863 | nbytes -= i; | 874 | nbytes -= i; |
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r) | |||
940 | now = ktime_get_real(); | 951 | now = ktime_get_real(); |
941 | mix_pool_bytes(r, &now, sizeof(now)); | 952 | mix_pool_bytes(r, &now, sizeof(now)); |
942 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); | 953 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); |
954 | /* Enable continuous test in fips mode */ | ||
955 | if (fips_enabled) | ||
956 | r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL); | ||
943 | } | 957 | } |
944 | 958 | ||
945 | static int rand_initialize(void) | 959 | static int rand_initialize(void) |
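The random.c hunk above wires a continuous self-test into extract_entropy() for FIPS mode: every freshly extracted EXTRACT_SIZE block is compared with the previous one under the pool lock, and an exact repeat causes a panic(). A minimal stand-alone sketch of the same pattern (hypothetical userspace C, with rand() standing in for the entropy pool and a 10-byte block size assumed for illustration; not kernel code):

/* Continuous-output test sketch: fail hard if the generator ever
 * repeats a block verbatim, as the kernel does in FIPS mode. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 10	/* stand-in for EXTRACT_SIZE */

static void get_block(unsigned char *buf)
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		buf[i] = (unsigned char)rand();
}

int main(void)
{
	unsigned char last[BLOCK_SIZE], tmp[BLOCK_SIZE];
	int have_last = 0;

	for (int i = 0; i < 1000; i++) {
		get_block(tmp);
		if (have_last && !memcmp(tmp, last, BLOCK_SIZE)) {
			/* the kernel panics here; we just report and exit */
			fprintf(stderr, "RNG repeated a block - failing hard\n");
			return 1;
		}
		memcpy(last, tmp, BLOCK_SIZE);
		have_last = 1;
	}
	puts("no repeated blocks seen");
	return 0;
}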
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5b27692372bf..b08403d7d1ca 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -13,7 +13,6 @@ if CRYPTO_HW | |||
13 | config CRYPTO_DEV_PADLOCK | 13 | config CRYPTO_DEV_PADLOCK |
14 | tristate "Support for VIA PadLock ACE" | 14 | tristate "Support for VIA PadLock ACE" |
15 | depends on X86 && !UML | 15 | depends on X86 && !UML |
16 | select CRYPTO_ALGAPI | ||
17 | help | 16 | help |
18 | Some VIA processors come with an integrated crypto engine | 17 | Some VIA processors come with an integrated crypto engine |
19 | (so called VIA PadLock ACE, Advanced Cryptography Engine) | 18 | (so called VIA PadLock ACE, Advanced Cryptography Engine) |
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES | |||
39 | config CRYPTO_DEV_PADLOCK_SHA | 38 | config CRYPTO_DEV_PADLOCK_SHA |
40 | tristate "PadLock driver for SHA1 and SHA256 algorithms" | 39 | tristate "PadLock driver for SHA1 and SHA256 algorithms" |
41 | depends on CRYPTO_DEV_PADLOCK | 40 | depends on CRYPTO_DEV_PADLOCK |
41 | select CRYPTO_HASH | ||
42 | select CRYPTO_SHA1 | 42 | select CRYPTO_SHA1 |
43 | select CRYPTO_SHA256 | 43 | select CRYPTO_SHA256 |
44 | help | 44 | help |
@@ -157,6 +157,19 @@ config S390_PRNG | |||
157 | ANSI X9.17 standard. The PRNG is usable via the char device | 157 | ANSI X9.17 standard. The PRNG is usable via the char device |
158 | /dev/prandom. | 158 | /dev/prandom. |
159 | 159 | ||
160 | config CRYPTO_DEV_MV_CESA | ||
161 | tristate "Marvell's Cryptographic Engine" | ||
162 | depends on PLAT_ORION | ||
163 | select CRYPTO_ALGAPI | ||
164 | select CRYPTO_AES | ||
165 | select CRYPTO_BLKCIPHER2 | ||
166 | help | ||
167 | This driver allows you to utilize the Cryptographic Engines and | ||
168 | Security Accelerator (CESA) which can be found on the Marvell Orion | ||
169 | and Kirkwood SoCs, such as QNAP's TS-209. | ||
170 | |||
171 | Currently the driver supports AES in ECB and CBC mode without DMA. | ||
172 | |||
160 | config CRYPTO_DEV_HIFN_795X | 173 | config CRYPTO_DEV_HIFN_795X |
161 | tristate "Driver HIFN 795x crypto accelerator chips" | 174 | tristate "Driver HIFN 795x crypto accelerator chips" |
162 | select CRYPTO_DES | 175 | select CRYPTO_DES |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 9bf4a2bc8846..6ffcb3f7f942 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o | |||
2 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o | 2 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o |
3 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o | 3 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o |
4 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o | 4 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o |
5 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o | ||
5 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o | 6 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o |
6 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o | 7 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o |
7 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ | 8 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ |
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 61b6e1bec8c6..a33243c17b00 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c | |||
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, | |||
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); | 211 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
212 | sizeof(struct crypto4xx_ctx)); | ||
212 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; | 213 | sa = (struct dynamic_sa_ctl *) ctx->sa_in; |
213 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, | 214 | set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, |
214 | SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, | 215 | SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, |
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 4c0dfb2b872e..46e899ac924e 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -31,8 +31,6 @@ | |||
31 | #include <asm/dcr.h> | 31 | #include <asm/dcr.h> |
32 | #include <asm/dcr-regs.h> | 32 | #include <asm/dcr-regs.h> |
33 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
34 | #include <crypto/internal/hash.h> | ||
35 | #include <crypto/algapi.h> | ||
36 | #include <crypto/aes.h> | 34 | #include <crypto/aes.h> |
37 | #include <crypto/sha.h> | 35 | #include <crypto/sha.h> |
38 | #include "crypto4xx_reg_def.h" | 36 | #include "crypto4xx_reg_def.h" |
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm) | |||
998 | ctx->sa_out_dma_addr = 0; | 996 | ctx->sa_out_dma_addr = 0; |
999 | ctx->sa_len = 0; | 997 | ctx->sa_len = 0; |
1000 | 998 | ||
1001 | if (alg->cra_type == &crypto_ablkcipher_type) | 999 | switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
1000 | default: | ||
1002 | tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); | 1001 | tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); |
1003 | else if (alg->cra_type == &crypto_ahash_type) | 1002 | break; |
1004 | tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); | 1003 | case CRYPTO_ALG_TYPE_AHASH: |
1004 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
1005 | sizeof(struct crypto4xx_ctx)); | ||
1006 | break; | ||
1007 | } | ||
1005 | 1008 | ||
1006 | return 0; | 1009 | return 0; |
1007 | } | 1010 | } |
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm) | |||
1015 | } | 1018 | } |
1016 | 1019 | ||
1017 | int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | 1020 | int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, |
1018 | struct crypto_alg *crypto_alg, int array_size) | 1021 | struct crypto4xx_alg_common *crypto_alg, |
1022 | int array_size) | ||
1019 | { | 1023 | { |
1020 | struct crypto4xx_alg *alg; | 1024 | struct crypto4xx_alg *alg; |
1021 | int i; | 1025 | int i; |
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, | |||
1027 | return -ENOMEM; | 1031 | return -ENOMEM; |
1028 | 1032 | ||
1029 | alg->alg = crypto_alg[i]; | 1033 | alg->alg = crypto_alg[i]; |
1030 | INIT_LIST_HEAD(&alg->alg.cra_list); | ||
1031 | if (alg->alg.cra_init == NULL) | ||
1032 | alg->alg.cra_init = crypto4xx_alg_init; | ||
1033 | if (alg->alg.cra_exit == NULL) | ||
1034 | alg->alg.cra_exit = crypto4xx_alg_exit; | ||
1035 | alg->dev = sec_dev; | 1034 | alg->dev = sec_dev; |
1036 | rc = crypto_register_alg(&alg->alg); | 1035 | |
1036 | switch (alg->alg.type) { | ||
1037 | case CRYPTO_ALG_TYPE_AHASH: | ||
1038 | rc = crypto_register_ahash(&alg->alg.u.hash); | ||
1039 | break; | ||
1040 | |||
1041 | default: | ||
1042 | rc = crypto_register_alg(&alg->alg.u.cipher); | ||
1043 | break; | ||
1044 | } | ||
1045 | |||
1037 | if (rc) { | 1046 | if (rc) { |
1038 | list_del(&alg->entry); | 1047 | list_del(&alg->entry); |
1039 | kfree(alg); | 1048 | kfree(alg); |
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) | |||
1051 | 1060 | ||
1052 | list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { | 1061 | list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { |
1053 | list_del(&alg->entry); | 1062 | list_del(&alg->entry); |
1054 | crypto_unregister_alg(&alg->alg); | 1063 | switch (alg->alg.type) { |
1064 | case CRYPTO_ALG_TYPE_AHASH: | ||
1065 | crypto_unregister_ahash(&alg->alg.u.hash); | ||
1066 | break; | ||
1067 | |||
1068 | default: | ||
1069 | crypto_unregister_alg(&alg->alg.u.cipher); | ||
1070 | } | ||
1055 | kfree(alg); | 1071 | kfree(alg); |
1056 | } | 1072 | } |
1057 | } | 1073 | } |
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) | |||
1104 | /** | 1120 | /** |
1105 | * Supported Crypto Algorithms | 1121 | * Supported Crypto Algorithms |
1106 | */ | 1122 | */ |
1107 | struct crypto_alg crypto4xx_alg[] = { | 1123 | struct crypto4xx_alg_common crypto4xx_alg[] = { |
1108 | /* Crypto AES modes */ | 1124 | /* Crypto AES modes */ |
1109 | { | 1125 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { |
1110 | .cra_name = "cbc(aes)", | 1126 | .cra_name = "cbc(aes)", |
1111 | .cra_driver_name = "cbc-aes-ppc4xx", | 1127 | .cra_driver_name = "cbc-aes-ppc4xx", |
1112 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | 1128 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, |
1113 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1129 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1114 | .cra_blocksize = AES_BLOCK_SIZE, | 1130 | .cra_blocksize = AES_BLOCK_SIZE, |
1115 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1131 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1116 | .cra_alignmask = 0, | ||
1117 | .cra_type = &crypto_ablkcipher_type, | 1132 | .cra_type = &crypto_ablkcipher_type, |
1133 | .cra_init = crypto4xx_alg_init, | ||
1134 | .cra_exit = crypto4xx_alg_exit, | ||
1118 | .cra_module = THIS_MODULE, | 1135 | .cra_module = THIS_MODULE, |
1119 | .cra_u = { | 1136 | .cra_u = { |
1120 | .ablkcipher = { | 1137 | .ablkcipher = { |
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = { | |||
1126 | .decrypt = crypto4xx_decrypt, | 1143 | .decrypt = crypto4xx_decrypt, |
1127 | } | 1144 | } |
1128 | } | 1145 | } |
1129 | }, | 1146 | }}, |
1130 | /* Hash SHA1 */ | ||
1131 | { | ||
1132 | .cra_name = "sha1", | ||
1133 | .cra_driver_name = "sha1-ppc4xx", | ||
1134 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | ||
1135 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | ||
1136 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
1137 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | ||
1138 | .cra_alignmask = 0, | ||
1139 | .cra_type = &crypto_ahash_type, | ||
1140 | .cra_init = crypto4xx_sha1_alg_init, | ||
1141 | .cra_module = THIS_MODULE, | ||
1142 | .cra_u = { | ||
1143 | .ahash = { | ||
1144 | .digestsize = SHA1_DIGEST_SIZE, | ||
1145 | .init = crypto4xx_hash_init, | ||
1146 | .update = crypto4xx_hash_update, | ||
1147 | .final = crypto4xx_hash_final, | ||
1148 | .digest = crypto4xx_hash_digest, | ||
1149 | } | ||
1150 | } | ||
1151 | }, | ||
1152 | }; | 1147 | }; |
1153 | 1148 | ||
1154 | /** | 1149 | /** |
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 1ef103449364..da9cbe3b9fc3 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h | |||
@@ -22,6 +22,8 @@ | |||
22 | #ifndef __CRYPTO4XX_CORE_H__ | 22 | #ifndef __CRYPTO4XX_CORE_H__ |
23 | #define __CRYPTO4XX_CORE_H__ | 23 | #define __CRYPTO4XX_CORE_H__ |
24 | 24 | ||
25 | #include <crypto/internal/hash.h> | ||
26 | |||
25 | #define PPC460SX_SDR0_SRST 0x201 | 27 | #define PPC460SX_SDR0_SRST 0x201 |
26 | #define PPC405EX_SDR0_SRST 0x200 | 28 | #define PPC405EX_SDR0_SRST 0x200 |
27 | #define PPC460EX_SDR0_SRST 0x201 | 29 | #define PPC460EX_SDR0_SRST 0x201 |
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx { | |||
138 | u16 sa_len; | 140 | u16 sa_len; |
139 | }; | 141 | }; |
140 | 142 | ||
143 | struct crypto4xx_alg_common { | ||
144 | u32 type; | ||
145 | union { | ||
146 | struct crypto_alg cipher; | ||
147 | struct ahash_alg hash; | ||
148 | } u; | ||
149 | }; | ||
150 | |||
141 | struct crypto4xx_alg { | 151 | struct crypto4xx_alg { |
142 | struct list_head entry; | 152 | struct list_head entry; |
143 | struct crypto_alg alg; | 153 | struct crypto4xx_alg_common alg; |
144 | struct crypto4xx_device *dev; | 154 | struct crypto4xx_device *dev; |
145 | }; | 155 | }; |
146 | 156 | ||
147 | #define crypto_alg_to_crypto4xx_alg(x) \ | 157 | static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg( |
148 | container_of(x, struct crypto4xx_alg, alg) | 158 | struct crypto_alg *x) |
159 | { | ||
160 | switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) { | ||
161 | case CRYPTO_ALG_TYPE_AHASH: | ||
162 | return container_of(__crypto_ahash_alg(x), | ||
163 | struct crypto4xx_alg, alg.u.hash); | ||
164 | } | ||
165 | |||
166 | return container_of(x, struct crypto4xx_alg, alg.u.cipher); | ||
167 | } | ||
149 | 168 | ||
150 | extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); | 169 | extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); |
151 | extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); | 170 | extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); |
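The new crypto_alg_to_crypto4xx_alg() helper relies on container_of() to step from the embedded algorithm back to the driver wrapper. A stand-alone sketch of the same pattern (hypothetical names, user-space C):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };

struct wrapper {
    const char *owner;
    struct inner alg;   /* embedded member, like alg.u.cipher above */
};

int main(void)
{
    struct wrapper w = { .owner = "ppc4xx", .alg = { .id = 42 } };
    struct inner *p = &w.alg;                   /* what the core hands back */
    struct wrapper *back = container_of(p, struct wrapper, alg);

    printf("%s %d\n", back->owner, back->alg.id);   /* ppc4xx 42 */
    return 0;
}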
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c new file mode 100644 index 000000000000..b21ef635f352 --- /dev/null +++ b/drivers/crypto/mv_cesa.c | |||
@@ -0,0 +1,606 @@ | |||
1 | /* | ||
2 | * Support for Marvell's crypto engine which can be found on some Orion5X | ||
3 | * boards. | ||
4 | * | ||
5 | * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | ||
6 | * License: GPLv2 | ||
7 | * | ||
8 | */ | ||
9 | #include <crypto/aes.h> | ||
10 | #include <crypto/algapi.h> | ||
11 | #include <linux/crypto.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/kthread.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/scatterlist.h> | ||
17 | |||
18 | #include "mv_cesa.h" | ||
19 | /* | ||
20 | * STM: | ||
21 | * /---------------------------------------\ | ||
22 | * | | request complete | ||
23 | * \./ | | ||
24 | * IDLE -> new request -> BUSY -> done -> DEQUEUE | ||
25 | * /°\ | | ||
26 | * | | more scatter entries | ||
27 | * \________________/ | ||
28 | */ | ||
29 | enum engine_status { | ||
30 | ENGINE_IDLE, | ||
31 | ENGINE_BUSY, | ||
32 | ENGINE_W_DEQUEUE, | ||
33 | }; | ||
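The STM comment above can also be read as a plain transition table. The sketch below only illustrates those transitions (the event names are made up here); in the driver they are driven by queue_manag(), the interrupt handler and dequeue_complete_req():

#include <stdio.h>

enum engine_status { ENGINE_IDLE, ENGINE_BUSY, ENGINE_W_DEQUEUE };
enum engine_event { EV_NEW_REQUEST, EV_DONE, EV_MORE_SG, EV_REQ_COMPLETE };

static enum engine_status next_state(enum engine_status st, enum engine_event ev)
{
    switch (st) {
    case ENGINE_IDLE:
        return ev == EV_NEW_REQUEST ? ENGINE_BUSY : st;
    case ENGINE_BUSY:
        return ev == EV_DONE ? ENGINE_W_DEQUEUE : st;
    case ENGINE_W_DEQUEUE:
        if (ev == EV_MORE_SG)
            return ENGINE_BUSY;     /* more scatter entries */
        if (ev == EV_REQ_COMPLETE)
            return ENGINE_IDLE;     /* request complete */
        return st;
    }
    return st;
}

int main(void)
{
    enum engine_status st = ENGINE_IDLE;

    st = next_state(st, EV_NEW_REQUEST);    /* IDLE -> BUSY */
    st = next_state(st, EV_DONE);           /* BUSY -> W_DEQUEUE */
    st = next_state(st, EV_REQ_COMPLETE);   /* W_DEQUEUE -> IDLE */
    printf("final state: %d (0 == ENGINE_IDLE)\n", st);
    return 0;
}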
34 | |||
35 | /** | ||
36 | * struct req_progress - used for every crypt request | ||
37 | * @src_sg_it: sg iterator for src | ||
38 | * @dst_sg_it: sg iterator for dst | ||
39 | * @sg_src_left: bytes left in src to process (scatter list) | ||
40 | * @src_start: offset to add to src start position (scatter list) | ||
41 | * @crypt_len: length of current crypt process | ||
42 | * @sg_dst_left: bytes left in dst to process in this scatter list | ||
43 | * @dst_start: offset to add to dst start position (scatter list) | ||
44 | * @total_req_bytes: total number of bytes processed (request). | ||
45 | * | ||
46 | * sg helpers are used to iterate over the scatterlist. Since the size of the | ||
47 | * SRAM may be less than the scatter size, this struct is used to keep | ||
48 | * track of progress within the current scatterlist. | ||
49 | */ | ||
50 | struct req_progress { | ||
51 | struct sg_mapping_iter src_sg_it; | ||
52 | struct sg_mapping_iter dst_sg_it; | ||
53 | |||
54 | /* src mostly */ | ||
55 | int sg_src_left; | ||
56 | int src_start; | ||
57 | int crypt_len; | ||
58 | /* dst mostly */ | ||
59 | int sg_dst_left; | ||
60 | int dst_start; | ||
61 | int total_req_bytes; | ||
62 | }; | ||
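Because the SRAM window can be smaller than a single scatterlist entry, progress is tracked one chunk at a time, as the fields above describe. A hedged user-space model of that chunking (plain arrays stand in for the sg_mapping_iter; the sizes are made up):

#include <stdio.h>

#define MAX_REQ_SIZE 64     /* stand-in for cpg->max_req_size */

int main(void)
{
    int sg_len[] = { 100, 40, 20 };     /* pretend scatterlist entry sizes */
    int nents = 3, ent = 0;
    int sg_src_left = 0, src_start = 0, total = 0;

    while (ent < nents || sg_src_left) {
        if (!sg_src_left) {             /* like sg_miter_next() */
            sg_src_left = sg_len[ent++];
            src_start = 0;
        }
        /* copy at most one SRAM window per pass, like setup_data_in() */
        int crypt_len = sg_src_left < MAX_REQ_SIZE ? sg_src_left : MAX_REQ_SIZE;

        printf("copy %d bytes from offset %d of entry %d\n",
               crypt_len, src_start, ent - 1);
        sg_src_left -= crypt_len;
        src_start += crypt_len;
        total += crypt_len;
    }
    printf("total %d bytes processed\n", total);
    return 0;
}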
63 | |||
64 | struct crypto_priv { | ||
65 | void __iomem *reg; | ||
66 | void __iomem *sram; | ||
67 | int irq; | ||
68 | struct task_struct *queue_th; | ||
69 | |||
70 | /* the lock protects queue and eng_st */ | ||
71 | spinlock_t lock; | ||
72 | struct crypto_queue queue; | ||
73 | enum engine_status eng_st; | ||
74 | struct ablkcipher_request *cur_req; | ||
75 | struct req_progress p; | ||
76 | int max_req_size; | ||
77 | int sram_size; | ||
78 | }; | ||
79 | |||
80 | static struct crypto_priv *cpg; | ||
81 | |||
82 | struct mv_ctx { | ||
83 | u8 aes_enc_key[AES_KEY_LEN]; | ||
84 | u32 aes_dec_key[8]; | ||
85 | int key_len; | ||
86 | u32 need_calc_aes_dkey; | ||
87 | }; | ||
88 | |||
89 | enum crypto_op { | ||
90 | COP_AES_ECB, | ||
91 | COP_AES_CBC, | ||
92 | }; | ||
93 | |||
94 | struct mv_req_ctx { | ||
95 | enum crypto_op op; | ||
96 | int decrypt; | ||
97 | }; | ||
98 | |||
99 | static void compute_aes_dec_key(struct mv_ctx *ctx) | ||
100 | { | ||
101 | struct crypto_aes_ctx gen_aes_key; | ||
102 | int key_pos; | ||
103 | |||
104 | if (!ctx->need_calc_aes_dkey) | ||
105 | return; | ||
106 | |||
107 | crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); | ||
108 | |||
109 | key_pos = ctx->key_len + 24; | ||
110 | memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); | ||
111 | switch (ctx->key_len) { | ||
112 | case AES_KEYSIZE_256: | ||
113 | key_pos -= 2; | ||
114 | /* fall */ | ||
115 | case AES_KEYSIZE_192: | ||
116 | key_pos -= 2; | ||
117 | memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], | ||
118 | 4 * 4); | ||
119 | break; | ||
120 | } | ||
121 | ctx->need_calc_aes_dkey = 0; | ||
122 | } | ||
123 | |||
124 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | ||
125 | unsigned int len) | ||
126 | { | ||
127 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
128 | struct mv_ctx *ctx = crypto_tfm_ctx(tfm); | ||
129 | |||
130 | switch (len) { | ||
131 | case AES_KEYSIZE_128: | ||
132 | case AES_KEYSIZE_192: | ||
133 | case AES_KEYSIZE_256: | ||
134 | break; | ||
135 | default: | ||
136 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | ctx->key_len = len; | ||
140 | ctx->need_calc_aes_dkey = 1; | ||
141 | |||
142 | memcpy(ctx->aes_enc_key, key, len); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static void setup_data_in(struct ablkcipher_request *req) | ||
147 | { | ||
148 | int ret; | ||
149 | void *buf; | ||
150 | |||
151 | if (!cpg->p.sg_src_left) { | ||
152 | ret = sg_miter_next(&cpg->p.src_sg_it); | ||
153 | BUG_ON(!ret); | ||
154 | cpg->p.sg_src_left = cpg->p.src_sg_it.length; | ||
155 | cpg->p.src_start = 0; | ||
156 | } | ||
157 | |||
158 | cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size); | ||
159 | |||
160 | buf = cpg->p.src_sg_it.addr; | ||
161 | buf += cpg->p.src_start; | ||
162 | |||
163 | memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len); | ||
164 | |||
165 | cpg->p.sg_src_left -= cpg->p.crypt_len; | ||
166 | cpg->p.src_start += cpg->p.crypt_len; | ||
167 | } | ||
168 | |||
169 | static void mv_process_current_q(int first_block) | ||
170 | { | ||
171 | struct ablkcipher_request *req = cpg->cur_req; | ||
172 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
173 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
174 | struct sec_accel_config op; | ||
175 | |||
176 | switch (req_ctx->op) { | ||
177 | case COP_AES_ECB: | ||
178 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; | ||
179 | break; | ||
180 | case COP_AES_CBC: | ||
181 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; | ||
182 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | | ||
183 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); | ||
184 | if (first_block) | ||
185 | memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); | ||
186 | break; | ||
187 | } | ||
188 | if (req_ctx->decrypt) { | ||
189 | op.config |= CFG_DIR_DEC; | ||
190 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, | ||
191 | AES_KEY_LEN); | ||
192 | } else { | ||
193 | op.config |= CFG_DIR_ENC; | ||
194 | memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, | ||
195 | AES_KEY_LEN); | ||
196 | } | ||
197 | |||
198 | switch (ctx->key_len) { | ||
199 | case AES_KEYSIZE_128: | ||
200 | op.config |= CFG_AES_LEN_128; | ||
201 | break; | ||
202 | case AES_KEYSIZE_192: | ||
203 | op.config |= CFG_AES_LEN_192; | ||
204 | break; | ||
205 | case AES_KEYSIZE_256: | ||
206 | op.config |= CFG_AES_LEN_256; | ||
207 | break; | ||
208 | } | ||
209 | op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | | ||
210 | ENC_P_DST(SRAM_DATA_OUT_START); | ||
211 | op.enc_key_p = SRAM_DATA_KEY_P; | ||
212 | |||
213 | setup_data_in(req); | ||
214 | op.enc_len = cpg->p.crypt_len; | ||
215 | memcpy(cpg->sram + SRAM_CONFIG, &op, | ||
216 | sizeof(struct sec_accel_config)); | ||
217 | |||
218 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
219 | /* GO */ | ||
220 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | ||
221 | |||
222 | /* | ||
223 | * XXX: add timer if the interrupt does not occur for some mystery | ||
224 | * reason | ||
225 | */ | ||
226 | } | ||
227 | |||
228 | static void mv_crypto_algo_completion(void) | ||
229 | { | ||
230 | struct ablkcipher_request *req = cpg->cur_req; | ||
231 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
232 | |||
233 | if (req_ctx->op != COP_AES_CBC) | ||
234 | return; | ||
235 | |||
236 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | ||
237 | } | ||
238 | |||
239 | static void dequeue_complete_req(void) | ||
240 | { | ||
241 | struct ablkcipher_request *req = cpg->cur_req; | ||
242 | void *buf; | ||
243 | int ret; | ||
244 | |||
245 | cpg->p.total_req_bytes += cpg->p.crypt_len; | ||
246 | do { | ||
247 | int dst_copy; | ||
248 | |||
249 | if (!cpg->p.sg_dst_left) { | ||
250 | ret = sg_miter_next(&cpg->p.dst_sg_it); | ||
251 | BUG_ON(!ret); | ||
252 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | ||
253 | cpg->p.dst_start = 0; | ||
254 | } | ||
255 | |||
256 | buf = cpg->p.dst_sg_it.addr; | ||
257 | buf += cpg->p.dst_start; | ||
258 | |||
259 | dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left); | ||
260 | |||
261 | memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy); | ||
262 | |||
263 | cpg->p.sg_dst_left -= dst_copy; | ||
264 | cpg->p.crypt_len -= dst_copy; | ||
265 | cpg->p.dst_start += dst_copy; | ||
266 | } while (cpg->p.crypt_len > 0); | ||
267 | |||
268 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); | ||
269 | if (cpg->p.total_req_bytes < req->nbytes) { | ||
270 | /* process next scatter list entry */ | ||
271 | cpg->eng_st = ENGINE_BUSY; | ||
272 | mv_process_current_q(0); | ||
273 | } else { | ||
274 | sg_miter_stop(&cpg->p.src_sg_it); | ||
275 | sg_miter_stop(&cpg->p.dst_sg_it); | ||
276 | mv_crypto_algo_completion(); | ||
277 | cpg->eng_st = ENGINE_IDLE; | ||
278 | req->base.complete(&req->base, 0); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | ||
283 | { | ||
284 | int i = 0; | ||
285 | |||
286 | do { | ||
287 | total_bytes -= sl[i].length; | ||
288 | i++; | ||
289 | |||
290 | } while (total_bytes > 0); | ||
291 | |||
292 | return i; | ||
293 | } | ||
294 | |||
295 | static void mv_enqueue_new_req(struct ablkcipher_request *req) | ||
296 | { | ||
297 | int num_sgs; | ||
298 | |||
299 | cpg->cur_req = req; | ||
300 | memset(&cpg->p, 0, sizeof(struct req_progress)); | ||
301 | |||
302 | num_sgs = count_sgs(req->src, req->nbytes); | ||
303 | sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | ||
304 | |||
305 | num_sgs = count_sgs(req->dst, req->nbytes); | ||
306 | sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); | ||
307 | mv_process_current_q(1); | ||
308 | } | ||
309 | |||
310 | static int queue_manag(void *data) | ||
311 | { | ||
312 | cpg->eng_st = ENGINE_IDLE; | ||
313 | do { | ||
314 | struct ablkcipher_request *req; | ||
315 | struct crypto_async_request *async_req = NULL; | ||
316 | struct crypto_async_request *backlog = NULL; | ||
317 | |||
318 | __set_current_state(TASK_INTERRUPTIBLE); | ||
319 | |||
320 | if (cpg->eng_st == ENGINE_W_DEQUEUE) | ||
321 | dequeue_complete_req(); | ||
322 | |||
323 | spin_lock_irq(&cpg->lock); | ||
324 | if (cpg->eng_st == ENGINE_IDLE) { | ||
325 | backlog = crypto_get_backlog(&cpg->queue); | ||
326 | async_req = crypto_dequeue_request(&cpg->queue); | ||
327 | if (async_req) { | ||
328 | BUG_ON(cpg->eng_st != ENGINE_IDLE); | ||
329 | cpg->eng_st = ENGINE_BUSY; | ||
330 | } | ||
331 | } | ||
332 | spin_unlock_irq(&cpg->lock); | ||
333 | |||
334 | if (backlog) { | ||
335 | backlog->complete(backlog, -EINPROGRESS); | ||
336 | backlog = NULL; | ||
337 | } | ||
338 | |||
339 | if (async_req) { | ||
340 | req = container_of(async_req, | ||
341 | struct ablkcipher_request, base); | ||
342 | mv_enqueue_new_req(req); | ||
343 | async_req = NULL; | ||
344 | } | ||
345 | |||
346 | schedule(); | ||
347 | |||
348 | } while (!kthread_should_stop()); | ||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | static int mv_handle_req(struct ablkcipher_request *req) | ||
353 | { | ||
354 | unsigned long flags; | ||
355 | int ret; | ||
356 | |||
357 | spin_lock_irqsave(&cpg->lock, flags); | ||
358 | ret = ablkcipher_enqueue_request(&cpg->queue, req); | ||
359 | spin_unlock_irqrestore(&cpg->lock, flags); | ||
360 | wake_up_process(cpg->queue_th); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
364 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) | ||
365 | { | ||
366 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
367 | |||
368 | req_ctx->op = COP_AES_ECB; | ||
369 | req_ctx->decrypt = 0; | ||
370 | |||
371 | return mv_handle_req(req); | ||
372 | } | ||
373 | |||
374 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | ||
375 | { | ||
376 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
377 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
378 | |||
379 | req_ctx->op = COP_AES_ECB; | ||
380 | req_ctx->decrypt = 1; | ||
381 | |||
382 | compute_aes_dec_key(ctx); | ||
383 | return mv_handle_req(req); | ||
384 | } | ||
385 | |||
386 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | ||
387 | { | ||
388 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
389 | |||
390 | req_ctx->op = COP_AES_CBC; | ||
391 | req_ctx->decrypt = 0; | ||
392 | |||
393 | return mv_handle_req(req); | ||
394 | } | ||
395 | |||
396 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | ||
397 | { | ||
398 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
399 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
400 | |||
401 | req_ctx->op = COP_AES_CBC; | ||
402 | req_ctx->decrypt = 1; | ||
403 | |||
404 | compute_aes_dec_key(ctx); | ||
405 | return mv_handle_req(req); | ||
406 | } | ||
407 | |||
408 | static int mv_cra_init(struct crypto_tfm *tfm) | ||
409 | { | ||
410 | tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | irqreturn_t crypto_int(int irq, void *priv) | ||
415 | { | ||
416 | u32 val; | ||
417 | |||
418 | val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); | ||
419 | if (!(val & SEC_INT_ACCEL0_DONE)) | ||
420 | return IRQ_NONE; | ||
421 | |||
422 | val &= ~SEC_INT_ACCEL0_DONE; | ||
423 | writel(val, cpg->reg + FPGA_INT_STATUS); | ||
424 | writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | ||
425 | BUG_ON(cpg->eng_st != ENGINE_BUSY); | ||
426 | cpg->eng_st = ENGINE_W_DEQUEUE; | ||
427 | wake_up_process(cpg->queue_th); | ||
428 | return IRQ_HANDLED; | ||
429 | } | ||
430 | |||
431 | struct crypto_alg mv_aes_alg_ecb = { | ||
432 | .cra_name = "ecb(aes)", | ||
433 | .cra_driver_name = "mv-ecb-aes", | ||
434 | .cra_priority = 300, | ||
435 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
436 | .cra_blocksize = 16, | ||
437 | .cra_ctxsize = sizeof(struct mv_ctx), | ||
438 | .cra_alignmask = 0, | ||
439 | .cra_type = &crypto_ablkcipher_type, | ||
440 | .cra_module = THIS_MODULE, | ||
441 | .cra_init = mv_cra_init, | ||
442 | .cra_u = { | ||
443 | .ablkcipher = { | ||
444 | .min_keysize = AES_MIN_KEY_SIZE, | ||
445 | .max_keysize = AES_MAX_KEY_SIZE, | ||
446 | .setkey = mv_setkey_aes, | ||
447 | .encrypt = mv_enc_aes_ecb, | ||
448 | .decrypt = mv_dec_aes_ecb, | ||
449 | }, | ||
450 | }, | ||
451 | }; | ||
452 | |||
453 | struct crypto_alg mv_aes_alg_cbc = { | ||
454 | .cra_name = "cbc(aes)", | ||
455 | .cra_driver_name = "mv-cbc-aes", | ||
456 | .cra_priority = 300, | ||
457 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
458 | .cra_blocksize = AES_BLOCK_SIZE, | ||
459 | .cra_ctxsize = sizeof(struct mv_ctx), | ||
460 | .cra_alignmask = 0, | ||
461 | .cra_type = &crypto_ablkcipher_type, | ||
462 | .cra_module = THIS_MODULE, | ||
463 | .cra_init = mv_cra_init, | ||
464 | .cra_u = { | ||
465 | .ablkcipher = { | ||
466 | .ivsize = AES_BLOCK_SIZE, | ||
467 | .min_keysize = AES_MIN_KEY_SIZE, | ||
468 | .max_keysize = AES_MAX_KEY_SIZE, | ||
469 | .setkey = mv_setkey_aes, | ||
470 | .encrypt = mv_enc_aes_cbc, | ||
471 | .decrypt = mv_dec_aes_cbc, | ||
472 | }, | ||
473 | }, | ||
474 | }; | ||
475 | |||
476 | static int mv_probe(struct platform_device *pdev) | ||
477 | { | ||
478 | struct crypto_priv *cp; | ||
479 | struct resource *res; | ||
480 | int irq; | ||
481 | int ret; | ||
482 | |||
483 | if (cpg) { | ||
484 | printk(KERN_ERR "Second crypto dev?\n"); | ||
485 | return -EEXIST; | ||
486 | } | ||
487 | |||
488 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
489 | if (!res) | ||
490 | return -ENXIO; | ||
491 | |||
492 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
493 | if (!cp) | ||
494 | return -ENOMEM; | ||
495 | |||
496 | spin_lock_init(&cp->lock); | ||
497 | crypto_init_queue(&cp->queue, 50); | ||
498 | cp->reg = ioremap(res->start, res->end - res->start + 1); | ||
499 | if (!cp->reg) { | ||
500 | ret = -ENOMEM; | ||
501 | goto err; | ||
502 | } | ||
503 | |||
504 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); | ||
505 | if (!res) { | ||
506 | ret = -ENXIO; | ||
507 | goto err_unmap_reg; | ||
508 | } | ||
509 | cp->sram_size = res->end - res->start + 1; | ||
510 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | ||
511 | cp->sram = ioremap(res->start, cp->sram_size); | ||
512 | if (!cp->sram) { | ||
513 | ret = -ENOMEM; | ||
514 | goto err_unmap_reg; | ||
515 | } | ||
516 | |||
517 | irq = platform_get_irq(pdev, 0); | ||
518 | if (irq < 0 || irq == NO_IRQ) { | ||
519 | ret = irq < 0 ? irq : -ENXIO; | ||
520 | goto err_unmap_sram; | ||
521 | } | ||
522 | cp->irq = irq; | ||
523 | |||
524 | platform_set_drvdata(pdev, cp); | ||
525 | cpg = cp; | ||
526 | |||
527 | cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | ||
528 | if (IS_ERR(cp->queue_th)) { | ||
529 | ret = PTR_ERR(cp->queue_th); | ||
530 | goto err_unmap_sram; | ||
531 | } | ||
532 | |||
533 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | ||
534 | cp); | ||
535 | if (ret) | ||
536 | goto err_thread; | ||
537 | |||
538 | writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | ||
539 | writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | ||
540 | |||
541 | ret = crypto_register_alg(&mv_aes_alg_ecb); | ||
542 | if (ret) | ||
543 | goto err_irq; | ||
544 | |||
545 | ret = crypto_register_alg(&mv_aes_alg_cbc); | ||
546 | if (ret) | ||
547 | goto err_unreg_ecb; | ||
548 | return 0; | ||
549 | err_unreg_ecb: | ||
550 | crypto_unregister_alg(&mv_aes_alg_ecb); | ||
551 | err_irq: | ||
552 | free_irq(irq, cp); | ||
553 | err_thread: | ||
554 | kthread_stop(cp->queue_th); | ||
555 | err_unmap_sram: | ||
556 | iounmap(cp->sram); | ||
557 | err_unmap_reg: | ||
558 | iounmap(cp->reg); | ||
559 | err: | ||
560 | kfree(cp); | ||
561 | cpg = NULL; | ||
562 | platform_set_drvdata(pdev, NULL); | ||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int mv_remove(struct platform_device *pdev) | ||
567 | { | ||
568 | struct crypto_priv *cp = platform_get_drvdata(pdev); | ||
569 | |||
570 | crypto_unregister_alg(&mv_aes_alg_ecb); | ||
571 | crypto_unregister_alg(&mv_aes_alg_cbc); | ||
572 | kthread_stop(cp->queue_th); | ||
573 | free_irq(cp->irq, cp); | ||
574 | memset(cp->sram, 0, cp->sram_size); | ||
575 | iounmap(cp->sram); | ||
576 | iounmap(cp->reg); | ||
577 | kfree(cp); | ||
578 | cpg = NULL; | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | static struct platform_driver marvell_crypto = { | ||
583 | .probe = mv_probe, | ||
584 | .remove = mv_remove, | ||
585 | .driver = { | ||
586 | .owner = THIS_MODULE, | ||
587 | .name = "mv_crypto", | ||
588 | }, | ||
589 | }; | ||
590 | MODULE_ALIAS("platform:mv_crypto"); | ||
591 | |||
592 | static int __init mv_crypto_init(void) | ||
593 | { | ||
594 | return platform_driver_register(&marvell_crypto); | ||
595 | } | ||
596 | module_init(mv_crypto_init); | ||
597 | |||
598 | static void __exit mv_crypto_exit(void) | ||
599 | { | ||
600 | platform_driver_unregister(&marvell_crypto); | ||
601 | } | ||
602 | module_exit(mv_crypto_exit); | ||
603 | |||
604 | MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); | ||
605 | MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); | ||
606 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h new file mode 100644 index 000000000000..c3e25d3bb171 --- /dev/null +++ b/drivers/crypto/mv_cesa.h | |||
@@ -0,0 +1,119 @@ | |||
1 | #ifndef __MV_CRYPTO_H__ | ||
2 | #define __MV_CRYPTO_H__ | ||
3 | #define DIGEST_INITIAL_VAL_A 0xdd00 | ||
4 | #define DES_CMD_REG 0xdd58 | ||
5 | |||
6 | #define SEC_ACCEL_CMD 0xde00 | ||
7 | #define SEC_CMD_EN_SEC_ACCL0 (1 << 0) | ||
8 | #define SEC_CMD_EN_SEC_ACCL1 (1 << 1) | ||
9 | #define SEC_CMD_DISABLE_SEC (1 << 2) | ||
10 | |||
11 | #define SEC_ACCEL_DESC_P0 0xde04 | ||
12 | #define SEC_DESC_P0_PTR(x) (x) | ||
13 | |||
14 | #define SEC_ACCEL_DESC_P1 0xde14 | ||
15 | #define SEC_DESC_P1_PTR(x) (x) | ||
16 | |||
17 | #define SEC_ACCEL_CFG 0xde08 | ||
18 | #define SEC_CFG_STOP_DIG_ERR (1 << 0) | ||
19 | #define SEC_CFG_CH0_W_IDMA (1 << 7) | ||
20 | #define SEC_CFG_CH1_W_IDMA (1 << 8) | ||
21 | #define SEC_CFG_ACT_CH0_IDMA (1 << 9) | ||
22 | #define SEC_CFG_ACT_CH1_IDMA (1 << 10) | ||
23 | |||
24 | #define SEC_ACCEL_STATUS 0xde0c | ||
25 | #define SEC_ST_ACT_0 (1 << 0) | ||
26 | #define SEC_ST_ACT_1 (1 << 1) | ||
27 | |||
28 | /* | ||
29 | * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata | ||
30 | * 4.12. It looks like that it was part of an IRQ-controller in FPGA and | ||
31 | * someone forgot to remove it while switching to the core and moving to | ||
32 | * SEC_ACCEL_INT_STATUS. | ||
33 | */ | ||
34 | #define FPGA_INT_STATUS 0xdd68 | ||
35 | #define SEC_ACCEL_INT_STATUS 0xde20 | ||
36 | #define SEC_INT_AUTH_DONE (1 << 0) | ||
37 | #define SEC_INT_DES_E_DONE (1 << 1) | ||
38 | #define SEC_INT_AES_E_DONE (1 << 2) | ||
39 | #define SEC_INT_AES_D_DONE (1 << 3) | ||
40 | #define SEC_INT_ENC_DONE (1 << 4) | ||
41 | #define SEC_INT_ACCEL0_DONE (1 << 5) | ||
42 | #define SEC_INT_ACCEL1_DONE (1 << 6) | ||
43 | #define SEC_INT_ACC0_IDMA_DONE (1 << 7) | ||
44 | #define SEC_INT_ACC1_IDMA_DONE (1 << 8) | ||
45 | |||
46 | #define SEC_ACCEL_INT_MASK 0xde24 | ||
47 | |||
48 | #define AES_KEY_LEN (8 * 4) | ||
49 | |||
50 | struct sec_accel_config { | ||
51 | |||
52 | u32 config; | ||
53 | #define CFG_OP_MAC_ONLY 0 | ||
54 | #define CFG_OP_CRYPT_ONLY 1 | ||
55 | #define CFG_OP_MAC_CRYPT 2 | ||
56 | #define CFG_OP_CRYPT_MAC 3 | ||
57 | #define CFG_MACM_MD5 (4 << 4) | ||
58 | #define CFG_MACM_SHA1 (5 << 4) | ||
59 | #define CFG_MACM_HMAC_MD5 (6 << 4) | ||
60 | #define CFG_MACM_HMAC_SHA1 (7 << 4) | ||
61 | #define CFG_ENCM_DES (1 << 8) | ||
62 | #define CFG_ENCM_3DES (2 << 8) | ||
63 | #define CFG_ENCM_AES (3 << 8) | ||
64 | #define CFG_DIR_ENC (0 << 12) | ||
65 | #define CFG_DIR_DEC (1 << 12) | ||
66 | #define CFG_ENC_MODE_ECB (0 << 16) | ||
67 | #define CFG_ENC_MODE_CBC (1 << 16) | ||
68 | #define CFG_3DES_EEE (0 << 20) | ||
69 | #define CFG_3DES_EDE (1 << 20) | ||
70 | #define CFG_AES_LEN_128 (0 << 24) | ||
71 | #define CFG_AES_LEN_192 (1 << 24) | ||
72 | #define CFG_AES_LEN_256 (2 << 24) | ||
73 | |||
74 | u32 enc_p; | ||
75 | #define ENC_P_SRC(x) (x) | ||
76 | #define ENC_P_DST(x) ((x) << 16) | ||
77 | |||
78 | u32 enc_len; | ||
79 | #define ENC_LEN(x) (x) | ||
80 | |||
81 | u32 enc_key_p; | ||
82 | #define ENC_KEY_P(x) (x) | ||
83 | |||
84 | u32 enc_iv; | ||
85 | #define ENC_IV_POINT(x) ((x) << 0) | ||
86 | #define ENC_IV_BUF_POINT(x) ((x) << 16) | ||
87 | |||
88 | u32 mac_src_p; | ||
89 | #define MAC_SRC_DATA_P(x) (x) | ||
90 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) | ||
91 | |||
92 | u32 mac_digest; | ||
93 | u32 mac_iv; | ||
94 | } __attribute__((packed)); | ||
95 | /* | ||
96 | * /-----------\ 0 | ||
97 | * | ACCEL CFG | 4 * 8 | ||
98 | * |-----------| 0x20 | ||
99 | * | CRYPT KEY | 8 * 4 | ||
100 | * |-----------| 0x40 | ||
101 | * | IV IN | 4 * 4 | ||
102 | * |-----------| 0x40 (inplace) | ||
103 | * | IV BUF | 4 * 4 | ||
104 | * |-----------| 0x50 | ||
105 | * | DATA IN | 16 * x (max ->max_req_size) | ||
106 | * |-----------| 0x50 (inplace operation) | ||
107 | * | DATA OUT | 16 * x (max ->max_req_size) | ||
108 | * \-----------/ SRAM size | ||
109 | */ | ||
110 | #define SRAM_CONFIG 0x00 | ||
111 | #define SRAM_DATA_KEY_P 0x20 | ||
112 | #define SRAM_DATA_IV 0x40 | ||
113 | #define SRAM_DATA_IV_BUF 0x40 | ||
114 | #define SRAM_DATA_IN_START 0x50 | ||
115 | #define SRAM_DATA_OUT_START 0x50 | ||
116 | |||
117 | #define SRAM_CFG_SPACE 0x50 | ||
118 | |||
119 | #endif | ||
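The layout comment above translates directly into the offsets below: 0x50 bytes of config, key and IV space is exactly what SRAM_CFG_SPACE subtracts from the SRAM size to get max_req_size. A quick stand-alone check of that arithmetic (the SRAM size used here is only an example, the real one is board dependent):

#include <assert.h>
#include <stdio.h>

#define SRAM_CONFIG         0x00
#define SRAM_DATA_KEY_P     0x20
#define SRAM_DATA_IV        0x40
#define SRAM_DATA_IV_BUF    0x40    /* in place with IV IN */
#define SRAM_DATA_IN_START  0x50
#define SRAM_DATA_OUT_START 0x50    /* in-place operation */
#define SRAM_CFG_SPACE      0x50

int main(void)
{
    unsigned int sram_size = 8 * 1024;      /* example size only */
    unsigned int max_req_size = sram_size - SRAM_CFG_SPACE;

    /* 8 config words + 8 key words + 4 IV words == 0x50 bytes */
    assert(SRAM_DATA_KEY_P == SRAM_CONFIG + 8 * 4);
    assert(SRAM_DATA_IV == SRAM_DATA_KEY_P + 8 * 4);
    assert(SRAM_DATA_IN_START == SRAM_DATA_IV + 4 * 4);
    assert(SRAM_CFG_SPACE == SRAM_DATA_IN_START);

    printf("data window: %u bytes starting at 0x%x\n",
           max_req_size, (unsigned int)SRAM_DATA_IN_START);
    return 0;
}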
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index a2c8e8514b63..76cb6b345e7b 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c | |||
@@ -12,81 +12,43 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/algapi.h> | 15 | #include <crypto/internal/hash.h> |
16 | #include <crypto/sha.h> | 16 | #include <crypto/sha.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/cryptohash.h> | ||
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
24 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
25 | #include <asm/i387.h> | 24 | #include <asm/i387.h> |
26 | #include "padlock.h" | 25 | #include "padlock.h" |
27 | 26 | ||
28 | #define SHA1_DEFAULT_FALLBACK "sha1-generic" | 27 | struct padlock_sha_desc { |
29 | #define SHA256_DEFAULT_FALLBACK "sha256-generic" | 28 | struct shash_desc fallback; |
29 | }; | ||
30 | 30 | ||
31 | struct padlock_sha_ctx { | 31 | struct padlock_sha_ctx { |
32 | char *data; | 32 | struct crypto_shash *fallback; |
33 | size_t used; | ||
34 | int bypass; | ||
35 | void (*f_sha_padlock)(const char *in, char *out, int count); | ||
36 | struct hash_desc fallback; | ||
37 | }; | 33 | }; |
38 | 34 | ||
39 | static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) | 35 | static int padlock_sha_init(struct shash_desc *desc) |
40 | { | ||
41 | return crypto_tfm_ctx(tfm); | ||
42 | } | ||
43 | |||
44 | /* We'll need aligned address on the stack */ | ||
45 | #define NEAREST_ALIGNED(ptr) \ | ||
46 | ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) | ||
47 | |||
48 | static struct crypto_alg sha1_alg, sha256_alg; | ||
49 | |||
50 | static void padlock_sha_bypass(struct crypto_tfm *tfm) | ||
51 | { | 36 | { |
52 | if (ctx(tfm)->bypass) | 37 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
53 | return; | 38 | struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); |
54 | 39 | ||
55 | crypto_hash_init(&ctx(tfm)->fallback); | 40 | dctx->fallback.tfm = ctx->fallback; |
56 | if (ctx(tfm)->data && ctx(tfm)->used) { | 41 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
57 | struct scatterlist sg; | 42 | return crypto_shash_init(&dctx->fallback); |
58 | |||
59 | sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used); | ||
60 | crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); | ||
61 | } | ||
62 | |||
63 | ctx(tfm)->used = 0; | ||
64 | ctx(tfm)->bypass = 1; | ||
65 | } | ||
66 | |||
67 | static void padlock_sha_init(struct crypto_tfm *tfm) | ||
68 | { | ||
69 | ctx(tfm)->used = 0; | ||
70 | ctx(tfm)->bypass = 0; | ||
71 | } | 43 | } |
72 | 44 | ||
73 | static void padlock_sha_update(struct crypto_tfm *tfm, | 45 | static int padlock_sha_update(struct shash_desc *desc, |
74 | const uint8_t *data, unsigned int length) | 46 | const u8 *data, unsigned int length) |
75 | { | 47 | { |
76 | /* Our buffer is always one page. */ | 48 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
77 | if (unlikely(!ctx(tfm)->bypass && | ||
78 | (ctx(tfm)->used + length > PAGE_SIZE))) | ||
79 | padlock_sha_bypass(tfm); | ||
80 | |||
81 | if (unlikely(ctx(tfm)->bypass)) { | ||
82 | struct scatterlist sg; | ||
83 | sg_init_one(&sg, (uint8_t *)data, length); | ||
84 | crypto_hash_update(&ctx(tfm)->fallback, &sg, length); | ||
85 | return; | ||
86 | } | ||
87 | 49 | ||
88 | memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); | 50 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
89 | ctx(tfm)->used += length; | 51 | return crypto_shash_update(&dctx->fallback, data, length); |
90 | } | 52 | } |
91 | 53 | ||
92 | static inline void padlock_output_block(uint32_t *src, | 54 | static inline void padlock_output_block(uint32_t *src, |
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src, | |||
96 | *dst++ = swab32(*src++); | 58 | *dst++ = swab32(*src++); |
97 | } | 59 | } |
98 | 60 | ||
99 | static void padlock_do_sha1(const char *in, char *out, int count) | 61 | static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, |
62 | unsigned int count, u8 *out) | ||
100 | { | 63 | { |
101 | /* We can't store directly to *out as it may be unaligned. */ | 64 | /* We can't store directly to *out as it may be unaligned. */ |
102 | /* BTW Don't reduce the buffer size below 128 Bytes! | 65 | /* BTW Don't reduce the buffer size below 128 Bytes! |
103 | * PadLock microcode needs it that big. */ | 66 | * PadLock microcode needs it that big. */ |
104 | char buf[128+16]; | 67 | char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); |
105 | char *result = NEAREST_ALIGNED(buf); | 68 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
69 | struct sha1_state state; | ||
70 | unsigned int space; | ||
71 | unsigned int leftover; | ||
106 | int ts_state; | 72 | int ts_state; |
73 | int err; | ||
74 | |||
75 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
76 | err = crypto_shash_export(&dctx->fallback, &state); | ||
77 | if (err) | ||
78 | goto out; | ||
79 | |||
80 | if (state.count + count > ULONG_MAX) | ||
81 | return crypto_shash_finup(&dctx->fallback, in, count, out); | ||
82 | |||
83 | leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1; | ||
84 | space = SHA1_BLOCK_SIZE - leftover; | ||
85 | if (space) { | ||
86 | if (count > space) { | ||
87 | err = crypto_shash_update(&dctx->fallback, in, space) ?: | ||
88 | crypto_shash_export(&dctx->fallback, &state); | ||
89 | if (err) | ||
90 | goto out; | ||
91 | count -= space; | ||
92 | in += space; | ||
93 | } else { | ||
94 | memcpy(state.buffer + leftover, in, count); | ||
95 | in = state.buffer; | ||
96 | count += leftover; | ||
97 | state.count &= ~(SHA1_BLOCK_SIZE - 1); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | memcpy(result, &state.state, SHA1_DIGEST_SIZE); | ||
107 | 102 | ||
108 | ((uint32_t *)result)[0] = SHA1_H0; | ||
109 | ((uint32_t *)result)[1] = SHA1_H1; | ||
110 | ((uint32_t *)result)[2] = SHA1_H2; | ||
111 | ((uint32_t *)result)[3] = SHA1_H3; | ||
112 | ((uint32_t *)result)[4] = SHA1_H4; | ||
113 | |||
114 | /* prevent taking the spurious DNA fault with padlock. */ | 103 | /* prevent taking the spurious DNA fault with padlock. */ |
115 | ts_state = irq_ts_save(); | 104 | ts_state = irq_ts_save(); |
116 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ | 105 | asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ |
117 | : "+S"(in), "+D"(result) | 106 | : \ |
118 | : "c"(count), "a"(0)); | 107 | : "c"((unsigned long)state.count + count), \ |
108 | "a"((unsigned long)state.count), \ | ||
109 | "S"(in), "D"(result)); | ||
119 | irq_ts_restore(ts_state); | 110 | irq_ts_restore(ts_state); |
120 | 111 | ||
121 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); | 112 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); |
113 | |||
114 | out: | ||
115 | return err; | ||
122 | } | 116 | } |
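The finup path above first works out how far the running count is into the current 64-byte block: whatever is needed to reach block alignment goes through the software fallback, and only then is PadLock handed an aligned starting count. A small stand-alone illustration of that leftover/space arithmetic (the sample counts are arbitrary):

#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

int main(void)
{
    /* bytes already hashed, as exported from the fallback shash */
    unsigned long counts[] = { 0, 1, 63, 64, 65, 200 };
    unsigned int i;

    for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
        unsigned long count = counts[i];
        /*
         * fill of the current block; an exactly block-aligned count
         * (including 0) maps to 64, so space becomes 0 and nothing
         * extra has to be pushed through the fallback first
         */
        unsigned int leftover = ((count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
        unsigned int space = SHA1_BLOCK_SIZE - leftover;

        printf("count=%lu leftover=%u space=%u\n", count, leftover, space);
    }
    return 0;
}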
123 | 117 | ||
124 | static void padlock_do_sha256(const char *in, char *out, int count) | 118 | static int padlock_sha1_final(struct shash_desc *desc, u8 *out) |
119 | { | ||
120 | u8 buf[4]; | ||
121 | |||
122 | return padlock_sha1_finup(desc, buf, 0, out); | ||
123 | } | ||
124 | |||
125 | static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, | ||
126 | unsigned int count, u8 *out) | ||
125 | { | 127 | { |
126 | /* We can't store directly to *out as it may be unaligned. */ | 128 | /* We can't store directly to *out as it may be unaligned. */ |
127 | /* BTW Don't reduce the buffer size below 128 Bytes! | 129 | /* BTW Don't reduce the buffer size below 128 Bytes! |
128 | * PadLock microcode needs it that big. */ | 130 | * PadLock microcode needs it that big. */ |
129 | char buf[128+16]; | 131 | char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); |
130 | char *result = NEAREST_ALIGNED(buf); | 132 | struct padlock_sha_desc *dctx = shash_desc_ctx(desc); |
133 | struct sha256_state state; | ||
134 | unsigned int space; | ||
135 | unsigned int leftover; | ||
131 | int ts_state; | 136 | int ts_state; |
137 | int err; | ||
138 | |||
139 | dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
140 | err = crypto_shash_export(&dctx->fallback, &state); | ||
141 | if (err) | ||
142 | goto out; | ||
143 | |||
144 | if (state.count + count > ULONG_MAX) | ||
145 | return crypto_shash_finup(&dctx->fallback, in, count, out); | ||
146 | |||
147 | leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1; | ||
148 | space = SHA256_BLOCK_SIZE - leftover; | ||
149 | if (space) { | ||
150 | if (count > space) { | ||
151 | err = crypto_shash_update(&dctx->fallback, in, space) ?: | ||
152 | crypto_shash_export(&dctx->fallback, &state); | ||
153 | if (err) | ||
154 | goto out; | ||
155 | count -= space; | ||
156 | in += space; | ||
157 | } else { | ||
158 | memcpy(state.buf + leftover, in, count); | ||
159 | in = state.buf; | ||
160 | count += leftover; | ||
161 | state.count &= ~(SHA256_BLOCK_SIZE - 1); | ||
162 | } | ||
163 | } | ||
132 | 164 | ||
133 | ((uint32_t *)result)[0] = SHA256_H0; | 165 | memcpy(result, &state.state, SHA256_DIGEST_SIZE); |
134 | ((uint32_t *)result)[1] = SHA256_H1; | ||
135 | ((uint32_t *)result)[2] = SHA256_H2; | ||
136 | ((uint32_t *)result)[3] = SHA256_H3; | ||
137 | ((uint32_t *)result)[4] = SHA256_H4; | ||
138 | ((uint32_t *)result)[5] = SHA256_H5; | ||
139 | ((uint32_t *)result)[6] = SHA256_H6; | ||
140 | ((uint32_t *)result)[7] = SHA256_H7; | ||
141 | 166 | ||
142 | /* prevent taking the spurious DNA fault with padlock. */ | 167 | /* prevent taking the spurious DNA fault with padlock. */ |
143 | ts_state = irq_ts_save(); | 168 | ts_state = irq_ts_save(); |
144 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ | 169 | asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ |
145 | : "+S"(in), "+D"(result) | 170 | : \ |
146 | : "c"(count), "a"(0)); | 171 | : "c"((unsigned long)state.count + count), \ |
172 | "a"((unsigned long)state.count), \ | ||
173 | "S"(in), "D"(result)); | ||
147 | irq_ts_restore(ts_state); | 174 | irq_ts_restore(ts_state); |
148 | 175 | ||
149 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); | 176 | padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); |
177 | |||
178 | out: | ||
179 | return err; | ||
150 | } | 180 | } |
151 | 181 | ||
152 | static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) | 182 | static int padlock_sha256_final(struct shash_desc *desc, u8 *out) |
153 | { | 183 | { |
154 | if (unlikely(ctx(tfm)->bypass)) { | 184 | u8 buf[4]; |
155 | crypto_hash_final(&ctx(tfm)->fallback, out); | ||
156 | ctx(tfm)->bypass = 0; | ||
157 | return; | ||
158 | } | ||
159 | 185 | ||
160 | /* Pass the input buffer to PadLock microcode... */ | 186 | return padlock_sha256_finup(desc, buf, 0, out); |
161 | ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); | ||
162 | |||
163 | ctx(tfm)->used = 0; | ||
164 | } | 187 | } |
165 | 188 | ||
166 | static int padlock_cra_init(struct crypto_tfm *tfm) | 189 | static int padlock_cra_init(struct crypto_tfm *tfm) |
167 | { | 190 | { |
191 | struct crypto_shash *hash = __crypto_shash_cast(tfm); | ||
168 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | 192 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; |
169 | struct crypto_hash *fallback_tfm; | 193 | struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); |
170 | 194 | struct crypto_shash *fallback_tfm; | |
171 | /* For now we'll allocate one page. This | 195 | int err = -ENOMEM; |
172 | * could eventually be configurable one day. */ | ||
173 | ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); | ||
174 | if (!ctx(tfm)->data) | ||
175 | return -ENOMEM; | ||
176 | 196 | ||
177 | /* Allocate a fallback and abort if it failed. */ | 197 | /* Allocate a fallback and abort if it failed. */ |
178 | fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, | 198 | fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, |
179 | CRYPTO_ALG_ASYNC | | 199 | CRYPTO_ALG_NEED_FALLBACK); |
180 | CRYPTO_ALG_NEED_FALLBACK); | ||
181 | if (IS_ERR(fallback_tfm)) { | 200 | if (IS_ERR(fallback_tfm)) { |
182 | printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", | 201 | printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", |
183 | fallback_driver_name); | 202 | fallback_driver_name); |
184 | free_page((unsigned long)(ctx(tfm)->data)); | 203 | err = PTR_ERR(fallback_tfm); |
185 | return PTR_ERR(fallback_tfm); | 204 | goto out; |
186 | } | 205 | } |
187 | 206 | ||
188 | ctx(tfm)->fallback.tfm = fallback_tfm; | 207 | ctx->fallback = fallback_tfm; |
208 | hash->descsize += crypto_shash_descsize(fallback_tfm); | ||
189 | return 0; | 209 | return 0; |
190 | } | ||
191 | |||
192 | static int padlock_sha1_cra_init(struct crypto_tfm *tfm) | ||
193 | { | ||
194 | ctx(tfm)->f_sha_padlock = padlock_do_sha1; | ||
195 | 210 | ||
196 | return padlock_cra_init(tfm); | 211 | out: |
197 | } | 212 | return err; |
198 | |||
199 | static int padlock_sha256_cra_init(struct crypto_tfm *tfm) | ||
200 | { | ||
201 | ctx(tfm)->f_sha_padlock = padlock_do_sha256; | ||
202 | |||
203 | return padlock_cra_init(tfm); | ||
204 | } | 213 | } |
205 | 214 | ||
206 | static void padlock_cra_exit(struct crypto_tfm *tfm) | 215 | static void padlock_cra_exit(struct crypto_tfm *tfm) |
207 | { | 216 | { |
208 | if (ctx(tfm)->data) { | 217 | struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); |
209 | free_page((unsigned long)(ctx(tfm)->data)); | ||
210 | ctx(tfm)->data = NULL; | ||
211 | } | ||
212 | 218 | ||
213 | crypto_free_hash(ctx(tfm)->fallback.tfm); | 219 | crypto_free_shash(ctx->fallback); |
214 | ctx(tfm)->fallback.tfm = NULL; | ||
215 | } | 220 | } |
216 | 221 | ||
217 | static struct crypto_alg sha1_alg = { | 222 | static struct shash_alg sha1_alg = { |
218 | .cra_name = "sha1", | 223 | .digestsize = SHA1_DIGEST_SIZE, |
219 | .cra_driver_name = "sha1-padlock", | 224 | .init = padlock_sha_init, |
220 | .cra_priority = PADLOCK_CRA_PRIORITY, | 225 | .update = padlock_sha_update, |
221 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | | 226 | .finup = padlock_sha1_finup, |
222 | CRYPTO_ALG_NEED_FALLBACK, | 227 | .final = padlock_sha1_final, |
223 | .cra_blocksize = SHA1_BLOCK_SIZE, | 228 | .descsize = sizeof(struct padlock_sha_desc), |
224 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), | 229 | .base = { |
225 | .cra_module = THIS_MODULE, | 230 | .cra_name = "sha1", |
226 | .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), | 231 | .cra_driver_name = "sha1-padlock", |
227 | .cra_init = padlock_sha1_cra_init, | 232 | .cra_priority = PADLOCK_CRA_PRIORITY, |
228 | .cra_exit = padlock_cra_exit, | 233 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | |
229 | .cra_u = { | 234 | CRYPTO_ALG_NEED_FALLBACK, |
230 | .digest = { | 235 | .cra_blocksize = SHA1_BLOCK_SIZE, |
231 | .dia_digestsize = SHA1_DIGEST_SIZE, | 236 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), |
232 | .dia_init = padlock_sha_init, | 237 | .cra_module = THIS_MODULE, |
233 | .dia_update = padlock_sha_update, | 238 | .cra_init = padlock_cra_init, |
234 | .dia_final = padlock_sha_final, | 239 | .cra_exit = padlock_cra_exit, |
235 | } | ||
236 | } | 240 | } |
237 | }; | 241 | }; |
238 | 242 | ||
239 | static struct crypto_alg sha256_alg = { | 243 | static struct shash_alg sha256_alg = { |
240 | .cra_name = "sha256", | 244 | .digestsize = SHA256_DIGEST_SIZE, |
241 | .cra_driver_name = "sha256-padlock", | 245 | .init = padlock_sha_init, |
242 | .cra_priority = PADLOCK_CRA_PRIORITY, | 246 | .update = padlock_sha_update, |
243 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST | | 247 | .finup = padlock_sha256_finup, |
244 | CRYPTO_ALG_NEED_FALLBACK, | 248 | .final = padlock_sha256_final, |
245 | .cra_blocksize = SHA256_BLOCK_SIZE, | 249 | .descsize = sizeof(struct padlock_sha_desc), |
246 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), | 250 | .base = { |
247 | .cra_module = THIS_MODULE, | 251 | .cra_name = "sha256", |
248 | .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), | 252 | .cra_driver_name = "sha256-padlock", |
249 | .cra_init = padlock_sha256_cra_init, | 253 | .cra_priority = PADLOCK_CRA_PRIORITY, |
250 | .cra_exit = padlock_cra_exit, | 254 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | |
251 | .cra_u = { | 255 | CRYPTO_ALG_NEED_FALLBACK, |
252 | .digest = { | 256 | .cra_blocksize = SHA256_BLOCK_SIZE, |
253 | .dia_digestsize = SHA256_DIGEST_SIZE, | 257 | .cra_ctxsize = sizeof(struct padlock_sha_ctx), |
254 | .dia_init = padlock_sha_init, | 258 | .cra_module = THIS_MODULE, |
255 | .dia_update = padlock_sha_update, | 259 | .cra_init = padlock_cra_init, |
256 | .dia_final = padlock_sha_final, | 260 | .cra_exit = padlock_cra_exit, |
257 | } | ||
258 | } | 261 | } |
259 | }; | 262 | }; |
260 | 263 | ||
@@ -272,11 +275,11 @@ static int __init padlock_init(void) | |||
272 | return -ENODEV; | 275 | return -ENODEV; |
273 | } | 276 | } |
274 | 277 | ||
275 | rc = crypto_register_alg(&sha1_alg); | 278 | rc = crypto_register_shash(&sha1_alg); |
276 | if (rc) | 279 | if (rc) |
277 | goto out; | 280 | goto out; |
278 | 281 | ||
279 | rc = crypto_register_alg(&sha256_alg); | 282 | rc = crypto_register_shash(&sha256_alg); |
280 | if (rc) | 283 | if (rc) |
281 | goto out_unreg1; | 284 | goto out_unreg1; |
282 | 285 | ||
@@ -285,7 +288,7 @@ static int __init padlock_init(void) | |||
285 | return 0; | 288 | return 0; |
286 | 289 | ||
287 | out_unreg1: | 290 | out_unreg1: |
288 | crypto_unregister_alg(&sha1_alg); | 291 | crypto_unregister_shash(&sha1_alg); |
289 | out: | 292 | out: |
290 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); | 293 | printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); |
291 | return rc; | 294 | return rc; |
@@ -293,8 +296,8 @@ out: | |||
293 | 296 | ||
294 | static void __exit padlock_fini(void) | 297 | static void __exit padlock_fini(void) |
295 | { | 298 | { |
296 | crypto_unregister_alg(&sha1_alg); | 299 | crypto_unregister_shash(&sha1_alg); |
297 | crypto_unregister_alg(&sha256_alg); | 300 | crypto_unregister_shash(&sha256_alg); |
298 | } | 301 | } |
299 | 302 | ||
300 | module_init(padlock_init); | 303 | module_init(padlock_init); |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c70775fd3ce2..c47ffe8a73ef 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -86,6 +86,25 @@ struct talitos_request { | |||
86 | void *context; | 86 | void *context; |
87 | }; | 87 | }; |
88 | 88 | ||
89 | /* per-channel fifo management */ | ||
90 | struct talitos_channel { | ||
91 | /* request fifo */ | ||
92 | struct talitos_request *fifo; | ||
93 | |||
94 | /* number of requests pending in channel h/w fifo */ | ||
95 | atomic_t submit_count ____cacheline_aligned; | ||
96 | |||
97 | /* request submission (head) lock */ | ||
98 | spinlock_t head_lock ____cacheline_aligned; | ||
99 | /* index to next free descriptor request */ | ||
100 | int head; | ||
101 | |||
102 | /* request release (tail) lock */ | ||
103 | spinlock_t tail_lock ____cacheline_aligned; | ||
104 | /* index to next in-progress/done descriptor request */ | ||
105 | int tail; | ||
106 | }; | ||
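Grouping the per-channel state and padding the two locks onto their own cache lines keeps the submit (head) and completion (tail) paths from false-sharing a line. Outside the kernel the same idea looks roughly like this (a 64-byte line is assumed; the kernel uses ____cacheline_aligned rather than alignas):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64    /* assumed line size; the real value is CPU dependent */

struct channel {
    void *fifo;

    alignas(CACHELINE) int submit_count;    /* h/w fifo occupancy */

    alignas(CACHELINE) int head_lock;       /* submission side */
    int head;

    alignas(CACHELINE) int tail_lock;       /* completion side */
    int tail;
};

int main(void)
{
    printf("head_lock at %zu, tail_lock at %zu, struct size %zu\n",
           offsetof(struct channel, head_lock),
           offsetof(struct channel, tail_lock),
           sizeof(struct channel));
    return 0;
}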
107 | |||
89 | struct talitos_private { | 108 | struct talitos_private { |
90 | struct device *dev; | 109 | struct device *dev; |
91 | struct of_device *ofdev; | 110 | struct of_device *ofdev; |
@@ -101,15 +120,6 @@ struct talitos_private { | |||
101 | /* SEC Compatibility info */ | 120 | /* SEC Compatibility info */ |
102 | unsigned long features; | 121 | unsigned long features; |
103 | 122 | ||
104 | /* next channel to be assigned next incoming descriptor */ | ||
105 | atomic_t last_chan; | ||
106 | |||
107 | /* per-channel number of requests pending in channel h/w fifo */ | ||
108 | atomic_t *submit_count; | ||
109 | |||
110 | /* per-channel request fifo */ | ||
111 | struct talitos_request **fifo; | ||
112 | |||
113 | /* | 123 | /* |
114 | * length of the request fifo | 124 | * length of the request fifo |
115 | * fifo_len is chfifo_len rounded up to next power of 2 | 125 | * fifo_len is chfifo_len rounded up to next power of 2 |
@@ -117,15 +127,10 @@ struct talitos_private { | |||
117 | */ | 127 | */ |
118 | unsigned int fifo_len; | 128 | unsigned int fifo_len; |
119 | 129 | ||
120 | /* per-channel index to next free descriptor request */ | 130 | struct talitos_channel *chan; |
121 | int *head; | ||
122 | |||
123 | /* per-channel index to next in-progress/done descriptor request */ | ||
124 | int *tail; | ||
125 | 131 | ||
126 | /* per-channel request submission (head) and release (tail) locks */ | 132 | /* next channel to be assigned next incoming descriptor */ |
127 | spinlock_t *head_lock; | 133 | atomic_t last_chan ____cacheline_aligned; |
128 | spinlock_t *tail_lock; | ||
129 | 134 | ||
130 | /* request callback tasklet */ | 135 | /* request callback tasklet */ |
131 | struct tasklet_struct done_task; | 136 | struct tasklet_struct done_task; |
@@ -141,6 +146,12 @@ struct talitos_private { | |||
141 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | 146 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
142 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | 147 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
143 | 148 | ||
149 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | ||
150 | { | ||
151 | talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); | ||
152 | talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr)); | ||
153 | } | ||
154 | |||
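to_talitos_ptr() simply splits the (up to 36-bit) DMA address across the extended-pointer and pointer words; byte-swapping aside, the split is just a shift and a truncation, as in this stand-alone sketch (the example address is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dma_addr = 0xf12345678ULL;             /* a 36-bit bus address */
    uint32_t eptr = (uint32_t)(dma_addr >> 32);     /* upper_32_bits() */
    uint32_t ptr  = (uint32_t)dma_addr;             /* lower_32_bits() */

    /* prints eptr=0xf ptr=0x12345678 */
    printf("eptr=0x%" PRIx32 " ptr=0x%" PRIx32 "\n", eptr, ptr);
    return 0;
}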
144 | /* | 155 | /* |
145 | * map virtual single (contiguous) pointer to h/w descriptor pointer | 156 | * map virtual single (contiguous) pointer to h/w descriptor pointer |
146 | */ | 157 | */ |
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev, | |||
150 | unsigned char extent, | 161 | unsigned char extent, |
151 | enum dma_data_direction dir) | 162 | enum dma_data_direction dir) |
152 | { | 163 | { |
164 | dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); | ||
165 | |||
153 | talitos_ptr->len = cpu_to_be16(len); | 166 | talitos_ptr->len = cpu_to_be16(len); |
154 | talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); | 167 | to_talitos_ptr(talitos_ptr, dma_addr); |
155 | talitos_ptr->j_extent = extent; | 168 | talitos_ptr->j_extent = extent; |
156 | } | 169 | } |
157 | 170 | ||
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch) | |||
182 | return -EIO; | 195 | return -EIO; |
183 | } | 196 | } |
184 | 197 | ||
185 | /* set done writeback and IRQ */ | 198 | /* set 36-bit addressing, done writeback enable and done IRQ enable */ |
186 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | | 199 | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE | |
187 | TALITOS_CCCR_LO_CDIE); | 200 | TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); |
188 | 201 | ||
189 | /* and ICCR writeback, if available */ | 202 | /* and ICCR writeback, if available */ |
190 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) | 203 | if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) |
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
282 | /* emulate SEC's round-robin channel fifo polling scheme */ | 295 | /* emulate SEC's round-robin channel fifo polling scheme */ |
283 | ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); | 296 | ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); |
284 | 297 | ||
285 | spin_lock_irqsave(&priv->head_lock[ch], flags); | 298 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); |
286 | 299 | ||
287 | if (!atomic_inc_not_zero(&priv->submit_count[ch])) { | 300 | if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { |
288 | /* h/w fifo is full */ | 301 | /* h/w fifo is full */ |
289 | spin_unlock_irqrestore(&priv->head_lock[ch], flags); | 302 | spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); |
290 | return -EAGAIN; | 303 | return -EAGAIN; |
291 | } | 304 | } |
292 | 305 | ||
293 | head = priv->head[ch]; | 306 | head = priv->chan[ch].head; |
294 | request = &priv->fifo[ch][head]; | 307 | request = &priv->chan[ch].fifo[head]; |
295 | 308 | ||
296 | /* map descriptor and save caller data */ | 309 | /* map descriptor and save caller data */ |
297 | request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), | 310 | request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), |
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
300 | request->context = context; | 313 | request->context = context; |
301 | 314 | ||
302 | /* increment fifo head */ | 315 | /* increment fifo head */ |
303 | priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1); | 316 | priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1); |
304 | 317 | ||
305 | smp_wmb(); | 318 | smp_wmb(); |
306 | request->desc = desc; | 319 | request->desc = desc; |
307 | 320 | ||
308 | /* GO! */ | 321 | /* GO! */ |
309 | wmb(); | 322 | wmb(); |
310 | out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); | 323 | out_be32(priv->reg + TALITOS_FF(ch), |
324 | cpu_to_be32(upper_32_bits(request->dma_desc))); | ||
325 | out_be32(priv->reg + TALITOS_FF_LO(ch), | ||
326 | cpu_to_be32(lower_32_bits(request->dma_desc))); | ||
311 | 327 | ||
312 | spin_unlock_irqrestore(&priv->head_lock[ch], flags); | 328 | spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); |
313 | 329 | ||
314 | return -EINPROGRESS; | 330 | return -EINPROGRESS; |
315 | } | 331 | } |
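The head and tail indices above wrap with a mask rather than a modulo because fifo_len is rounded up to a power of two; occupancy itself is capped separately through submit_count. A minimal stand-alone ring using the same indexing (sizes are illustrative):

#include <stdio.h>

#define FIFO_LEN 8      /* must be a power of two for the mask to work */

int main(void)
{
    int fifo[FIFO_LEN];
    unsigned int head = 0, tail = 0;
    int i;

    for (i = 0; i < 5; i++) {
        fifo[head] = i;                             /* submit side */
        head = (head + 1) & (FIFO_LEN - 1);         /* like priv->chan[ch].head */
    }
    while (tail != head) {
        printf("complete request %d\n", fifo[tail]);    /* completion side */
        tail = (tail + 1) & (FIFO_LEN - 1);
    }
    return 0;
}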
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
324 | unsigned long flags; | 340 | unsigned long flags; |
325 | int tail, status; | 341 | int tail, status; |
326 | 342 | ||
327 | spin_lock_irqsave(&priv->tail_lock[ch], flags); | 343 | spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
328 | 344 | ||
329 | tail = priv->tail[ch]; | 345 | tail = priv->chan[ch].tail; |
330 | while (priv->fifo[ch][tail].desc) { | 346 | while (priv->chan[ch].fifo[tail].desc) { |
331 | request = &priv->fifo[ch][tail]; | 347 | request = &priv->chan[ch].fifo[tail]; |
332 | 348 | ||
333 | /* descriptors with their done bits set don't get the error */ | 349 | /* descriptors with their done bits set don't get the error */ |
334 | rmb(); | 350 | rmb(); |
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
354 | request->desc = NULL; | 370 | request->desc = NULL; |
355 | 371 | ||
356 | /* increment fifo tail */ | 372 | /* increment fifo tail */ |
357 | priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); | 373 | priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1); |
358 | 374 | ||
359 | spin_unlock_irqrestore(&priv->tail_lock[ch], flags); | 375 | spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); |
360 | 376 | ||
361 | atomic_dec(&priv->submit_count[ch]); | 377 | atomic_dec(&priv->chan[ch].submit_count); |
362 | 378 | ||
363 | saved_req.callback(dev, saved_req.desc, saved_req.context, | 379 | saved_req.callback(dev, saved_req.desc, saved_req.context, |
364 | status); | 380 | status); |
365 | /* channel may resume processing in single desc error case */ | 381 | /* channel may resume processing in single desc error case */ |
366 | if (error && !reset_ch && status == error) | 382 | if (error && !reset_ch && status == error) |
367 | return; | 383 | return; |
368 | spin_lock_irqsave(&priv->tail_lock[ch], flags); | 384 | spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); |
369 | tail = priv->tail[ch]; | 385 | tail = priv->chan[ch].tail; |
370 | } | 386 | } |
371 | 387 | ||
372 | spin_unlock_irqrestore(&priv->tail_lock[ch], flags); | 388 | spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); |
373 | } | 389 | } |
374 | 390 | ||
375 | /* | 391 | /* |
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data) | |||
397 | static struct talitos_desc *current_desc(struct device *dev, int ch) | 413 | static struct talitos_desc *current_desc(struct device *dev, int ch) |
398 | { | 414 | { |
399 | struct talitos_private *priv = dev_get_drvdata(dev); | 415 | struct talitos_private *priv = dev_get_drvdata(dev); |
400 | int tail = priv->tail[ch]; | 416 | int tail = priv->chan[ch].tail; |
401 | dma_addr_t cur_desc; | 417 | dma_addr_t cur_desc; |
402 | 418 | ||
403 | cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); | 419 | cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); |
404 | 420 | ||
405 | while (priv->fifo[ch][tail].dma_desc != cur_desc) { | 421 | while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) { |
406 | tail = (tail + 1) & (priv->fifo_len - 1); | 422 | tail = (tail + 1) & (priv->fifo_len - 1); |
407 | if (tail == priv->tail[ch]) { | 423 | if (tail == priv->chan[ch].tail) { |
408 | dev_err(dev, "couldn't locate current descriptor\n"); | 424 | dev_err(dev, "couldn't locate current descriptor\n"); |
409 | return NULL; | 425 | return NULL; |
410 | } | 426 | } |
411 | } | 427 | } |
412 | 428 | ||
413 | return priv->fifo[ch][tail].desc; | 429 | return priv->chan[ch].fifo[tail].desc; |
414 | } | 430 | } |
415 | 431 | ||
416 | /* | 432 | /* |
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |||
929 | int n_sg = sg_count; | 945 | int n_sg = sg_count; |
930 | 946 | ||
931 | while (n_sg--) { | 947 | while (n_sg--) { |
932 | link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); | 948 | to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); |
933 | link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); | 949 | link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); |
934 | link_tbl_ptr->j_extent = 0; | 950 | link_tbl_ptr->j_extent = 0; |
935 | link_tbl_ptr++; | 951 | link_tbl_ptr++; |
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
970 | struct talitos_desc *desc = &edesc->desc; | 986 | struct talitos_desc *desc = &edesc->desc; |
971 | unsigned int cryptlen = areq->cryptlen; | 987 | unsigned int cryptlen = areq->cryptlen; |
972 | unsigned int authsize = ctx->authsize; | 988 | unsigned int authsize = ctx->authsize; |
973 | unsigned int ivsize; | 989 | unsigned int ivsize = crypto_aead_ivsize(aead); |
974 | int sg_count, ret; | 990 | int sg_count, ret; |
975 | int sg_link_tbl_len; | 991 | int sg_link_tbl_len; |
976 | 992 | ||
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
978 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, | 994 | map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, |
979 | 0, DMA_TO_DEVICE); | 995 | 0, DMA_TO_DEVICE); |
980 | /* hmac data */ | 996 | /* hmac data */ |
981 | map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - | 997 | map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize, |
982 | sg_virt(areq->assoc), sg_virt(areq->assoc), 0, | 998 | sg_virt(areq->assoc), 0, DMA_TO_DEVICE); |
983 | DMA_TO_DEVICE); | ||
984 | /* cipher iv */ | 999 | /* cipher iv */ |
985 | ivsize = crypto_aead_ivsize(aead); | ||
986 | map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, | 1000 | map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, |
987 | DMA_TO_DEVICE); | 1001 | DMA_TO_DEVICE); |
988 | 1002 | ||
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1006 | edesc->src_is_chained); | 1020 | edesc->src_is_chained); |
1007 | 1021 | ||
1008 | if (sg_count == 1) { | 1022 | if (sg_count == 1) { |
1009 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1023 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); |
1010 | } else { | 1024 | } else { |
1011 | sg_link_tbl_len = cryptlen; | 1025 | sg_link_tbl_len = cryptlen; |
1012 | 1026 | ||
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1017 | &edesc->link_tbl[0]); | 1031 | &edesc->link_tbl[0]); |
1018 | if (sg_count > 1) { | 1032 | if (sg_count > 1) { |
1019 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1033 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1020 | desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); | 1034 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); |
1021 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1035 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1022 | edesc->dma_len, | 1036 | edesc->dma_len, |
1023 | DMA_BIDIRECTIONAL); | 1037 | DMA_BIDIRECTIONAL); |
1024 | } else { | 1038 | } else { |
1025 | /* Only one segment now, so no link tbl needed */ | 1039 | /* Only one segment now, so no link tbl needed */ |
1026 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> | 1040 | to_talitos_ptr(&desc->ptr[4], |
1027 | src)); | 1041 | sg_dma_address(areq->src)); |
1028 | } | 1042 | } |
1029 | } | 1043 | } |
1030 | 1044 | ||
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1039 | edesc->dst_is_chained); | 1053 | edesc->dst_is_chained); |
1040 | 1054 | ||
1041 | if (sg_count == 1) { | 1055 | if (sg_count == 1) { |
1042 | desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 1056 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); |
1043 | } else { | 1057 | } else { |
1044 | struct talitos_ptr *link_tbl_ptr = | 1058 | struct talitos_ptr *link_tbl_ptr = |
1045 | &edesc->link_tbl[edesc->src_nents + 1]; | 1059 | &edesc->link_tbl[edesc->src_nents + 1]; |
1046 | 1060 | ||
1047 | desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) | 1061 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + |
1048 | edesc->dma_link_tbl + | 1062 | (edesc->src_nents + 1) * |
1049 | edesc->src_nents + 1); | 1063 | sizeof(struct talitos_ptr)); |
1050 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 1064 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
1051 | link_tbl_ptr); | 1065 | link_tbl_ptr); |
1052 | 1066 | ||
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1059 | link_tbl_ptr->len = cpu_to_be16(authsize); | 1073 | link_tbl_ptr->len = cpu_to_be16(authsize); |
1060 | 1074 | ||
1061 | /* icv data follows link tables */ | 1075 | /* icv data follows link tables */ |
1062 | link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) | 1076 | to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl + |
1063 | edesc->dma_link_tbl + | 1077 | (edesc->src_nents + edesc->dst_nents + 2) * |
1064 | edesc->src_nents + | 1078 | sizeof(struct talitos_ptr)); |
1065 | edesc->dst_nents + 2); | ||
1066 | |||
1067 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1079 | desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1068 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1080 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
1069 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1081 | edesc->dma_len, DMA_BIDIRECTIONAL); |
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1338 | 1350 | ||
1339 | /* first DWORD empty */ | 1351 | /* first DWORD empty */ |
1340 | desc->ptr[0].len = 0; | 1352 | desc->ptr[0].len = 0; |
1341 | desc->ptr[0].ptr = 0; | 1353 | to_talitos_ptr(&desc->ptr[0], 0); |
1342 | desc->ptr[0].j_extent = 0; | 1354 | desc->ptr[0].j_extent = 0; |
1343 | 1355 | ||
1344 | /* cipher iv */ | 1356 | /* cipher iv */ |
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1362 | edesc->src_is_chained); | 1374 | edesc->src_is_chained); |
1363 | 1375 | ||
1364 | if (sg_count == 1) { | 1376 | if (sg_count == 1) { |
1365 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); | 1377 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); |
1366 | } else { | 1378 | } else { |
1367 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, | 1379 | sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, |
1368 | &edesc->link_tbl[0]); | 1380 | &edesc->link_tbl[0]); |
1369 | if (sg_count > 1) { | 1381 | if (sg_count > 1) { |
1382 | to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); | ||
1370 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1383 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1371 | desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); | ||
1372 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1384 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1373 | edesc->dma_len, | 1385 | edesc->dma_len, |
1374 | DMA_BIDIRECTIONAL); | 1386 | DMA_BIDIRECTIONAL); |
1375 | } else { | 1387 | } else { |
1376 | /* Only one segment now, so no link tbl needed */ | 1388 | /* Only one segment now, so no link tbl needed */ |
1377 | desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> | 1389 | to_talitos_ptr(&desc->ptr[3], |
1378 | src)); | 1390 | sg_dma_address(areq->src)); |
1379 | } | 1391 | } |
1380 | } | 1392 | } |
1381 | 1393 | ||
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1390 | edesc->dst_is_chained); | 1402 | edesc->dst_is_chained); |
1391 | 1403 | ||
1392 | if (sg_count == 1) { | 1404 | if (sg_count == 1) { |
1393 | desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); | 1405 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); |
1394 | } else { | 1406 | } else { |
1395 | struct talitos_ptr *link_tbl_ptr = | 1407 | struct talitos_ptr *link_tbl_ptr = |
1396 | &edesc->link_tbl[edesc->src_nents + 1]; | 1408 | &edesc->link_tbl[edesc->src_nents + 1]; |
1397 | 1409 | ||
1410 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + | ||
1411 | (edesc->src_nents + 1) * | ||
1412 | sizeof(struct talitos_ptr)); | ||
1398 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1413 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1399 | desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *) | ||
1400 | edesc->dma_link_tbl + | ||
1401 | edesc->src_nents + 1); | ||
1402 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, | 1414 | sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, |
1403 | link_tbl_ptr); | 1415 | link_tbl_ptr); |
1404 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, | 1416 | dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, |
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1411 | 1423 | ||
1412 | /* last DWORD empty */ | 1424 | /* last DWORD empty */ |
1413 | desc->ptr[6].len = 0; | 1425 | desc->ptr[6].len = 0; |
1414 | desc->ptr[6].ptr = 0; | 1426 | to_talitos_ptr(&desc->ptr[6], 0); |
1415 | desc->ptr[6].j_extent = 0; | 1427 | desc->ptr[6].j_extent = 0; |
1416 | 1428 | ||
1417 | ret = talitos_submit(dev, desc, callback, areq); | 1429 | ret = talitos_submit(dev, desc, callback, areq); |
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev) | |||
1742 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) | 1754 | if (hw_supports(dev, DESC_HDR_SEL0_RNG)) |
1743 | talitos_unregister_rng(dev); | 1755 | talitos_unregister_rng(dev); |
1744 | 1756 | ||
1745 | kfree(priv->submit_count); | 1757 | for (i = 0; i < priv->num_channels; i++) |
1746 | kfree(priv->tail); | 1758 | if (priv->chan[i].fifo) |
1747 | kfree(priv->head); | 1759 | kfree(priv->chan[i].fifo); |
1748 | |||
1749 | if (priv->fifo) | ||
1750 | for (i = 0; i < priv->num_channels; i++) | ||
1751 | kfree(priv->fifo[i]); | ||
1752 | 1760 | ||
1753 | kfree(priv->fifo); | 1761 | kfree(priv->chan); |
1754 | kfree(priv->head_lock); | ||
1755 | kfree(priv->tail_lock); | ||
1756 | 1762 | ||
1757 | if (priv->irq != NO_IRQ) { | 1763 | if (priv->irq != NO_IRQ) { |
1758 | free_irq(priv->irq, dev); | 1764 | free_irq(priv->irq, dev); |
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev, | |||
1872 | if (of_device_is_compatible(np, "fsl,sec2.1")) | 1878 | if (of_device_is_compatible(np, "fsl,sec2.1")) |
1873 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; | 1879 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; |
1874 | 1880 | ||
1875 | priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1881 | priv->chan = kzalloc(sizeof(struct talitos_channel) * |
1876 | GFP_KERNEL); | 1882 | priv->num_channels, GFP_KERNEL); |
1877 | priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, | 1883 | if (!priv->chan) { |
1878 | GFP_KERNEL); | 1884 | dev_err(dev, "failed to allocate channel management space\n"); |
1879 | if (!priv->head_lock || !priv->tail_lock) { | ||
1880 | dev_err(dev, "failed to allocate fifo locks\n"); | ||
1881 | err = -ENOMEM; | 1885 | err = -ENOMEM; |
1882 | goto err_out; | 1886 | goto err_out; |
1883 | } | 1887 | } |
1884 | 1888 | ||
1885 | for (i = 0; i < priv->num_channels; i++) { | 1889 | for (i = 0; i < priv->num_channels; i++) { |
1886 | spin_lock_init(&priv->head_lock[i]); | 1890 | spin_lock_init(&priv->chan[i].head_lock); |
1887 | spin_lock_init(&priv->tail_lock[i]); | 1891 | spin_lock_init(&priv->chan[i].tail_lock); |
1888 | } | ||
1889 | |||
1890 | priv->fifo = kmalloc(sizeof(struct talitos_request *) * | ||
1891 | priv->num_channels, GFP_KERNEL); | ||
1892 | if (!priv->fifo) { | ||
1893 | dev_err(dev, "failed to allocate request fifo\n"); | ||
1894 | err = -ENOMEM; | ||
1895 | goto err_out; | ||
1896 | } | 1892 | } |
1897 | 1893 | ||
1898 | priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); | 1894 | priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); |
1899 | 1895 | ||
1900 | for (i = 0; i < priv->num_channels; i++) { | 1896 | for (i = 0; i < priv->num_channels; i++) { |
1901 | priv->fifo[i] = kzalloc(sizeof(struct talitos_request) * | 1897 | priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) * |
1902 | priv->fifo_len, GFP_KERNEL); | 1898 | priv->fifo_len, GFP_KERNEL); |
1903 | if (!priv->fifo[i]) { | 1899 | if (!priv->chan[i].fifo) { |
1904 | dev_err(dev, "failed to allocate request fifo %d\n", i); | 1900 | dev_err(dev, "failed to allocate request fifo %d\n", i); |
1905 | err = -ENOMEM; | 1901 | err = -ENOMEM; |
1906 | goto err_out; | 1902 | goto err_out; |
1907 | } | 1903 | } |
1908 | } | 1904 | } |
1909 | 1905 | ||
1910 | priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels, | ||
1911 | GFP_KERNEL); | ||
1912 | if (!priv->submit_count) { | ||
1913 | dev_err(dev, "failed to allocate fifo submit count space\n"); | ||
1914 | err = -ENOMEM; | ||
1915 | goto err_out; | ||
1916 | } | ||
1917 | for (i = 0; i < priv->num_channels; i++) | 1906 | for (i = 0; i < priv->num_channels; i++) |
1918 | atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); | 1907 | atomic_set(&priv->chan[i].submit_count, |
1908 | -(priv->chfifo_len - 1)); | ||
1919 | 1909 | ||
1920 | priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); | 1910 | dma_set_mask(dev, DMA_BIT_MASK(36)); |
1921 | priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); | ||
1922 | if (!priv->head || !priv->tail) { | ||
1923 | dev_err(dev, "failed to allocate request index space\n"); | ||
1924 | err = -ENOMEM; | ||
1925 | goto err_out; | ||
1926 | } | ||
1927 | 1911 | ||
1928 | /* reset and initialize the h/w */ | 1912 | /* reset and initialize the h/w */ |
1929 | err = init_device(dev); | 1913 | err = init_device(dev); |
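
The talitos.c hunks above replace the driver's parallel per-channel arrays (priv->head[ch], priv->tail[ch], priv->fifo[ch], priv->submit_count[ch] and the two lock arrays) with a single per-channel structure, and route every descriptor-pointer store through to_talitos_ptr() so that 36-bit bus addresses can be programmed. Neither the structure nor the helper is defined in this excerpt; the sketch below is a hypothetical reconstruction, with field names inferred from the call sites and the extended-address word (eptr) assumed from the 36-bit addressing change.

    /*
     * Hypothetical sketch only: layout and names inferred from the hunks above,
     * not copied from the driver.
     */
    struct talitos_channel {
            struct talitos_request *fifo;           /* request fifo (fifo_len entries) */

            atomic_t submit_count ____cacheline_aligned; /* pending requests in h/w fifo */

            spinlock_t head_lock ____cacheline_aligned;
            int head;                               /* producer index */

            spinlock_t tail_lock ____cacheline_aligned;
            int tail;                               /* consumer index */
    };

    /* Write a (possibly 36-bit) bus address into a descriptor pointer field,
     * assuming talitos_ptr carries a big-endian extended-address word. */
    static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr)
    {
            ptr->ptr  = cpu_to_be32(lower_32_bits(dma_addr));
            ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
    }

Splitting the locks and indices into one cache-line-aligned struct per channel also matches the "align locks on cache lines" entry in the merge summary.
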
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 575981f0cfda..ff5a1450e145 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -57,6 +57,7 @@ | |||
57 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ | 57 | #define TALITOS_CCCR_RESET 0x1 /* channel reset */ |
58 | #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) | 58 | #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) |
59 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ | 59 | #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ |
60 | #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ | ||
60 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ | 61 | #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ |
61 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ | 62 | #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ |
62 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ | 63 | #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ |
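
The new TALITOS_CCCR_LO_EAE bit pairs with the dma_set_mask(dev, DMA_BIT_MASK(36)) call added to talitos_probe(): once the device may hand out 36-bit DMA addresses, each channel has to be told to honour the extended-address words. The channel reset path is not shown in this excerpt; a hedged sketch of how the bit might be applied (setbits32() is the powerpc register helper the driver already uses elsewhere):

    /* Illustrative only: enable extended (36-bit) addressing on one channel. */
    static void demo_enable_extended_addressing(struct device *dev, int ch)
    {
            struct talitos_private *priv = dev_get_drvdata(dev);

            if (dma_get_mask(dev) == DMA_BIT_MASK(36))
                    setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE);
    }
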
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 5a2bd1cc9656..1ffb53f74d37 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
@@ -22,11 +22,9 @@ struct seq_file; | |||
22 | 22 | ||
23 | struct crypto_type { | 23 | struct crypto_type { |
24 | unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); | 24 | unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); |
25 | unsigned int (*extsize)(struct crypto_alg *alg, | 25 | unsigned int (*extsize)(struct crypto_alg *alg); |
26 | const struct crypto_type *frontend); | ||
27 | int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); | 26 | int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); |
28 | int (*init_tfm)(struct crypto_tfm *tfm, | 27 | int (*init_tfm)(struct crypto_tfm *tfm); |
29 | const struct crypto_type *frontend); | ||
30 | void (*show)(struct seq_file *m, struct crypto_alg *alg); | 28 | void (*show)(struct seq_file *m, struct crypto_alg *alg); |
31 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); | 29 | struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); |
32 | 30 | ||
@@ -52,6 +50,7 @@ struct crypto_template { | |||
52 | 50 | ||
53 | struct crypto_instance *(*alloc)(struct rtattr **tb); | 51 | struct crypto_instance *(*alloc)(struct rtattr **tb); |
54 | void (*free)(struct crypto_instance *inst); | 52 | void (*free)(struct crypto_instance *inst); |
53 | int (*create)(struct crypto_template *tmpl, struct rtattr **tb); | ||
55 | 54 | ||
56 | char name[CRYPTO_MAX_ALG_NAME]; | 55 | char name[CRYPTO_MAX_ALG_NAME]; |
57 | }; | 56 | }; |
@@ -60,6 +59,7 @@ struct crypto_spawn { | |||
60 | struct list_head list; | 59 | struct list_head list; |
61 | struct crypto_alg *alg; | 60 | struct crypto_alg *alg; |
62 | struct crypto_instance *inst; | 61 | struct crypto_instance *inst; |
62 | const struct crypto_type *frontend; | ||
63 | u32 mask; | 63 | u32 mask; |
64 | }; | 64 | }; |
65 | 65 | ||
@@ -114,11 +114,19 @@ int crypto_register_template(struct crypto_template *tmpl); | |||
114 | void crypto_unregister_template(struct crypto_template *tmpl); | 114 | void crypto_unregister_template(struct crypto_template *tmpl); |
115 | struct crypto_template *crypto_lookup_template(const char *name); | 115 | struct crypto_template *crypto_lookup_template(const char *name); |
116 | 116 | ||
117 | int crypto_register_instance(struct crypto_template *tmpl, | ||
118 | struct crypto_instance *inst); | ||
119 | |||
117 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | 120 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, |
118 | struct crypto_instance *inst, u32 mask); | 121 | struct crypto_instance *inst, u32 mask); |
122 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | ||
123 | struct crypto_instance *inst, | ||
124 | const struct crypto_type *frontend); | ||
125 | |||
119 | void crypto_drop_spawn(struct crypto_spawn *spawn); | 126 | void crypto_drop_spawn(struct crypto_spawn *spawn); |
120 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | 127 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, |
121 | u32 mask); | 128 | u32 mask); |
129 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn); | ||
122 | 130 | ||
123 | static inline void crypto_set_spawn(struct crypto_spawn *spawn, | 131 | static inline void crypto_set_spawn(struct crypto_spawn *spawn, |
124 | struct crypto_instance *inst) | 132 | struct crypto_instance *inst) |
@@ -129,8 +137,19 @@ static inline void crypto_set_spawn(struct crypto_spawn *spawn, | |||
129 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); | 137 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); |
130 | int crypto_check_attr_type(struct rtattr **tb, u32 type); | 138 | int crypto_check_attr_type(struct rtattr **tb, u32 type); |
131 | const char *crypto_attr_alg_name(struct rtattr *rta); | 139 | const char *crypto_attr_alg_name(struct rtattr *rta); |
132 | struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); | 140 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, |
141 | const struct crypto_type *frontend, | ||
142 | u32 type, u32 mask); | ||
143 | |||
144 | static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, | ||
145 | u32 type, u32 mask) | ||
146 | { | ||
147 | return crypto_attr_alg2(rta, NULL, type, mask); | ||
148 | } | ||
149 | |||
133 | int crypto_attr_u32(struct rtattr *rta, u32 *num); | 150 | int crypto_attr_u32(struct rtattr *rta, u32 *num); |
151 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, | ||
152 | unsigned int head); | ||
134 | struct crypto_instance *crypto_alloc_instance(const char *name, | 153 | struct crypto_instance *crypto_alloc_instance(const char *name, |
135 | struct crypto_alg *alg); | 154 | struct crypto_alg *alg); |
136 | 155 | ||
@@ -157,12 +176,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc, | |||
157 | 176 | ||
158 | static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) | 177 | static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) |
159 | { | 178 | { |
160 | unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); | 179 | return PTR_ALIGN(crypto_tfm_ctx(tfm), |
161 | unsigned long align = crypto_tfm_alg_alignmask(tfm); | 180 | crypto_tfm_alg_alignmask(tfm) + 1); |
162 | |||
163 | if (align <= crypto_tfm_ctx_alignment()) | ||
164 | align = 1; | ||
165 | return (void *)ALIGN(addr, align); | ||
166 | } | 181 | } |
167 | 182 | ||
168 | static inline struct crypto_instance *crypto_tfm_alg_instance( | 183 | static inline struct crypto_instance *crypto_tfm_alg_instance( |
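
Besides dropping the frontend argument from extsize/init_tfm and turning crypto_attr_alg() into a wrapper around crypto_attr_alg2(), this hunk simplifies crypto_tfm_ctx_aligned(): the context returned by crypto_tfm_ctx() is already aligned to crypto_tfm_ctx_alignment(), so rounding it up with PTR_ALIGN to alignmask + 1 is a no-op whenever the requested alignment is no larger than the natural one, and the explicit special case can go. A stand-alone C illustration of that identity (not kernel code, names are ad hoc):

    #include <assert.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a)     (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
    #define PTR_ALIGN_UP(p, a) ((void *)ALIGN_UP((uintptr_t)(p), (uintptr_t)(a)))

    int main(void)
    {
            /* Storage that is at least 4-byte aligned, standing in for the
             * CRYPTO_MINALIGN-aligned tfm context. */
            long long ctx_storage[8];
            void *ctx = ctx_storage;

            /* Requested alignment <= natural alignment: pointer is unchanged. */
            assert(PTR_ALIGN_UP(ctx, 4) == ctx);

            /* Larger request: pointer is rounded up to the next boundary. */
            assert((uintptr_t)PTR_ALIGN_UP(ctx, 64) % 64 == 0);
            return 0;
    }
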
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 55fa7bbdbc71..2f65a6e8ea4d 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/crypto.h> | 8 | #include <linux/crypto.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <crypto/hash.h> | ||
10 | 11 | ||
11 | struct cryptd_ablkcipher { | 12 | struct cryptd_ablkcipher { |
12 | struct crypto_ablkcipher base; | 13 | struct crypto_ablkcipher base; |
@@ -24,4 +25,20 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, | |||
24 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); | 25 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); |
25 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); | 26 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); |
26 | 27 | ||
28 | struct cryptd_ahash { | ||
29 | struct crypto_ahash base; | ||
30 | }; | ||
31 | |||
32 | static inline struct cryptd_ahash *__cryptd_ahash_cast( | ||
33 | struct crypto_ahash *tfm) | ||
34 | { | ||
35 | return (struct cryptd_ahash *)tfm; | ||
36 | } | ||
37 | |||
38 | /* alg_name should be algorithm to be cryptd-ed */ | ||
39 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | ||
40 | u32 type, u32 mask); | ||
41 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); | ||
42 | void cryptd_free_ahash(struct cryptd_ahash *tfm); | ||
43 | |||
27 | #endif | 44 | #endif |
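
cryptd.h now exposes an asynchronous hash wrapper alongside the existing ablkcipher one: allocate a "cryptd(<alg>)" instance, drive it through the normal ahash request API, and use cryptd_ahash_child() when the underlying synchronous shash is needed directly. A hedged usage sketch (all demo_* names are hypothetical, and the algorithm name is only an example):

    #include <crypto/cryptd.h>
    #include <crypto/hash.h>
    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    struct demo_result {
            struct completion completion;
            int err;
    };

    static void demo_complete(struct crypto_async_request *areq, int err)
    {
            struct demo_result *res = areq->data;

            if (err == -EINPROGRESS)        /* request only left the backlog */
                    return;
            res->err = err;
            complete(&res->completion);
    }

    static int demo_cryptd_sha1(struct scatterlist *sg, unsigned int len, u8 *out)
    {
            struct cryptd_ahash *ctfm;
            struct ahash_request *req;
            struct demo_result res;
            int err;

            ctfm = cryptd_alloc_ahash("sha1", 0, 0);   /* instantiates "cryptd(sha1)" */
            if (IS_ERR(ctfm))
                    return PTR_ERR(ctfm);

            req = ahash_request_alloc(&ctfm->base, GFP_KERNEL);
            if (!req) {
                    cryptd_free_ahash(ctfm);
                    return -ENOMEM;
            }

            init_completion(&res.completion);
            ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                       demo_complete, &res);
            ahash_request_set_crypt(req, sg, out, len);

            err = crypto_ahash_digest(req);
            if (err == -EINPROGRESS || err == -EBUSY) {
                    wait_for_completion(&res.completion);
                    err = res.err;
            }

            ahash_request_free(req);
            cryptd_free_ahash(ctfm);
            return err;
    }
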
diff --git a/include/crypto/hash.h b/include/crypto/hash.h index d56bb71617c3..26cb1eb16f4c 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h | |||
@@ -15,6 +15,42 @@ | |||
15 | 15 | ||
16 | #include <linux/crypto.h> | 16 | #include <linux/crypto.h> |
17 | 17 | ||
18 | struct crypto_ahash; | ||
19 | |||
20 | struct hash_alg_common { | ||
21 | unsigned int digestsize; | ||
22 | unsigned int statesize; | ||
23 | |||
24 | struct crypto_alg base; | ||
25 | }; | ||
26 | |||
27 | struct ahash_request { | ||
28 | struct crypto_async_request base; | ||
29 | |||
30 | unsigned int nbytes; | ||
31 | struct scatterlist *src; | ||
32 | u8 *result; | ||
33 | |||
34 | /* This field may only be used by the ahash API code. */ | ||
35 | void *priv; | ||
36 | |||
37 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
38 | }; | ||
39 | |||
40 | struct ahash_alg { | ||
41 | int (*init)(struct ahash_request *req); | ||
42 | int (*update)(struct ahash_request *req); | ||
43 | int (*final)(struct ahash_request *req); | ||
44 | int (*finup)(struct ahash_request *req); | ||
45 | int (*digest)(struct ahash_request *req); | ||
46 | int (*export)(struct ahash_request *req, void *out); | ||
47 | int (*import)(struct ahash_request *req, const void *in); | ||
48 | int (*setkey)(struct crypto_ahash *tfm, const u8 *key, | ||
49 | unsigned int keylen); | ||
50 | |||
51 | struct hash_alg_common halg; | ||
52 | }; | ||
53 | |||
18 | struct shash_desc { | 54 | struct shash_desc { |
19 | struct crypto_shash *tfm; | 55 | struct crypto_shash *tfm; |
20 | u32 flags; | 56 | u32 flags; |
@@ -24,7 +60,6 @@ struct shash_desc { | |||
24 | 60 | ||
25 | struct shash_alg { | 61 | struct shash_alg { |
26 | int (*init)(struct shash_desc *desc); | 62 | int (*init)(struct shash_desc *desc); |
27 | int (*reinit)(struct shash_desc *desc); | ||
28 | int (*update)(struct shash_desc *desc, const u8 *data, | 63 | int (*update)(struct shash_desc *desc, const u8 *data, |
29 | unsigned int len); | 64 | unsigned int len); |
30 | int (*final)(struct shash_desc *desc, u8 *out); | 65 | int (*final)(struct shash_desc *desc, u8 *out); |
@@ -32,38 +67,48 @@ struct shash_alg { | |||
32 | unsigned int len, u8 *out); | 67 | unsigned int len, u8 *out); |
33 | int (*digest)(struct shash_desc *desc, const u8 *data, | 68 | int (*digest)(struct shash_desc *desc, const u8 *data, |
34 | unsigned int len, u8 *out); | 69 | unsigned int len, u8 *out); |
70 | int (*export)(struct shash_desc *desc, void *out); | ||
71 | int (*import)(struct shash_desc *desc, const void *in); | ||
35 | int (*setkey)(struct crypto_shash *tfm, const u8 *key, | 72 | int (*setkey)(struct crypto_shash *tfm, const u8 *key, |
36 | unsigned int keylen); | 73 | unsigned int keylen); |
37 | 74 | ||
38 | unsigned int descsize; | 75 | unsigned int descsize; |
39 | unsigned int digestsize; | 76 | |
77 | /* These fields must match hash_alg_common. */ | ||
78 | unsigned int digestsize | ||
79 | __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); | ||
80 | unsigned int statesize; | ||
40 | 81 | ||
41 | struct crypto_alg base; | 82 | struct crypto_alg base; |
42 | }; | 83 | }; |
43 | 84 | ||
44 | struct crypto_ahash { | 85 | struct crypto_ahash { |
86 | int (*init)(struct ahash_request *req); | ||
87 | int (*update)(struct ahash_request *req); | ||
88 | int (*final)(struct ahash_request *req); | ||
89 | int (*finup)(struct ahash_request *req); | ||
90 | int (*digest)(struct ahash_request *req); | ||
91 | int (*export)(struct ahash_request *req, void *out); | ||
92 | int (*import)(struct ahash_request *req, const void *in); | ||
93 | int (*setkey)(struct crypto_ahash *tfm, const u8 *key, | ||
94 | unsigned int keylen); | ||
95 | |||
96 | unsigned int reqsize; | ||
45 | struct crypto_tfm base; | 97 | struct crypto_tfm base; |
46 | }; | 98 | }; |
47 | 99 | ||
48 | struct crypto_shash { | 100 | struct crypto_shash { |
101 | unsigned int descsize; | ||
49 | struct crypto_tfm base; | 102 | struct crypto_tfm base; |
50 | }; | 103 | }; |
51 | 104 | ||
52 | static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) | 105 | static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) |
53 | { | 106 | { |
54 | return (struct crypto_ahash *)tfm; | 107 | return container_of(tfm, struct crypto_ahash, base); |
55 | } | 108 | } |
56 | 109 | ||
57 | static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, | 110 | struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, |
58 | u32 type, u32 mask) | 111 | u32 mask); |
59 | { | ||
60 | type &= ~CRYPTO_ALG_TYPE_MASK; | ||
61 | mask &= ~CRYPTO_ALG_TYPE_MASK; | ||
62 | type |= CRYPTO_ALG_TYPE_AHASH; | ||
63 | mask |= CRYPTO_ALG_TYPE_AHASH_MASK; | ||
64 | |||
65 | return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask)); | ||
66 | } | ||
67 | 112 | ||
68 | static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) | 113 | static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) |
69 | { | 114 | { |
@@ -72,7 +117,7 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) | |||
72 | 117 | ||
73 | static inline void crypto_free_ahash(struct crypto_ahash *tfm) | 118 | static inline void crypto_free_ahash(struct crypto_ahash *tfm) |
74 | { | 119 | { |
75 | crypto_free_tfm(crypto_ahash_tfm(tfm)); | 120 | crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); |
76 | } | 121 | } |
77 | 122 | ||
78 | static inline unsigned int crypto_ahash_alignmask( | 123 | static inline unsigned int crypto_ahash_alignmask( |
@@ -81,14 +126,26 @@ static inline unsigned int crypto_ahash_alignmask( | |||
81 | return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); | 126 | return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); |
82 | } | 127 | } |
83 | 128 | ||
84 | static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm) | 129 | static inline struct hash_alg_common *__crypto_hash_alg_common( |
130 | struct crypto_alg *alg) | ||
131 | { | ||
132 | return container_of(alg, struct hash_alg_common, base); | ||
133 | } | ||
134 | |||
135 | static inline struct hash_alg_common *crypto_hash_alg_common( | ||
136 | struct crypto_ahash *tfm) | ||
85 | { | 137 | { |
86 | return &crypto_ahash_tfm(tfm)->crt_ahash; | 138 | return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); |
87 | } | 139 | } |
88 | 140 | ||
89 | static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) | 141 | static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) |
90 | { | 142 | { |
91 | return crypto_ahash_crt(tfm)->digestsize; | 143 | return crypto_hash_alg_common(tfm)->digestsize; |
144 | } | ||
145 | |||
146 | static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) | ||
147 | { | ||
148 | return crypto_hash_alg_common(tfm)->statesize; | ||
92 | } | 149 | } |
93 | 150 | ||
94 | static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) | 151 | static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) |
@@ -114,7 +171,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm( | |||
114 | 171 | ||
115 | static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) | 172 | static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) |
116 | { | 173 | { |
117 | return crypto_ahash_crt(tfm)->reqsize; | 174 | return tfm->reqsize; |
118 | } | 175 | } |
119 | 176 | ||
120 | static inline void *ahash_request_ctx(struct ahash_request *req) | 177 | static inline void *ahash_request_ctx(struct ahash_request *req) |
@@ -122,44 +179,30 @@ static inline void *ahash_request_ctx(struct ahash_request *req) | |||
122 | return req->__ctx; | 179 | return req->__ctx; |
123 | } | 180 | } |
124 | 181 | ||
125 | static inline int crypto_ahash_setkey(struct crypto_ahash *tfm, | 182 | int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
126 | const u8 *key, unsigned int keylen) | 183 | unsigned int keylen); |
127 | { | 184 | int crypto_ahash_finup(struct ahash_request *req); |
128 | struct ahash_tfm *crt = crypto_ahash_crt(tfm); | 185 | int crypto_ahash_final(struct ahash_request *req); |
129 | 186 | int crypto_ahash_digest(struct ahash_request *req); | |
130 | return crt->setkey(tfm, key, keylen); | ||
131 | } | ||
132 | 187 | ||
133 | static inline int crypto_ahash_digest(struct ahash_request *req) | 188 | static inline int crypto_ahash_export(struct ahash_request *req, void *out) |
134 | { | 189 | { |
135 | struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); | 190 | return crypto_ahash_reqtfm(req)->export(req, out); |
136 | return crt->digest(req); | ||
137 | } | 191 | } |
138 | 192 | ||
139 | static inline void crypto_ahash_export(struct ahash_request *req, u8 *out) | 193 | static inline int crypto_ahash_import(struct ahash_request *req, const void *in) |
140 | { | 194 | { |
141 | memcpy(out, ahash_request_ctx(req), | 195 | return crypto_ahash_reqtfm(req)->import(req, in); |
142 | crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); | ||
143 | } | 196 | } |
144 | 197 | ||
145 | int crypto_ahash_import(struct ahash_request *req, const u8 *in); | ||
146 | |||
147 | static inline int crypto_ahash_init(struct ahash_request *req) | 198 | static inline int crypto_ahash_init(struct ahash_request *req) |
148 | { | 199 | { |
149 | struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); | 200 | return crypto_ahash_reqtfm(req)->init(req); |
150 | return crt->init(req); | ||
151 | } | 201 | } |
152 | 202 | ||
153 | static inline int crypto_ahash_update(struct ahash_request *req) | 203 | static inline int crypto_ahash_update(struct ahash_request *req) |
154 | { | 204 | { |
155 | struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); | 205 | return crypto_ahash_reqtfm(req)->update(req); |
156 | return crt->update(req); | ||
157 | } | ||
158 | |||
159 | static inline int crypto_ahash_final(struct ahash_request *req) | ||
160 | { | ||
161 | struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); | ||
162 | return crt->final(req); | ||
163 | } | 206 | } |
164 | 207 | ||
165 | static inline void ahash_request_set_tfm(struct ahash_request *req, | 208 | static inline void ahash_request_set_tfm(struct ahash_request *req, |
@@ -184,7 +227,7 @@ static inline struct ahash_request *ahash_request_alloc( | |||
184 | 227 | ||
185 | static inline void ahash_request_free(struct ahash_request *req) | 228 | static inline void ahash_request_free(struct ahash_request *req) |
186 | { | 229 | { |
187 | kfree(req); | 230 | kzfree(req); |
188 | } | 231 | } |
189 | 232 | ||
190 | static inline struct ahash_request *ahash_request_cast( | 233 | static inline struct ahash_request *ahash_request_cast( |
@@ -251,6 +294,11 @@ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) | |||
251 | return crypto_shash_alg(tfm)->digestsize; | 294 | return crypto_shash_alg(tfm)->digestsize; |
252 | } | 295 | } |
253 | 296 | ||
297 | static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm) | ||
298 | { | ||
299 | return crypto_shash_alg(tfm)->statesize; | ||
300 | } | ||
301 | |||
254 | static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) | 302 | static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) |
255 | { | 303 | { |
256 | return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); | 304 | return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); |
@@ -268,7 +316,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) | |||
268 | 316 | ||
269 | static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) | 317 | static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) |
270 | { | 318 | { |
271 | return crypto_shash_alg(tfm)->descsize; | 319 | return tfm->descsize; |
272 | } | 320 | } |
273 | 321 | ||
274 | static inline void *shash_desc_ctx(struct shash_desc *desc) | 322 | static inline void *shash_desc_ctx(struct shash_desc *desc) |
@@ -281,12 +329,15 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
281 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | 329 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, |
282 | unsigned int len, u8 *out); | 330 | unsigned int len, u8 *out); |
283 | 331 | ||
284 | static inline void crypto_shash_export(struct shash_desc *desc, u8 *out) | 332 | static inline int crypto_shash_export(struct shash_desc *desc, void *out) |
285 | { | 333 | { |
286 | memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); | 334 | return crypto_shash_alg(desc->tfm)->export(desc, out); |
287 | } | 335 | } |
288 | 336 | ||
289 | int crypto_shash_import(struct shash_desc *desc, const u8 *in); | 337 | static inline int crypto_shash_import(struct shash_desc *desc, const void *in) |
338 | { | ||
339 | return crypto_shash_alg(desc->tfm)->import(desc, in); | ||
340 | } | ||
290 | 341 | ||
291 | static inline int crypto_shash_init(struct shash_desc *desc) | 342 | static inline int crypto_shash_init(struct shash_desc *desc) |
292 | { | 343 | { |
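
The hash.h changes make export/import real per-algorithm hooks sized by statesize, instead of a memcpy of the request or descriptor context, so partial hash state can be serialized in a driver-independent format. A hedged sketch of checkpointing and resuming a shash computation under the new interface (demo_* is hypothetical; error handling is condensed with the kernel's a ?: b idiom):

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    static int demo_checkpointed_sha256(const u8 *data, unsigned int len, u8 *out)
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            void *state;
            unsigned int half = len / 2;
            int err = -ENOMEM;

            tfm = crypto_alloc_shash("sha256", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
            state = kmalloc(crypto_shash_statesize(tfm), GFP_KERNEL);
            if (!desc || !state)
                    goto out;

            desc->tfm = tfm;
            desc->flags = 0;

            err = crypto_shash_init(desc) ?:
                  crypto_shash_update(desc, data, half) ?:
                  crypto_shash_export(desc, state) ?:   /* snapshot the state */
                  crypto_shash_import(desc, state) ?:   /* ...and restore it */
                  crypto_shash_finup(desc, data + half, len - half, out);
    out:
            kfree(state);
            kfree(desc);
            crypto_free_shash(tfm);
            return err;
    }
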
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 82b70564bcab..5bfad8c80595 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h | |||
@@ -34,6 +34,22 @@ struct crypto_hash_walk { | |||
34 | unsigned int flags; | 34 | unsigned int flags; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct ahash_instance { | ||
38 | struct ahash_alg alg; | ||
39 | }; | ||
40 | |||
41 | struct shash_instance { | ||
42 | struct shash_alg alg; | ||
43 | }; | ||
44 | |||
45 | struct crypto_ahash_spawn { | ||
46 | struct crypto_spawn base; | ||
47 | }; | ||
48 | |||
49 | struct crypto_shash_spawn { | ||
50 | struct crypto_spawn base; | ||
51 | }; | ||
52 | |||
37 | extern const struct crypto_type crypto_ahash_type; | 53 | extern const struct crypto_type crypto_ahash_type; |
38 | 54 | ||
39 | int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); | 55 | int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); |
@@ -43,18 +59,100 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, | |||
43 | struct crypto_hash_walk *walk, | 59 | struct crypto_hash_walk *walk, |
44 | struct scatterlist *sg, unsigned int len); | 60 | struct scatterlist *sg, unsigned int len); |
45 | 61 | ||
62 | static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) | ||
63 | { | ||
64 | return !(walk->entrylen | walk->total); | ||
65 | } | ||
66 | |||
67 | int crypto_register_ahash(struct ahash_alg *alg); | ||
68 | int crypto_unregister_ahash(struct ahash_alg *alg); | ||
69 | int ahash_register_instance(struct crypto_template *tmpl, | ||
70 | struct ahash_instance *inst); | ||
71 | void ahash_free_instance(struct crypto_instance *inst); | ||
72 | |||
73 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | ||
74 | struct hash_alg_common *alg, | ||
75 | struct crypto_instance *inst); | ||
76 | |||
77 | static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn) | ||
78 | { | ||
79 | crypto_drop_spawn(&spawn->base); | ||
80 | } | ||
81 | |||
82 | struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); | ||
83 | |||
46 | int crypto_register_shash(struct shash_alg *alg); | 84 | int crypto_register_shash(struct shash_alg *alg); |
47 | int crypto_unregister_shash(struct shash_alg *alg); | 85 | int crypto_unregister_shash(struct shash_alg *alg); |
86 | int shash_register_instance(struct crypto_template *tmpl, | ||
87 | struct shash_instance *inst); | ||
88 | void shash_free_instance(struct crypto_instance *inst); | ||
89 | |||
90 | int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, | ||
91 | struct shash_alg *alg, | ||
92 | struct crypto_instance *inst); | ||
93 | |||
94 | static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) | ||
95 | { | ||
96 | crypto_drop_spawn(&spawn->base); | ||
97 | } | ||
98 | |||
99 | struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); | ||
100 | |||
101 | int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); | ||
102 | int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); | ||
103 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); | ||
104 | |||
105 | int crypto_init_shash_ops_async(struct crypto_tfm *tfm); | ||
48 | 106 | ||
49 | static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) | 107 | static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) |
50 | { | 108 | { |
51 | return crypto_tfm_ctx(&tfm->base); | 109 | return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); |
110 | } | ||
111 | |||
112 | static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) | ||
113 | { | ||
114 | return container_of(__crypto_hash_alg_common(alg), struct ahash_alg, | ||
115 | halg); | ||
116 | } | ||
117 | |||
118 | static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, | ||
119 | unsigned int reqsize) | ||
120 | { | ||
121 | tfm->reqsize = reqsize; | ||
122 | } | ||
123 | |||
124 | static inline struct crypto_instance *ahash_crypto_instance( | ||
125 | struct ahash_instance *inst) | ||
126 | { | ||
127 | return container_of(&inst->alg.halg.base, struct crypto_instance, alg); | ||
52 | } | 128 | } |
53 | 129 | ||
54 | static inline struct ahash_alg *crypto_ahash_alg( | 130 | static inline struct ahash_instance *ahash_instance( |
55 | struct crypto_ahash *tfm) | 131 | struct crypto_instance *inst) |
56 | { | 132 | { |
57 | return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash; | 133 | return container_of(&inst->alg, struct ahash_instance, alg.halg.base); |
134 | } | ||
135 | |||
136 | static inline void *ahash_instance_ctx(struct ahash_instance *inst) | ||
137 | { | ||
138 | return crypto_instance_ctx(ahash_crypto_instance(inst)); | ||
139 | } | ||
140 | |||
141 | static inline unsigned int ahash_instance_headroom(void) | ||
142 | { | ||
143 | return sizeof(struct ahash_alg) - sizeof(struct crypto_alg); | ||
144 | } | ||
145 | |||
146 | static inline struct ahash_instance *ahash_alloc_instance( | ||
147 | const char *name, struct crypto_alg *alg) | ||
148 | { | ||
149 | return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); | ||
150 | } | ||
151 | |||
152 | static inline struct crypto_ahash *crypto_spawn_ahash( | ||
153 | struct crypto_ahash_spawn *spawn) | ||
154 | { | ||
155 | return crypto_spawn_tfm2(&spawn->base); | ||
58 | } | 156 | } |
59 | 157 | ||
60 | static inline int ahash_enqueue_request(struct crypto_queue *queue, | 158 | static inline int ahash_enqueue_request(struct crypto_queue *queue, |
@@ -80,5 +178,46 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm) | |||
80 | return crypto_tfm_ctx(&tfm->base); | 178 | return crypto_tfm_ctx(&tfm->base); |
81 | } | 179 | } |
82 | 180 | ||
181 | static inline struct crypto_instance *shash_crypto_instance( | ||
182 | struct shash_instance *inst) | ||
183 | { | ||
184 | return container_of(&inst->alg.base, struct crypto_instance, alg); | ||
185 | } | ||
186 | |||
187 | static inline struct shash_instance *shash_instance( | ||
188 | struct crypto_instance *inst) | ||
189 | { | ||
190 | return container_of(__crypto_shash_alg(&inst->alg), | ||
191 | struct shash_instance, alg); | ||
192 | } | ||
193 | |||
194 | static inline void *shash_instance_ctx(struct shash_instance *inst) | ||
195 | { | ||
196 | return crypto_instance_ctx(shash_crypto_instance(inst)); | ||
197 | } | ||
198 | |||
199 | static inline struct shash_instance *shash_alloc_instance( | ||
200 | const char *name, struct crypto_alg *alg) | ||
201 | { | ||
202 | return crypto_alloc_instance2(name, alg, | ||
203 | sizeof(struct shash_alg) - sizeof(*alg)); | ||
204 | } | ||
205 | |||
206 | static inline struct crypto_shash *crypto_spawn_shash( | ||
207 | struct crypto_shash_spawn *spawn) | ||
208 | { | ||
209 | return crypto_spawn_tfm2(&spawn->base); | ||
210 | } | ||
211 | |||
212 | static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) | ||
213 | { | ||
214 | return crypto_tfm_ctx_aligned(&tfm->base); | ||
215 | } | ||
216 | |||
217 | static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) | ||
218 | { | ||
219 | return container_of(tfm, struct crypto_shash, base); | ||
220 | } | ||
221 | |||
83 | #endif /* _CRYPTO_INTERNAL_HASH_H */ | 222 | #endif /* _CRYPTO_INTERNAL_HASH_H */ |
84 | 223 | ||
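
With ahash_alg now carrying its own hash_alg_common (halg) rather than living in crypto_alg's union, an asynchronous hash driver registers through crypto_register_ahash() and declares its per-request context size with crypto_ahash_set_reqsize() from cra_init. A hedged skeleton of that shape (all myhash_* names hypothetical; the update/final/digest bodies are placeholders, and a real driver would also supply setkey/export/import as needed):

    #include <crypto/internal/hash.h>
    #include <linux/module.h>

    struct myhash_req_ctx {
            u64 count;
            u32 partial[8];
    };

    static int myhash_init(struct ahash_request *req)   { return 0; }
    static int myhash_update(struct ahash_request *req) { return 0; }
    static int myhash_final(struct ahash_request *req)  { return 0; }
    static int myhash_digest(struct ahash_request *req) { return 0; }

    static int myhash_cra_init(struct crypto_tfm *tfm)
    {
            /* per-request context is reached via ahash_request_ctx() */
            crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                     sizeof(struct myhash_req_ctx));
            return 0;
    }

    static struct ahash_alg myhash_alg = {
            .init   = myhash_init,
            .update = myhash_update,
            .final  = myhash_final,
            .digest = myhash_digest,
            .halg   = {
                    .digestsize = 32,
                    .statesize  = sizeof(struct myhash_req_ctx),
                    .base       = {
                            .cra_name        = "myhash",
                            .cra_driver_name = "myhash-demo",
                            .cra_priority    = 100,
                            .cra_flags       = CRYPTO_ALG_TYPE_AHASH |
                                               CRYPTO_ALG_ASYNC,
                            .cra_blocksize   = 64,
                            .cra_module      = THIS_MODULE,
                            .cra_init        = myhash_cra_init,
                    },
            },
    };

    static int __init myhash_mod_init(void)
    {
            return crypto_register_ahash(&myhash_alg);
    }

    static void __exit myhash_mod_exit(void)
    {
            crypto_unregister_ahash(&myhash_alg);
    }

    module_init(myhash_mod_init);
    module_exit(myhash_mod_exit);
    MODULE_LICENSE("GPL");
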
diff --git a/include/crypto/sha.h b/include/crypto/sha.h index c0ccc2b1a2d8..069e85ba97e1 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h | |||
@@ -5,6 +5,8 @@ | |||
5 | #ifndef _CRYPTO_SHA_H | 5 | #ifndef _CRYPTO_SHA_H |
6 | #define _CRYPTO_SHA_H | 6 | #define _CRYPTO_SHA_H |
7 | 7 | ||
8 | #include <linux/types.h> | ||
9 | |||
8 | #define SHA1_DIGEST_SIZE 20 | 10 | #define SHA1_DIGEST_SIZE 20 |
9 | #define SHA1_BLOCK_SIZE 64 | 11 | #define SHA1_BLOCK_SIZE 64 |
10 | 12 | ||
@@ -62,4 +64,22 @@ | |||
62 | #define SHA512_H6 0x1f83d9abfb41bd6bULL | 64 | #define SHA512_H6 0x1f83d9abfb41bd6bULL |
63 | #define SHA512_H7 0x5be0cd19137e2179ULL | 65 | #define SHA512_H7 0x5be0cd19137e2179ULL |
64 | 66 | ||
67 | struct sha1_state { | ||
68 | u64 count; | ||
69 | u32 state[SHA1_DIGEST_SIZE / 4]; | ||
70 | u8 buffer[SHA1_BLOCK_SIZE]; | ||
71 | }; | ||
72 | |||
73 | struct sha256_state { | ||
74 | u64 count; | ||
75 | u32 state[SHA256_DIGEST_SIZE / 4]; | ||
76 | u8 buf[SHA256_BLOCK_SIZE]; | ||
77 | }; | ||
78 | |||
79 | struct sha512_state { | ||
80 | u64 count[2]; | ||
81 | u64 state[SHA512_DIGEST_SIZE / 8]; | ||
82 | u8 buf[SHA512_BLOCK_SIZE]; | ||
83 | }; | ||
84 | |||
65 | #endif | 85 | #endif |
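
The new sha*_state structures give export/import a stable, implementation-independent layout: statesize becomes sizeof(struct shaN_state) and a driver can copy its running state directly (which is also what the "sha-s390 - Fix warnings in import function" entry in the merge summary is about). A minimal hedged sketch for a hypothetical SHA-1 shash whose descriptor context is a struct sha1_state:

    #include <crypto/sha.h>
    #include <crypto/hash.h>
    #include <linux/string.h>

    static int demo_sha1_export(struct shash_desc *desc, void *out)
    {
            memcpy(out, shash_desc_ctx(desc), sizeof(struct sha1_state));
            return 0;
    }

    static int demo_sha1_import(struct shash_desc *desc, const void *in)
    {
            memcpy(shash_desc_ctx(desc), in, sizeof(struct sha1_state));
            return 0;
    }

    /* In the corresponding shash_alg (sketch):
     *      .statesize = sizeof(struct sha1_state),
     *      .export    = demo_sha1_export,
     *      .import    = demo_sha1_import,
     */
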
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h new file mode 100644 index 000000000000..c4467c55df1e --- /dev/null +++ b/include/crypto/vmac.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Modified to interface to the Linux kernel | ||
3 | * Copyright (c) 2009, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __CRYPTO_VMAC_H | ||
20 | #define __CRYPTO_VMAC_H | ||
21 | |||
22 | /* -------------------------------------------------------------------------- | ||
23 | * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. | ||
24 | * This implementation is hereby placed in the public domain. | ||
25 | * The authors offer no warranty. Use at your own risk. | ||
26 | * Please send bug reports to the authors. | ||
27 | * Last modified: 17 APR 08, 1700 PDT | ||
28 | * ----------------------------------------------------------------------- */ | ||
29 | |||
30 | /* | ||
31 | * User definable settings. | ||
32 | */ | ||
33 | #define VMAC_TAG_LEN 64 | ||
34 | #define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ | ||
35 | #define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) | ||
36 | #define VMAC_NHBYTES 128/* Must be 2^i for any 3 < i < 13. Standard = 128*/ | ||
37 | |||
38 | /* | ||
39 | * This implementation uses u32 and u64 as names for unsigned 32- | ||
40 | * and 64-bit integer types. These are defined in C99 stdint.h. The | ||
41 | * following may need adaptation if you are not running a C99 or | ||
42 | * Microsoft C environment. | ||
43 | */ | ||
44 | struct vmac_ctx { | ||
45 | u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; | ||
46 | u64 polykey[2*VMAC_TAG_LEN/64]; | ||
47 | u64 l3key[2*VMAC_TAG_LEN/64]; | ||
48 | u64 polytmp[2*VMAC_TAG_LEN/64]; | ||
49 | u64 cached_nonce[2]; | ||
50 | u64 cached_aes[2]; | ||
51 | int first_block_processed; | ||
52 | }; | ||
53 | |||
54 | typedef u64 vmac_t; | ||
55 | |||
56 | struct vmac_ctx_t { | ||
57 | struct crypto_cipher *child; | ||
58 | struct vmac_ctx __vmac_ctx; | ||
59 | }; | ||
60 | |||
61 | #endif /* __CRYPTO_VMAC_H */ | ||
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index ec29fa268b94..fd929889e8dc 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -115,7 +115,6 @@ struct crypto_async_request; | |||
115 | struct crypto_aead; | 115 | struct crypto_aead; |
116 | struct crypto_blkcipher; | 116 | struct crypto_blkcipher; |
117 | struct crypto_hash; | 117 | struct crypto_hash; |
118 | struct crypto_ahash; | ||
119 | struct crypto_rng; | 118 | struct crypto_rng; |
120 | struct crypto_tfm; | 119 | struct crypto_tfm; |
121 | struct crypto_type; | 120 | struct crypto_type; |
@@ -146,16 +145,6 @@ struct ablkcipher_request { | |||
146 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | 145 | void *__ctx[] CRYPTO_MINALIGN_ATTR; |
147 | }; | 146 | }; |
148 | 147 | ||
149 | struct ahash_request { | ||
150 | struct crypto_async_request base; | ||
151 | |||
152 | unsigned int nbytes; | ||
153 | struct scatterlist *src; | ||
154 | u8 *result; | ||
155 | |||
156 | void *__ctx[] CRYPTO_MINALIGN_ATTR; | ||
157 | }; | ||
158 | |||
159 | /** | 148 | /** |
160 | * struct aead_request - AEAD request | 149 | * struct aead_request - AEAD request |
161 | * @base: Common attributes for async crypto requests | 150 | * @base: Common attributes for async crypto requests |
@@ -220,18 +209,6 @@ struct ablkcipher_alg { | |||
220 | unsigned int ivsize; | 209 | unsigned int ivsize; |
221 | }; | 210 | }; |
222 | 211 | ||
223 | struct ahash_alg { | ||
224 | int (*init)(struct ahash_request *req); | ||
225 | int (*reinit)(struct ahash_request *req); | ||
226 | int (*update)(struct ahash_request *req); | ||
227 | int (*final)(struct ahash_request *req); | ||
228 | int (*digest)(struct ahash_request *req); | ||
229 | int (*setkey)(struct crypto_ahash *tfm, const u8 *key, | ||
230 | unsigned int keylen); | ||
231 | |||
232 | unsigned int digestsize; | ||
233 | }; | ||
234 | |||
235 | struct aead_alg { | 212 | struct aead_alg { |
236 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | 213 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, |
237 | unsigned int keylen); | 214 | unsigned int keylen); |
@@ -318,7 +295,6 @@ struct rng_alg { | |||
318 | #define cra_cipher cra_u.cipher | 295 | #define cra_cipher cra_u.cipher |
319 | #define cra_digest cra_u.digest | 296 | #define cra_digest cra_u.digest |
320 | #define cra_hash cra_u.hash | 297 | #define cra_hash cra_u.hash |
321 | #define cra_ahash cra_u.ahash | ||
322 | #define cra_compress cra_u.compress | 298 | #define cra_compress cra_u.compress |
323 | #define cra_rng cra_u.rng | 299 | #define cra_rng cra_u.rng |
324 | 300 | ||
@@ -346,7 +322,6 @@ struct crypto_alg { | |||
346 | struct cipher_alg cipher; | 322 | struct cipher_alg cipher; |
347 | struct digest_alg digest; | 323 | struct digest_alg digest; |
348 | struct hash_alg hash; | 324 | struct hash_alg hash; |
349 | struct ahash_alg ahash; | ||
350 | struct compress_alg compress; | 325 | struct compress_alg compress; |
351 | struct rng_alg rng; | 326 | struct rng_alg rng; |
352 | } cra_u; | 327 | } cra_u; |
@@ -433,18 +408,6 @@ struct hash_tfm { | |||
433 | unsigned int digestsize; | 408 | unsigned int digestsize; |
434 | }; | 409 | }; |
435 | 410 | ||
436 | struct ahash_tfm { | ||
437 | int (*init)(struct ahash_request *req); | ||
438 | int (*update)(struct ahash_request *req); | ||
439 | int (*final)(struct ahash_request *req); | ||
440 | int (*digest)(struct ahash_request *req); | ||
441 | int (*setkey)(struct crypto_ahash *tfm, const u8 *key, | ||
442 | unsigned int keylen); | ||
443 | |||
444 | unsigned int digestsize; | ||
445 | unsigned int reqsize; | ||
446 | }; | ||
447 | |||
448 | struct compress_tfm { | 411 | struct compress_tfm { |
449 | int (*cot_compress)(struct crypto_tfm *tfm, | 412 | int (*cot_compress)(struct crypto_tfm *tfm, |
450 | const u8 *src, unsigned int slen, | 413 | const u8 *src, unsigned int slen, |
@@ -465,7 +428,6 @@ struct rng_tfm { | |||
465 | #define crt_blkcipher crt_u.blkcipher | 428 | #define crt_blkcipher crt_u.blkcipher |
466 | #define crt_cipher crt_u.cipher | 429 | #define crt_cipher crt_u.cipher |
467 | #define crt_hash crt_u.hash | 430 | #define crt_hash crt_u.hash |
468 | #define crt_ahash crt_u.ahash | ||
469 | #define crt_compress crt_u.compress | 431 | #define crt_compress crt_u.compress |
470 | #define crt_rng crt_u.rng | 432 | #define crt_rng crt_u.rng |
471 | 433 | ||
@@ -479,7 +441,6 @@ struct crypto_tfm { | |||
479 | struct blkcipher_tfm blkcipher; | 441 | struct blkcipher_tfm blkcipher; |
480 | struct cipher_tfm cipher; | 442 | struct cipher_tfm cipher; |
481 | struct hash_tfm hash; | 443 | struct hash_tfm hash; |
482 | struct ahash_tfm ahash; | ||
483 | struct compress_tfm compress; | 444 | struct compress_tfm compress; |
484 | struct rng_tfm rng; | 445 | struct rng_tfm rng; |
485 | } crt_u; | 446 | } crt_u; |
@@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( | |||
770 | 731 | ||
771 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) | 732 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) |
772 | { | 733 | { |
773 | kfree(req); | 734 | kzfree(req); |
774 | } | 735 | } |
775 | 736 | ||
776 | static inline void ablkcipher_request_set_callback( | 737 | static inline void ablkcipher_request_set_callback( |
@@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | |||
901 | 862 | ||
902 | static inline void aead_request_free(struct aead_request *req) | 863 | static inline void aead_request_free(struct aead_request *req) |
903 | { | 864 | { |
904 | kfree(req); | 865 | kzfree(req); |
905 | } | 866 | } |
906 | 867 | ||
907 | static inline void aead_request_set_callback(struct aead_request *req, | 868 | static inline void aead_request_set_callback(struct aead_request *req, |
diff --git a/include/linux/fips.h b/include/linux/fips.h new file mode 100644 index 000000000000..f8fb07b0b6b8 --- /dev/null +++ b/include/linux/fips.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _FIPS_H | ||
2 | #define _FIPS_H | ||
3 | |||
4 | #ifdef CONFIG_CRYPTO_FIPS | ||
5 | extern int fips_enabled; | ||
6 | #else | ||
7 | #define fips_enabled 0 | ||
8 | #endif | ||
9 | |||
10 | #endif | ||
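
The new linux/fips.h lets code outside crypto/ test the FIPS mode flag without its own #ifdef CONFIG_CRYPTO_FIPS blocks; with the option disabled, fips_enabled collapses to the constant 0 and the compiler drops the guarded branch entirely. A hedged sketch of the intended pattern (names hypothetical, and the registered algorithm is only an example):

    #include <linux/fips.h>
    #include <linux/init.h>

    /* Hypothetical example: skip a non-approved cipher when running in FIPS mode. */
    static int __init demo_register_legacy_cipher(void)
    {
            if (fips_enabled)
                    return 0;       /* quietly skip; not allowed under FIPS */

            /* ...crypto_register_alg(&demo_legacy_cipher_alg) would go here... */
            return 0;
    }
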