 arch/s390/crypto/sha256_s390.c              |   66
 arch/x86/crypto/ghash-clmulni-intel_glue.c  |    2
 crypto/Kconfig                              |    4
 crypto/algif_hash.c                         |    4
 crypto/arc4.c                               |   15
 crypto/crc32c.c                             |   10
 crypto/gf128mul.c                           |    4
 crypto/sha1_generic.c                       |   11
 crypto/testmgr.h                            |  293
 drivers/char/hw_random/Kconfig              |   12
 drivers/char/hw_random/Makefile             |    1
 drivers/char/hw_random/nomadik-rng.c        |    3
 drivers/char/hw_random/omap-rng.c           |    6
 drivers/char/hw_random/ppc4xx-rng.c         |  156
 drivers/char/hw_random/timeriomem-rng.c     |    3
 drivers/crypto/amcc/crypto4xx_core.c        |    5
 drivers/crypto/caam/caamalg.c               | 1832
 drivers/crypto/caam/compat.h                |    1
 drivers/crypto/caam/ctrl.c                  |    4
 drivers/crypto/caam/desc_constr.h           |   58
 drivers/crypto/omap-sham.c                  |  180
 drivers/crypto/talitos.c                    |   47
 22 files changed, 2128 insertions(+), 589 deletions(-)
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 5ed8d64fc2ed..0317a3547cb9 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -1,15 +1,12 @@
 /*
  * Cryptographic API.
  *
- * s390 implementation of the SHA256 Secure Hash Algorithm.
+ * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2005,2007
+ *   Copyright IBM Corp. 2005,2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
  *
- * Derived from "crypto/sha256_generic.c"
- * and "arch/s390/crypto/sha1_s390.c"
- *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -65,7 +62,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
 	return 0;
 }
 
-static struct shash_alg alg = {
+static struct shash_alg sha256_alg = {
 	.digestsize	=	SHA256_DIGEST_SIZE,
 	.init		=	sha256_init,
 	.update		=	s390_sha_update,
@@ -84,22 +81,69 @@ static struct shash_alg alg = {
 	}
 };
 
-static int sha256_s390_init(void)
+static int sha224_init(struct shash_desc *desc)
 {
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA224_H0;
+	sctx->state[1] = SHA224_H1;
+	sctx->state[2] = SHA224_H2;
+	sctx->state[3] = SHA224_H3;
+	sctx->state[4] = SHA224_H4;
+	sctx->state[5] = SHA224_H5;
+	sctx->state[6] = SHA224_H6;
+	sctx->state[7] = SHA224_H7;
+	sctx->count = 0;
+	sctx->func = KIMD_SHA_256;
+
+	return 0;
+}
+
+static struct shash_alg sha224_alg = {
+	.digestsize	=	SHA224_DIGEST_SIZE,
+	.init		=	sha224_init,
+	.update		=	s390_sha_update,
+	.final		=	s390_sha_final,
+	.export		=	sha256_export,
+	.import		=	sha256_import,
+	.descsize	=	sizeof(struct s390_sha_ctx),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha224",
+		.cra_driver_name=	"sha224-s390",
+		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA224_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+};
+
+static int __init sha256_s390_init(void)
+{
+	int ret;
+
 	if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
 		return -EOPNOTSUPP;
-
-	return crypto_register_shash(&alg);
+	ret = crypto_register_shash(&sha256_alg);
+	if (ret < 0)
+		goto out;
+	ret = crypto_register_shash(&sha224_alg);
+	if (ret < 0)
+		crypto_unregister_shash(&sha256_alg);
+out:
+	return ret;
 }
 
 static void __exit sha256_s390_fini(void)
 {
-	crypto_unregister_shash(&alg);
+	crypto_unregister_shash(&sha224_alg);
+	crypto_unregister_shash(&sha256_alg);
 }
 
 module_init(sha256_s390_init);
 module_exit(sha256_s390_fini);
 
 MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha224");
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
+MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
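The new init routine registers sha256 first and unwinds it if the sha224 registration fails, so the module never stays half-registered. A minimal userspace sketch of that register-or-roll-back shape (plain C; register_a()/register_b()/unregister_a() are hypothetical stand-ins for the crypto_register_shash()/crypto_unregister_shash() calls above):

    #include <stdio.h>

    static int register_a(void) { return 0; }    /* pretend sha256 registers fine */
    static int register_b(void) { return -1; }   /* pretend sha224 registration fails */
    static void unregister_a(void) { puts("rolled back first registration"); }

    static int init_both(void)
    {
        int ret;

        ret = register_a();
        if (ret < 0)
            goto out;               /* nothing registered yet, nothing to undo */
        ret = register_b();
        if (ret < 0)
            unregister_a();         /* undo the first registration on failure */
    out:
        return ret;
    }

    int main(void)
    {
        return init_both() ? 1 : 0; /* exits nonzero on the simulated failure */
    }

Note that the exit path unregisters in the reverse order of registration, matching sha256_s390_fini() above.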
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 7a6e68e4f748..976aa64d9a20 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -245,7 +245,7 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 	crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
 			       & CRYPTO_TFM_RES_MASK);
 
-	return 0;
+	return err;
 }
 
 static int ghash_async_init_tfm(struct crypto_tfm *tfm)
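This one-line change matters because ghash_async_setkey() forwards the key to a child transform: returning 0 unconditionally made a failed child setkey look successful, leaving the tfm effectively unkeyed. A sketch of the pattern (plain C; child_setkey() is a hypothetical stand-in for the setkey call on the cryptd child):

    #include <stdio.h>

    /* Reject anything but a 16-byte key, as an AES-style setkey would */
    static int child_setkey(const unsigned char *key, unsigned int keylen)
    {
        (void)key;
        return keylen == 16 ? 0 : -22;    /* -22 == -EINVAL */
    }

    static int wrapper_setkey(const unsigned char *key, unsigned int keylen)
    {
        int err = child_setkey(key, keylen);

        /* ...flags would be copied from the child back to the wrapper here... */
        return err;                       /* was "return 0", masking the failure */
    }

    int main(void)
    {
        unsigned char key[8] = { 0 };

        printf("setkey: %d\n", wrapper_setkey(key, sizeof(key))); /* prints -22 */
        return 0;
    }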
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 87b22ca9c223..2af81552d65b 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -458,7 +458,7 @@ config CRYPTO_WP512
 
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_SHASH
 	select CRYPTO_CRYPTD
 	help
@@ -533,7 +533,7 @@ config CRYPTO_AES_X86_64
 
 config CRYPTO_AES_NI_INTEL
 	tristate "AES cipher algorithms (AES-NI)"
-	depends on (X86 || UML_X86)
+	depends on X86
 	select CRYPTO_AES_X86_64 if 64BIT
 	select CRYPTO_AES_586 if !64BIT
 	select CRYPTO_CRYPTD
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 62122a1a2f7a..ef5356cd280a 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -68,8 +68,10 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
 		int newlen;
 
 		newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
-		if (newlen < 0)
+		if (newlen < 0) {
+			err = copied ? 0 : newlen;
 			goto unlock;
+		}
 
 		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
 					newlen);
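The added line restores the usual sendmsg() convention: if part of the caller's buffer was already consumed before af_alg_make_sg() failed, report the partial progress rather than the error, and surface the error only when nothing was sent. A small sketch of that convention under assumed values (make_sg() is a hypothetical stand-in that fails, like af_alg_make_sg(), on an oversized chunk):

    #include <stdio.h>

    static int make_sg(int chunk)
    {
        return chunk > 64 ? -14 : chunk;    /* -14 == -EFAULT on a bad chunk */
    }

    static int send_all(const int *chunks, int n)
    {
        int copied = 0, err = 0, i;

        for (i = 0; i < n; i++) {
            int newlen = make_sg(chunks[i]);

            if (newlen < 0) {
                err = copied ? 0 : newlen;  /* keep earlier progress */
                break;
            }
            copied += newlen;
        }
        return err ? err : copied;
    }

    int main(void)
    {
        int chunks[] = { 32, 32, 128 };

        printf("%d\n", send_all(chunks, 3)); /* prints 64, not -14 */
        return 0;
    }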
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 8be47e13a9e3..0d12a96da1d8 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -1,4 +1,4 @@
-/* 
+/*
  * Cryptographic API
  *
  * ARC4 Cipher Algorithm
@@ -33,16 +33,15 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	ctx->x = 1;
 	ctx->y = 0;
 
-	for(i = 0; i < 256; i++)
+	for (i = 0; i < 256; i++)
 		ctx->S[i] = i;
 
-	for(i = 0; i < 256; i++)
-	{
+	for (i = 0; i < 256; i++) {
 		u8 a = ctx->S[i];
 		j = (j + in_key[k] + a) & 0xff;
 		ctx->S[i] = ctx->S[j];
 		ctx->S[j] = a;
-		if(++k >= key_len)
+		if (++k >= key_len)
 			k = 0;
 	}
 
@@ -80,9 +79,9 @@ static struct crypto_alg arc4_alg = {
 	.cra_u			=	{ .cipher = {
 	.cia_min_keysize	=	ARC4_MIN_KEY_SIZE,
 	.cia_max_keysize	=	ARC4_MAX_KEY_SIZE,
-	.cia_setkey		=	arc4_set_key,
-	.cia_encrypt		=	arc4_crypt,
-	.cia_decrypt		=	arc4_crypt } }
+	.cia_setkey		=	arc4_set_key,
+	.cia_encrypt		=	arc4_crypt,
+	.cia_decrypt		=	arc4_crypt } }
 };
 
 static int __init arc4_init(void)
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index de9e55c29794..3f9ad2801052 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -224,11 +224,11 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
 static struct shash_alg alg = {
 	.digestsize		=	CHKSUM_DIGEST_SIZE,
-	.setkey			=	chksum_setkey,
-	.init			=	chksum_init,
-	.update			=	chksum_update,
-	.final			=	chksum_final,
-	.finup			=	chksum_finup,
-	.digest			=	chksum_digest,
+	.setkey			=	chksum_setkey,
+	.init			=	chksum_init,
+	.update			=	chksum_update,
+	.final			=	chksum_final,
+	.finup			=	chksum_finup,
+	.digest			=	chksum_digest,
 	.descsize		=	sizeof(struct chksum_desc_ctx),
 	.base			=	{
 		.cra_name		=	"crc32c",
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index df35e4ccd07e..5276607c72d0 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
 	for (i = 0; i < 7; ++i)
 		gf128mul_x_lle(&p[i + 1], &p[i]);
 
-	memset(r, 0, sizeof(r));
+	memset(r, 0, sizeof(*r));
 	for (i = 0;;) {
 		u8 ch = ((u8 *)b)[15 - i];
 
@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
 	for (i = 0; i < 7; ++i)
 		gf128mul_x_bbe(&p[i + 1], &p[i]);
 
-	memset(r, 0, sizeof(r));
+	memset(r, 0, sizeof(*r));
 	for (i = 0;;) {
 		u8 ch = ((u8 *)b)[i];
 
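The bug fixed here is easy to reproduce: r is a be128 *, so sizeof(r) is the pointer size (4 or 8 bytes), while the result block it points at is 16 bytes, meaning the old memset() cleared at most half of it. A standalone demonstration (plain C; the struct mirrors the kernel's 16-byte be128):

    #include <stdio.h>
    #include <string.h>

    struct be128 { unsigned long long a, b; };  /* 16 bytes, like the kernel type */

    static void clear_result(struct be128 *r)
    {
        printf("sizeof(r) = %zu, sizeof(*r) = %zu\n", sizeof(r), sizeof(*r));
        memset(r, 0, sizeof(*r));               /* the fixed form clears all 16 */
    }

    int main(void)
    {
        struct be128 r = { ~0ULL, ~0ULL };

        clear_result(&r);
        printf("r = %llx %llx\n", r.a, r.b);    /* 0 0 */
        return 0;
    }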
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 0416091bf45a..00ae60eb9254 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -43,25 +43,26 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
 	unsigned int partial, done;
 	const u8 *src;
 
-	partial = sctx->count & 0x3f;
+	partial = sctx->count % SHA1_BLOCK_SIZE;
 	sctx->count += len;
 	done = 0;
 	src = data;
 
-	if ((partial + len) > 63) {
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
 		u32 temp[SHA_WORKSPACE_WORDS];
 
 		if (partial) {
 			done = -partial;
-			memcpy(sctx->buffer + partial, data, done + 64);
+			memcpy(sctx->buffer + partial, data,
+			       done + SHA1_BLOCK_SIZE);
 			src = sctx->buffer;
 		}
 
 		do {
 			sha_transform(sctx->state, src, temp);
-			done += 64;
+			done += SHA1_BLOCK_SIZE;
 			src = data + done;
-		} while (done + 63 < len);
+		} while (done + SHA1_BLOCK_SIZE <= len);
 
 		memset(temp, 0, sizeof(temp));
 		partial = 0;
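The arithmetic around done is worth unpacking: done starts at -partial so that done + SHA1_BLOCK_SIZE is exactly the number of new bytes needed to top up the partially filled block, and data + done afterwards points at the first unconsumed input byte. A worked example with assumed values (partial = 20 buffered bytes, 64-byte blocks):

    #include <stdio.h>

    #define BLOCK 64

    int main(void)
    {
        unsigned int partial = 20;        /* bytes already in sctx->buffer */
        unsigned int done = -partial;     /* wraps around; only done + BLOCK is used */

        printf("bytes copied to fill the first block: %u\n", done + BLOCK); /* 44 */
        done += BLOCK;                    /* now 44: input consumed so far */
        printf("next block starts at data + %u\n", done);
        return 0;
    }

The loop-condition change from done + 63 < len to done + SHA1_BLOCK_SIZE <= len is the same comparison, just written in terms of the named constant.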
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 27e60619538e..27adc92842ba 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -2976,8 +2976,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
 #define AES_CBC_DEC_TEST_VECTORS 4
 #define AES_LRW_ENC_TEST_VECTORS 8
 #define AES_LRW_DEC_TEST_VECTORS 8
-#define AES_XTS_ENC_TEST_VECTORS 4
-#define AES_XTS_DEC_TEST_VECTORS 4
+#define AES_XTS_ENC_TEST_VECTORS 5
+#define AES_XTS_DEC_TEST_VECTORS 5
 #define AES_CTR_ENC_TEST_VECTORS 3
 #define AES_CTR_DEC_TEST_VECTORS 3
 #define AES_OFB_ENC_TEST_VECTORS 1
@@ -3926,6 +3926,150 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
 			  "\x0a\x28\x2d\xf9\x20\x14\x7b\xea"
 			  "\xbe\x42\x1e\xe5\x31\x9d\x05\x68",
 		.rlen	= 512,
+	}, { /* XTS-AES 10, XTS-AES-256, data unit 512 bytes */
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x62\x49\x77\x57\x24\x70\x93\x69"
+			  "\x99\x59\x57\x49\x66\x96\x76\x27"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95"
+			  "\x02\x88\x41\x97\x16\x93\x99\x37"
+			  "\x51\x05\x82\x09\x74\x94\x45\x92",
+		.klen	= 64,
+		.iv	= "\xff\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ilen	= 512,
+		.result	= "\x1c\x3b\x3a\x10\x2f\x77\x03\x86"
+			  "\xe4\x83\x6c\x99\xe3\x70\xcf\x9b"
+			  "\xea\x00\x80\x3f\x5e\x48\x23\x57"
+			  "\xa4\xae\x12\xd4\x14\xa3\xe6\x3b"
+			  "\x5d\x31\xe2\x76\xf8\xfe\x4a\x8d"
+			  "\x66\xb3\x17\xf9\xac\x68\x3f\x44"
+			  "\x68\x0a\x86\xac\x35\xad\xfc\x33"
+			  "\x45\xbe\xfe\xcb\x4b\xb1\x88\xfd"
+			  "\x57\x76\x92\x6c\x49\xa3\x09\x5e"
+			  "\xb1\x08\xfd\x10\x98\xba\xec\x70"
+			  "\xaa\xa6\x69\x99\xa7\x2a\x82\xf2"
+			  "\x7d\x84\x8b\x21\xd4\xa7\x41\xb0"
+			  "\xc5\xcd\x4d\x5f\xff\x9d\xac\x89"
+			  "\xae\xba\x12\x29\x61\xd0\x3a\x75"
+			  "\x71\x23\xe9\x87\x0f\x8a\xcf\x10"
+			  "\x00\x02\x08\x87\x89\x14\x29\xca"
+			  "\x2a\x3e\x7a\x7d\x7d\xf7\xb1\x03"
+			  "\x55\x16\x5c\x8b\x9a\x6d\x0a\x7d"
+			  "\xe8\xb0\x62\xc4\x50\x0d\xc4\xcd"
+			  "\x12\x0c\x0f\x74\x18\xda\xe3\xd0"
+			  "\xb5\x78\x1c\x34\x80\x3f\xa7\x54"
+			  "\x21\xc7\x90\xdf\xe1\xde\x18\x34"
+			  "\xf2\x80\xd7\x66\x7b\x32\x7f\x6c"
+			  "\x8c\xd7\x55\x7e\x12\xac\x3a\x0f"
+			  "\x93\xec\x05\xc5\x2e\x04\x93\xef"
+			  "\x31\xa1\x2d\x3d\x92\x60\xf7\x9a"
+			  "\x28\x9d\x6a\x37\x9b\xc7\x0c\x50"
+			  "\x84\x14\x73\xd1\xa8\xcc\x81\xec"
+			  "\x58\x3e\x96\x45\xe0\x7b\x8d\x96"
+			  "\x70\x65\x5b\xa5\xbb\xcf\xec\xc6"
+			  "\xdc\x39\x66\x38\x0a\xd8\xfe\xcb"
+			  "\x17\xb6\xba\x02\x46\x9a\x02\x0a"
+			  "\x84\xe1\x8e\x8f\x84\x25\x20\x70"
+			  "\xc1\x3e\x9f\x1f\x28\x9b\xe5\x4f"
+			  "\xbc\x48\x14\x57\x77\x8f\x61\x60"
+			  "\x15\xe1\x32\x7a\x02\xb1\x40\xf1"
+			  "\x50\x5e\xb3\x09\x32\x6d\x68\x37"
+			  "\x8f\x83\x74\x59\x5c\x84\x9d\x84"
+			  "\xf4\xc3\x33\xec\x44\x23\x88\x51"
+			  "\x43\xcb\x47\xbd\x71\xc5\xed\xae"
+			  "\x9b\xe6\x9a\x2f\xfe\xce\xb1\xbe"
+			  "\xc9\xde\x24\x4f\xbe\x15\x99\x2b"
+			  "\x11\xb7\x7c\x04\x0f\x12\xbd\x8f"
+			  "\x6a\x97\x5a\x44\xa0\xf9\x0c\x29"
+			  "\xa9\xab\xc3\xd4\xd8\x93\x92\x72"
+			  "\x84\xc5\x87\x54\xcc\xe2\x94\x52"
+			  "\x9f\x86\x14\xdc\xd2\xab\xa9\x91"
+			  "\x92\x5f\xed\xc4\xae\x74\xff\xac"
+			  "\x6e\x33\x3b\x93\xeb\x4a\xff\x04"
+			  "\x79\xda\x9a\x41\x0e\x44\x50\xe0"
+			  "\xdd\x7a\xe4\xc6\xe2\x91\x09\x00"
+			  "\x57\x5d\xa4\x01\xfc\x07\x05\x9f"
+			  "\x64\x5e\x8b\x7e\x9b\xfd\xef\x33"
+			  "\x94\x30\x54\xff\x84\x01\x14\x93"
+			  "\xc2\x7b\x34\x29\xea\xed\xb4\xed"
+			  "\x53\x76\x44\x1a\x77\xed\x43\x85"
+			  "\x1a\xd7\x7f\x16\xf5\x41\xdf\xd2"
+			  "\x69\xd5\x0d\x6a\x5f\x14\xfb\x0a"
+			  "\xab\x1c\xbb\x4c\x15\x50\xbe\x97"
+			  "\xf7\xab\x40\x66\x19\x3c\x4c\xaa"
+			  "\x77\x3d\xad\x38\x01\x4b\xd2\x09"
+			  "\x2f\xa7\x55\xc8\x24\xbb\x5e\x54"
+			  "\xc4\xf3\x6f\xfd\xa9\xfc\xea\x70"
+			  "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51",
+		.rlen	= 512,
 	}
 };
 
@@ -4123,6 +4267,151 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
 			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
 			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
 		.rlen	= 512,
+	}, { /* XTS-AES 10, XTS-AES-256, data unit 512 bytes */
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x62\x49\x77\x57\x24\x70\x93\x69"
+			  "\x99\x59\x57\x49\x66\x96\x76\x27"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95"
+			  "\x02\x88\x41\x97\x16\x93\x99\x37"
+			  "\x51\x05\x82\x09\x74\x94\x45\x92",
+		.klen	= 64,
+		.iv	= "\xff\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x1c\x3b\x3a\x10\x2f\x77\x03\x86"
+			  "\xe4\x83\x6c\x99\xe3\x70\xcf\x9b"
+			  "\xea\x00\x80\x3f\x5e\x48\x23\x57"
+			  "\xa4\xae\x12\xd4\x14\xa3\xe6\x3b"
+			  "\x5d\x31\xe2\x76\xf8\xfe\x4a\x8d"
+			  "\x66\xb3\x17\xf9\xac\x68\x3f\x44"
+			  "\x68\x0a\x86\xac\x35\xad\xfc\x33"
+			  "\x45\xbe\xfe\xcb\x4b\xb1\x88\xfd"
+			  "\x57\x76\x92\x6c\x49\xa3\x09\x5e"
+			  "\xb1\x08\xfd\x10\x98\xba\xec\x70"
+			  "\xaa\xa6\x69\x99\xa7\x2a\x82\xf2"
+			  "\x7d\x84\x8b\x21\xd4\xa7\x41\xb0"
+			  "\xc5\xcd\x4d\x5f\xff\x9d\xac\x89"
+			  "\xae\xba\x12\x29\x61\xd0\x3a\x75"
+			  "\x71\x23\xe9\x87\x0f\x8a\xcf\x10"
+			  "\x00\x02\x08\x87\x89\x14\x29\xca"
+			  "\x2a\x3e\x7a\x7d\x7d\xf7\xb1\x03"
+			  "\x55\x16\x5c\x8b\x9a\x6d\x0a\x7d"
+			  "\xe8\xb0\x62\xc4\x50\x0d\xc4\xcd"
+			  "\x12\x0c\x0f\x74\x18\xda\xe3\xd0"
+			  "\xb5\x78\x1c\x34\x80\x3f\xa7\x54"
+			  "\x21\xc7\x90\xdf\xe1\xde\x18\x34"
+			  "\xf2\x80\xd7\x66\x7b\x32\x7f\x6c"
+			  "\x8c\xd7\x55\x7e\x12\xac\x3a\x0f"
+			  "\x93\xec\x05\xc5\x2e\x04\x93\xef"
+			  "\x31\xa1\x2d\x3d\x92\x60\xf7\x9a"
+			  "\x28\x9d\x6a\x37\x9b\xc7\x0c\x50"
+			  "\x84\x14\x73\xd1\xa8\xcc\x81\xec"
+			  "\x58\x3e\x96\x45\xe0\x7b\x8d\x96"
+			  "\x70\x65\x5b\xa5\xbb\xcf\xec\xc6"
+			  "\xdc\x39\x66\x38\x0a\xd8\xfe\xcb"
+			  "\x17\xb6\xba\x02\x46\x9a\x02\x0a"
+			  "\x84\xe1\x8e\x8f\x84\x25\x20\x70"
+			  "\xc1\x3e\x9f\x1f\x28\x9b\xe5\x4f"
+			  "\xbc\x48\x14\x57\x77\x8f\x61\x60"
+			  "\x15\xe1\x32\x7a\x02\xb1\x40\xf1"
+			  "\x50\x5e\xb3\x09\x32\x6d\x68\x37"
+			  "\x8f\x83\x74\x59\x5c\x84\x9d\x84"
+			  "\xf4\xc3\x33\xec\x44\x23\x88\x51"
+			  "\x43\xcb\x47\xbd\x71\xc5\xed\xae"
+			  "\x9b\xe6\x9a\x2f\xfe\xce\xb1\xbe"
+			  "\xc9\xde\x24\x4f\xbe\x15\x99\x2b"
+			  "\x11\xb7\x7c\x04\x0f\x12\xbd\x8f"
+			  "\x6a\x97\x5a\x44\xa0\xf9\x0c\x29"
+			  "\xa9\xab\xc3\xd4\xd8\x93\x92\x72"
+			  "\x84\xc5\x87\x54\xcc\xe2\x94\x52"
+			  "\x9f\x86\x14\xdc\xd2\xab\xa9\x91"
+			  "\x92\x5f\xed\xc4\xae\x74\xff\xac"
+			  "\x6e\x33\x3b\x93\xeb\x4a\xff\x04"
+			  "\x79\xda\x9a\x41\x0e\x44\x50\xe0"
+			  "\xdd\x7a\xe4\xc6\xe2\x91\x09\x00"
+			  "\x57\x5d\xa4\x01\xfc\x07\x05\x9f"
+			  "\x64\x5e\x8b\x7e\x9b\xfd\xef\x33"
+			  "\x94\x30\x54\xff\x84\x01\x14\x93"
+			  "\xc2\x7b\x34\x29\xea\xed\xb4\xed"
+			  "\x53\x76\x44\x1a\x77\xed\x43\x85"
+			  "\x1a\xd7\x7f\x16\xf5\x41\xdf\xd2"
+			  "\x69\xd5\x0d\x6a\x5f\x14\xfb\x0a"
+			  "\xab\x1c\xbb\x4c\x15\x50\xbe\x97"
+			  "\xf7\xab\x40\x66\x19\x3c\x4c\xaa"
+			  "\x77\x3d\xad\x38\x01\x4b\xd2\x09"
+			  "\x2f\xa7\x55\xc8\x24\xbb\x5e\x54"
+			  "\xc4\xf3\x6f\xfd\xa9\xfc\xea\x70"
+			  "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51",
+		.ilen	= 512,
+		.result	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.rlen	= 512,
+
 	}
 };
 
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a60043b3e409..1d2ebc7a4947 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -210,3 +210,15 @@ config HW_RANDOM_PICOXCELL
 	  module will be called picoxcell-rng.
 
 	  If unsure, say Y.
+
+config HW_RANDOM_PPC4XX
+	tristate "PowerPC 4xx generic true random number generator support"
+	depends on HW_RANDOM && PPC && 4xx
+	---help---
+	  This driver provides the kernel-side support for the TRNG hardware
+	  found in the security function of some PowerPC 4xx SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ppc4xx-rng.
+
+	  If unsure, say N.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3db4eb8b19c0..c88f244c8a71 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
+obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index dd1d143eb8ea..52e08ca3ccd7 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -55,7 +55,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 
 	ret = amba_request_regions(dev, dev->dev.init_name);
 	if (ret)
-		return ret;
+		goto out_clk;
 	ret = -ENOMEM;
 	base = ioremap(dev->res.start, resource_size(&dev->res));
 	if (!base)
@@ -70,6 +70,7 @@ out_unmap:
 	iounmap(base);
 out_release:
 	amba_release_regions(dev);
+out_clk:
 	clk_disable(rng_clk);
 	clk_put(rng_clk);
 	return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 2cc755a64302..b757fac3cd1f 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -113,8 +113,10 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-	if (!res)
-		return -ENOENT;
+	if (!res) {
+		ret = -ENOENT;
+		goto err_region;
+	}
 
 	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
 		ret = -EBUSY;
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
new file mode 100644
index 000000000000..b8afa6a4ff67
--- /dev/null
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -0,0 +1,156 @@
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define PPC4XX_TRNG_DEV_CTRL 0x60080
+
+#define PPC4XX_TRNGE 0x00020000
+#define PPC4XX_TRNG_CTRL 0x0008
+#define PPC4XX_TRNG_CTRL_DALM 0x20
+#define PPC4XX_TRNG_STAT 0x0004
+#define PPC4XX_TRNG_STAT_B 0x1
+#define PPC4XX_TRNG_DATA 0x0000
+
+#define MODULE_NAME "ppc4xx_rng"
+
+static int ppc4xx_rng_data_present(struct hwrng *rng, int wait)
+{
+	void __iomem *rng_regs = (void __iomem *) rng->priv;
+	int busy, i, present = 0;
+
+	for (i = 0; i < 20; i++) {
+		busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B);
+		if (!busy || !wait) {
+			present = 1;
+			break;
+		}
+		udelay(10);
+	}
+	return present;
+}
+
+static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	void __iomem *rng_regs = (void __iomem *) rng->priv;
+	*data = in_le32(rng_regs + PPC4XX_TRNG_DATA);
+	return 4;
+}
+
+static int ppc4xx_rng_enable(int enable)
+{
+	struct device_node *ctrl;
+	void __iomem *ctrl_reg;
+	int err = 0;
+	u32 val;
+
+	/* Find the main crypto device node and map it to turn the TRNG on */
+	ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto");
+	if (!ctrl)
+		return -ENODEV;
+
+	ctrl_reg = of_iomap(ctrl, 0);
+	if (!ctrl_reg) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL);
+
+	if (enable)
+		val |= PPC4XX_TRNGE;
+	else
+		val = val & ~PPC4XX_TRNGE;
+
+	out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val);
+	iounmap(ctrl_reg);
+
+out:
+	of_node_put(ctrl);
+
+	return err;
+}
+
+static struct hwrng ppc4xx_rng = {
+	.name = MODULE_NAME,
+	.data_present = ppc4xx_rng_data_present,
+	.data_read = ppc4xx_rng_data_read,
+};
+
+static int __devinit ppc4xx_rng_probe(struct platform_device *dev)
+{
+	void __iomem *rng_regs;
+	int err = 0;
+
+	rng_regs = of_iomap(dev->dev.of_node, 0);
+	if (!rng_regs)
+		return -ENODEV;
+
+	err = ppc4xx_rng_enable(1);
+	if (err)
+		return err;
+
+	out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+	ppc4xx_rng.priv = (unsigned long) rng_regs;
+
+	err = hwrng_register(&ppc4xx_rng);
+
+	return err;
+}
+
+static int __devexit ppc4xx_rng_remove(struct platform_device *dev)
+{
+	void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
+
+	hwrng_unregister(&ppc4xx_rng);
+	ppc4xx_rng_enable(0);
+	iounmap(rng_regs);
+
+	return 0;
+}
+
+static struct of_device_id ppc4xx_rng_match[] = {
+	{ .compatible = "ppc4xx-rng", },
+	{ .compatible = "amcc,ppc460ex-rng", },
+	{ .compatible = "amcc,ppc440epx-rng", },
+	{},
+};
+
+static struct platform_driver ppc4xx_rng_driver = {
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ppc4xx_rng_match,
+	},
+	.probe = ppc4xx_rng_probe,
+	.remove = ppc4xx_rng_remove,
+};
+
+static int __init ppc4xx_rng_init(void)
+{
+	return platform_driver_register(&ppc4xx_rng_driver);
+}
+module_init(ppc4xx_rng_init);
+
+static void __exit ppc4xx_rng_exit(void)
+{
+	platform_driver_unregister(&ppc4xx_rng_driver);
+}
+module_exit(ppc4xx_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors");
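For context on how the two callbacks registered above get used: the hwrng core calls data_present() (with wait set when the caller can afford to poll) and only then data_read(). A simplified sketch of that consumer contract, with stand-in callbacks instead of the real MMIO accessors (this is an assumption-level model of the hwrng core loop, not a copy of it):

    #include <stdio.h>

    struct hwrng_ops {
        int (*data_present)(int wait);
        int (*data_read)(unsigned int *data);
    };

    /* Hypothetical stand-ins for ppc4xx_rng_data_present()/..._data_read() */
    static int fake_present(int wait) { (void)wait; return 1; }
    static int fake_read(unsigned int *data) { *data = 0x12345678; return 4; }

    static int rng_get_word(const struct hwrng_ops *ops, unsigned int *out)
    {
        if (!ops->data_present(1))   /* wait == 1: the driver may poll briefly */
            return 0;                /* no entropy ready */
        return ops->data_read(out);  /* number of bytes produced (4 here) */
    }

    int main(void)
    {
        struct hwrng_ops ops = { fake_present, fake_read };
        unsigned int w;

        if (rng_get_word(&ops, &w) == 4)
            printf("random word: 0x%08x\n", w);
        return 0;
    }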
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index a94e930575f2..a8428e6f64a9 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -100,8 +100,7 @@ static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
 
 	timeriomem_rng_data = pdev->dev.platform_data;
 
-	timeriomem_rng_data->address = ioremap(res->start,
-						res->end - res->start + 1);
+	timeriomem_rng_data->address = ioremap(res->start, resource_size(res));
 	if (!timeriomem_rng_data->address)
 		return -EIO;
 
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 18912521a7a5..1d103f997dc2 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -51,6 +51,7 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
 	union ce_io_threshold io_threshold;
 	u32 rand_num;
 	union ce_pe_dma_cfg pe_dma_cfg;
+	u32 device_ctrl;
 
 	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
 	/* setup pe dma, include reset sg, pdr and pe, then release reset */
@@ -84,7 +85,9 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
 	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
 	ring_ctrl.w = 0;
 	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
-	writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	device_ctrl |= PPC4XX_DC_3DES_EN;
+	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
 	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
 	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
 	part_ring_size.w = 0;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 676d957c22b0..4159265b453b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -62,10 +62,22 @@
 #define CAAM_MAX_IV_LENGTH		16
 
 /* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN	4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN	21
-#define DESC_AEAD_DECRYPT_TEXT_LEN	24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN	27
+#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
+					 CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
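These length macros feed the "keys fit inline" decision made in aead_set_sh_desc() in the next hunk: a shared descriptor, the job descriptor's I/O commands, and any inlined key material must together fit the 64-word descriptor buffer. A back-of-envelope check under stated assumptions (CAAM_CMD_SZ = 4, CAAM_PTR_SZ = 8 and a 256-byte buffer are illustrative guesses; the real values come from the caam headers):

    #include <stdio.h>

    #define CAAM_CMD_SZ           4
    #define CAAM_PTR_SZ           8
    #define CAAM_DESC_BYTES_MAX   (CAAM_CMD_SZ * 64)

    #define DESC_JOB_IO_LEN       (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
    #define DESC_AEAD_BASE        (4 * CAAM_CMD_SZ)
    #define DESC_AEAD_ENC_LEN     (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)

    int main(void)
    {
        unsigned int split_key_pad_len = 64;   /* padded sha256 split key */
        unsigned int enckeylen = 16;           /* aes-128 */
        unsigned int used = DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
                            split_key_pad_len + enckeylen;

        printf("%u of %u bytes -> keys %s inline\n", used, CAAM_DESC_BYTES_MAX,
               used <= CAAM_DESC_BYTES_MAX ? "fit" : "do not fit");
        return 0;
    }

With these numbers the encrypt descriptor uses 196 of 256 bytes, so both keys can be inlined.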
@@ -76,30 +88,366 @@
 #define debug(format, arg...)
 #endif
 
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+	u32 *jump_cmd, *uncond_jump_cmd;
+
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT);
+	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+	u32 *jump_cmd;
+
+	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+		   LDST_CLASS_1_CCB | ivsize);
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG		1
+#define GIV_DST_CONTIG		(1 << 1)
+
 /*
  * per-session context
  */
 struct caam_ctx {
 	struct device *jrdev;
-	u32 *sh_desc;
-	dma_addr_t shared_desc_phys;
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	dma_addr_t sh_desc_enc_dma;
+	dma_addr_t sh_desc_dec_dma;
+	dma_addr_t sh_desc_givenc_dma;
 	u32 class1_alg_type;
 	u32 class2_alg_type;
 	u32 alg_op;
-	u8 *key;
-	dma_addr_t key_phys;
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t key_dma;
 	unsigned int enckeylen;
 	unsigned int split_key_len;
 	unsigned int split_key_pad_len;
 	unsigned int authsize;
 };
 
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+			    int keys_fit_inline)
+{
+	if (keys_fit_inline) {
+		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+				  ctx->split_key_len, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key_as_imm(desc, (void *)ctx->key +
+				  ctx->split_key_pad_len, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	} else {
+		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	}
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+				  int keys_fit_inline)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	append_key_aead(desc, ctx, keys_fit_inline);
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Propagate errors from shared to job descriptor */
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = 0;
+	u32 *key_jump_cmd, *jump_cmd;
+	u32 geniv, moveiv;
+	u32 *desc;
+
+	if (!ctx->enckeylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = 1;
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+	/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+	aead_append_ld_iv(desc, tfm->ivsize);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = 1;
+
+	desc = ctx->sh_desc_dec;
+
+	/* aead_decrypt shared descriptor */
+	init_sh_desc(desc, HDR_SHARE_WAIT);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	append_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Only propagate error immediately if shared */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, key_jump_cmd);
+	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	aead_append_ld_iv(desc, tfm->ivsize);
+
+	append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+	append_dec_shr_done(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = 1;
+
+	/* aead_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy IV to class 1 context */
+	append_move(desc, MOVE_SRC_CLASS1CTX |
+		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Return to encryption */
+	append_operation(desc, ctx->class2_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen = seqinlen - (ivsize + cryptlen) */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Copy iv from class 1 ctx to class 2 fifo */
+	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload iv */
+	append_seq_fifo_load(desc, tfm->ivsize,
+			     FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
 			    unsigned int authsize)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 
 	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
 
 	return 0;
 }
@@ -117,6 +465,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err,
 #ifdef DEBUG
 	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
+
 	if (err) {
 		char tmp[CAAM_ERROR_STR_MAX];
 
@@ -220,73 +569,7 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) | |||
220 | return ret; | 569 | return ret; |
221 | } | 570 | } |
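Hedged background on the split key that gen_split_key() above produces: the MDHA "split key" is the HMAC key pre-hashed into its inner (ipad) and outer (opad) intermediate states, so per-request descriptors skip the two extra hash blocks a raw HMAC key would cost. Illustrative sizing, assuming hmac(sha256):

	/*
	 * hmac(sha256): each intermediate state is 32 bytes, so
	 * split_key_len = 64 (ipad state || opad state) and
	 * split_key_pad_len rounds that up to the MDHA pad boundary.
	 */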
222 | 571 | ||
223 | static int build_sh_desc_ipsec(struct caam_ctx *ctx) | 572 | static int aead_setkey(struct crypto_aead *aead, |
224 | { | ||
225 | struct device *jrdev = ctx->jrdev; | ||
226 | u32 *sh_desc; | ||
227 | u32 *jump_cmd; | ||
228 | bool keys_fit_inline = 0; | ||
229 | |||
230 | /* | ||
231 | * largest Job Descriptor and its Shared Descriptor | ||
232 | * must both fit into the 64-word Descriptor h/w Buffer | ||
233 | */ | ||
234 | if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN + | ||
235 | DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ + | ||
236 | ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) | ||
237 | keys_fit_inline = 1; | ||
238 | |||
239 | /* build shared descriptor for this session */ | ||
240 | sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + | ||
241 | (keys_fit_inline ? | ||
242 | ctx->split_key_pad_len + ctx->enckeylen : | ||
243 | CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL); | ||
244 | if (!sh_desc) { | ||
245 | dev_err(jrdev, "could not allocate shared descriptor\n"); | ||
246 | return -ENOMEM; | ||
247 | } | ||
248 | |||
249 | init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL); | ||
250 | |||
251 | jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL | | ||
252 | JUMP_COND_SHRD | JUMP_COND_SELF); | ||
253 | |||
254 | /* | ||
255 | * process keys, starting with class 2/authentication. | ||
256 | */ | ||
257 | if (keys_fit_inline) { | ||
258 | append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len, | ||
259 | ctx->split_key_len, | ||
260 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
261 | |||
262 | append_key_as_imm(sh_desc, (void *)ctx->key + | ||
263 | ctx->split_key_pad_len, ctx->enckeylen, | ||
264 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
265 | } else { | ||
266 | append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 | | ||
267 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | ||
268 | append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len, | ||
269 | ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); | ||
270 | } | ||
271 | |||
272 | /* update jump cmd now that we are at the jump target */ | ||
273 | set_jump_tgt_here(sh_desc, jump_cmd); | ||
274 | |||
275 | ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc, | ||
276 | desc_bytes(sh_desc), | ||
277 | DMA_TO_DEVICE); | ||
278 | if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) { | ||
279 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
280 | kfree(sh_desc); | ||
281 | return -ENOMEM; | ||
282 | } | ||
283 | |||
284 | ctx->sh_desc = sh_desc; | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static int aead_authenc_setkey(struct crypto_aead *aead, | ||
290 | const u8 *key, unsigned int keylen) | 573 | const u8 *key, unsigned int keylen) |
291 | { | 574 | { |
292 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | 575 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ |
@@ -326,27 +609,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead, | |||
326 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | 609 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", |
327 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 610 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
328 | #endif | 611 | #endif |
329 | ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen, | ||
330 | GFP_KERNEL | GFP_DMA); | ||
331 | if (!ctx->key) { | ||
332 | dev_err(jrdev, "could not allocate key output memory\n"); | ||
333 | return -ENOMEM; | ||
334 | } | ||
335 | 612 | ||
336 | ret = gen_split_key(ctx, key, authkeylen); | 613 | ret = gen_split_key(ctx, key, authkeylen); |
337 | if (ret) { | 614 | if (ret) { |
338 | kfree(ctx->key); | ||
339 | goto badkey; | 615 | goto badkey; |
340 | } | 616 | } |
341 | 617 | ||
342 | /* append encryption key after the auth split key */ | 618 | /* append encryption key after the auth split key */ |
343 | memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); | 619 | memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); |
344 | 620 | ||
345 | ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + | 621 | ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + |
346 | enckeylen, DMA_TO_DEVICE); | 622 | enckeylen, DMA_TO_DEVICE); |
347 | if (dma_mapping_error(jrdev, ctx->key_phys)) { | 623 | if (dma_mapping_error(jrdev, ctx->key_dma)) { |
348 | dev_err(jrdev, "unable to map key i/o memory\n"); | 624 | dev_err(jrdev, "unable to map key i/o memory\n"); |
349 | kfree(ctx->key); | ||
350 | return -ENOMEM; | 625 | return -ENOMEM; |
351 | } | 626 | } |
352 | #ifdef DEBUG | 627 | #ifdef DEBUG |
@@ -357,11 +632,10 @@ static int aead_authenc_setkey(struct crypto_aead *aead, | |||
357 | 632 | ||
358 | ctx->enckeylen = enckeylen; | 633 | ctx->enckeylen = enckeylen; |
359 | 634 | ||
360 | ret = build_sh_desc_ipsec(ctx); | 635 | ret = aead_set_sh_desc(aead); |
361 | if (ret) { | 636 | if (ret) { |
362 | dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len + | 637 | dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + |
363 | enckeylen, DMA_TO_DEVICE); | 638 | enckeylen, DMA_TO_DEVICE); |
364 | kfree(ctx->key); | ||
365 | } | 639 | } |
366 | 640 | ||
367 | return ret; | 641 | return ret; |
@@ -370,6 +644,119 @@ badkey: | |||
370 | return -EINVAL; | 644 | return -EINVAL; |
371 | } | 645 | } |
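The shared descriptors built earlier assume the key blob layout that aead_setkey() produces; a sketch with symbolic offsets (the pad amount depends on the selected auth algorithm):

	/*
	 * ctx->key after aead_setkey():
	 *
	 *   0                  split_key_pad_len      split_key_pad_len + enckeylen
	 *   | MDHA split key (split_key_len) | pad | encryption key |
	 *
	 * ctx->key_dma maps the whole blob once; when the keys fit inline,
	 * aead_set_sh_desc() embeds both pieces in the shared descriptor
	 * instead of referencing ctx->key_dma.
	 */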
372 | 646 | ||
647 | static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | ||
648 | const u8 *key, unsigned int keylen) | ||
649 | { | ||
650 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
651 | struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher; | ||
652 | struct device *jrdev = ctx->jrdev; | ||
653 | int ret = 0; | ||
654 | u32 *key_jump_cmd, *jump_cmd; | ||
655 | u32 *desc; | ||
656 | |||
657 | #ifdef DEBUG | ||
658 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | ||
659 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
660 | #endif | ||
661 | |||
662 | memcpy(ctx->key, key, keylen); | ||
663 | ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, | ||
664 | DMA_TO_DEVICE); | ||
665 | if (dma_mapping_error(jrdev, ctx->key_dma)) { | ||
666 | dev_err(jrdev, "unable to map key i/o memory\n"); | ||
667 | return -ENOMEM; | ||
668 | } | ||
669 | ctx->enckeylen = keylen; | ||
670 | |||
671 | /* ablkcipher_encrypt shared descriptor */ | ||
672 | desc = ctx->sh_desc_enc; | ||
673 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
674 | /* Skip if already shared */ | ||
675 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
676 | JUMP_COND_SHRD); | ||
677 | |||
678 | /* Load class1 key only */ | ||
679 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
680 | ctx->enckeylen, CLASS_1 | | ||
681 | KEY_DEST_CLASS_REG); | ||
682 | |||
683 | set_jump_tgt_here(desc, key_jump_cmd); | ||
684 | |||
685 | /* Propagate errors from shared to job descriptor */ | ||
686 | append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); | ||
687 | |||
688 | /* Load iv */ | ||
689 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
690 | LDST_CLASS_1_CCB | tfm->ivsize); | ||
691 | |||
692 | /* Load operation */ | ||
693 | append_operation(desc, ctx->class1_alg_type | | ||
694 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | ||
695 | |||
696 | /* Perform operation */ | ||
697 | ablkcipher_append_src_dst(desc); | ||
698 | |||
699 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | ||
700 | desc_bytes(desc), | ||
701 | DMA_TO_DEVICE); | ||
702 | if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { | ||
703 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
704 | return -ENOMEM; | ||
705 | } | ||
706 | #ifdef DEBUG | ||
707 | print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ", | ||
708 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
709 | desc_bytes(desc), 1); | ||
710 | #endif | ||
711 | /* ablkcipher_decrypt shared descriptor */ | ||
712 | desc = ctx->sh_desc_dec; | ||
713 | |||
714 | init_sh_desc(desc, HDR_SHARE_WAIT); | ||
715 | /* Skip if already shared */ | ||
716 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | | ||
717 | JUMP_COND_SHRD); | ||
718 | |||
719 | /* Load class1 key only */ | ||
720 | append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, | ||
721 | ctx->enckeylen, CLASS_1 | | ||
722 | KEY_DEST_CLASS_REG); | ||
723 | |||
724 | /* Only propagate error immediately if shared */ | ||
725 | jump_cmd = append_jump(desc, JUMP_TEST_ALL); | ||
726 | set_jump_tgt_here(desc, key_jump_cmd); | ||
727 | append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); | ||
728 | set_jump_tgt_here(desc, jump_cmd); | ||
729 | |||
730 | /* load IV */ | ||
731 | append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | | ||
732 | LDST_CLASS_1_CCB | tfm->ivsize); | ||
733 | |||
734 | /* Choose operation */ | ||
735 | append_dec_op1(desc, ctx->class1_alg_type); | ||
736 | |||
737 | /* Perform operation */ | ||
738 | ablkcipher_append_src_dst(desc); | ||
739 | |||
740 | /* Wait for key to load before allowing errors to propagate */ | ||
741 | append_dec_shr_done(desc); | ||
742 | |||
743 | ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, | ||
744 | desc_bytes(desc), | ||
745 | DMA_TO_DEVICE); | ||
746 | if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { | ||
747 | dev_err(jrdev, "unable to map shared descriptor\n"); | ||
748 | return -ENOMEM; | ||
749 | } | ||
750 | |||
751 | #ifdef DEBUG | ||
752 | print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ", | ||
753 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
754 | desc_bytes(desc), 1); | ||
755 | #endif | ||
756 | |||
757 | return ret; | ||
758 | } | ||
759 | |||
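For orientation, a hedged sketch of the caller side that reaches ablkcipher_setkey(); all names are the standard ablkcipher API of this kernel era, error handling trimmed and the key value illustrative:

	struct crypto_ablkcipher *tfm;
	u8 key[16];	/* AES-128 key, illustrative */

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	if (crypto_ablkcipher_setkey(tfm, key, sizeof(key)))
		return -EINVAL;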
373 | struct link_tbl_entry { | 760 | struct link_tbl_entry { |
374 | u64 ptr; | 761 | u64 ptr; |
375 | u32 len; | 762 | u32 len; |
@@ -379,64 +766,109 @@ struct link_tbl_entry { | |||
379 | }; | 766 | }; |
380 | 767 | ||
381 | /* | 768 | /* |
382 | * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor | 769 | * aead_edesc - s/w-extended aead descriptor |
770 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | ||
383 | * @src_nents: number of segments in input scatterlist | 771 | * @src_nents: number of segments in input scatterlist |
384 | * @dst_nents: number of segments in output scatterlist | 772 | * @dst_nents: number of segments in output scatterlist |
385 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | 773 | * @iv_dma: dma address of iv for checking contiguity and link table |
386 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | 774 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) |
387 | * @link_tbl_bytes: length of dma mapped link_tbl space | 775 | * @link_tbl_bytes: length of dma mapped link_tbl space |
388 | * @link_tbl_dma: bus physical mapped address of h/w link table | 776 | * @link_tbl_dma: bus physical mapped address of h/w link table |
389 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | 777 | * @hw_desc: the h/w job descriptor followed by any referenced link tables |
390 | */ | 778 | */ |
391 | struct ipsec_esp_edesc { | 779 | struct aead_edesc { |
392 | int assoc_nents; | 780 | int assoc_nents; |
393 | int src_nents; | 781 | int src_nents; |
394 | int dst_nents; | 782 | int dst_nents; |
783 | dma_addr_t iv_dma; | ||
395 | int link_tbl_bytes; | 784 | int link_tbl_bytes; |
396 | dma_addr_t link_tbl_dma; | 785 | dma_addr_t link_tbl_dma; |
397 | struct link_tbl_entry *link_tbl; | 786 | struct link_tbl_entry *link_tbl; |
398 | u32 hw_desc[0]; | 787 | u32 hw_desc[0]; |
399 | }; | 788 | }; |
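The zero-length hw_desc[] member above makes the s/w edesc, the h/w job descriptor and the link table share one allocation, which is also how the completion callbacks below recover the edesc; a sketch:

	/*
	 * One allocation (see aead_edesc_alloc() below):
	 *
	 *   | struct aead_edesc | hw_desc[] (desc_bytes) | link table |
	 *
	 * so a callback handed only the descriptor address can recover it:
	 */
	edesc = (struct aead_edesc *)((char *)desc -
				      offsetof(struct aead_edesc, hw_desc));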
400 | 789 | ||
401 | static void ipsec_esp_unmap(struct device *dev, | 790 | /* |
402 | struct ipsec_esp_edesc *edesc, | 791 | * ablkcipher_edesc - s/w-extended ablkcipher descriptor |
403 | struct aead_request *areq) | 792 | * @src_nents: number of segments in input scatterlist |
404 | { | 793 | * @dst_nents: number of segments in output scatterlist |
405 | dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | 794 | * @iv_dma: dma address of iv for checking contiguity and link table |
795 | * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) | ||
796 | * @link_tbl_bytes: length of dma mapped link_tbl space | ||
797 | * @link_tbl_dma: bus physical mapped address of h/w link table | ||
798 | * @hw_desc: the h/w job descriptor followed by any referenced link tables | ||
799 | */ | ||
800 | struct ablkcipher_edesc { | ||
801 | int src_nents; | ||
802 | int dst_nents; | ||
803 | dma_addr_t iv_dma; | ||
804 | int link_tbl_bytes; | ||
805 | dma_addr_t link_tbl_dma; | ||
806 | struct link_tbl_entry *link_tbl; | ||
807 | u32 hw_desc[0]; | ||
808 | }; | ||
406 | 809 | ||
407 | if (unlikely(areq->dst != areq->src)) { | 810 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
408 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | 811 | struct scatterlist *dst, int src_nents, int dst_nents, |
409 | DMA_TO_DEVICE); | 812 | dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, |
410 | dma_unmap_sg(dev, areq->dst, edesc->dst_nents, | 813 | int link_tbl_bytes) |
411 | DMA_FROM_DEVICE); | 814 | { |
815 | if (unlikely(dst != src)) { | ||
816 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | ||
817 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | ||
412 | } else { | 818 | } else { |
413 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | 819 | dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); |
414 | DMA_BIDIRECTIONAL); | ||
415 | } | 820 | } |
416 | 821 | ||
417 | if (edesc->link_tbl_bytes) | 822 | if (iv_dma) |
418 | dma_unmap_single(dev, edesc->link_tbl_dma, | 823 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
419 | edesc->link_tbl_bytes, | 824 | if (link_tbl_bytes) |
825 | dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, | ||
420 | DMA_TO_DEVICE); | 826 | DMA_TO_DEVICE); |
421 | } | 827 | } |
422 | 828 | ||
423 | /* | 829 | static void aead_unmap(struct device *dev, |
424 | * ipsec_esp descriptor callbacks | 830 | struct aead_edesc *edesc, |
425 | */ | 831 | struct aead_request *req) |
426 | static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | 832 | { |
833 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
834 | int ivsize = crypto_aead_ivsize(aead); | ||
835 | |||
836 | dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | ||
837 | |||
838 | caam_unmap(dev, req->src, req->dst, | ||
839 | edesc->src_nents, edesc->dst_nents, | ||
840 | edesc->iv_dma, ivsize, edesc->link_tbl_dma, | ||
841 | edesc->link_tbl_bytes); | ||
842 | } | ||
843 | |||
844 | static void ablkcipher_unmap(struct device *dev, | ||
845 | struct ablkcipher_edesc *edesc, | ||
846 | struct ablkcipher_request *req) | ||
847 | { | ||
848 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
849 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
850 | |||
851 | caam_unmap(dev, req->src, req->dst, | ||
852 | edesc->src_nents, edesc->dst_nents, | ||
853 | edesc->iv_dma, ivsize, edesc->link_tbl_dma, | ||
854 | edesc->link_tbl_bytes); | ||
855 | } | ||
856 | |||
857 | static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
427 | void *context) | 858 | void *context) |
428 | { | 859 | { |
429 | struct aead_request *areq = context; | 860 | struct aead_request *req = context; |
430 | struct ipsec_esp_edesc *edesc; | 861 | struct aead_edesc *edesc; |
431 | #ifdef DEBUG | 862 | #ifdef DEBUG |
432 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 863 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
433 | int ivsize = crypto_aead_ivsize(aead); | ||
434 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 864 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
865 | int ivsize = crypto_aead_ivsize(aead); | ||
435 | 866 | ||
436 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 867 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
437 | #endif | 868 | #endif |
438 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | 869 | |
439 | offsetof(struct ipsec_esp_edesc, hw_desc)); | 870 | edesc = (struct aead_edesc *)((char *)desc - |
871 | offsetof(struct aead_edesc, hw_desc)); | ||
440 | 872 | ||
441 | if (err) { | 873 | if (err) { |
442 | char tmp[CAAM_ERROR_STR_MAX]; | 874 | char tmp[CAAM_ERROR_STR_MAX]; |
@@ -444,39 +876,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
444 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | 876 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); |
445 | } | 877 | } |
446 | 878 | ||
447 | ipsec_esp_unmap(jrdev, edesc, areq); | 879 | aead_unmap(jrdev, edesc, req); |
448 | 880 | ||
449 | #ifdef DEBUG | 881 | #ifdef DEBUG |
450 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | 882 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", |
451 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | 883 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), |
452 | areq->assoclen , 1); | 884 | req->assoclen , 1); |
453 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | 885 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", |
454 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | 886 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, |
455 | edesc->src_nents ? 100 : ivsize, 1); | 887 | edesc->src_nents ? 100 : ivsize, 1); |
456 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | 888 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", |
457 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | 889 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
458 | edesc->src_nents ? 100 : areq->cryptlen + | 890 | edesc->src_nents ? 100 : req->cryptlen + |
459 | ctx->authsize + 4, 1); | 891 | ctx->authsize + 4, 1); |
460 | #endif | 892 | #endif |
461 | 893 | ||
462 | kfree(edesc); | 894 | kfree(edesc); |
463 | 895 | ||
464 | aead_request_complete(areq, err); | 896 | aead_request_complete(req, err); |
465 | } | 897 | } |
466 | 898 | ||
467 | static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | 899 | static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, |
468 | void *context) | 900 | void *context) |
469 | { | 901 | { |
470 | struct aead_request *areq = context; | 902 | struct aead_request *req = context; |
471 | struct ipsec_esp_edesc *edesc; | 903 | struct aead_edesc *edesc; |
472 | #ifdef DEBUG | 904 | #ifdef DEBUG |
473 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 905 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
474 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 906 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
907 | int ivsize = crypto_aead_ivsize(aead); | ||
475 | 908 | ||
476 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 909 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
477 | #endif | 910 | #endif |
478 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | 911 | |
479 | offsetof(struct ipsec_esp_edesc, hw_desc)); | 912 | edesc = (struct aead_edesc *)((char *)desc - |
913 | offsetof(struct aead_edesc, hw_desc)); | ||
914 | |||
915 | #ifdef DEBUG | ||
916 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | ||
917 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
918 | ivsize, 1); | ||
919 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | ||
920 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), | ||
921 | req->cryptlen, 1); | ||
922 | #endif | ||
480 | 923 | ||
481 | if (err) { | 924 | if (err) { |
482 | char tmp[CAAM_ERROR_STR_MAX]; | 925 | char tmp[CAAM_ERROR_STR_MAX]; |
@@ -484,7 +927,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
484 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | 927 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); |
485 | } | 928 | } |
486 | 929 | ||
487 | ipsec_esp_unmap(jrdev, edesc, areq); | 930 | aead_unmap(jrdev, edesc, req); |
488 | 931 | ||
489 | /* | 932 | /* |
490 | * verify hw auth check passed else return -EBADMSG | 933 | * verify hw auth check passed else return -EBADMSG |
@@ -495,255 +938,413 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
495 | #ifdef DEBUG | 938 | #ifdef DEBUG |
496 | print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", | 939 | print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", |
497 | DUMP_PREFIX_ADDRESS, 16, 4, | 940 | DUMP_PREFIX_ADDRESS, 16, 4, |
498 | ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)), | 941 | ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), |
499 | sizeof(struct iphdr) + areq->assoclen + | 942 | sizeof(struct iphdr) + req->assoclen + |
500 | ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) + | 943 | ((req->cryptlen > 1500) ? 1500 : req->cryptlen) + |
501 | ctx->authsize + 36, 1); | 944 | ctx->authsize + 36, 1); |
502 | if (!err && edesc->link_tbl_bytes) { | 945 | if (!err && edesc->link_tbl_bytes) { |
503 | struct scatterlist *sg = sg_last(areq->src, edesc->src_nents); | 946 | struct scatterlist *sg = sg_last(req->src, edesc->src_nents); |
504 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", | 947 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", |
505 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | 948 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), |
506 | sg->length + ctx->authsize + 16, 1); | 949 | sg->length + ctx->authsize + 16, 1); |
507 | } | 950 | } |
508 | #endif | 951 | #endif |
952 | |||
509 | kfree(edesc); | 953 | kfree(edesc); |
510 | 954 | ||
511 | aead_request_complete(areq, err); | 955 | aead_request_complete(req, err); |
956 | } | ||
957 | |||
958 | static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
959 | void *context) | ||
960 | { | ||
961 | struct ablkcipher_request *req = context; | ||
962 | struct ablkcipher_edesc *edesc; | ||
963 | #ifdef DEBUG | ||
964 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
965 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
966 | |||
967 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
968 | #endif | ||
969 | |||
970 | edesc = (struct ablkcipher_edesc *)((char *)desc - | ||
971 | offsetof(struct ablkcipher_edesc, hw_desc)); | ||
972 | |||
973 | if (err) { | ||
974 | char tmp[CAAM_ERROR_STR_MAX]; | ||
975 | |||
976 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
977 | } | ||
978 | |||
979 | #ifdef DEBUG | ||
980 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | ||
981 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | ||
982 | edesc->src_nents > 1 ? 100 : ivsize, 1); | ||
983 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | ||
984 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
985 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); | ||
986 | #endif | ||
987 | |||
988 | ablkcipher_unmap(jrdev, edesc, req); | ||
989 | kfree(edesc); | ||
990 | |||
991 | ablkcipher_request_complete(req, err); | ||
992 | } | ||
993 | |||
994 | static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | ||
995 | void *context) | ||
996 | { | ||
997 | struct ablkcipher_request *req = context; | ||
998 | struct ablkcipher_edesc *edesc; | ||
999 | #ifdef DEBUG | ||
1000 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1001 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
1002 | |||
1003 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
1004 | #endif | ||
1005 | |||
1006 | edesc = (struct ablkcipher_edesc *)((char *)desc - | ||
1007 | offsetof(struct ablkcipher_edesc, hw_desc)); | ||
1008 | if (err) { | ||
1009 | char tmp[CAAM_ERROR_STR_MAX]; | ||
1010 | |||
1011 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | ||
1012 | } | ||
1013 | |||
1014 | #ifdef DEBUG | ||
1015 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | ||
1016 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, | ||
1017 | ivsize, 1); | ||
1018 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | ||
1019 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
1020 | edesc->dst_nents > 1 ? 100 : req->nbytes, 1); | ||
1021 | #endif | ||
1022 | |||
1023 | ablkcipher_unmap(jrdev, edesc, req); | ||
1024 | kfree(edesc); | ||
1025 | |||
1026 | ablkcipher_request_complete(req, err); | ||
1027 | } | ||
1028 | |||
1029 | static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, | ||
1030 | dma_addr_t dma, u32 len, u32 offset) | ||
1031 | { | ||
1032 | link_tbl_ptr->ptr = dma; | ||
1033 | link_tbl_ptr->len = len; | ||
1034 | link_tbl_ptr->reserved = 0; | ||
1035 | link_tbl_ptr->buf_pool_id = 0; | ||
1036 | link_tbl_ptr->offset = offset; | ||
1037 | #ifdef DEBUG | ||
1038 | print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ", | ||
1039 | DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, | ||
1040 | sizeof(struct link_tbl_entry), 1); | ||
1041 | #endif | ||
512 | } | 1042 | } |
513 | 1043 | ||
514 | /* | 1044 | /* |
515 | * convert scatterlist to h/w link table format | 1045 | * convert scatterlist to h/w link table format |
516 | * scatterlist must have been previously dma mapped | 1046 | * but does not set the final bit; instead, returns the last entry |
517 | */ | 1047 | */ |
518 | static void sg_to_link_tbl(struct scatterlist *sg, int sg_count, | 1048 | static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg, |
519 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | 1049 | int sg_count, struct link_tbl_entry |
1050 | *link_tbl_ptr, u32 offset) | ||
520 | { | 1051 | { |
521 | while (sg_count) { | 1052 | while (sg_count) { |
522 | link_tbl_ptr->ptr = sg_dma_address(sg); | 1053 | sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), |
523 | link_tbl_ptr->len = sg_dma_len(sg); | 1054 | sg_dma_len(sg), offset); |
524 | link_tbl_ptr->reserved = 0; | ||
525 | link_tbl_ptr->buf_pool_id = 0; | ||
526 | link_tbl_ptr->offset = offset; | ||
527 | link_tbl_ptr++; | 1055 | link_tbl_ptr++; |
528 | sg = sg_next(sg); | 1056 | sg = sg_next(sg); |
529 | sg_count--; | 1057 | sg_count--; |
530 | } | 1058 | } |
1059 | return link_tbl_ptr - 1; | ||
1060 | } | ||
531 | 1061 | ||
532 | /* set Final bit (marks end of link table) */ | 1062 | /* |
533 | link_tbl_ptr--; | 1063 | * convert scatterlist to h/w link table format |
1064 | * scatterlist must have been previously dma mapped | ||
1065 | */ | ||
1066 | static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, | ||
1067 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | ||
1068 | { | ||
1069 | link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); | ||
534 | link_tbl_ptr->len |= 0x40000000; | 1070 | link_tbl_ptr->len |= 0x40000000; |
535 | } | 1071 | } |
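The bare 0x40000000 above is the link-table "final entry" flag, bit 30 of the length word, which the removed code annotated with a comment; a named constant would keep both helpers self-documenting (the name below is hypothetical, not from this patch or desc.h):

	#define LINK_TBL_LEN_FIN	0x40000000	/* hypothetical: final-entry bit in len */

		link_tbl_ptr->len |= LINK_TBL_LEN_FIN;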
536 | 1072 | ||
537 | /* | 1073 | /* |
538 | * fill in and submit ipsec_esp job descriptor | 1074 | * Fill in aead job descriptor |
539 | */ | 1075 | */ |
540 | static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | 1076 | static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, |
541 | u32 encrypt, | 1077 | struct aead_edesc *edesc, |
542 | void (*callback) (struct device *dev, u32 *desc, | 1078 | struct aead_request *req, |
543 | u32 err, void *context)) | 1079 | bool all_contig, bool encrypt) |
544 | { | 1080 | { |
545 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1081 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
546 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1082 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
547 | struct device *jrdev = ctx->jrdev; | ||
548 | u32 *desc = edesc->hw_desc, options; | ||
549 | int ret, sg_count, assoc_sg_count; | ||
550 | int ivsize = crypto_aead_ivsize(aead); | 1083 | int ivsize = crypto_aead_ivsize(aead); |
551 | int authsize = ctx->authsize; | 1084 | int authsize = ctx->authsize; |
552 | dma_addr_t ptr, dst_dma, src_dma; | 1085 | u32 *desc = edesc->hw_desc; |
553 | #ifdef DEBUG | 1086 | u32 out_options = 0, in_options; |
554 | u32 *sh_desc = ctx->sh_desc; | 1087 | dma_addr_t dst_dma, src_dma; |
1088 | int len, link_tbl_index = 0; | ||
555 | 1089 | ||
1090 | #ifdef DEBUG | ||
556 | debug("assoclen %d cryptlen %d authsize %d\n", | 1091 | debug("assoclen %d cryptlen %d authsize %d\n", |
557 | areq->assoclen, areq->cryptlen, authsize); | 1092 | req->assoclen, req->cryptlen, authsize); |
558 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | 1093 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", |
559 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | 1094 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), |
560 | areq->assoclen , 1); | 1095 | req->assoclen , 1); |
561 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | 1096 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", |
562 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | 1097 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
563 | edesc->src_nents ? 100 : ivsize, 1); | 1098 | edesc->src_nents ? 100 : ivsize, 1); |
564 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | 1099 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", |
565 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | 1100 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
566 | edesc->src_nents ? 100 : areq->cryptlen + authsize, 1); | 1101 | edesc->src_nents ? 100 : req->cryptlen, 1); |
567 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", | 1102 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", |
568 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | 1103 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, |
569 | desc_bytes(sh_desc), 1); | 1104 | desc_bytes(sh_desc), 1); |
570 | #endif | 1105 | #endif |
571 | assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1, | ||
572 | DMA_TO_DEVICE); | ||
573 | if (areq->src == areq->dst) | ||
574 | sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, | ||
575 | DMA_BIDIRECTIONAL); | ||
576 | else | ||
577 | sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, | ||
578 | DMA_TO_DEVICE); | ||
579 | 1106 | ||
580 | /* start auth operation */ | 1107 | len = desc_len(sh_desc); |
581 | append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | | 1108 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); |
582 | (encrypt ? : OP_ALG_ICV_ON)); | ||
583 | 1109 | ||
584 | /* Load FIFO with data for Class 2 CHA */ | 1110 | if (all_contig) { |
585 | options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG; | 1111 | src_dma = sg_dma_address(req->assoc); |
586 | if (!edesc->assoc_nents) { | 1112 | in_options = 0; |
587 | ptr = sg_dma_address(areq->assoc); | ||
588 | } else { | 1113 | } else { |
589 | sg_to_link_tbl(areq->assoc, edesc->assoc_nents, | 1114 | src_dma = edesc->link_tbl_dma; |
590 | edesc->link_tbl, 0); | 1115 | link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + |
591 | ptr = edesc->link_tbl_dma; | 1116 | (edesc->src_nents ? : 1); |
592 | options |= LDST_SGF; | 1117 | in_options = LDST_SGF; |
593 | } | 1118 | } |
594 | append_fifo_load(desc, ptr, areq->assoclen, options); | 1119 | if (encrypt) |
595 | 1120 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | |
596 | /* copy iv from cipher/class1 input context to class2 infifo */ | 1121 | req->cryptlen - authsize, in_options); |
597 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); | 1122 | else |
598 | 1123 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + | |
599 | if (!encrypt) { | 1124 | req->cryptlen, in_options); |
600 | u32 *jump_cmd, *uncond_jump_cmd; | ||
601 | |||
602 | /* JUMP if shared */ | ||
603 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | ||
604 | 1125 | ||
605 | /* start class 1 (cipher) operation, non-shared version */ | 1126 | if (likely(req->src == req->dst)) { |
606 | append_operation(desc, ctx->class1_alg_type | | 1127 | if (all_contig) { |
607 | OP_ALG_AS_INITFINAL); | 1128 | dst_dma = sg_dma_address(req->src); |
1129 | } else { | ||
1130 | dst_dma = src_dma + sizeof(struct link_tbl_entry) * | ||
1131 | ((edesc->assoc_nents ? : 1) + 1); | ||
1132 | out_options = LDST_SGF; | ||
1133 | } | ||
1134 | } else { | ||
1135 | if (!edesc->dst_nents) { | ||
1136 | dst_dma = sg_dma_address(req->dst); | ||
1137 | } else { | ||
1138 | dst_dma = edesc->link_tbl_dma + | ||
1139 | link_tbl_index * | ||
1140 | sizeof(struct link_tbl_entry); | ||
1141 | out_options = LDST_SGF; | ||
1142 | } | ||
1143 | } | ||
1144 | if (encrypt) | ||
1145 | append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); | ||
1146 | else | ||
1147 | append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, | ||
1148 | out_options); | ||
1149 | } | ||
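A worked instance of the sequence lengths init_aead_job() programs (the numbers are illustrative; recall that aead_encrypt() below bumps req->cryptlen by authsize before allocating the edesc):

	/*
	 * Encrypt, assoclen = 16, ivsize = 16, plaintext = 64, authsize = 12,
	 * so req->cryptlen arrives here as 64 + 12 = 76:
	 *
	 *   seq-in  = assoclen + ivsize + cryptlen - authsize
	 *           = 16 + 16 + 76 - 12 = 96    (assoc || IV || plaintext)
	 *   seq-out = cryptlen = 76             (ciphertext || ICV)
	 *
	 * Decrypt, cryptlen untouched and covering ciphertext || ICV = 76:
	 *
	 *   seq-in  = 16 + 16 + 76 = 108
	 *   seq-out = 76 - 12 = 64              (recovered plaintext)
	 */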
608 | 1150 | ||
609 | uncond_jump_cmd = append_jump(desc, 0); | 1151 | /* |
1152 | * Fill in aead givencrypt job descriptor | ||
1153 | */ | ||
1154 | static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, | ||
1155 | struct aead_edesc *edesc, | ||
1156 | struct aead_request *req, | ||
1157 | int contig) | ||
1158 | { | ||
1159 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1160 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
1161 | int ivsize = crypto_aead_ivsize(aead); | ||
1162 | int authsize = ctx->authsize; | ||
1163 | u32 *desc = edesc->hw_desc; | ||
1164 | u32 out_options = 0, in_options; | ||
1165 | dma_addr_t dst_dma, src_dma; | ||
1166 | int len, link_tbl_index = 0; | ||
610 | 1167 | ||
611 | set_jump_tgt_here(desc, jump_cmd); | 1168 | #ifdef DEBUG |
1169 | debug("assoclen %d cryptlen %d authsize %d\n", | ||
1170 | req->assoclen, req->cryptlen, authsize); | ||
1171 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | ||
1172 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), | ||
1173 | req->assoclen , 1); | ||
1174 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | ||
1175 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | ||
1176 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | ||
1177 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
1178 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); | ||
1179 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", | ||
1180 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | ||
1181 | desc_bytes(sh_desc), 1); | ||
1182 | #endif | ||
612 | 1183 | ||
613 | /* start class 1 (cipher) operation, shared version */ | 1184 | len = desc_len(sh_desc); |
614 | append_operation(desc, ctx->class1_alg_type | | 1185 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); |
615 | OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK); | ||
616 | set_jump_tgt_here(desc, uncond_jump_cmd); | ||
617 | } else | ||
618 | append_operation(desc, ctx->class1_alg_type | | ||
619 | OP_ALG_AS_INITFINAL | encrypt); | ||
620 | 1186 | ||
621 | /* load payload & instruct to class2 to snoop class 1 if encrypting */ | 1187 | if (contig & GIV_SRC_CONTIG) { |
622 | options = 0; | 1188 | src_dma = sg_dma_address(req->assoc); |
623 | if (!edesc->src_nents) { | 1189 | in_options = 0; |
624 | src_dma = sg_dma_address(areq->src); | ||
625 | } else { | 1190 | } else { |
626 | sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl + | 1191 | src_dma = edesc->link_tbl_dma; |
627 | edesc->assoc_nents, 0); | 1192 | link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; |
628 | src_dma = edesc->link_tbl_dma + edesc->assoc_nents * | 1193 | in_options = LDST_SGF; |
629 | sizeof(struct link_tbl_entry); | ||
630 | options |= LDST_SGF; | ||
631 | } | 1194 | } |
632 | append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options); | 1195 | append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + |
633 | append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH | | 1196 | req->cryptlen - authsize, in_options); |
634 | FIFOLD_TYPE_LASTBOTH | | 1197 | |
635 | (encrypt ? FIFOLD_TYPE_MSG1OUT2 | 1198 | if (contig & GIV_DST_CONTIG) { |
636 | : FIFOLD_TYPE_MSG)); | 1199 | dst_dma = edesc->iv_dma; |
637 | |||
638 | /* specify destination */ | ||
639 | if (areq->src == areq->dst) { | ||
640 | dst_dma = src_dma; | ||
641 | } else { | 1200 | } else { |
642 | sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1, | 1201 | if (likely(req->src == req->dst)) { |
643 | DMA_FROM_DEVICE); | 1202 | dst_dma = src_dma + sizeof(struct link_tbl_entry) * |
644 | if (!edesc->dst_nents) { | 1203 | edesc->assoc_nents; |
645 | dst_dma = sg_dma_address(areq->dst); | 1204 | out_options = LDST_SGF; |
646 | options = 0; | ||
647 | } else { | 1205 | } else { |
648 | sg_to_link_tbl(areq->dst, edesc->dst_nents, | 1206 | dst_dma = edesc->link_tbl_dma + |
649 | edesc->link_tbl + edesc->assoc_nents + | 1207 | link_tbl_index * |
650 | edesc->src_nents, 0); | ||
651 | dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents + | ||
652 | edesc->src_nents) * | ||
653 | sizeof(struct link_tbl_entry); | 1208 | sizeof(struct link_tbl_entry); |
654 | options = LDST_SGF; | 1209 | out_options = LDST_SGF; |
655 | } | 1210 | } |
656 | } | 1211 | } |
657 | append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options); | ||
658 | append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA); | ||
659 | 1212 | ||
660 | /* ICV */ | 1213 | append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); |
661 | if (encrypt) | 1214 | } |
662 | append_seq_store(desc, authsize, LDST_CLASS_2_CCB | | 1215 | |
663 | LDST_SRCDST_BYTE_CONTEXT); | 1216 | /* |
664 | else | 1217 | * Fill in ablkcipher job descriptor |
665 | append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 | | 1218 | */ |
666 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | 1219 | static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, |
1220 | struct ablkcipher_edesc *edesc, | ||
1221 | struct ablkcipher_request *req, | ||
1222 | bool iv_contig) | ||
1223 | { | ||
1224 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1225 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
1226 | u32 *desc = edesc->hw_desc; | ||
1227 | u32 out_options = 0, in_options; | ||
1228 | dma_addr_t dst_dma, src_dma; | ||
1229 | int len, link_tbl_index = 0; | ||
667 | 1230 | ||
668 | #ifdef DEBUG | 1231 | #ifdef DEBUG |
669 | debug("job_desc_len %d\n", desc_len(desc)); | 1232 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", |
670 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | 1233 | DUMP_PREFIX_ADDRESS, 16, 4, req->info, |
671 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1); | 1234 | ivsize, 1); |
672 | print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ", | 1235 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", |
673 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | 1236 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), |
674 | edesc->link_tbl_bytes, 1); | 1237 | edesc->src_nents ? 100 : req->nbytes, 1); |
675 | #endif | 1238 | #endif |
676 | 1239 | ||
677 | ret = caam_jr_enqueue(jrdev, desc, callback, areq); | 1240 | len = desc_len(sh_desc); |
678 | if (!ret) | 1241 | init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); |
679 | ret = -EINPROGRESS; | 1242 | |
680 | else { | 1243 | if (iv_contig) { |
681 | ipsec_esp_unmap(jrdev, edesc, areq); | 1244 | src_dma = edesc->iv_dma; |
682 | kfree(edesc); | 1245 | in_options = 0; |
1246 | } else { | ||
1247 | src_dma = edesc->link_tbl_dma; | ||
1248 | link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; | ||
1249 | in_options = LDST_SGF; | ||
683 | } | 1250 | } |
1251 | append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); | ||
684 | 1252 | ||
685 | return ret; | 1253 | if (likely(req->src == req->dst)) { |
1254 | if (!edesc->src_nents && iv_contig) { | ||
1255 | dst_dma = sg_dma_address(req->src); | ||
1256 | } else { | ||
1257 | dst_dma = edesc->link_tbl_dma + | ||
1258 | sizeof(struct link_tbl_entry); | ||
1259 | out_options = LDST_SGF; | ||
1260 | } | ||
1261 | } else { | ||
1262 | if (!edesc->dst_nents) { | ||
1263 | dst_dma = sg_dma_address(req->dst); | ||
1264 | } else { | ||
1265 | dst_dma = edesc->link_tbl_dma + | ||
1266 | link_tbl_index * sizeof(struct link_tbl_entry); | ||
1267 | out_options = LDST_SGF; | ||
1268 | } | ||
1269 | } | ||
1270 | append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); | ||
686 | } | 1271 | } |
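The ablkcipher job programs its sequence lengths the same way, just without associated data; a short sketch with illustrative numbers:

	/*
	 * nbytes = 48, ivsize = 16: seq-in = 48 + 16 = 64 (IV || data),
	 * seq-out = 48 (transformed data only; the IV is not written back).
	 */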
687 | 1272 | ||
688 | /* | 1273 | /* |
689 | * derive number of elements in scatterlist | 1274 | * derive number of elements in scatterlist |
690 | */ | 1275 | */ |
691 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | 1276 | static int sg_count(struct scatterlist *sg_list, int nbytes) |
692 | { | 1277 | { |
693 | struct scatterlist *sg = sg_list; | 1278 | struct scatterlist *sg = sg_list; |
694 | int sg_nents = 0; | 1279 | int sg_nents = 0; |
695 | 1280 | ||
696 | *chained = 0; | ||
697 | while (nbytes > 0) { | 1281 | while (nbytes > 0) { |
698 | sg_nents++; | 1282 | sg_nents++; |
699 | nbytes -= sg->length; | 1283 | nbytes -= sg->length; |
700 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | 1284 | if (!sg_is_last(sg) && (sg + 1)->length == 0) |
701 | *chained = 1; | 1285 | BUG(); /* chaining is not supported */ |
702 | sg = scatterwalk_sg_next(sg); | 1286 | sg = scatterwalk_sg_next(sg); |
703 | } | 1287 | } |
704 | 1288 | ||
1289 | if (likely(sg_nents == 1)) | ||
1290 | return 0; | ||
1291 | |||
705 | return sg_nents; | 1292 | return sg_nents; |
706 | } | 1293 | } |
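The changed sg_count() contract is worth spelling out: a return of 0 now means a single contiguous segment (no link table needed), and chained scatterlists are rejected outright; a semantics sketch with illustrative inputs:

	/*
	 * sg_count(sg, nbytes):
	 *   one 100-byte segment, nbytes = 100 -> 0 (use sg_dma_address())
	 *   segments of 60 + 40,  nbytes = 100 -> 2 (build a link table)
	 *   chained sg (zero-length link entry) -> BUG()
	 */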
707 | 1294 | ||
708 | /* | 1295 | /* |
709 | * allocate and map the ipsec_esp extended descriptor | 1296 | * allocate and map the aead extended descriptor |
710 | */ | 1297 | */ |
711 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | 1298 | static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
712 | int desc_bytes) | 1299 | int desc_bytes, bool *all_contig_ptr) |
713 | { | 1300 | { |
714 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1301 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
715 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1302 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
716 | struct device *jrdev = ctx->jrdev; | 1303 | struct device *jrdev = ctx->jrdev; |
717 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1304 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
718 | GFP_ATOMIC; | 1305 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
719 | int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; | 1306 | int assoc_nents, src_nents, dst_nents = 0; |
720 | struct ipsec_esp_edesc *edesc; | 1307 | struct aead_edesc *edesc; |
721 | 1308 | dma_addr_t iv_dma = 0; | |
722 | assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); | 1309 | int sgc; |
723 | BUG_ON(chained); | 1310 | bool all_contig = true; |
724 | if (likely(assoc_nents == 1)) | 1311 | int ivsize = crypto_aead_ivsize(aead); |
725 | assoc_nents = 0; | 1312 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; |
726 | 1313 | ||
727 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, | 1314 | assoc_nents = sg_count(req->assoc, req->assoclen); |
728 | &chained); | 1315 | src_nents = sg_count(req->src, req->cryptlen); |
729 | BUG_ON(chained); | 1316 | |
730 | if (src_nents == 1) | 1317 | if (unlikely(req->dst != req->src)) |
731 | src_nents = 0; | 1318 | dst_nents = sg_count(req->dst, req->cryptlen); |
732 | 1319 | ||
733 | if (unlikely(areq->dst != areq->src)) { | 1320 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, |
734 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, | 1321 | DMA_BIDIRECTIONAL); |
735 | &chained); | 1322 | if (likely(req->src == req->dst)) { |
736 | BUG_ON(chained); | 1323 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, |
737 | if (dst_nents == 1) | 1324 | DMA_BIDIRECTIONAL); |
738 | dst_nents = 0; | 1325 | } else { |
1326 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1327 | DMA_TO_DEVICE); | ||
1328 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1329 | DMA_FROM_DEVICE); | ||
739 | } | 1330 | } |
740 | 1331 | ||
741 | link_tbl_bytes = (assoc_nents + src_nents + dst_nents) * | 1332 | /* Check if data are contiguous */ |
742 | sizeof(struct link_tbl_entry); | 1333 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); |
743 | debug("link_tbl_bytes %d\n", link_tbl_bytes); | 1334 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != |
1335 | iv_dma || src_nents || iv_dma + ivsize != | ||
1336 | sg_dma_address(req->src)) { | ||
1337 | all_contig = false; | ||
1338 | assoc_nents = assoc_nents ? : 1; | ||
1339 | src_nents = src_nents ? : 1; | ||
1340 | link_tbl_len = assoc_nents + 1 + src_nents; | ||
1341 | } | ||
1342 | link_tbl_len += dst_nents; | ||
1343 | |||
1344 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | ||
744 | 1345 | ||
745 | /* allocate space for base edesc and hw desc commands, link tables */ | 1346 | /* allocate space for base edesc and hw desc commands, link tables */ |
746 | edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + | 1347 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + |
747 | link_tbl_bytes, GFP_DMA | flags); | 1348 | link_tbl_bytes, GFP_DMA | flags); |
748 | if (!edesc) { | 1349 | if (!edesc) { |
749 | dev_err(jrdev, "could not allocate extended descriptor\n"); | 1350 | dev_err(jrdev, "could not allocate extended descriptor\n"); |
@@ -753,142 +1354,450 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | |||
753 | edesc->assoc_nents = assoc_nents; | 1354 | edesc->assoc_nents = assoc_nents; |
754 | edesc->src_nents = src_nents; | 1355 | edesc->src_nents = src_nents; |
755 | edesc->dst_nents = dst_nents; | 1356 | edesc->dst_nents = dst_nents; |
756 | edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + | 1357 | edesc->iv_dma = iv_dma; |
1358 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1359 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | ||
757 | desc_bytes; | 1360 | desc_bytes; |
758 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | 1361 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, |
759 | link_tbl_bytes, DMA_TO_DEVICE); | 1362 | link_tbl_bytes, DMA_TO_DEVICE); |
760 | edesc->link_tbl_bytes = link_tbl_bytes; | 1363 | *all_contig_ptr = all_contig; |
1364 | |||
1365 | link_tbl_index = 0; | ||
1366 | if (!all_contig) { | ||
1367 | sg_to_link_tbl(req->assoc, | ||
1368 | (assoc_nents ? : 1), | ||
1369 | edesc->link_tbl + | ||
1370 | link_tbl_index, 0); | ||
1371 | link_tbl_index += assoc_nents ? : 1; | ||
1372 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1373 | iv_dma, ivsize, 0); | ||
1374 | link_tbl_index += 1; | ||
1375 | sg_to_link_tbl_last(req->src, | ||
1376 | (src_nents ? : 1), | ||
1377 | edesc->link_tbl + | ||
1378 | link_tbl_index, 0); | ||
1379 | link_tbl_index += src_nents ? : 1; | ||
1380 | } | ||
1381 | if (dst_nents) { | ||
1382 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1383 | edesc->link_tbl + link_tbl_index, 0); | ||
1384 | } | ||
761 | 1385 | ||
762 | return edesc; | 1386 | return edesc; |
763 | } | 1387 | } |
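A worked instance of the all_contig test in aead_edesc_alloc() above (the DMA addresses are illustrative):

	/*
	 * assoc mapped at 0x1000, assoclen = 16 -> ends at 0x1010
	 * iv_dma = 0x1010, ivsize = 16          -> ends at 0x1020
	 * src mapped at 0x1020, single segment
	 *
	 * assoc, IV and src are back-to-back: all_contig stays true and the
	 * job descriptor points straight at sg_dma_address(req->assoc).
	 * Any gap, or a multi-segment assoc/src, forces the link-table path
	 * laid out as [assoc entries][IV entry][src entries][dst entries].
	 */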
764 | 1388 | ||
765 | static int aead_authenc_encrypt(struct aead_request *areq) | 1389 | static int aead_encrypt(struct aead_request *req) |
766 | { | 1390 | { |
767 | struct ipsec_esp_edesc *edesc; | 1391 | struct aead_edesc *edesc; |
768 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1392 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
769 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1393 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
770 | struct device *jrdev = ctx->jrdev; | 1394 | struct device *jrdev = ctx->jrdev; |
771 | int ivsize = crypto_aead_ivsize(aead); | 1395 | bool all_contig; |
772 | u32 *desc; | 1396 | u32 *desc; |
773 | dma_addr_t iv_dma; | 1397 | int ret = 0; |
1398 | |||
1399 | req->cryptlen += ctx->authsize; | ||
774 | 1400 | ||
775 | /* allocate extended descriptor */ | 1401 | /* allocate extended descriptor */ |
776 | edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN * | 1402 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
777 | CAAM_CMD_SZ); | 1403 | CAAM_CMD_SZ, &all_contig); |
778 | if (IS_ERR(edesc)) | 1404 | if (IS_ERR(edesc)) |
779 | return PTR_ERR(edesc); | 1405 | return PTR_ERR(edesc); |
780 | 1406 | ||
781 | desc = edesc->hw_desc; | 1407 | /* Create and submit job descriptor */ |
782 | 1408 | init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, | |
783 | /* insert shared descriptor pointer */ | 1409 | all_contig, true); |
784 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1410 | #ifdef DEBUG |
785 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | 1411 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", |
786 | 1412 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | |
787 | iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); | 1413 | desc_bytes(edesc->hw_desc), 1); |
788 | /* check dma error */ | 1414 | #endif |
789 | 1415 | ||
790 | append_load(desc, iv_dma, ivsize, | 1416 | desc = edesc->hw_desc; |
791 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | 1417 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
1418 | if (!ret) { | ||
1419 | ret = -EINPROGRESS; | ||
1420 | } else { | ||
1421 | aead_unmap(jrdev, edesc, req); | ||
1422 | kfree(edesc); | ||
1423 | } | ||
792 | 1424 | ||
793 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | 1425 | return ret; |
794 | } | 1426 | } |
795 | 1427 | ||
796 | static int aead_authenc_decrypt(struct aead_request *req) | 1428 | static int aead_decrypt(struct aead_request *req) |
797 | { | 1429 | { |
1430 | struct aead_edesc *edesc; | ||
798 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1431 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
799 | int ivsize = crypto_aead_ivsize(aead); | ||
800 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1432 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
801 | struct device *jrdev = ctx->jrdev; | 1433 | struct device *jrdev = ctx->jrdev; |
802 | struct ipsec_esp_edesc *edesc; | 1434 | bool all_contig; |
803 | u32 *desc; | 1435 | u32 *desc; |
804 | dma_addr_t iv_dma; | 1436 | int ret = 0; |
805 | |||
806 | req->cryptlen -= ctx->authsize; | ||
807 | 1437 | ||
808 | /* allocate extended descriptor */ | 1438 | /* allocate extended descriptor */ |
809 | edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN * | 1439 | edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * |
810 | CAAM_CMD_SZ); | 1440 | CAAM_CMD_SZ, &all_contig); |
811 | if (IS_ERR(edesc)) | 1441 | if (IS_ERR(edesc)) |
812 | return PTR_ERR(edesc); | 1442 | return PTR_ERR(edesc); |
813 | 1443 | ||
1444 | #ifdef DEBUG | ||
1445 | print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", | ||
1446 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
1447 | req->cryptlen, 1); | ||
1448 | #endif | ||
1449 | |||
1450 | /* Create and submit job descriptor*/ | ||
1451 | init_aead_job(ctx->sh_desc_dec, | ||
1452 | ctx->sh_desc_dec_dma, edesc, req, all_contig, false); | ||
1453 | #ifdef DEBUG | ||
1454 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | ||
1455 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1456 | desc_bytes(edesc->hw_desc), 1); | ||
1457 | #endif | ||
1458 | |||
814 | desc = edesc->hw_desc; | 1459 | desc = edesc->hw_desc; |
1460 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | ||
1461 | if (!ret) { | ||
1462 | ret = -EINPROGRESS; | ||
1463 | } else { | ||
1464 | aead_unmap(jrdev, edesc, req); | ||
1465 | kfree(edesc); | ||
1466 | } | ||
815 | 1467 | ||
816 | /* insert shared descriptor pointer */ | 1468 | return ret; |
817 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1469 | } |
818 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
819 | 1470 | ||
820 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | 1471 | /* |
821 | /* check dma error */ | 1472 | * allocate and map the aead extended descriptor for aead givencrypt |
1473 | */ | ||
1474 | static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request | ||
1475 | *greq, int desc_bytes, | ||
1476 | u32 *contig_ptr) | ||
1477 | { | ||
1478 | struct aead_request *req = &greq->areq; | ||
1479 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
1480 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | ||
1481 | struct device *jrdev = ctx->jrdev; | ||
1482 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1483 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | ||
1484 | int assoc_nents, src_nents, dst_nents = 0; | ||
1485 | struct aead_edesc *edesc; | ||
1486 | dma_addr_t iv_dma = 0; | ||
1487 | int sgc; | ||
1488 | u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; | ||
1489 | int ivsize = crypto_aead_ivsize(aead); | ||
1490 | int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; | ||
1491 | |||
1492 | assoc_nents = sg_count(req->assoc, req->assoclen); | ||
1493 | src_nents = sg_count(req->src, req->cryptlen); | ||
822 | 1494 | ||
823 | append_load(desc, iv_dma, ivsize, | 1495 | if (unlikely(req->dst != req->src)) |
824 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | 1496 | dst_nents = sg_count(req->dst, req->cryptlen); |
825 | 1497 | ||
826 | return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); | 1498 | sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, |
1499 | DMA_BIDIRECTIONAL); | ||
1500 | if (likely(req->src == req->dst)) { | ||
1501 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1502 | DMA_BIDIRECTIONAL); | ||
1503 | } else { | ||
1504 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1505 | DMA_TO_DEVICE); | ||
1506 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1507 | DMA_FROM_DEVICE); | ||
1508 | } | ||
1509 | |||
1510 | /* Check if data are contiguous */ | ||
1511 | iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); | ||
1512 | if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != | ||
1513 | iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) | ||
1514 | contig &= ~GIV_SRC_CONTIG; | ||
1515 | if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) | ||
1516 | contig &= ~GIV_DST_CONTIG; | ||
1517 | if (unlikely(req->src != req->dst)) { | ||
1518 | dst_nents = dst_nents ? : 1; | ||
1519 | link_tbl_len += 1; | ||
1520 | } | ||
1521 | if (!(contig & GIV_SRC_CONTIG)) { | ||
1522 | assoc_nents = assoc_nents ? : 1; | ||
1523 | src_nents = src_nents ? : 1; | ||
1524 | link_tbl_len += assoc_nents + 1 + src_nents; | ||
1525 | if (likely(req->src == req->dst)) | ||
1526 | contig &= ~GIV_DST_CONTIG; | ||
1527 | } | ||
1528 | link_tbl_len += dst_nents; | ||
1529 | |||
1530 | link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); | ||
1531 | |||
1532 | /* allocate space for base edesc and hw desc commands, link tables */ | ||
1533 | edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + | ||
1534 | link_tbl_bytes, GFP_DMA | flags); | ||
1535 | if (!edesc) { | ||
1536 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1537 | return ERR_PTR(-ENOMEM); | ||
1538 | } | ||
1539 | |||
1540 | edesc->assoc_nents = assoc_nents; | ||
1541 | edesc->src_nents = src_nents; | ||
1542 | edesc->dst_nents = dst_nents; | ||
1543 | edesc->iv_dma = iv_dma; | ||
1544 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1545 | edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + | ||
1546 | desc_bytes; | ||
1547 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | ||
1548 | link_tbl_bytes, DMA_TO_DEVICE); | ||
1549 | *contig_ptr = contig; | ||
1550 | |||
1551 | link_tbl_index = 0; | ||
1552 | if (!(contig & GIV_SRC_CONTIG)) { | ||
1553 | sg_to_link_tbl(req->assoc, assoc_nents, | ||
1554 | edesc->link_tbl + | ||
1555 | link_tbl_index, 0); | ||
1556 | link_tbl_index += assoc_nents; | ||
1557 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1558 | iv_dma, ivsize, 0); | ||
1559 | link_tbl_index += 1; | ||
1560 | sg_to_link_tbl_last(req->src, src_nents, | ||
1561 | edesc->link_tbl + | ||
1562 | link_tbl_index, 0); | ||
1563 | link_tbl_index += src_nents; | ||
1564 | } | ||
1565 | if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { | ||
1566 | sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, | ||
1567 | iv_dma, ivsize, 0); | ||
1568 | link_tbl_index += 1; | ||
1569 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1570 | edesc->link_tbl + link_tbl_index, 0); | ||
1571 | } | ||
1572 | |||
1573 | return edesc; | ||
827 | } | 1574 | } |
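A hedged reading of the contig bookkeeping above: GIV_SRC_CONTIG and GIV_DST_CONTIG are tracked independently because the generated IV must sit both in front of the source (so it is authenticated) and at the head of the destination (so the caller gets it back); the cases, as a sketch:

	/*
	 * GIV_SRC_CONTIG set: assoc | IV | src are back-to-back in DMA
	 *   space, so seq-in can be a flat pointer.
	 * GIV_DST_CONTIG set: IV | dst are back-to-back, so seq-out starts
	 *   at iv_dma and the engine writes IV then ciphertext contiguously.
	 * In-place (src == dst) with a non-contiguous source also clears
	 *   GIV_DST_CONTIG, since both sides then share the link table.
	 */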
828 | 1575 | ||
829 | static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) | 1576 | static int aead_givencrypt(struct aead_givcrypt_request *areq) |
830 | { | 1577 | { |
831 | struct aead_request *areq = &req->areq; | 1578 | struct aead_request *req = &areq->areq; |
832 | struct ipsec_esp_edesc *edesc; | 1579 | struct aead_edesc *edesc; |
833 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1580 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
834 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1581 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
835 | struct device *jrdev = ctx->jrdev; | 1582 | struct device *jrdev = ctx->jrdev; |
836 | int ivsize = crypto_aead_ivsize(aead); | 1583 | u32 contig; |
837 | dma_addr_t iv_dma; | ||
838 | u32 *desc; | 1584 | u32 *desc; |
1585 | int ret = 0; | ||
839 | 1586 | ||
840 | iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); | 1587 | req->cryptlen += ctx->authsize; |
841 | |||
842 | debug("%s: giv %p\n", __func__, req->giv); | ||
843 | 1588 | ||
844 | /* allocate extended descriptor */ | 1589 | /* allocate extended descriptor */ |
845 | edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN * | 1590 | edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * |
846 | CAAM_CMD_SZ); | 1591 | CAAM_CMD_SZ, &contig); |
1592 | |||
847 | if (IS_ERR(edesc)) | 1593 | if (IS_ERR(edesc)) |
848 | return PTR_ERR(edesc); | 1594 | return PTR_ERR(edesc); |
849 | 1595 | ||
1596 | #ifdef DEBUG | ||
1597 | print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", | ||
1598 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), | ||
1599 | req->cryptlen, 1); | ||
1600 | #endif | ||
1601 | |||
1602 | /* Create and submit job descriptor */ | ||
1603 | init_aead_giv_job(ctx->sh_desc_givenc, | ||
1604 | ctx->sh_desc_givenc_dma, edesc, req, contig); | ||
1605 | #ifdef DEBUG | ||
1606 | print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", | ||
1607 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1608 | desc_bytes(edesc->hw_desc), 1); | ||
1609 | #endif | ||
1610 | |||
850 | desc = edesc->hw_desc; | 1611 | desc = edesc->hw_desc; |
1612 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | ||
1613 | if (!ret) { | ||
1614 | ret = -EINPROGRESS; | ||
1615 | } else { | ||
1616 | aead_unmap(jrdev, edesc, req); | ||
1617 | kfree(edesc); | ||
1618 | } | ||
851 | 1619 | ||
852 | /* insert shared descriptor pointer */ | 1620 | return ret; |
853 | init_job_desc_shared(desc, ctx->shared_desc_phys, | 1621 | } |
854 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | ||
855 | 1622 | ||
856 | /* | 1623 | /* |
857 | * LOAD IMM Info FIFO | 1624 | * allocate and map the ablkcipher extended descriptor for ablkcipher |
858 | * to DECO, Last, Padding, Random, Message, 16 bytes | 1625 | */ |
859 | */ | 1626 | static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request |
860 | append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | | 1627 | *req, int desc_bytes, |
861 | NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | | 1628 | bool *iv_contig_out) |
862 | NFIFOENTRY_PTYPE_RND | ivsize, | 1629 | { |
863 | LDST_SRCDST_WORD_INFO_FIFO); | 1630 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
1631 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1632 | struct device *jrdev = ctx->jrdev; | ||
1633 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | ||
1634 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | ||
1635 | GFP_KERNEL : GFP_ATOMIC; | ||
1636 | int src_nents, dst_nents = 0, link_tbl_bytes; | ||
1637 | struct ablkcipher_edesc *edesc; | ||
1638 | dma_addr_t iv_dma = 0; | ||
1639 | bool iv_contig = false; | ||
1640 | int sgc; | ||
1641 | int ivsize = crypto_ablkcipher_ivsize(ablkcipher); | ||
1642 | int link_tbl_index; | ||
1643 | |||
1644 | src_nents = sg_count(req->src, req->nbytes); | ||
1645 | |||
1646 | if (unlikely(req->dst != req->src)) | ||
1647 | dst_nents = sg_count(req->dst, req->nbytes); | ||
1648 | |||
1649 | if (likely(req->src == req->dst)) { | ||
1650 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1651 | DMA_BIDIRECTIONAL); | ||
1652 | } else { | ||
1653 | sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, | ||
1654 | DMA_TO_DEVICE); | ||
1655 | sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, | ||
1656 | DMA_FROM_DEVICE); | ||
1657 | } | ||
864 | 1658 | ||
865 | /* | 1659 | /* |
866 | * disable info fifo entries since the above serves as the entry | 1660 | * Check if iv can be contiguous with source and destination. |
867 | * this way, the MOVE command won't generate an entry. | 1661 | * If so, include it. If not, create scatterlist. |
868 | * Note that this isn't required in more recent versions of | ||
869 | * SEC as a MOVE that doesn't do info FIFO entries is available. | ||
870 | */ | 1662 | */ |
871 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | 1663 | iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); |
1664 | if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) | ||
1665 | iv_contig = true; | ||
1666 | else | ||
1667 | src_nents = src_nents ? : 1; | ||
1668 | link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * | ||
1669 | sizeof(struct link_tbl_entry); | ||
872 | 1670 | ||
873 | /* MOVE DECO Alignment -> C1 Context 16 bytes */ | 1671 | /* allocate space for base edesc and hw desc commands, link tables */ |
874 | append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize); | 1672 | edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + |
1673 | link_tbl_bytes, GFP_DMA | flags); | ||
1674 | if (!edesc) { | ||
1675 | dev_err(jrdev, "could not allocate extended descriptor\n"); | ||
1676 | return ERR_PTR(-ENOMEM); | ||
1677 | } | ||
875 | 1678 | ||
876 | /* re-enable info fifo entries */ | 1679 | edesc->src_nents = src_nents; |
877 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | 1680 | edesc->dst_nents = dst_nents; |
1681 | edesc->link_tbl_bytes = link_tbl_bytes; | ||
1682 | edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + | ||
1683 | desc_bytes; | ||
1684 | |||
1685 | link_tbl_index = 0; | ||
1686 | if (!iv_contig) { | ||
1687 | sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); | ||
1688 | sg_to_link_tbl_last(req->src, src_nents, | ||
1689 | edesc->link_tbl + 1, 0); | ||
1690 | link_tbl_index += 1 + src_nents; | ||
1691 | } | ||
1692 | |||
1693 | if (unlikely(dst_nents)) { | ||
1694 | sg_to_link_tbl_last(req->dst, dst_nents, | ||
1695 | edesc->link_tbl + link_tbl_index, 0); | ||
1696 | } | ||
1697 | |||
1698 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | ||
1699 | link_tbl_bytes, DMA_TO_DEVICE); | ||
1700 | edesc->iv_dma = iv_dma; | ||
1701 | |||
1702 | #ifdef DEBUG | ||
1703 | print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", | ||
1704 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | ||
1705 | link_tbl_bytes, 1); | ||
1706 | #endif | ||
1707 | |||
1708 | *iv_contig_out = iv_contig; | ||
1709 | return edesc; | ||
1710 | } | ||
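
The iv_contig test above is the heart of this function: the IV is DMA-mapped just before the source is examined, and the link table is skipped only when the source is a single segment starting exactly at iv_dma + ivsize, so the hardware can read IV||payload as one flat buffer. Restated as a predicate (names illustrative; src_nents == 0 is this driver's encoding for a one-segment scatterlist):

    /* Sketch: can the hardware consume IV||payload without a link table? */
    static inline bool iv_and_src_contig(dma_addr_t iv_dma, int ivsize,
                                         int src_nents, dma_addr_t src_dma)
    {
            return src_nents == 0 && iv_dma + ivsize == src_dma;
    }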
1711 | |||
1712 | static int ablkcipher_encrypt(struct ablkcipher_request *req) | ||
1713 | { | ||
1714 | struct ablkcipher_edesc *edesc; | ||
1715 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1716 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1717 | struct device *jrdev = ctx->jrdev; | ||
1718 | bool iv_contig; | ||
1719 | u32 *desc; | ||
1720 | int ret = 0; | ||
1721 | |||
1722 | /* allocate extended descriptor */ | ||
1723 | edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * | ||
1724 | CAAM_CMD_SZ, &iv_contig); | ||
1725 | if (IS_ERR(edesc)) | ||
1726 | return PTR_ERR(edesc); | ||
878 | 1727 | ||
879 | /* MOVE C1 Context -> OFIFO 16 bytes */ | 1728 | /* Create and submit job descriptor */ |
880 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize); | 1729 | init_ablkcipher_job(ctx->sh_desc_enc, |
1730 | ctx->sh_desc_enc_dma, edesc, req, iv_contig); | ||
1731 | #ifdef DEBUG | ||
1732 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", | ||
1733 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1734 | desc_bytes(edesc->hw_desc), 1); | ||
1735 | #endif | ||
1736 | desc = edesc->hw_desc; | ||
1737 | ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); | ||
881 | 1738 | ||
882 | append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); | 1739 | if (!ret) { |
1740 | ret = -EINPROGRESS; | ||
1741 | } else { | ||
1742 | ablkcipher_unmap(jrdev, edesc, req); | ||
1743 | kfree(edesc); | ||
1744 | } | ||
883 | 1745 | ||
884 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | 1746 | return ret; |
885 | } | 1747 | } |
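
One ownership rule runs through every submit path in this file: a zero return from caam_jr_enqueue() means the job ring now owns the extended descriptor and the completion callback will unmap and free it, so the submitter reports -EINPROGRESS; any other return means no callback will ever fire and the submitter must clean up itself. Schematically (names as in the diff):

    ret = caam_jr_enqueue(jrdev, desc, done_callback, req);
    if (!ret)
            return -EINPROGRESS;    /* callback now owns edesc */
    /* enqueue failed: no completion coming, so release everything here */
    aead_unmap(jrdev, edesc, req);  /* or ablkcipher_unmap() */
    kfree(edesc);
    return ret;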
886 | 1748 | ||
1749 | static int ablkcipher_decrypt(struct ablkcipher_request *req) | ||
1750 | { | ||
1751 | struct ablkcipher_edesc *edesc; | ||
1752 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | ||
1753 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | ||
1754 | struct device *jrdev = ctx->jrdev; | ||
1755 | bool iv_contig; | ||
1756 | u32 *desc; | ||
1757 | int ret = 0; | ||
1758 | |||
1759 | /* allocate extended descriptor */ | ||
1760 | edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * | ||
1761 | CAAM_CMD_SZ, &iv_contig); | ||
1762 | if (IS_ERR(edesc)) | ||
1763 | return PTR_ERR(edesc); | ||
1764 | |||
1765 | /* Create and submit job descriptor */ | ||
1766 | init_ablkcipher_job(ctx->sh_desc_dec, | ||
1767 | ctx->sh_desc_dec_dma, edesc, req, iv_contig); | ||
1768 | desc = edesc->hw_desc; | ||
1769 | #ifdef DEBUG | ||
1770 | print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", | ||
1771 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | ||
1772 | desc_bytes(edesc->hw_desc), 1); | ||
1773 | #endif | ||
1774 | |||
1775 | ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); | ||
1776 | if (!ret) { | ||
1777 | ret = -EINPROGRESS; | ||
1778 | } else { | ||
1779 | ablkcipher_unmap(jrdev, edesc, req); | ||
1780 | kfree(edesc); | ||
1781 | } | ||
1782 | |||
1783 | return ret; | ||
1784 | } | ||
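
For context, here is a minimal caller of the new ablkcipher entries through the era's kernel crypto API. This is a hedged sketch, not part of the patch: the demo_* names are invented, and it waits synchronously the way tcrypt-style test code does.

    #include <linux/crypto.h>
    #include <linux/completion.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_result {
            struct completion done;
            int err;
    };

    static void demo_complete(struct crypto_async_request *areq, int err)
    {
            struct demo_result *res = areq->data;

            if (err == -EINPROGRESS)
                    return;                 /* backlogged request started; keep waiting */
            res->err = err;
            complete(&res->done);
    }

    /* Encrypt nbytes from src to dst with cbc(aes); caam wins by priority. */
    static int demo_cbc_aes_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
                                    struct scatterlist *src,
                                    struct scatterlist *dst, unsigned int nbytes)
    {
            struct crypto_ablkcipher *tfm;
            struct ablkcipher_request *req;
            struct demo_result res;
            int err;

            tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            err = crypto_ablkcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out_tfm;
            req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_tfm;
            }
            init_completion(&res.done);
            ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                            demo_complete, &res);
            ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
            err = crypto_ablkcipher_encrypt(req);
            if (err == -EINPROGRESS || err == -EBUSY) {
                    wait_for_completion(&res.done);
                    err = res.err;
            }
            ablkcipher_request_free(req);
    out_tfm:
            crypto_free_ablkcipher(tfm);
            return err;
    }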
1785 | |||
1786 | #define template_aead template_u.aead | ||
1787 | #define template_ablkcipher template_u.ablkcipher | ||
887 | struct caam_alg_template { | 1788 | struct caam_alg_template { |
888 | char name[CRYPTO_MAX_ALG_NAME]; | 1789 | char name[CRYPTO_MAX_ALG_NAME]; |
889 | char driver_name[CRYPTO_MAX_ALG_NAME]; | 1790 | char driver_name[CRYPTO_MAX_ALG_NAME]; |
890 | unsigned int blocksize; | 1791 | unsigned int blocksize; |
891 | struct aead_alg aead; | 1792 | u32 type; |
1793 | union { | ||
1794 | struct ablkcipher_alg ablkcipher; | ||
1795 | struct aead_alg aead; | ||
1796 | struct blkcipher_alg blkcipher; | ||
1797 | struct cipher_alg cipher; | ||
1798 | struct compress_alg compress; | ||
1799 | struct rng_alg rng; | ||
1800 | } template_u; | ||
892 | u32 class1_alg_type; | 1801 | u32 class1_alg_type; |
893 | u32 class2_alg_type; | 1802 | u32 class2_alg_type; |
894 | u32 alg_op; | 1803 | u32 alg_op; |
@@ -900,12 +1809,13 @@ static struct caam_alg_template driver_algs[] = { | |||
900 | .name = "authenc(hmac(sha1),cbc(aes))", | 1809 | .name = "authenc(hmac(sha1),cbc(aes))", |
901 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", | 1810 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", |
902 | .blocksize = AES_BLOCK_SIZE, | 1811 | .blocksize = AES_BLOCK_SIZE, |
903 | .aead = { | 1812 | .type = CRYPTO_ALG_TYPE_AEAD, |
904 | .setkey = aead_authenc_setkey, | 1813 | .template_aead = { |
905 | .setauthsize = aead_authenc_setauthsize, | 1814 | .setkey = aead_setkey, |
906 | .encrypt = aead_authenc_encrypt, | 1815 | .setauthsize = aead_setauthsize, |
907 | .decrypt = aead_authenc_decrypt, | 1816 | .encrypt = aead_encrypt, |
908 | .givencrypt = aead_authenc_givencrypt, | 1817 | .decrypt = aead_decrypt, |
1818 | .givencrypt = aead_givencrypt, | ||
909 | .geniv = "<built-in>", | 1819 | .geniv = "<built-in>", |
910 | .ivsize = AES_BLOCK_SIZE, | 1820 | .ivsize = AES_BLOCK_SIZE, |
911 | .maxauthsize = SHA1_DIGEST_SIZE, | 1821 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -918,12 +1828,13 @@ static struct caam_alg_template driver_algs[] = { | |||
918 | .name = "authenc(hmac(sha256),cbc(aes))", | 1828 | .name = "authenc(hmac(sha256),cbc(aes))", |
919 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", | 1829 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", |
920 | .blocksize = AES_BLOCK_SIZE, | 1830 | .blocksize = AES_BLOCK_SIZE, |
921 | .aead = { | 1831 | .type = CRYPTO_ALG_TYPE_AEAD, |
922 | .setkey = aead_authenc_setkey, | 1832 | .template_aead = { |
923 | .setauthsize = aead_authenc_setauthsize, | 1833 | .setkey = aead_setkey, |
924 | .encrypt = aead_authenc_encrypt, | 1834 | .setauthsize = aead_setauthsize, |
925 | .decrypt = aead_authenc_decrypt, | 1835 | .encrypt = aead_encrypt, |
926 | .givencrypt = aead_authenc_givencrypt, | 1836 | .decrypt = aead_decrypt, |
1837 | .givencrypt = aead_givencrypt, | ||
927 | .geniv = "<built-in>", | 1838 | .geniv = "<built-in>", |
928 | .ivsize = AES_BLOCK_SIZE, | 1839 | .ivsize = AES_BLOCK_SIZE, |
929 | .maxauthsize = SHA256_DIGEST_SIZE, | 1840 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -937,12 +1848,13 @@ static struct caam_alg_template driver_algs[] = { | |||
937 | .name = "authenc(hmac(sha512),cbc(aes))", | 1848 | .name = "authenc(hmac(sha512),cbc(aes))", |
938 | .driver_name = "authenc-hmac-sha512-cbc-aes-caam", | 1849 | .driver_name = "authenc-hmac-sha512-cbc-aes-caam", |
939 | .blocksize = AES_BLOCK_SIZE, | 1850 | .blocksize = AES_BLOCK_SIZE, |
940 | .aead = { | 1851 | .type = CRYPTO_ALG_TYPE_AEAD, |
941 | .setkey = aead_authenc_setkey, | 1852 | .template_aead = { |
942 | .setauthsize = aead_authenc_setauthsize, | 1853 | .setkey = aead_setkey, |
943 | .encrypt = aead_authenc_encrypt, | 1854 | .setauthsize = aead_setauthsize, |
944 | .decrypt = aead_authenc_decrypt, | 1855 | .encrypt = aead_encrypt, |
945 | .givencrypt = aead_authenc_givencrypt, | 1856 | .decrypt = aead_decrypt, |
1857 | .givencrypt = aead_givencrypt, | ||
946 | .geniv = "<built-in>", | 1858 | .geniv = "<built-in>", |
947 | .ivsize = AES_BLOCK_SIZE, | 1859 | .ivsize = AES_BLOCK_SIZE, |
948 | .maxauthsize = SHA512_DIGEST_SIZE, | 1860 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -956,12 +1868,13 @@ static struct caam_alg_template driver_algs[] = { | |||
956 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | 1868 | .name = "authenc(hmac(sha1),cbc(des3_ede))", |
957 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", | 1869 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", |
958 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1870 | .blocksize = DES3_EDE_BLOCK_SIZE, |
959 | .aead = { | 1871 | .type = CRYPTO_ALG_TYPE_AEAD, |
960 | .setkey = aead_authenc_setkey, | 1872 | .template_aead = { |
961 | .setauthsize = aead_authenc_setauthsize, | 1873 | .setkey = aead_setkey, |
962 | .encrypt = aead_authenc_encrypt, | 1874 | .setauthsize = aead_setauthsize, |
963 | .decrypt = aead_authenc_decrypt, | 1875 | .encrypt = aead_encrypt, |
964 | .givencrypt = aead_authenc_givencrypt, | 1876 | .decrypt = aead_decrypt, |
1877 | .givencrypt = aead_givencrypt, | ||
965 | .geniv = "<built-in>", | 1878 | .geniv = "<built-in>", |
966 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1879 | .ivsize = DES3_EDE_BLOCK_SIZE, |
967 | .maxauthsize = SHA1_DIGEST_SIZE, | 1880 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -974,12 +1887,13 @@ static struct caam_alg_template driver_algs[] = { | |||
974 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | 1887 | .name = "authenc(hmac(sha256),cbc(des3_ede))", |
975 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", | 1888 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", |
976 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1889 | .blocksize = DES3_EDE_BLOCK_SIZE, |
977 | .aead = { | 1890 | .type = CRYPTO_ALG_TYPE_AEAD, |
978 | .setkey = aead_authenc_setkey, | 1891 | .template_aead = { |
979 | .setauthsize = aead_authenc_setauthsize, | 1892 | .setkey = aead_setkey, |
980 | .encrypt = aead_authenc_encrypt, | 1893 | .setauthsize = aead_setauthsize, |
981 | .decrypt = aead_authenc_decrypt, | 1894 | .encrypt = aead_encrypt, |
982 | .givencrypt = aead_authenc_givencrypt, | 1895 | .decrypt = aead_decrypt, |
1896 | .givencrypt = aead_givencrypt, | ||
983 | .geniv = "<built-in>", | 1897 | .geniv = "<built-in>", |
984 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1898 | .ivsize = DES3_EDE_BLOCK_SIZE, |
985 | .maxauthsize = SHA256_DIGEST_SIZE, | 1899 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -993,12 +1907,13 @@ static struct caam_alg_template driver_algs[] = { | |||
993 | .name = "authenc(hmac(sha512),cbc(des3_ede))", | 1907 | .name = "authenc(hmac(sha512),cbc(des3_ede))", |
994 | .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", | 1908 | .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", |
995 | .blocksize = DES3_EDE_BLOCK_SIZE, | 1909 | .blocksize = DES3_EDE_BLOCK_SIZE, |
996 | .aead = { | 1910 | .type = CRYPTO_ALG_TYPE_AEAD, |
997 | .setkey = aead_authenc_setkey, | 1911 | .template_aead = { |
998 | .setauthsize = aead_authenc_setauthsize, | 1912 | .setkey = aead_setkey, |
999 | .encrypt = aead_authenc_encrypt, | 1913 | .setauthsize = aead_setauthsize, |
1000 | .decrypt = aead_authenc_decrypt, | 1914 | .encrypt = aead_encrypt, |
1001 | .givencrypt = aead_authenc_givencrypt, | 1915 | .decrypt = aead_decrypt, |
1916 | .givencrypt = aead_givencrypt, | ||
1002 | .geniv = "<built-in>", | 1917 | .geniv = "<built-in>", |
1003 | .ivsize = DES3_EDE_BLOCK_SIZE, | 1918 | .ivsize = DES3_EDE_BLOCK_SIZE, |
1004 | .maxauthsize = SHA512_DIGEST_SIZE, | 1919 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -1012,12 +1927,13 @@ static struct caam_alg_template driver_algs[] = { | |||
1012 | .name = "authenc(hmac(sha1),cbc(des))", | 1927 | .name = "authenc(hmac(sha1),cbc(des))", |
1013 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", | 1928 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", |
1014 | .blocksize = DES_BLOCK_SIZE, | 1929 | .blocksize = DES_BLOCK_SIZE, |
1015 | .aead = { | 1930 | .type = CRYPTO_ALG_TYPE_AEAD, |
1016 | .setkey = aead_authenc_setkey, | 1931 | .template_aead = { |
1017 | .setauthsize = aead_authenc_setauthsize, | 1932 | .setkey = aead_setkey, |
1018 | .encrypt = aead_authenc_encrypt, | 1933 | .setauthsize = aead_setauthsize, |
1019 | .decrypt = aead_authenc_decrypt, | 1934 | .encrypt = aead_encrypt, |
1020 | .givencrypt = aead_authenc_givencrypt, | 1935 | .decrypt = aead_decrypt, |
1936 | .givencrypt = aead_givencrypt, | ||
1021 | .geniv = "<built-in>", | 1937 | .geniv = "<built-in>", |
1022 | .ivsize = DES_BLOCK_SIZE, | 1938 | .ivsize = DES_BLOCK_SIZE, |
1023 | .maxauthsize = SHA1_DIGEST_SIZE, | 1939 | .maxauthsize = SHA1_DIGEST_SIZE, |
@@ -1030,12 +1946,13 @@ static struct caam_alg_template driver_algs[] = { | |||
1030 | .name = "authenc(hmac(sha256),cbc(des))", | 1946 | .name = "authenc(hmac(sha256),cbc(des))", |
1031 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", | 1947 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", |
1032 | .blocksize = DES_BLOCK_SIZE, | 1948 | .blocksize = DES_BLOCK_SIZE, |
1033 | .aead = { | 1949 | .type = CRYPTO_ALG_TYPE_AEAD, |
1034 | .setkey = aead_authenc_setkey, | 1950 | .template_aead = { |
1035 | .setauthsize = aead_authenc_setauthsize, | 1951 | .setkey = aead_setkey, |
1036 | .encrypt = aead_authenc_encrypt, | 1952 | .setauthsize = aead_setauthsize, |
1037 | .decrypt = aead_authenc_decrypt, | 1953 | .encrypt = aead_encrypt, |
1038 | .givencrypt = aead_authenc_givencrypt, | 1954 | .decrypt = aead_decrypt, |
1955 | .givencrypt = aead_givencrypt, | ||
1039 | .geniv = "<built-in>", | 1956 | .geniv = "<built-in>", |
1040 | .ivsize = DES_BLOCK_SIZE, | 1957 | .ivsize = DES_BLOCK_SIZE, |
1041 | .maxauthsize = SHA256_DIGEST_SIZE, | 1958 | .maxauthsize = SHA256_DIGEST_SIZE, |
@@ -1049,12 +1966,13 @@ static struct caam_alg_template driver_algs[] = { | |||
1049 | .name = "authenc(hmac(sha512),cbc(des))", | 1966 | .name = "authenc(hmac(sha512),cbc(des))", |
1050 | .driver_name = "authenc-hmac-sha512-cbc-des-caam", | 1967 | .driver_name = "authenc-hmac-sha512-cbc-des-caam", |
1051 | .blocksize = DES_BLOCK_SIZE, | 1968 | .blocksize = DES_BLOCK_SIZE, |
1052 | .aead = { | 1969 | .type = CRYPTO_ALG_TYPE_AEAD, |
1053 | .setkey = aead_authenc_setkey, | 1970 | .template_aead = { |
1054 | .setauthsize = aead_authenc_setauthsize, | 1971 | .setkey = aead_setkey, |
1055 | .encrypt = aead_authenc_encrypt, | 1972 | .setauthsize = aead_setauthsize, |
1056 | .decrypt = aead_authenc_decrypt, | 1973 | .encrypt = aead_encrypt, |
1057 | .givencrypt = aead_authenc_givencrypt, | 1974 | .decrypt = aead_decrypt, |
1975 | .givencrypt = aead_givencrypt, | ||
1058 | .geniv = "<built-in>", | 1976 | .geniv = "<built-in>", |
1059 | .ivsize = DES_BLOCK_SIZE, | 1977 | .ivsize = DES_BLOCK_SIZE, |
1060 | .maxauthsize = SHA512_DIGEST_SIZE, | 1978 | .maxauthsize = SHA512_DIGEST_SIZE, |
@@ -1064,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = { | |||
1064 | OP_ALG_AAI_HMAC_PRECOMP, | 1982 | OP_ALG_AAI_HMAC_PRECOMP, |
1065 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | 1983 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, |
1066 | }, | 1984 | }, |
1985 | /* ablkcipher descriptor */ | ||
1986 | { | ||
1987 | .name = "cbc(aes)", | ||
1988 | .driver_name = "cbc-aes-caam", | ||
1989 | .blocksize = AES_BLOCK_SIZE, | ||
1990 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
1991 | .template_ablkcipher = { | ||
1992 | .setkey = ablkcipher_setkey, | ||
1993 | .encrypt = ablkcipher_encrypt, | ||
1994 | .decrypt = ablkcipher_decrypt, | ||
1995 | .geniv = "eseqiv", | ||
1996 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1997 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1998 | .ivsize = AES_BLOCK_SIZE, | ||
1999 | }, | ||
2000 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | ||
2001 | }, | ||
2002 | { | ||
2003 | .name = "cbc(des3_ede)", | ||
2004 | .driver_name = "cbc-3des-caam", | ||
2005 | .blocksize = DES3_EDE_BLOCK_SIZE, | ||
2006 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2007 | .template_ablkcipher = { | ||
2008 | .setkey = ablkcipher_setkey, | ||
2009 | .encrypt = ablkcipher_encrypt, | ||
2010 | .decrypt = ablkcipher_decrypt, | ||
2011 | .geniv = "eseqiv", | ||
2012 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
2013 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
2014 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2015 | }, | ||
2016 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | ||
2017 | }, | ||
2018 | { | ||
2019 | .name = "cbc(des)", | ||
2020 | .driver_name = "cbc-des-caam", | ||
2021 | .blocksize = DES_BLOCK_SIZE, | ||
2022 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | ||
2023 | .template_ablkcipher = { | ||
2024 | .setkey = ablkcipher_setkey, | ||
2025 | .encrypt = ablkcipher_encrypt, | ||
2026 | .decrypt = ablkcipher_decrypt, | ||
2027 | .geniv = "eseqiv", | ||
2028 | .min_keysize = DES_KEY_SIZE, | ||
2029 | .max_keysize = DES_KEY_SIZE, | ||
2030 | .ivsize = DES_BLOCK_SIZE, | ||
2031 | }, | ||
2032 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | ||
2033 | } | ||
1067 | }; | 2034 | }; |
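
Since registration is table-driven, supporting a further mode is mostly one more entry. A purely hypothetical example (not in this patch; it assumes an OP_ALG_AAI_ECB constant exists and would additionally need ECB-aware shared-descriptor construction, because the descriptors built here load an IV):

    {
            .name = "ecb(aes)",
            .driver_name = "ecb-aes-caam",
            .blocksize = AES_BLOCK_SIZE,
            .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
            .template_ablkcipher = {
                    .setkey = ablkcipher_setkey,
                    .encrypt = ablkcipher_encrypt,
                    .decrypt = ablkcipher_decrypt,
                    .min_keysize = AES_MIN_KEY_SIZE,
                    .max_keysize = AES_MAX_KEY_SIZE,
            },
            .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
    },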
1068 | 2035 | ||
1069 | struct caam_crypto_alg { | 2036 | struct caam_crypto_alg { |
@@ -1102,16 +2069,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm) | |||
1102 | { | 2069 | { |
1103 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 2070 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
1104 | 2071 | ||
1105 | if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) | 2072 | if (ctx->sh_desc_enc_dma && |
1106 | dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, | 2073 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) |
1107 | desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); | 2074 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, |
1108 | kfree(ctx->sh_desc); | 2075 | desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); |
1109 | 2076 | if (ctx->sh_desc_dec_dma && | |
1110 | if (!dma_mapping_error(ctx->jrdev, ctx->key_phys)) | 2077 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) |
1111 | dma_unmap_single(ctx->jrdev, ctx->key_phys, | 2078 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, |
1112 | ctx->split_key_pad_len + ctx->enckeylen, | 2079 | desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); |
2080 | if (ctx->sh_desc_givenc_dma && | ||
2081 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) | ||
2082 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, | ||
2083 | desc_bytes(ctx->sh_desc_givenc), | ||
1113 | DMA_TO_DEVICE); | 2084 | DMA_TO_DEVICE); |
1114 | kfree(ctx->key); | ||
1115 | } | 2085 | } |
1116 | 2086 | ||
1117 | static void __exit caam_algapi_exit(void) | 2087 | static void __exit caam_algapi_exit(void) |
@@ -1175,12 +2145,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |||
1175 | alg->cra_init = caam_cra_init; | 2145 | alg->cra_init = caam_cra_init; |
1176 | alg->cra_exit = caam_cra_exit; | 2146 | alg->cra_exit = caam_cra_exit; |
1177 | alg->cra_priority = CAAM_CRA_PRIORITY; | 2147 | alg->cra_priority = CAAM_CRA_PRIORITY; |
1178 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | ||
1179 | alg->cra_blocksize = template->blocksize; | 2148 | alg->cra_blocksize = template->blocksize; |
1180 | alg->cra_alignmask = 0; | 2149 | alg->cra_alignmask = 0; |
1181 | alg->cra_type = &crypto_aead_type; | ||
1182 | alg->cra_ctxsize = sizeof(struct caam_ctx); | 2150 | alg->cra_ctxsize = sizeof(struct caam_ctx); |
1183 | alg->cra_u.aead = template->aead; | 2151 | alg->cra_flags = CRYPTO_ALG_ASYNC | template->type; |
2152 | switch (template->type) { | ||
2153 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2154 | alg->cra_type = &crypto_ablkcipher_type; | ||
2155 | alg->cra_ablkcipher = template->template_ablkcipher; | ||
2156 | break; | ||
2157 | case CRYPTO_ALG_TYPE_AEAD: | ||
2158 | alg->cra_type = &crypto_aead_type; | ||
2159 | alg->cra_aead = template->template_aead; | ||
2160 | break; | ||
2161 | } | ||
1184 | 2162 | ||
1185 | t_alg->class1_alg_type = template->class1_alg_type; | 2163 | t_alg->class1_alg_type = template->class1_alg_type; |
1186 | t_alg->class2_alg_type = template->class2_alg_type; | 2164 | t_alg->class2_alg_type = template->class2_alg_type; |
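
caam_alg_alloc() feeds the module init path, which walks driver_algs and registers each translated template. Roughly, with error unwinding elided (a sketch, not the verbatim init code):

    for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
            struct caam_crypto_alg *t_alg;

            t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
            if (IS_ERR(t_alg))
                    return PTR_ERR(t_alg);  /* real code unwinds prior registrations */
            err = crypto_register_alg(&t_alg->crypto_alg);
            if (err)
                    kfree(t_alg);
    }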
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 950450346f70..d38f2afaa966 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -31,5 +31,6 @@ | |||
31 | #include <crypto/aead.h> | 31 | #include <crypto/aead.h> |
32 | #include <crypto/authenc.h> | 32 | #include <crypto/authenc.h> |
33 | #include <crypto/scatterwalk.h> | 33 | #include <crypto/scatterwalk.h> |
34 | #include <crypto/internal/skcipher.h> | ||
34 | 35 | ||
35 | #endif /* !defined(CAAM_COMPAT_H) */ | 36 | #endif /* !defined(CAAM_COMPAT_H) */ |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 9009713a3c2e..fc2d9ed22470 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -52,9 +52,11 @@ static int caam_probe(struct platform_device *pdev) | |||
52 | struct caam_ctrl __iomem *ctrl; | 52 | struct caam_ctrl __iomem *ctrl; |
53 | struct caam_full __iomem *topregs; | 53 | struct caam_full __iomem *topregs; |
54 | struct caam_drv_private *ctrlpriv; | 54 | struct caam_drv_private *ctrlpriv; |
55 | struct caam_perfmon *perfmon; | ||
56 | struct caam_deco **deco; | 55 | struct caam_deco **deco; |
57 | u32 deconum; | 56 | u32 deconum; |
57 | #ifdef CONFIG_DEBUG_FS | ||
58 | struct caam_perfmon *perfmon; | ||
59 | #endif | ||
58 | 60 | ||
59 | ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); | 61 | ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); |
60 | if (!ctrlpriv) | 62 | if (!ctrlpriv) |
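
The ctrl.c hunk is purely a build fix: perfmon is referenced only by the debugfs setup later in caam_probe(), so with CONFIG_DEBUG_FS disabled the declaration drew an unused-variable warning. The general shape of the fix, with an illustrative debugfs call (the counter name is assumed, not taken from this diff):

    #ifdef CONFIG_DEBUG_FS
            struct caam_perfmon *perfmon;   /* only the debugfs code reads this */
    #endif
            /* ... probe work common to all configurations ... */
    #ifdef CONFIG_DEBUG_FS
            perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
            debugfs_create_u64("req_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
                               ctrlpriv->ctl, &perfmon->req_dequeued);
    #endif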
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 46915800c26f..0991323cf3fd 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #define IMMEDIATE (1 << 23) | 9 | #define IMMEDIATE (1 << 23) |
10 | #define CAAM_CMD_SZ sizeof(u32) | 10 | #define CAAM_CMD_SZ sizeof(u32) |
11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) | 11 | #define CAAM_PTR_SZ sizeof(dma_addr_t) |
12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64) | 12 | #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) |
13 | 13 | ||
14 | #ifdef DEBUG | 14 | #ifdef DEBUG |
15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ | 15 | #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ |
@@ -18,6 +18,9 @@ | |||
18 | #define PRINT_POS | 18 | #define PRINT_POS |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ | ||
22 | LDST_SRCDST_WORD_DECOCTRL | \ | ||
23 | (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT)) | ||
21 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ | 24 | #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ |
22 | LDST_SRCDST_WORD_DECOCTRL | \ | 25 | LDST_SRCDST_WORD_DECOCTRL | \ |
23 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) | 26 | (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) |
@@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ | |||
203 | append_cmd(desc, immediate); \ | 206 | append_cmd(desc, immediate); \ |
204 | } | 207 | } |
205 | APPEND_CMD_RAW_IMM(load, LOAD, u32); | 208 | APPEND_CMD_RAW_IMM(load, LOAD, u32); |
209 | |||
210 | /* | ||
211 | * Append math command. Only the last part of destination and source need to | ||
212 | * be specified | ||
213 | */ | ||
214 | #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ | ||
215 | append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ | ||
216 | MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK)); | ||
217 | |||
218 | #define append_math_add(desc, dest, src0, src1, len) \ | ||
219 | APPEND_MATH(ADD, desc, dest, src0, src1, len) | ||
220 | #define append_math_sub(desc, dest, src0, src1, len) \ | ||
221 | APPEND_MATH(SUB, desc, dest, src0, src1, len) | ||
222 | #define append_math_add_c(desc, dest, src0, src1, len) \ | ||
223 | APPEND_MATH(ADDC, desc, dest, src0, src1, len) | ||
224 | #define append_math_sub_b(desc, dest, src0, src1, len) \ | ||
225 | APPEND_MATH(SUBB, desc, dest, src0, src1, len) | ||
226 | #define append_math_and(desc, dest, src0, src1, len) \ | ||
227 | APPEND_MATH(AND, desc, dest, src0, src1, len) | ||
228 | #define append_math_or(desc, dest, src0, src1, len) \ | ||
229 | APPEND_MATH(OR, desc, dest, src0, src1, len) | ||
230 | #define append_math_xor(desc, dest, src0, src1, len) \ | ||
231 | APPEND_MATH(XOR, desc, dest, src0, src1, len) | ||
232 | #define append_math_lshift(desc, dest, src0, src1, len) \ | ||
233 | APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) | ||
234 | #define append_math_rshift(desc, dest, src0, src1, len) \ | ||
235 | APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) | ||
236 | |||
237 | /* Exactly one source is IMM. Data is passed in as u32 value */ | ||
238 | #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ | ||
239 | do { \ | ||
240 | APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ | ||
241 | append_cmd(desc, data); \ | ||
242 | } while (0); | ||
243 | |||
244 | #define append_math_add_imm_u32(desc, dest, src0, src1, data) \ | ||
245 | APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) | ||
246 | #define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ | ||
247 | APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) | ||
248 | #define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ | ||
249 | APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) | ||
250 | #define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ | ||
251 | APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) | ||
252 | #define append_math_and_imm_u32(desc, dest, src0, src1, data) \ | ||
253 | APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) | ||
254 | #define append_math_or_imm_u32(desc, dest, src0, src1, data) \ | ||
255 | APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) | ||
256 | #define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ | ||
257 | APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) | ||
258 | #define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ | ||
259 | APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) | ||
260 | #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ | ||
261 | APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) | ||
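
Each append_math_* helper packs an entire MATH operation into one 32-bit command word; the *_imm_u32 variants emit a second word carrying the immediate. For example (constant names from the driver's desc.h):

    /* subtract MATH register 0 from the input sequence length */
    append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
    /* ...expands to exactly: */
    append_cmd(desc, CMD_MATH | MATH_FUN_SUB | MATH_DEST_VARSEQINLEN |
                     MATH_SRC0_SEQINLEN | MATH_SRC1_REG0 |
                     (u32)(CAAM_CMD_SZ & MATH_LEN_MASK));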
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ba8f1ea84c5e..6399a8f1938a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -72,17 +72,20 @@ | |||
72 | 72 | ||
73 | #define DEFAULT_TIMEOUT_INTERVAL HZ | 73 | #define DEFAULT_TIMEOUT_INTERVAL HZ |
74 | 74 | ||
75 | #define FLAGS_FINUP 0x0002 | 75 | /* mostly device flags */ |
76 | #define FLAGS_FINAL 0x0004 | 76 | #define FLAGS_BUSY 0 |
77 | #define FLAGS_SG 0x0008 | 77 | #define FLAGS_FINAL 1 |
78 | #define FLAGS_SHA1 0x0010 | 78 | #define FLAGS_DMA_ACTIVE 2 |
79 | #define FLAGS_DMA_ACTIVE 0x0020 | 79 | #define FLAGS_OUTPUT_READY 3 |
80 | #define FLAGS_OUTPUT_READY 0x0040 | 80 | #define FLAGS_INIT 4 |
81 | #define FLAGS_INIT 0x0100 | 81 | #define FLAGS_CPU 5 |
82 | #define FLAGS_CPU 0x0200 | 82 | #define FLAGS_DMA_READY 6 |
83 | #define FLAGS_HMAC 0x0400 | 83 | /* context flags */ |
84 | #define FLAGS_ERROR 0x0800 | 84 | #define FLAGS_FINUP 16 |
85 | #define FLAGS_BUSY 0x1000 | 85 | #define FLAGS_SG 17 |
86 | #define FLAGS_SHA1 18 | ||
87 | #define FLAGS_HMAC 19 | ||
88 | #define FLAGS_ERROR 20 | ||
86 | 89 | ||
87 | #define OP_UPDATE 1 | 90 | #define OP_UPDATE 1 |
88 | #define OP_FINAL 2 | 91 | #define OP_FINAL 2 |
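
The renumbering above is driven by the kernel bitops API: set_bit(), test_bit() and friends take a bit index into an unsigned long, while plain mask arithmetic needs BIT(n) to turn the index back into a mask. Both idioms as they appear after this change (handle_result() is a hypothetical stand-in):

    /* device flags: shared with IRQ/tasklet context, so use atomic bitops */
    set_bit(FLAGS_BUSY, &dd->flags);
    if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
            handle_result(dd);

    /* per-request context flags: single owner, plain masks via BIT() suffice */
    ctx->flags |= BIT(FLAGS_FINUP);
    if (ctx->flags & BIT(FLAGS_ERROR))
            return 0;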
@@ -144,7 +147,6 @@ struct omap_sham_dev { | |||
144 | int dma; | 147 | int dma; |
145 | int dma_lch; | 148 | int dma_lch; |
146 | struct tasklet_struct done_task; | 149 | struct tasklet_struct done_task; |
147 | struct tasklet_struct queue_task; | ||
148 | 150 | ||
149 | unsigned long flags; | 151 | unsigned long flags; |
150 | struct crypto_queue queue; | 152 | struct crypto_queue queue; |
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) | |||
223 | if (!hash) | 225 | if (!hash) |
224 | return; | 226 | return; |
225 | 227 | ||
226 | if (likely(ctx->flags & FLAGS_SHA1)) { | 228 | if (likely(ctx->flags & BIT(FLAGS_SHA1))) { |
227 | /* SHA1 results are in big endian */ | 229 | /* SHA1 results are in big endian */ |
228 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | 230 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) |
229 | hash[i] = be32_to_cpu(in[i]); | 231 | hash[i] = be32_to_cpu(in[i]); |
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) | |||
238 | { | 240 | { |
239 | clk_enable(dd->iclk); | 241 | clk_enable(dd->iclk); |
240 | 242 | ||
241 | if (!(dd->flags & FLAGS_INIT)) { | 243 | if (!test_bit(FLAGS_INIT, &dd->flags)) { |
242 | omap_sham_write_mask(dd, SHA_REG_MASK, | 244 | omap_sham_write_mask(dd, SHA_REG_MASK, |
243 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | 245 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); |
244 | 246 | ||
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) | |||
246 | SHA_REG_SYSSTATUS_RESETDONE)) | 248 | SHA_REG_SYSSTATUS_RESETDONE)) |
247 | return -ETIMEDOUT; | 249 | return -ETIMEDOUT; |
248 | 250 | ||
249 | dd->flags |= FLAGS_INIT; | 251 | set_bit(FLAGS_INIT, &dd->flags); |
250 | dd->err = 0; | 252 | dd->err = 0; |
251 | } | 253 | } |
252 | 254 | ||
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | |||
269 | * Setting ALGO_CONST only for the first iteration | 271 | * Setting ALGO_CONST only for the first iteration |
270 | * and CLOSE_HASH only for the last one. | 272 | * and CLOSE_HASH only for the last one. |
271 | */ | 273 | */ |
272 | if (ctx->flags & FLAGS_SHA1) | 274 | if (ctx->flags & BIT(FLAGS_SHA1)) |
273 | val |= SHA_REG_CTRL_ALGO; | 275 | val |= SHA_REG_CTRL_ALGO; |
274 | if (!ctx->digcnt) | 276 | if (!ctx->digcnt) |
275 | val |= SHA_REG_CTRL_ALGO_CONST; | 277 | val |= SHA_REG_CTRL_ALGO_CONST; |
@@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | |||
301 | return -ETIMEDOUT; | 303 | return -ETIMEDOUT; |
302 | 304 | ||
303 | if (final) | 305 | if (final) |
304 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | 306 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
307 | |||
308 | set_bit(FLAGS_CPU, &dd->flags); | ||
305 | 309 | ||
306 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | 310 | len32 = DIV_ROUND_UP(length, sizeof(u32)); |
307 | 311 | ||
@@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | |||
334 | ctx->digcnt += length; | 338 | ctx->digcnt += length; |
335 | 339 | ||
336 | if (final) | 340 | if (final) |
337 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | 341 | set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ |
338 | 342 | ||
339 | dd->flags |= FLAGS_DMA_ACTIVE; | 343 | set_bit(FLAGS_DMA_ACTIVE, &dd->flags); |
340 | 344 | ||
341 | omap_start_dma(dd->dma_lch); | 345 | omap_start_dma(dd->dma_lch); |
342 | 346 | ||
@@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, | |||
392 | return -EINVAL; | 396 | return -EINVAL; |
393 | } | 397 | } |
394 | 398 | ||
395 | ctx->flags &= ~FLAGS_SG; | 399 | ctx->flags &= ~BIT(FLAGS_SG); |
396 | 400 | ||
397 | /* next call does not fail... so no unmap in the case of error */ | 401 | /* next call does not fail... so no unmap in the case of error */ |
398 | return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); | 402 | return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); |
@@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | |||
406 | 410 | ||
407 | omap_sham_append_sg(ctx); | 411 | omap_sham_append_sg(ctx); |
408 | 412 | ||
409 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | 413 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; |
410 | 414 | ||
411 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | 415 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", |
412 | ctx->bufcnt, ctx->digcnt, final); | 416 | ctx->bufcnt, ctx->digcnt, final); |
@@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
452 | length = min(ctx->total, sg->length); | 456 | length = min(ctx->total, sg->length); |
453 | 457 | ||
454 | if (sg_is_last(sg)) { | 458 | if (sg_is_last(sg)) { |
455 | if (!(ctx->flags & FLAGS_FINUP)) { | 459 | if (!(ctx->flags & BIT(FLAGS_FINUP))) { |
456 | /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ | 460 | /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ |
457 | tail = length & (SHA1_MD5_BLOCK_SIZE - 1); | 461 | tail = length & (SHA1_MD5_BLOCK_SIZE - 1); |
458 | /* without finup() we need one block to close hash */ | 462 | /* without finup() we need one block to close hash */ |
@@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) | |||
467 | return -EINVAL; | 471 | return -EINVAL; |
468 | } | 472 | } |
469 | 473 | ||
470 | ctx->flags |= FLAGS_SG; | 474 | ctx->flags |= BIT(FLAGS_SG); |
471 | 475 | ||
472 | ctx->total -= length; | 476 | ctx->total -= length; |
473 | ctx->offset = length; /* offset where to start slow */ | 477 | ctx->offset = length; /* offset where to start slow */ |
474 | 478 | ||
475 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | 479 | final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; |
476 | 480 | ||
477 | /* next call does not fail... so no unmap in the case of error */ | 481 | /* next call does not fail... so no unmap in the case of error */ |
478 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); | 482 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); |
@@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | |||
495 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | 499 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); |
496 | 500 | ||
497 | omap_stop_dma(dd->dma_lch); | 501 | omap_stop_dma(dd->dma_lch); |
498 | if (ctx->flags & FLAGS_SG) { | 502 | if (ctx->flags & BIT(FLAGS_SG)) { |
499 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | 503 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); |
500 | if (ctx->sg->length == ctx->offset) { | 504 | if (ctx->sg->length == ctx->offset) { |
501 | ctx->sg = sg_next(ctx->sg); | 505 | ctx->sg = sg_next(ctx->sg); |
@@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req) | |||
537 | crypto_ahash_digestsize(tfm)); | 541 | crypto_ahash_digestsize(tfm)); |
538 | 542 | ||
539 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | 543 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) |
540 | ctx->flags |= FLAGS_SHA1; | 544 | ctx->flags |= BIT(FLAGS_SHA1); |
541 | 545 | ||
542 | ctx->bufcnt = 0; | 546 | ctx->bufcnt = 0; |
543 | ctx->digcnt = 0; | 547 | ctx->digcnt = 0; |
544 | ctx->buflen = BUFLEN; | 548 | ctx->buflen = BUFLEN; |
545 | 549 | ||
546 | if (tctx->flags & FLAGS_HMAC) { | 550 | if (tctx->flags & BIT(FLAGS_HMAC)) { |
547 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 551 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
548 | 552 | ||
549 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | 553 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); |
550 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | 554 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; |
551 | ctx->flags |= FLAGS_HMAC; | 555 | ctx->flags |= BIT(FLAGS_HMAC); |
552 | } | 556 | } |
553 | 557 | ||
554 | return 0; | 558 | return 0; |
@@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) | |||
562 | int err; | 566 | int err; |
563 | 567 | ||
564 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | 568 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", |
565 | ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0); | 569 | ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); |
566 | 570 | ||
567 | if (ctx->flags & FLAGS_CPU) | 571 | if (ctx->flags & BIT(FLAGS_CPU)) |
568 | err = omap_sham_update_cpu(dd); | 572 | err = omap_sham_update_cpu(dd); |
569 | else | 573 | else |
570 | err = omap_sham_update_dma_start(dd); | 574 | err = omap_sham_update_dma_start(dd); |
@@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req) | |||
624 | 628 | ||
625 | if (ctx->digcnt) { | 629 | if (ctx->digcnt) { |
626 | omap_sham_copy_ready_hash(req); | 630 | omap_sham_copy_ready_hash(req); |
627 | if (ctx->flags & FLAGS_HMAC) | 631 | if (ctx->flags & BIT(FLAGS_HMAC)) |
628 | err = omap_sham_finish_hmac(req); | 632 | err = omap_sham_finish_hmac(req); |
629 | } | 633 | } |
630 | 634 | ||
@@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) | |||
639 | struct omap_sham_dev *dd = ctx->dd; | 643 | struct omap_sham_dev *dd = ctx->dd; |
640 | 644 | ||
641 | if (!err) { | 645 | if (!err) { |
642 | omap_sham_copy_hash(ctx->dd->req, 1); | 646 | omap_sham_copy_hash(req, 1); |
643 | if (ctx->flags & FLAGS_FINAL) | 647 | if (test_bit(FLAGS_FINAL, &dd->flags)) |
644 | err = omap_sham_finish(req); | 648 | err = omap_sham_finish(req); |
645 | } else { | 649 | } else { |
646 | ctx->flags |= FLAGS_ERROR; | 650 | ctx->flags |= BIT(FLAGS_ERROR); |
647 | } | 651 | } |
648 | 652 | ||
653 | /* atomic operation is not needed here */ | ||
654 | dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | | ||
655 | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); | ||
649 | clk_disable(dd->iclk); | 656 | clk_disable(dd->iclk); |
650 | dd->flags &= ~FLAGS_BUSY; | ||
651 | 657 | ||
652 | if (req->base.complete) | 658 | if (req->base.complete) |
653 | req->base.complete(&req->base, err); | 659 | req->base.complete(&req->base, err); |
660 | |||
661 | /* handle new request */ | ||
662 | tasklet_schedule(&dd->done_task); | ||
654 | } | 663 | } |
655 | 664 | ||
656 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, | 665 | static int omap_sham_handle_queue(struct omap_sham_dev *dd, |
@@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
658 | { | 667 | { |
659 | struct crypto_async_request *async_req, *backlog; | 668 | struct crypto_async_request *async_req, *backlog; |
660 | struct omap_sham_reqctx *ctx; | 669 | struct omap_sham_reqctx *ctx; |
661 | struct ahash_request *prev_req; | ||
662 | unsigned long flags; | 670 | unsigned long flags; |
663 | int err = 0, ret = 0; | 671 | int err = 0, ret = 0; |
664 | 672 | ||
665 | spin_lock_irqsave(&dd->lock, flags); | 673 | spin_lock_irqsave(&dd->lock, flags); |
666 | if (req) | 674 | if (req) |
667 | ret = ahash_enqueue_request(&dd->queue, req); | 675 | ret = ahash_enqueue_request(&dd->queue, req); |
668 | if (dd->flags & FLAGS_BUSY) { | 676 | if (test_bit(FLAGS_BUSY, &dd->flags)) { |
669 | spin_unlock_irqrestore(&dd->lock, flags); | 677 | spin_unlock_irqrestore(&dd->lock, flags); |
670 | return ret; | 678 | return ret; |
671 | } | 679 | } |
672 | backlog = crypto_get_backlog(&dd->queue); | 680 | backlog = crypto_get_backlog(&dd->queue); |
673 | async_req = crypto_dequeue_request(&dd->queue); | 681 | async_req = crypto_dequeue_request(&dd->queue); |
674 | if (async_req) | 682 | if (async_req) |
675 | dd->flags |= FLAGS_BUSY; | 683 | set_bit(FLAGS_BUSY, &dd->flags); |
676 | spin_unlock_irqrestore(&dd->lock, flags); | 684 | spin_unlock_irqrestore(&dd->lock, flags); |
677 | 685 | ||
678 | if (!async_req) | 686 | if (!async_req) |
@@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
682 | backlog->complete(backlog, -EINPROGRESS); | 690 | backlog->complete(backlog, -EINPROGRESS); |
683 | 691 | ||
684 | req = ahash_request_cast(async_req); | 692 | req = ahash_request_cast(async_req); |
685 | |||
686 | prev_req = dd->req; | ||
687 | dd->req = req; | 693 | dd->req = req; |
688 | |||
689 | ctx = ahash_request_ctx(req); | 694 | ctx = ahash_request_ctx(req); |
690 | 695 | ||
691 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | 696 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
692 | ctx->op, req->nbytes); | 697 | ctx->op, req->nbytes); |
693 | 698 | ||
694 | |||
695 | err = omap_sham_hw_init(dd); | 699 | err = omap_sham_hw_init(dd); |
696 | if (err) | 700 | if (err) |
697 | goto err1; | 701 | goto err1; |
@@ -712,18 +716,16 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, | |||
712 | 716 | ||
713 | if (ctx->op == OP_UPDATE) { | 717 | if (ctx->op == OP_UPDATE) { |
714 | err = omap_sham_update_req(dd); | 718 | err = omap_sham_update_req(dd); |
715 | if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP)) | 719 | if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) |
716 | /* no final() after finup() */ | 720 | /* no final() after finup() */ |
717 | err = omap_sham_final_req(dd); | 721 | err = omap_sham_final_req(dd); |
718 | } else if (ctx->op == OP_FINAL) { | 722 | } else if (ctx->op == OP_FINAL) { |
719 | err = omap_sham_final_req(dd); | 723 | err = omap_sham_final_req(dd); |
720 | } | 724 | } |
721 | err1: | 725 | err1: |
722 | if (err != -EINPROGRESS) { | 726 | if (err != -EINPROGRESS) |
723 | /* done_task will not finish it, so do it here */ | 727 | /* done_task will not finish it, so do it here */ |
724 | omap_sham_finish_req(req, err); | 728 | omap_sham_finish_req(req, err); |
725 | tasklet_schedule(&dd->queue_task); | ||
726 | } | ||
727 | 729 | ||
728 | dev_dbg(dd->dev, "exit, err: %d\n", err); | 730 | dev_dbg(dd->dev, "exit, err: %d\n", err); |
729 | 731 | ||
@@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req) | |||
752 | ctx->sg = req->src; | 754 | ctx->sg = req->src; |
753 | ctx->offset = 0; | 755 | ctx->offset = 0; |
754 | 756 | ||
755 | if (ctx->flags & FLAGS_FINUP) { | 757 | if (ctx->flags & BIT(FLAGS_FINUP)) { |
756 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { | 758 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { |
757 | /* | 759 | /* |
758 | * OMAP HW accel works only with buffers >= 9 | 760 | * OMAP HW accel works only with buffers >= 9 |
@@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req) | |||
765 | /* | 767 | /* |
766 | * faster to use CPU for short transfers | 768 | * faster to use CPU for short transfers |
767 | */ | 769 | */ |
768 | ctx->flags |= FLAGS_CPU; | 770 | ctx->flags |= BIT(FLAGS_CPU); |
769 | } | 771 | } |
770 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | 772 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { |
771 | omap_sham_append_sg(ctx); | 773 | omap_sham_append_sg(ctx); |
@@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req) | |||
802 | { | 804 | { |
803 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 805 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
804 | 806 | ||
805 | ctx->flags |= FLAGS_FINUP; | 807 | ctx->flags |= BIT(FLAGS_FINUP); |
806 | 808 | ||
807 | if (ctx->flags & FLAGS_ERROR) | 809 | if (ctx->flags & BIT(FLAGS_ERROR)) |
808 | return 0; /* uncompleted hash is not needed */ | 810 | return 0; /* uncompleted hash is not needed */ |
809 | 811 | ||
810 | /* OMAP HW accel works only with buffers >= 9 */ | 812 | /* OMAP HW accel works only with buffers >= 9 */ |
@@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req) | |||
823 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | 825 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); |
824 | int err1, err2; | 826 | int err1, err2; |
825 | 827 | ||
826 | ctx->flags |= FLAGS_FINUP; | 828 | ctx->flags |= BIT(FLAGS_FINUP); |
827 | 829 | ||
828 | err1 = omap_sham_update(req); | 830 | err1 = omap_sham_update(req); |
829 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | 831 | if (err1 == -EINPROGRESS || err1 == -EBUSY) |
@@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | |||
895 | 897 | ||
896 | if (alg_base) { | 898 | if (alg_base) { |
897 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 899 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
898 | tctx->flags |= FLAGS_HMAC; | 900 | tctx->flags |= BIT(FLAGS_HMAC); |
899 | bctx->shash = crypto_alloc_shash(alg_base, 0, | 901 | bctx->shash = crypto_alloc_shash(alg_base, 0, |
900 | CRYPTO_ALG_NEED_FALLBACK); | 902 | CRYPTO_ALG_NEED_FALLBACK); |
901 | if (IS_ERR(bctx->shash)) { | 903 | if (IS_ERR(bctx->shash)) { |
@@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) | |||
932 | crypto_free_shash(tctx->fallback); | 934 | crypto_free_shash(tctx->fallback); |
933 | tctx->fallback = NULL; | 935 | tctx->fallback = NULL; |
934 | 936 | ||
935 | if (tctx->flags & FLAGS_HMAC) { | 937 | if (tctx->flags & BIT(FLAGS_HMAC)) { |
936 | struct omap_sham_hmac_ctx *bctx = tctx->base; | 938 | struct omap_sham_hmac_ctx *bctx = tctx->base; |
937 | crypto_free_shash(bctx->shash); | 939 | crypto_free_shash(bctx->shash); |
938 | } | 940 | } |
@@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = { | |||
1036 | static void omap_sham_done_task(unsigned long data) | 1038 | static void omap_sham_done_task(unsigned long data) |
1037 | { | 1039 | { |
1038 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | 1040 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; |
1039 | struct ahash_request *req = dd->req; | 1041 | int err = 0; |
1040 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
1041 | int ready = 0, err = 0; | ||
1042 | 1042 | ||
1043 | if (ctx->flags & FLAGS_OUTPUT_READY) { | 1043 | if (!test_bit(FLAGS_BUSY, &dd->flags)) { |
1044 | ctx->flags &= ~FLAGS_OUTPUT_READY; | 1044 | omap_sham_handle_queue(dd, NULL); |
1045 | ready = 1; | 1045 | return; |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | if (dd->flags & FLAGS_DMA_ACTIVE) { | 1048 | if (test_bit(FLAGS_CPU, &dd->flags)) { |
1049 | dd->flags &= ~FLAGS_DMA_ACTIVE; | 1049 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) |
1050 | omap_sham_update_dma_stop(dd); | 1050 | goto finish; |
1051 | if (!dd->err) | 1051 | } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { |
1052 | if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { | ||
1053 | omap_sham_update_dma_stop(dd); | ||
1054 | if (dd->err) { | ||
1055 | err = dd->err; | ||
1056 | goto finish; | ||
1057 | } | ||
1058 | } | ||
1059 | if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { | ||
1060 | /* hash or semi-hash ready */ | ||
1061 | clear_bit(FLAGS_DMA_READY, &dd->flags); | ||
1052 | err = omap_sham_update_dma_start(dd); | 1062 | err = omap_sham_update_dma_start(dd); |
1063 | if (err != -EINPROGRESS) | ||
1064 | goto finish; | ||
1065 | } | ||
1053 | } | 1066 | } |
1054 | 1067 | ||
1055 | err = dd->err ? : err; | 1068 | return; |
1056 | |||
1057 | if (err != -EINPROGRESS && (ready || err)) { | ||
1058 | dev_dbg(dd->dev, "update done: err: %d\n", err); | ||
1059 | /* finish current request */ | ||
1060 | omap_sham_finish_req(req, err); | ||
1061 | /* start new request */ | ||
1062 | omap_sham_handle_queue(dd, NULL); | ||
1063 | } | ||
1064 | } | ||
1065 | |||
1066 | static void omap_sham_queue_task(unsigned long data) | ||
1067 | { | ||
1068 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | ||
1069 | 1069 | ||
1070 | omap_sham_handle_queue(dd, NULL); | 1070 | finish: |
1071 | dev_dbg(dd->dev, "update done: err: %d\n", err); | ||
1072 | /* finish current request */ | ||
1073 | omap_sham_finish_req(dd->req, err); | ||
1071 | } | 1074 | } |
1072 | 1075 | ||
1073 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) | 1076 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) |
1074 | { | 1077 | { |
1075 | struct omap_sham_dev *dd = dev_id; | 1078 | struct omap_sham_dev *dd = dev_id; |
1076 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
1077 | |||
1078 | if (!ctx) { | ||
1079 | dev_err(dd->dev, "unknown interrupt.\n"); | ||
1080 | return IRQ_HANDLED; | ||
1081 | } | ||
1082 | 1079 | ||
1083 | if (unlikely(ctx->flags & FLAGS_FINAL)) | 1080 | if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) |
1084 | /* final -> allow device to go to power-saving mode */ | 1081 | /* final -> allow device to go to power-saving mode */ |
1085 | omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); | 1082 | omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); |
1086 | 1083 | ||
@@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) | |||
1088 | SHA_REG_CTRL_OUTPUT_READY); | 1085 | SHA_REG_CTRL_OUTPUT_READY); |
1089 | omap_sham_read(dd, SHA_REG_CTRL); | 1086 | omap_sham_read(dd, SHA_REG_CTRL); |
1090 | 1087 | ||
1091 | ctx->flags |= FLAGS_OUTPUT_READY; | 1088 | if (!test_bit(FLAGS_BUSY, &dd->flags)) { |
1092 | dd->err = 0; | 1089 | dev_warn(dd->dev, "Interrupt when no active requests.\n"); |
1090 | return IRQ_HANDLED; | ||
1091 | } | ||
1092 | |||
1093 | set_bit(FLAGS_OUTPUT_READY, &dd->flags); | ||
1093 | tasklet_schedule(&dd->done_task); | 1094 | tasklet_schedule(&dd->done_task); |
1094 | 1095 | ||
1095 | return IRQ_HANDLED; | 1096 | return IRQ_HANDLED; |
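
After this hunk the interrupt handler is a pure top half: acknowledge the device, drop spurious interrupts arriving while nothing is queued, flag the result, and leave all request completion to the done_task tasklet. As a skeleton (demo_dev stands in for omap_sham_dev):

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
            struct demo_dev *dd = dev_id;

            /* acknowledge hardware status here; touch no request state */
            if (!test_bit(FLAGS_BUSY, &dd->flags))
                    return IRQ_HANDLED;             /* spurious interrupt */

            set_bit(FLAGS_OUTPUT_READY, &dd->flags);
            tasklet_schedule(&dd->done_task);       /* bottom half finishes */
            return IRQ_HANDLED;
    }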
@@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | |||
1102 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { | 1103 | if (ch_status != OMAP_DMA_BLOCK_IRQ) { |
1103 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); | 1104 | pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); |
1104 | dd->err = -EIO; | 1105 | dd->err = -EIO; |
1105 | dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ | 1106 | clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */ |
1106 | } | 1107 | } |
1107 | 1108 | ||
1109 | set_bit(FLAGS_DMA_READY, &dd->flags); | ||
1108 | tasklet_schedule(&dd->done_task); | 1110 | tasklet_schedule(&dd->done_task); |
1109 | } | 1111 | } |
1110 | 1112 | ||
@@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) | |||
1151 | INIT_LIST_HEAD(&dd->list); | 1153 | INIT_LIST_HEAD(&dd->list); |
1152 | spin_lock_init(&dd->lock); | 1154 | spin_lock_init(&dd->lock); |
1153 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); | 1155 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); |
1154 | tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd); | ||
1155 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); | 1156 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); |
1156 | 1157 | ||
1157 | dd->irq = -1; | 1158 | dd->irq = -1; |
@@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev) | |||
1260 | for (i = 0; i < ARRAY_SIZE(algs); i++) | 1261 | for (i = 0; i < ARRAY_SIZE(algs); i++) |
1261 | crypto_unregister_ahash(&algs[i]); | 1262 | crypto_unregister_ahash(&algs[i]); |
1262 | tasklet_kill(&dd->done_task); | 1263 | tasklet_kill(&dd->done_task); |
1263 | tasklet_kill(&dd->queue_task); | ||
1264 | iounmap(dd->io_base); | 1264 | iounmap(dd->io_base); |
1265 | clk_put(dd->iclk); | 1265 | clk_put(dd->iclk); |
1266 | omap_sham_dma_cleanup(dd); | 1266 | omap_sham_dma_cleanup(dd); |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 854e2632f9a6..8a0bb417aa11 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * talitos - Freescale Integrated Security Engine (SEC) device driver | 2 | * talitos - Freescale Integrated Security Engine (SEC) device driver |
3 | * | 3 | * |
4 | * Copyright (c) 2008-2010 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2008-2011 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Scatterlist Crypto API glue code copied from files with the following: | 6 | * Scatterlist Crypto API glue code copied from files with the following: |
7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> |
@@ -282,6 +282,7 @@ static int init_device(struct device *dev) | |||
282 | /** | 282 | /** |
283 | * talitos_submit - submits a descriptor to the device for processing | 283 | * talitos_submit - submits a descriptor to the device for processing |
284 | * @dev: the SEC device to be used | 284 | * @dev: the SEC device to be used |
285 | * @ch: the SEC device channel to be used | ||
285 | * @desc: the descriptor to be processed by the device | 286 | * @desc: the descriptor to be processed by the device |
286 | * @callback: whom to call when processing is complete | 287 | * @callback: whom to call when processing is complete |
287 | * @context: a handle for use by caller (optional) | 288 | * @context: a handle for use by caller (optional) |
@@ -290,7 +291,7 @@ static int init_device(struct device *dev) | |||
290 | * callback must check err and feedback in descriptor header | 291 | * callback must check err and feedback in descriptor header |
291 | * for device processing status. | 292 | * for device processing status. |
292 | */ | 293 | */ |
293 | static int talitos_submit(struct device *dev, struct talitos_desc *desc, | 294 | static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, |
294 | void (*callback)(struct device *dev, | 295 | void (*callback)(struct device *dev, |
295 | struct talitos_desc *desc, | 296 | struct talitos_desc *desc, |
296 | void *context, int error), | 297 | void *context, int error), |
@@ -298,15 +299,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, | |||
298 | { | 299 | { |
299 | struct talitos_private *priv = dev_get_drvdata(dev); | 300 | struct talitos_private *priv = dev_get_drvdata(dev); |
300 | struct talitos_request *request; | 301 | struct talitos_request *request; |
301 | unsigned long flags, ch; | 302 | unsigned long flags; |
302 | int head; | 303 | int head; |
303 | 304 | ||
304 | /* select done notification */ | ||
305 | desc->hdr |= DESC_HDR_DONE_NOTIFY; | ||
306 | |||
307 | /* emulate SEC's round-robin channel fifo polling scheme */ | ||
308 | ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); | ||
309 | |||
310 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); | 305 | spin_lock_irqsave(&priv->chan[ch].head_lock, flags); |
311 | 306 | ||
312 | if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { | 307 | if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { |
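This is the core of the talitos rework: channel selection leaves the submit path entirely. Instead of emulating the SEC's round-robin fifo polling on every descriptor, each transform is bound to one channel (see talitos_cra_init below), so all of a tfm's descriptors execute in order on a single fifo and talitos_submit() no longer touches priv->last_chan. The DESC_HDR_DONE_NOTIFY OR likewise disappears from the hot path, folded into the per-tfm header template. At the call sites the change is mechanical; fragments copied from the hunks:

	/* before: talitos_submit() picked a channel per request */
	ret = talitos_submit(dev, desc, callback, areq);

	/* after: the tfm's own channel, chosen once at cra_init */
	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);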
@@ -706,6 +701,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
706 | 701 | ||
707 | struct talitos_ctx { | 702 | struct talitos_ctx { |
708 | struct device *dev; | 703 | struct device *dev; |
704 | int ch; | ||
709 | __be32 desc_hdr_template; | 705 | __be32 desc_hdr_template; |
710 | u8 key[TALITOS_MAX_KEY_SIZE]; | 706 | u8 key[TALITOS_MAX_KEY_SIZE]; |
711 | u8 iv[TALITOS_MAX_IV_LENGTH]; | 707 | u8 iv[TALITOS_MAX_IV_LENGTH]; |
@@ -1117,7 +1113,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1117 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, | 1113 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, |
1118 | DMA_FROM_DEVICE); | 1114 | DMA_FROM_DEVICE); |
1119 | 1115 | ||
1120 | ret = talitos_submit(dev, desc, callback, areq); | 1116 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1121 | if (ret != -EINPROGRESS) { | 1117 | if (ret != -EINPROGRESS) { |
1122 | ipsec_esp_unmap(dev, edesc, areq); | 1118 | ipsec_esp_unmap(dev, edesc, areq); |
1123 | kfree(edesc); | 1119 | kfree(edesc); |
@@ -1382,22 +1378,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1382 | const u8 *key, unsigned int keylen) | 1378 | const u8 *key, unsigned int keylen) |
1383 | { | 1379 | { |
1384 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1380 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1385 | struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); | ||
1386 | |||
1387 | if (keylen > TALITOS_MAX_KEY_SIZE) | ||
1388 | goto badkey; | ||
1389 | |||
1390 | if (keylen < alg->min_keysize || keylen > alg->max_keysize) | ||
1391 | goto badkey; | ||
1392 | 1381 | ||
1393 | memcpy(&ctx->key, key, keylen); | 1382 | memcpy(&ctx->key, key, keylen); |
1394 | ctx->keylen = keylen; | 1383 | ctx->keylen = keylen; |
1395 | 1384 | ||
1396 | return 0; | 1385 | return 0; |
1397 | |||
1398 | badkey: | ||
1399 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1400 | return -EINVAL; | ||
1401 | } | 1386 | } |
1402 | 1387 | ||
1403 | static void common_nonsnoop_unmap(struct device *dev, | 1388 | static void common_nonsnoop_unmap(struct device *dev, |
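The deleted ablkcipher_setkey() checks were redundant rather than load-bearing: the ablkcipher core already rejects key lengths outside [min_keysize, max_keysize] before a driver's setkey hook runs, and the ablkcipher algorithms talitos registers (AES-CBC, 3DES-CBC) declare max keysizes well below TALITOS_MAX_KEY_SIZE, so the memcpy stays bounded. For reference, a paraphrased sketch of the core-side wrapper of this era (simplified from crypto/ablkcipher.c; the unaligned-key path is omitted):

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return cipher->setkey(tfm, key, keylen);	/* driver hook */
}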
@@ -1433,7 +1418,6 @@ static void ablkcipher_done(struct device *dev, | |||
1433 | 1418 | ||
1434 | static int common_nonsnoop(struct talitos_edesc *edesc, | 1419 | static int common_nonsnoop(struct talitos_edesc *edesc, |
1435 | struct ablkcipher_request *areq, | 1420 | struct ablkcipher_request *areq, |
1436 | u8 *giv, | ||
1437 | void (*callback) (struct device *dev, | 1421 | void (*callback) (struct device *dev, |
1438 | struct talitos_desc *desc, | 1422 | struct talitos_desc *desc, |
1439 | void *context, int error)) | 1423 | void *context, int error)) |
@@ -1453,7 +1437,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1453 | 1437 | ||
1454 | /* cipher iv */ | 1438 | /* cipher iv */ |
1455 | ivsize = crypto_ablkcipher_ivsize(cipher); | 1439 | ivsize = crypto_ablkcipher_ivsize(cipher); |
1456 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, | 1440 | map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0, |
1457 | DMA_TO_DEVICE); | 1441 | DMA_TO_DEVICE); |
1458 | 1442 | ||
1459 | /* cipher key */ | 1443 | /* cipher key */ |
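A note on the replaced expression: "giv ?: areq->info" uses the GNU ?: extension, shorthand for "giv ? giv : areq->info" with giv evaluated once. Since the givencrypt path is gone and both surviving callers passed NULL for giv (see the encrypt/decrypt hunks below), the ternary collapses and the IV is always taken from the request:

	/* GNU extension: a ?: b  ==  a ? a : b */
	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize,
			       areq->info /* was: giv ?: areq->info */, 0,
			       DMA_TO_DEVICE);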
@@ -1524,7 +1508,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, | |||
1524 | to_talitos_ptr(&desc->ptr[6], 0); | 1508 | to_talitos_ptr(&desc->ptr[6], 0); |
1525 | desc->ptr[6].j_extent = 0; | 1509 | desc->ptr[6].j_extent = 0; |
1526 | 1510 | ||
1527 | ret = talitos_submit(dev, desc, callback, areq); | 1511 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1528 | if (ret != -EINPROGRESS) { | 1512 | if (ret != -EINPROGRESS) { |
1529 | common_nonsnoop_unmap(dev, edesc, areq); | 1513 | common_nonsnoop_unmap(dev, edesc, areq); |
1530 | kfree(edesc); | 1514 | kfree(edesc); |
@@ -1556,7 +1540,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) | |||
1556 | /* set encrypt */ | 1540 | /* set encrypt */ |
1557 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | 1541 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; |
1558 | 1542 | ||
1559 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | 1543 | return common_nonsnoop(edesc, areq, ablkcipher_done); |
1560 | } | 1544 | } |
1561 | 1545 | ||
1562 | static int ablkcipher_decrypt(struct ablkcipher_request *areq) | 1546 | static int ablkcipher_decrypt(struct ablkcipher_request *areq) |
@@ -1572,7 +1556,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) | |||
1572 | 1556 | ||
1573 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | 1557 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; |
1574 | 1558 | ||
1575 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | 1559 | return common_nonsnoop(edesc, areq, ablkcipher_done); |
1576 | } | 1560 | } |
1577 | 1561 | ||
1578 | static void common_nonsnoop_hash_unmap(struct device *dev, | 1562 | static void common_nonsnoop_hash_unmap(struct device *dev, |
@@ -1703,7 +1687,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1703 | /* last DWORD empty */ | 1687 | /* last DWORD empty */ |
1704 | desc->ptr[6] = zero_entry; | 1688 | desc->ptr[6] = zero_entry; |
1705 | 1689 | ||
1706 | ret = talitos_submit(dev, desc, callback, areq); | 1690 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1707 | if (ret != -EINPROGRESS) { | 1691 | if (ret != -EINPROGRESS) { |
1708 | common_nonsnoop_hash_unmap(dev, edesc, areq); | 1692 | common_nonsnoop_hash_unmap(dev, edesc, areq); |
1709 | kfree(edesc); | 1693 | kfree(edesc); |
@@ -2244,6 +2228,7 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
2244 | struct crypto_alg *alg = tfm->__crt_alg; | 2228 | struct crypto_alg *alg = tfm->__crt_alg; |
2245 | struct talitos_crypto_alg *talitos_alg; | 2229 | struct talitos_crypto_alg *talitos_alg; |
2246 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 2230 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
2231 | struct talitos_private *priv; | ||
2247 | 2232 | ||
2248 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) | 2233 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) |
2249 | talitos_alg = container_of(__crypto_ahash_alg(alg), | 2234 | talitos_alg = container_of(__crypto_ahash_alg(alg), |
@@ -2256,9 +2241,17 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
2256 | /* update context with ptr to dev */ | 2241 | /* update context with ptr to dev */ |
2257 | ctx->dev = talitos_alg->dev; | 2242 | ctx->dev = talitos_alg->dev; |
2258 | 2243 | ||
2244 | /* assign SEC channel to tfm in round-robin fashion */ | ||
2245 | priv = dev_get_drvdata(ctx->dev); | ||
2246 | ctx->ch = atomic_inc_return(&priv->last_chan) & | ||
2247 | (priv->num_channels - 1); | ||
2248 | |||
2259 | /* copy descriptor header template value */ | 2249 | /* copy descriptor header template value */ |
2260 | ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; | 2250 | ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; |
2261 | 2251 | ||
2252 | /* select done notification */ | ||
2253 | ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY; | ||
2254 | |||
2262 | return 0; | 2255 | return 0; |
2263 | } | 2256 | } |
2264 | 2257 | ||
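talitos_cra_init() is where the two relocated pieces land: the round-robin channel choice (now made once per tfm instead of once per descriptor) and the DONE_NOTIFY header bit (constant for the tfm's lifetime, so set once rather than OR-ed on every submit). A hedged sketch with assumed stand-in types; demo_priv and demo_ctx mirror talitos_private and talitos_ctx, and the DESC_HDR_DONE_NOTIFY value below is a placeholder, not the driver's real macro:

#include <linux/atomic.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#ifndef DESC_HDR_DONE_NOTIFY
#define DESC_HDR_DONE_NOTIFY	cpu_to_be32(1 << 16)	/* placeholder value */
#endif

struct demo_priv {
	atomic_t last_chan;	/* shared round-robin cursor */
	int num_channels;	/* power of two, so the mask is a cheap modulo */
};

struct demo_ctx {
	int ch;			/* channel owned by this transform */
	__be32 desc_hdr_template;
};

static void demo_bind_channel(struct demo_ctx *ctx, struct demo_priv *priv)
{
	/* One channel per tfm: all of this transform's descriptors are
	 * serialized on a single fifo, and submit stays lock-light. */
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* Done notification never varies per request; bake it into the
	 * header template instead of setting it on every submission. */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
}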