-rw-r--r--  MAINTAINERS                                       |    6
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts             |    7
-rw-r--r--  arch/powerpc/boot/dts/kilauea.dts                 |    7
-rw-r--r--  arch/s390/crypto/sha.h                            |    6
-rw-r--r--  arch/s390/crypto/sha1_s390.c                      |   40
-rw-r--r--  arch/s390/crypto/sha256_s390.c                    |   40
-rw-r--r--  arch/s390/crypto/sha512_s390.c                    |   81
-rw-r--r--  arch/s390/crypto/sha_common.c                     |   20
-rw-r--r--  arch/x86/crypto/Makefile                          |    3
-rw-r--r--  arch/x86/crypto/aes-i586-asm_32.S                 |   18
-rw-r--r--  arch/x86/crypto/aes-x86_64-asm_64.S               |    6
-rw-r--r--  arch/x86/crypto/aes_glue.c                        |   20
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S                 |  896
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c                |  461
-rw-r--r--  arch/x86/include/asm/aes.h                        |   11
-rw-r--r--  arch/x86/include/asm/cpufeature.h                 |    1
-rw-r--r--  crypto/Kconfig                                    |   44
-rw-r--r--  crypto/Makefile                                   |    5
-rw-r--r--  crypto/ablkcipher.c                               |   19
-rw-r--r--  crypto/aead.c                                     |   16
-rw-r--r--  crypto/algboss.c                                  |   20
-rw-r--r--  crypto/ansi_cprng.c                               |   17
-rw-r--r--  crypto/api.c                                      |   17
-rw-r--r--  crypto/blkcipher.c                                |    2
-rw-r--r--  crypto/chainiv.c                                  |    3
-rw-r--r--  crypto/cryptd.c                                   |  237
-rw-r--r--  crypto/crypto_wq.c                                |   38
-rw-r--r--  crypto/gf128mul.c                                 |    2
-rw-r--r--  crypto/internal.h                                 |    6
-rw-r--r--  crypto/pcompress.c                                |   97
-rw-r--r--  crypto/sha256_generic.c                           |    2
-rw-r--r--  crypto/shash.c                                    |   20
-rw-r--r--  crypto/tcrypt.c                                   |    6
-rw-r--r--  crypto/testmgr.c                                  |  198
-rw-r--r--  crypto/testmgr.h                                  |  147
-rw-r--r--  crypto/zlib.c                                     |  378
-rw-r--r--  drivers/char/hw_random/Kconfig                    |   14
-rw-r--r--  drivers/char/hw_random/Makefile                   |    1
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c           |  151
-rw-r--r--  drivers/crypto/Kconfig                            |   15
-rw-r--r--  drivers/crypto/Makefile                           |    1
-rw-r--r--  drivers/crypto/amcc/Makefile                      |    2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c               |  293
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c              | 1310
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h              |  177
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h           |  284
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.c                |  108
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.h                |  243
-rw-r--r--  include/crypto/aes.h                              |    6
-rw-r--r--  include/crypto/compress.h                         |  145
-rw-r--r--  include/crypto/cryptd.h                           |   27
-rw-r--r--  include/crypto/crypto_wq.h                        |    7
-rw-r--r--  include/crypto/hash.h                             |    5
-rw-r--r--  include/crypto/internal/compress.h                |   28
-rw-r--r--  include/linux/crypto.h                            |    4
-rw-r--r--  include/linux/timeriomem-rng.h                    |   21
-rw-r--r--  lib/Kconfig                                       |    6
-rw-r--r--  lib/Makefile                                      |    2
-rw-r--r--  lib/nlattr.c (renamed from net/netlink/attr.c)    |   20
-rw-r--r--  net/Kconfig                                       |    1
-rw-r--r--  net/netlink/Makefile                              |    2
61 files changed, 5506 insertions, 264 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e92ed4a79fa7..1978fb205bf7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1269,6 +1269,12 @@ L: linux-crypto@vger.kernel.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
 S:	Maintained
 
+CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
+P:	Neil Horman
+M:	nhorman@tuxdriver.com
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+
 CS5535 Audio ALSA driver
 P:	Jaya Kumar
 M:	jayakumar.alsa@gmail.com
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 8b5ba8261a36..4447def69dc5 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -127,6 +127,13 @@
 		dcr-reg = <0x010 0x002>;
 	};
 
+	CRYPTO: crypto@180000 {
+		compatible = "amcc,ppc460ex-crypto", "amcc,ppc4xx-crypto";
+		reg = <4 0x00180000 0x80400>;
+		interrupt-parent = <&UIC0>;
+		interrupts = <0x1d 0x4>;
+	};
+
 	MAL0: mcmal {
 		compatible = "ibm,mcmal-460ex", "ibm,mcmal2";
 		dcr-reg = <0x180 0x062>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 2804444812e5..5e6b08ff6f67 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -97,6 +97,13 @@
 				0x6 0x4>; /* ECC SEC Error */
 	};
 
+	CRYPTO: crypto@ef700000 {
+		compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto";
+		reg = <0xef700000 0x80400>;
+		interrupt-parent = <&UIC0>;
+		interrupts = <0x17 0x2>;
+	};
+
 	MAL0: mcmal {
 		compatible = "ibm,mcmal-405ex", "ibm,mcmal2";
 		dcr-reg = <0x180 0x062>;
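
The two device-tree nodes added above are what the new drivers/crypto/amcc driver binds against through their "amcc,ppc4xx-crypto" compatible string. A minimal sketch of the usual 2.6.30-era of_platform match-table pattern follows; the driver's real table lives in drivers/crypto/amcc/crypto4xx_core.c, which is not shown in this excerpt, so the probe/remove names here are purely illustrative:

	#include <linux/of_platform.h>

	/* illustrative names; the authoritative table is in crypto4xx_core.c */
	static const struct of_device_id crypto4xx_match[] = {
		{ .compatible = "amcc,ppc4xx-crypto" },
		{ },
	};

	static struct of_platform_driver crypto4xx_driver = {
		.name		= "crypto4xx",
		.match_table	= crypto4xx_match,
		.probe		= crypto4xx_probe,	/* hypothetical */
		.remove		= crypto4xx_remove,	/* hypothetical */
	};
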
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index 1ceafa571eab..f4e9dc71675f 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -29,7 +29,9 @@ struct s390_sha_ctx {
 	int func;		/* KIMD function to use */
 };
 
-void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len);
-void s390_sha_final(struct crypto_tfm *tfm, u8 *out);
+struct shash_desc;
+
+int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len);
+int s390_sha_final(struct shash_desc *desc, u8 *out);
 
 #endif
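
With this change the s390 SHA helpers implement the synchronous hash (shash) interface instead of the old digest ops. A minimal sketch of how a caller drives that API, error handling trimmed; the descriptor must be sized with crypto_shash_descsize(), which for these algorithms returns sizeof(struct s390_sha_ctx):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
		struct shash_desc *desc;
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
			       GFP_KERNEL);
		if (!desc) {
			crypto_free_shash(tfm);
			return -ENOMEM;
		}
		desc->tfm = tfm;	/* selects sha1-s390 when its priority wins */
		err = crypto_shash_digest(desc, data, len, out);	/* init+update+final */
		kfree(desc);
		crypto_free_shash(tfm);
		return err;
	}
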
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index b3cb5a89b00d..e85ba348722a 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -23,17 +23,17 @@
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <crypto/sha.h>
 
 #include "crypt_s390.h"
 #include "sha.h"
 
-static void sha1_init(struct crypto_tfm *tfm)
+static int sha1_init(struct shash_desc *desc)
 {
-	struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
 	sctx->state[0] = SHA1_H0;
 	sctx->state[1] = SHA1_H1;
@@ -42,34 +42,36 @@ static void sha1_init(struct crypto_tfm *tfm)
 	sctx->state[4] = SHA1_H4;
 	sctx->count = 0;
 	sctx->func = KIMD_SHA_1;
+
+	return 0;
 }
 
-static struct crypto_alg alg = {
-	.cra_name	=	"sha1",
-	.cra_driver_name=	"sha1-s390",
-	.cra_priority	=	CRYPT_S390_PRIORITY,
-	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
-	.cra_blocksize	=	SHA1_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
-	.cra_module	=	THIS_MODULE,
-	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
-	.cra_u		=	{ .digest = {
-	.dia_digestsize	=	SHA1_DIGEST_SIZE,
-	.dia_init	=	sha1_init,
-	.dia_update	=	s390_sha_update,
-	.dia_final	=	s390_sha_final } }
+static struct shash_alg alg = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init		=	sha1_init,
+	.update		=	s390_sha_update,
+	.final		=	s390_sha_final,
+	.descsize	=	sizeof(struct s390_sha_ctx),
+	.base		=	{
+		.cra_name	=	"sha1",
+		.cra_driver_name=	"sha1-s390",
+		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA1_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
 };
 
 static int __init sha1_s390_init(void)
 {
 	if (!crypt_s390_func_available(KIMD_SHA_1))
 		return -EOPNOTSUPP;
-	return crypto_register_alg(&alg);
+	return crypto_register_shash(&alg);
 }
 
 static void __exit sha1_s390_fini(void)
 {
-	crypto_unregister_alg(&alg);
+	crypto_unregister_shash(&alg);
 }
 
 module_init(sha1_s390_init);
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 19c03fb6ba7e..f9fefc569632 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -16,17 +16,17 @@
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <crypto/sha.h>
 
 #include "crypt_s390.h"
 #include "sha.h"
 
-static void sha256_init(struct crypto_tfm *tfm)
+static int sha256_init(struct shash_desc *desc)
 {
-	struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
+	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
 	sctx->state[0] = SHA256_H0;
 	sctx->state[1] = SHA256_H1;
@@ -38,22 +38,24 @@ static void sha256_init(struct crypto_tfm *tfm)
 	sctx->state[7] = SHA256_H7;
 	sctx->count = 0;
 	sctx->func = KIMD_SHA_256;
+
+	return 0;
 }
 
-static struct crypto_alg alg = {
-	.cra_name	=	"sha256",
-	.cra_driver_name =	"sha256-s390",
-	.cra_priority	=	CRYPT_S390_PRIORITY,
-	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
-	.cra_blocksize	=	SHA256_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
-	.cra_module	=	THIS_MODULE,
-	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
-	.cra_u		=	{ .digest = {
-	.dia_digestsize	=	SHA256_DIGEST_SIZE,
-	.dia_init	=	sha256_init,
-	.dia_update	=	s390_sha_update,
-	.dia_final	=	s390_sha_final } }
+static struct shash_alg alg = {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	sha256_init,
+	.update		=	s390_sha_update,
+	.final		=	s390_sha_final,
+	.descsize	=	sizeof(struct s390_sha_ctx),
+	.base		=	{
+		.cra_name	=	"sha256",
+		.cra_driver_name=	"sha256-s390",
+		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA256_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
 };
 
 static int sha256_s390_init(void)
@@ -61,12 +63,12 @@ static int sha256_s390_init(void)
 	if (!crypt_s390_func_available(KIMD_SHA_256))
 		return -EOPNOTSUPP;
 
-	return crypto_register_alg(&alg);
+	return crypto_register_shash(&alg);
 }
 
 static void __exit sha256_s390_fini(void)
 {
-	crypto_unregister_alg(&alg);
+	crypto_unregister_shash(&alg);
 }
 
 module_init(sha256_s390_init);
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 23c7861f6aeb..83192bfc8048 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -12,16 +12,16 @@
  * any later version.
  *
  */
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 
 #include "sha.h"
 #include "crypt_s390.h"
 
-static void sha512_init(struct crypto_tfm *tfm)
+static int sha512_init(struct shash_desc *desc)
 {
-	struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
 	*(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL;
 	*(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL;
@@ -33,29 +33,31 @@ static void sha512_init(struct crypto_tfm *tfm)
 	*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
 	ctx->count = 0;
 	ctx->func = KIMD_SHA_512;
+
+	return 0;
 }
 
-static struct crypto_alg sha512_alg = {
-	.cra_name	=	"sha512",
-	.cra_driver_name =	"sha512-s390",
-	.cra_priority	=	CRYPT_S390_PRIORITY,
-	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
-	.cra_blocksize	=	SHA512_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
-	.cra_module	=	THIS_MODULE,
-	.cra_list	=	LIST_HEAD_INIT(sha512_alg.cra_list),
-	.cra_u		=	{ .digest = {
-	.dia_digestsize	=	SHA512_DIGEST_SIZE,
-	.dia_init	=	sha512_init,
-	.dia_update	=	s390_sha_update,
-	.dia_final	=	s390_sha_final } }
+static struct shash_alg sha512_alg = {
+	.digestsize	=	SHA512_DIGEST_SIZE,
+	.init		=	sha512_init,
+	.update		=	s390_sha_update,
+	.final		=	s390_sha_final,
+	.descsize	=	sizeof(struct s390_sha_ctx),
+	.base		=	{
+		.cra_name	=	"sha512",
+		.cra_driver_name=	"sha512-s390",
+		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA512_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
 };
 
 MODULE_ALIAS("sha512");
 
-static void sha384_init(struct crypto_tfm *tfm)
+static int sha384_init(struct shash_desc *desc)
 {
-	struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
 	*(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL;
 	*(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL;
@@ -67,22 +69,25 @@ static void sha384_init(struct crypto_tfm *tfm)
 	*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
 	ctx->count = 0;
 	ctx->func = KIMD_SHA_512;
+
+	return 0;
 }
 
-static struct crypto_alg sha384_alg = {
-	.cra_name	=	"sha384",
-	.cra_driver_name =	"sha384-s390",
-	.cra_priority	=	CRYPT_S390_PRIORITY,
-	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
-	.cra_blocksize	=	SHA384_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
-	.cra_module	=	THIS_MODULE,
-	.cra_list	=	LIST_HEAD_INIT(sha384_alg.cra_list),
-	.cra_u		=	{ .digest = {
-	.dia_digestsize	=	SHA384_DIGEST_SIZE,
-	.dia_init	=	sha384_init,
-	.dia_update	=	s390_sha_update,
-	.dia_final	=	s390_sha_final } }
+static struct shash_alg sha384_alg = {
+	.digestsize	=	SHA384_DIGEST_SIZE,
+	.init		=	sha384_init,
+	.update		=	s390_sha_update,
+	.final		=	s390_sha_final,
+	.descsize	=	sizeof(struct s390_sha_ctx),
+	.base		=	{
+		.cra_name	=	"sha384",
+		.cra_driver_name=	"sha384-s390",
+		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA384_BLOCK_SIZE,
+		.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
+		.cra_module	=	THIS_MODULE,
+	}
 };
 
 MODULE_ALIAS("sha384");
@@ -93,18 +98,18 @@ static int __init init(void)
 
 	if (!crypt_s390_func_available(KIMD_SHA_512))
 		return -EOPNOTSUPP;
-	if ((ret = crypto_register_alg(&sha512_alg)) < 0)
+	if ((ret = crypto_register_shash(&sha512_alg)) < 0)
 		goto out;
-	if ((ret = crypto_register_alg(&sha384_alg)) < 0)
-		crypto_unregister_alg(&sha512_alg);
+	if ((ret = crypto_register_shash(&sha384_alg)) < 0)
+		crypto_unregister_shash(&sha512_alg);
 out:
 	return ret;
 }
 
 static void __exit fini(void)
 {
-	crypto_unregister_alg(&sha512_alg);
-	crypto_unregister_alg(&sha384_alg);
+	crypto_unregister_shash(&sha512_alg);
+	crypto_unregister_shash(&sha384_alg);
 }
 
 module_init(init);
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 9d6eb8c3d37e..7903ec47e6b9 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -13,14 +13,14 @@
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
 #include "sha.h"
 #include "crypt_s390.h"
 
-void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
+int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 {
-	struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
-	unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
 	unsigned int index;
 	int ret;
 
@@ -51,13 +51,15 @@ void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
 store:
 	if (len)
 		memcpy(ctx->buf + index , data, len);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_sha_update);
 
-void s390_sha_final(struct crypto_tfm *tfm, u8 *out)
+int s390_sha_final(struct shash_desc *desc, u8 *out)
 {
-	struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
-	unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
 	u64 bits;
 	unsigned int index, end, plen;
 	int ret;
@@ -87,9 +89,11 @@ void s390_sha_final(struct crypto_tfm *tfm, u8 *out)
 	BUG_ON(ret != end);
 
 	/* copy digest to out */
-	memcpy(out, ctx->state, crypto_hash_digestsize(crypto_hash_cast(tfm)));
+	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
 	/* wipe context */
 	memset(ctx, 0, sizeof *ctx);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(s390_sha_final);
 
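
The update path above follows the classic block-buffering pattern: the CPACF KIMD instruction only digests whole blocks, so sub-block tails are staged in ctx->buf and flushed once a full block accumulates. A self-contained C sketch of that control flow (plain userspace C; process_blocks() stands in for the KIMD call, and the real function's full body is only partially visible in these hunks):

	#include <string.h>

	#define BSIZE 64			/* SHA-1/SHA-256 block size */

	struct hctx {
		unsigned char buf[BSIZE];
		unsigned long long count;
	};

	/* stand-in for the hardware digest of n bytes (n % BSIZE == 0) */
	static void process_blocks(struct hctx *c, const unsigned char *p,
				   unsigned int n);

	static void sha_update_sketch(struct hctx *ctx, const unsigned char *data,
				      unsigned int len)
	{
		unsigned int index = ctx->count & (BSIZE - 1);	/* buffered bytes */

		ctx->count += len;
		if (index + len < BSIZE)
			goto store;			/* still under one block */

		if (index) {				/* top up the staged block */
			memcpy(ctx->buf + index, data, BSIZE - index);
			process_blocks(ctx, ctx->buf, BSIZE);
			data += BSIZE - index;
			len -= BSIZE - index;
		}
		if (len >= BSIZE) {			/* whole blocks from source */
			process_blocks(ctx, data, len & ~(BSIZE - 1));
			data += len & ~(BSIZE - 1);
			len &= BSIZE - 1;
		}
	store:
		if (len)
			memcpy(ctx->buf, data, len);	/* keep the tail */
	}
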
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 903de4aa5094..ebe7deedd5b4 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 
@@ -19,3 +20,5 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
index e41b147f4509..b949ec2f9af4 100644
--- a/arch/x86/crypto/aes-i586-asm_32.S
+++ b/arch/x86/crypto/aes-i586-asm_32.S
@@ -41,14 +41,14 @@
 #define tlen 1024   // length of each of 4 'xor' arrays (256 32-bit words)
 
 /* offsets to parameters with one register pushed onto stack */
-#define tfm 8
+#define ctx 8
 #define out_blk 12
 #define in_blk 16
 
-/* offsets in crypto_tfm structure */
-#define klen (crypto_tfm_ctx_offset + 0)
-#define ekey (crypto_tfm_ctx_offset + 4)
-#define dkey (crypto_tfm_ctx_offset + 244)
+/* offsets in crypto_aes_ctx structure */
+#define klen (480)
+#define ekey (0)
+#define dkey (240)
 
 // register mapping for encrypt and decrypt subroutines
 
@@ -217,7 +217,7 @@
 	do_col (table, r5,r0,r1,r4, r2,r3);	/* idx=r5 */
 
 // AES (Rijndael) Encryption Subroutine
-/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
+/* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
 
 .global  aes_enc_blk
 
@@ -228,7 +228,7 @@
 
 aes_enc_blk:
 	push    %ebp
-	mov     tfm(%esp),%ebp
+	mov     ctx(%esp),%ebp
 
 // CAUTION: the order and the values used in these assigns
 // rely on the register mappings
@@ -292,7 +292,7 @@ aes_enc_blk:
 	ret
 
 // AES (Rijndael) Decryption Subroutine
-/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
+/* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */
 
 .global  aes_dec_blk
 
@@ -303,7 +303,7 @@ aes_enc_blk:
 
 aes_dec_blk:
 	push    %ebp
-	mov     tfm(%esp),%ebp
+	mov     ctx(%esp),%ebp
 
 // CAUTION: the order and the values used in these assigns
 // rely on the register mappings
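
The new klen/ekey/dkey constants describe struct crypto_aes_ctx itself rather than an offset inside crypto_tfm, which is what lets the assembler take a bare context pointer. A sketch of the layout these offsets assume (include/crypto/aes.h, touched in this same series, carries the authoritative definition; 60 u32 words per schedule = 240 bytes):

	struct crypto_aes_ctx {
		u32 key_enc[60];	/* offset   0 -> ekey */
		u32 key_dec[60];	/* offset 240 -> dkey */
		u32 key_length;		/* offset 480 -> klen */
	};
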
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
index a120f526c3df..5b577d5a059b 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -17,8 +17,6 @@
 
 #include <asm/asm-offsets.h>
 
-#define BASE crypto_tfm_ctx_offset
-
 #define R1	%rax
 #define R1E	%eax
 #define R1X	%ax
@@ -56,13 +54,13 @@
 	.align	8;			\
 FUNC:	movq	r1,r2;			\
 	movq	r3,r4;			\
-	leaq	BASE+KEY+48+4(r8),r9;	\
+	leaq	KEY+48(r8),r9;		\
 	movq	r10,r11;		\
 	movl	(r7),r5 ## E;		\
 	movl	4(r7),r1 ## E;		\
 	movl	8(r7),r6 ## E;		\
 	movl	12(r7),r7 ## E;		\
-	movl	BASE+0(r8),r10 ## E;	\
+	movl	480(r8),r10 ## E;	\
 	xorl	-48(r9),r5 ## E;	\
 	xorl	-44(r9),r1 ## E;	\
 	xorl	-40(r9),r6 ## E;	\
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
index 71f457827116..49ae9fe32b22 100644
--- a/arch/x86/crypto/aes_glue.c
+++ b/arch/x86/crypto/aes_glue.c
@@ -5,17 +5,29 @@
 
 #include <crypto/aes.h>
 
-asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+{
+	aes_enc_blk(ctx, dst, src);
+}
+EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86);
+
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+{
+	aes_dec_blk(ctx, dst, src);
+}
+EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-	aes_enc_blk(tfm, dst, src);
+	aes_enc_blk(crypto_tfm_ctx(tfm), dst, src);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-	aes_dec_blk(tfm, dst, src);
+	aes_dec_blk(crypto_tfm_ctx(tfm), dst, src);
 }
 
 static struct crypto_alg aes_alg = {
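
Switching the assembler entry points from crypto_tfm to crypto_aes_ctx, together with the new crypto_aes_{en,de}crypt_x86 exports, is what lets other modules reuse this scalar implementation without holding a crypto_tfm. The AES-NI glue code later in this diff does exactly that as its no-FPU fallback; the shape of such a call site, sketched (kernel_fpu_using() is the helper defined in aesni-intel_glue.c below):

	if (kernel_fpu_using())
		crypto_aes_encrypt_x86(ctx, dst, src);	/* scalar asm path */
	else {
		kernel_fpu_begin();			/* claim SSE state */
		aesni_enc(ctx, dst, src);		/* AES-NI path */
		kernel_fpu_end();
	}
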
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
new file mode 100644
index 000000000000..caba99601703
--- /dev/null
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -0,0 +1,896 @@
+/*
+ * Implement AES algorithm in Intel AES-NI instructions.
+ *
+ * The white paper of AES-NI instructions can be downloaded from:
+ *   http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf
+ *
+ * Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *            Vinodh Gopal <vinodh.gopal@intel.com>
+ *            Kahraman Akdemir
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.text
+
+#define STATE1	%xmm0
+#define STATE2	%xmm4
+#define STATE3	%xmm5
+#define STATE4	%xmm6
+#define STATE	STATE1
+#define IN1	%xmm1
+#define IN2	%xmm7
+#define IN3	%xmm8
+#define IN4	%xmm9
+#define IN	IN1
+#define KEY	%xmm2
+#define IV	%xmm3
+
+#define KEYP	%rdi
+#define OUTP	%rsi
+#define INP	%rdx
+#define LEN	%rcx
+#define IVP	%r8
+#define KLEN	%r9d
+#define T1	%r10
+#define TKEYP	T1
+#define T2	%r11
+
+_key_expansion_128:
+_key_expansion_256a:
+	pshufd $0b11111111, %xmm1, %xmm1
+	shufps $0b00010000, %xmm0, %xmm4
+	pxor %xmm4, %xmm0
+	shufps $0b10001100, %xmm0, %xmm4
+	pxor %xmm4, %xmm0
+	pxor %xmm1, %xmm0
+	movaps %xmm0, (%rcx)
+	add $0x10, %rcx
+	ret
+
+_key_expansion_192a:
+	pshufd $0b01010101, %xmm1, %xmm1
+	shufps $0b00010000, %xmm0, %xmm4
+	pxor %xmm4, %xmm0
+	shufps $0b10001100, %xmm0, %xmm4
+	pxor %xmm4, %xmm0
+	pxor %xmm1, %xmm0
+
+	movaps %xmm2, %xmm5
+	movaps %xmm2, %xmm6
+	pslldq $4, %xmm5
+	pshufd $0b11111111, %xmm0, %xmm3
+	pxor %xmm3, %xmm2
+	pxor %xmm5, %xmm2
+
+	movaps %xmm0, %xmm1
+	shufps $0b01000100, %xmm0, %xmm6
+	movaps %xmm6, (%rcx)
+	shufps $0b01001110, %xmm2, %xmm1
+	movaps %xmm1, 16(%rcx)
+	add $0x20, %rcx
+	ret
+
+_key_expansion_192b:
+	pshufd $0b01010101, %xmm1, %xmm1
+	shufps $0b00010000, %xmm0, %xmm4
+	pxor %xmm4, %xmm0
+	shufps $0b10001100, %xmm0, %xmm4
+	pxor %xmm4, %xmm0
+	pxor %xmm1, %xmm0
+
+	movaps %xmm2, %xmm5
+	pslldq $4, %xmm5
+	pshufd $0b11111111, %xmm0, %xmm3
+	pxor %xmm3, %xmm2
+	pxor %xmm5, %xmm2
+
+	movaps %xmm0, (%rcx)
+	add $0x10, %rcx
+	ret
+
+_key_expansion_256b:
+	pshufd $0b10101010, %xmm1, %xmm1
+	shufps $0b00010000, %xmm2, %xmm4
+	pxor %xmm4, %xmm2
+	shufps $0b10001100, %xmm2, %xmm4
+	pxor %xmm4, %xmm2
+	pxor %xmm1, %xmm2
+	movaps %xmm2, (%rcx)
+	add $0x10, %rcx
+	ret
+
+/*
+ * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ *                   unsigned int key_len)
+ */
+ENTRY(aesni_set_key)
+	movups (%rsi), %xmm0		# user key (first 16 bytes)
+	movaps %xmm0, (%rdi)
+	lea 0x10(%rdi), %rcx		# key addr
+	movl %edx, 480(%rdi)
+	pxor %xmm4, %xmm4		# xmm4 is assumed 0 in _key_expansion_x
+	cmp $24, %dl
+	jb .Lenc_key128
+	je .Lenc_key192
+	movups 0x10(%rsi), %xmm2	# other user key
+	movaps %xmm2, (%rcx)
+	add $0x10, %rcx
+	# aeskeygenassist $0x1, %xmm2, %xmm1	# round 1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01
+	call _key_expansion_256a
+	# aeskeygenassist $0x1, %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01
+	call _key_expansion_256b
+	# aeskeygenassist $0x2, %xmm2, %xmm1	# round 2
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02
+	call _key_expansion_256a
+	# aeskeygenassist $0x2, %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02
+	call _key_expansion_256b
+	# aeskeygenassist $0x4, %xmm2, %xmm1	# round 3
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04
+	call _key_expansion_256a
+	# aeskeygenassist $0x4, %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04
+	call _key_expansion_256b
+	# aeskeygenassist $0x8, %xmm2, %xmm1	# round 4
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08
+	call _key_expansion_256a
+	# aeskeygenassist $0x8, %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08
+	call _key_expansion_256b
+	# aeskeygenassist $0x10, %xmm2, %xmm1	# round 5
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10
+	call _key_expansion_256a
+	# aeskeygenassist $0x10, %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10
+	call _key_expansion_256b
+	# aeskeygenassist $0x20, %xmm2, %xmm1	# round 6
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20
+	call _key_expansion_256a
+	# aeskeygenassist $0x20, %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20
+	call _key_expansion_256b
+	# aeskeygenassist $0x40, %xmm2, %xmm1	# round 7
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40
+	call _key_expansion_256a
+	jmp .Ldec_key
+.Lenc_key192:
+	movq 0x10(%rsi), %xmm2		# other user key
+	# aeskeygenassist $0x1, %xmm2, %xmm1	# round 1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01
+	call _key_expansion_192a
+	# aeskeygenassist $0x2, %xmm2, %xmm1	# round 2
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02
+	call _key_expansion_192b
+	# aeskeygenassist $0x4, %xmm2, %xmm1	# round 3
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04
+	call _key_expansion_192a
+	# aeskeygenassist $0x8, %xmm2, %xmm1	# round 4
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08
+	call _key_expansion_192b
+	# aeskeygenassist $0x10, %xmm2, %xmm1	# round 5
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10
+	call _key_expansion_192a
+	# aeskeygenassist $0x20, %xmm2, %xmm1	# round 6
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20
+	call _key_expansion_192b
+	# aeskeygenassist $0x40, %xmm2, %xmm1	# round 7
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40
+	call _key_expansion_192a
+	# aeskeygenassist $0x80, %xmm2, %xmm1	# round 8
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x80
+	call _key_expansion_192b
+	jmp .Ldec_key
+.Lenc_key128:
+	# aeskeygenassist $0x1, %xmm0, %xmm1	# round 1
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01
+	call _key_expansion_128
+	# aeskeygenassist $0x2, %xmm0, %xmm1	# round 2
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02
+	call _key_expansion_128
+	# aeskeygenassist $0x4, %xmm0, %xmm1	# round 3
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04
+	call _key_expansion_128
+	# aeskeygenassist $0x8, %xmm0, %xmm1	# round 4
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08
+	call _key_expansion_128
+	# aeskeygenassist $0x10, %xmm0, %xmm1	# round 5
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10
+	call _key_expansion_128
+	# aeskeygenassist $0x20, %xmm0, %xmm1	# round 6
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20
+	call _key_expansion_128
+	# aeskeygenassist $0x40, %xmm0, %xmm1	# round 7
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x40
+	call _key_expansion_128
+	# aeskeygenassist $0x80, %xmm0, %xmm1	# round 8
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x80
+	call _key_expansion_128
+	# aeskeygenassist $0x1b, %xmm0, %xmm1	# round 9
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x1b
+	call _key_expansion_128
+	# aeskeygenassist $0x36, %xmm0, %xmm1	# round 10
+	.byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x36
+	call _key_expansion_128
+.Ldec_key:
+	sub $0x10, %rcx
+	movaps (%rdi), %xmm0
+	movaps (%rcx), %xmm1
+	movaps %xmm0, 240(%rcx)
+	movaps %xmm1, 240(%rdi)
+	add $0x10, %rdi
+	lea 240-16(%rcx), %rsi
+.align 4
+.Ldec_key_loop:
+	movaps (%rdi), %xmm0
+	# aesimc %xmm0, %xmm1
+	.byte 0x66, 0x0f, 0x38, 0xdb, 0xc8
+	movaps %xmm1, (%rsi)
+	add $0x10, %rdi
+	sub $0x10, %rsi
+	cmp %rcx, %rdi
+	jb .Ldec_key_loop
+	xor %rax, %rax
+	ret
+
+/*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ */
+ENTRY(aesni_enc)
+	movl 480(KEYP), KLEN		# key length
+	movups (INP), STATE		# input
+	call _aesni_enc1
+	movups STATE, (OUTP)		# output
+	ret
+
+/*
+ * _aesni_enc1:	internal ABI
+ * input:
+ *	KEYP:		key struct pointer
+ *	KLEN:		round count
+ *	STATE:		initial state (input)
+ * output:
+ *	STATE:		final state (output)
+ * changed:
+ *	KEY
+ *	TKEYP (T1)
+ */
+_aesni_enc1:
+	movaps (KEYP), KEY		# key
+	mov KEYP, TKEYP
+	pxor KEY, STATE			# round 0
+	add $0x30, TKEYP
+	cmp $24, KLEN
+	jb .Lenc128
+	lea 0x20(TKEYP), TKEYP
+	je .Lenc192
+	add $0x20, TKEYP
+	movaps -0x60(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps -0x50(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+.align 4
+.Lenc192:
+	movaps -0x40(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps -0x30(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+.align 4
+.Lenc128:
+	movaps -0x20(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps -0x10(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps (TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x10(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x20(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x30(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x40(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x50(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x60(TKEYP), KEY
+	# aesenc KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	movaps 0x70(TKEYP), KEY
+	# aesenclast KEY, STATE		# last round
+	.byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
+	ret
+
+/*
+ * _aesni_enc4: internal ABI
+ * input:
+ *	KEYP:		key struct pointer
+ *	KLEN:		round count
+ *	STATE1:		initial state (input)
+ *	STATE2
+ *	STATE3
+ *	STATE4
+ * output:
+ *	STATE1:		final state (output)
+ *	STATE2
+ *	STATE3
+ *	STATE4
+ * changed:
+ *	KEY
+ *	TKEYP (T1)
+ */
+_aesni_enc4:
+	movaps (KEYP), KEY		# key
+	mov KEYP, TKEYP
+	pxor KEY, STATE1		# round 0
+	pxor KEY, STATE2
+	pxor KEY, STATE3
+	pxor KEY, STATE4
+	add $0x30, TKEYP
+	cmp $24, KLEN
+	jb .L4enc128
+	lea 0x20(TKEYP), TKEYP
+	je .L4enc192
+	add $0x20, TKEYP
+	movaps -0x60(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps -0x50(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+#.align 4
+.L4enc192:
+	movaps -0x40(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps -0x30(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+#.align 4
+.L4enc128:
+	movaps -0x20(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps -0x10(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps (TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x10(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x20(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x30(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x40(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x50(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x60(TKEYP), KEY
+	# aesenc KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xc2
+	# aesenc KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xe2
+	# aesenc KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xea
+	# aesenc KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdc, 0xf2
+	movaps 0x70(TKEYP), KEY
+	# aesenclast KEY, STATE1	# last round
+	.byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
+	# aesenclast KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdd, 0xe2
+	# aesenclast KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdd, 0xea
+	# aesenclast KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
+	ret
+
+/*
+ * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ */
+ENTRY(aesni_dec)
+	mov 480(KEYP), KLEN		# key length
+	add $240, KEYP
+	movups (INP), STATE		# input
+	call _aesni_dec1
+	movups STATE, (OUTP)		# output
+	ret
+
+/*
+ * _aesni_dec1: internal ABI
+ * input:
+ *	KEYP:		key struct pointer
+ *	KLEN:		key length
+ *	STATE:		initial state (input)
+ * output:
+ *	STATE:		final state (output)
+ * changed:
+ *	KEY
+ *	TKEYP (T1)
+ */
+_aesni_dec1:
+	movaps (KEYP), KEY		# key
+	mov KEYP, TKEYP
+	pxor KEY, STATE			# round 0
+	add $0x30, TKEYP
+	cmp $24, KLEN
+	jb .Ldec128
+	lea 0x20(TKEYP), TKEYP
+	je .Ldec192
+	add $0x20, TKEYP
+	movaps -0x60(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps -0x50(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+.align 4
+.Ldec192:
+	movaps -0x40(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps -0x30(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+.align 4
+.Ldec128:
+	movaps -0x20(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps -0x10(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps (TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x10(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x20(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x30(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x40(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x50(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x60(TKEYP), KEY
+	# aesdec KEY, STATE
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	movaps 0x70(TKEYP), KEY
+	# aesdeclast KEY, STATE		# last round
+	.byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
+	ret
+
+/*
+ * _aesni_dec4: internal ABI
+ * input:
+ *	KEYP:		key struct pointer
+ *	KLEN:		key length
+ *	STATE1:		initial state (input)
+ *	STATE2
+ *	STATE3
+ *	STATE4
+ * output:
+ *	STATE1:		final state (output)
+ *	STATE2
+ *	STATE3
+ *	STATE4
+ * changed:
+ *	KEY
+ *	TKEYP (T1)
+ */
+_aesni_dec4:
+	movaps (KEYP), KEY		# key
+	mov KEYP, TKEYP
+	pxor KEY, STATE1		# round 0
+	pxor KEY, STATE2
+	pxor KEY, STATE3
+	pxor KEY, STATE4
+	add $0x30, TKEYP
+	cmp $24, KLEN
+	jb .L4dec128
+	lea 0x20(TKEYP), TKEYP
+	je .L4dec192
+	add $0x20, TKEYP
+	movaps -0x60(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps -0x50(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+.align 4
+.L4dec192:
+	movaps -0x40(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps -0x30(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+.align 4
+.L4dec128:
+	movaps -0x20(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps -0x10(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps (TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x10(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x20(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x30(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x40(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x50(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x60(TKEYP), KEY
+	# aesdec KEY, STATE1
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xc2
+	# aesdec KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xe2
+	# aesdec KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xea
+	# aesdec KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xde, 0xf2
+	movaps 0x70(TKEYP), KEY
+	# aesdeclast KEY, STATE1	# last round
+	.byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
+	# aesdeclast KEY, STATE2
+	.byte 0x66, 0x0f, 0x38, 0xdf, 0xe2
+	# aesdeclast KEY, STATE3
+	.byte 0x66, 0x0f, 0x38, 0xdf, 0xea
+	# aesdeclast KEY, STATE4
+	.byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
+	ret
+
+/*
+ * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ *		      size_t len)
+ */
+ENTRY(aesni_ecb_enc)
+	test LEN, LEN			# check length
+	jz .Lecb_enc_ret
+	mov 480(KEYP), KLEN
+	cmp $16, LEN
+	jb .Lecb_enc_ret
+	cmp $64, LEN
+	jb .Lecb_enc_loop1
+.align 4
+.Lecb_enc_loop4:
+	movups (INP), STATE1
+	movups 0x10(INP), STATE2
+	movups 0x20(INP), STATE3
+	movups 0x30(INP), STATE4
+	call _aesni_enc4
+	movups STATE1, (OUTP)
+	movups STATE2, 0x10(OUTP)
+	movups STATE3, 0x20(OUTP)
+	movups STATE4, 0x30(OUTP)
+	sub $64, LEN
+	add $64, INP
+	add $64, OUTP
+	cmp $64, LEN
+	jge .Lecb_enc_loop4
+	cmp $16, LEN
+	jb .Lecb_enc_ret
+.align 4
+.Lecb_enc_loop1:
+	movups (INP), STATE1
+	call _aesni_enc1
+	movups STATE1, (OUTP)
+	sub $16, LEN
+	add $16, INP
+	add $16, OUTP
+	cmp $16, LEN
+	jge .Lecb_enc_loop1
+.Lecb_enc_ret:
+	ret
+
+/*
+ * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ *		      size_t len);
+ */
+ENTRY(aesni_ecb_dec)
+	test LEN, LEN
+	jz .Lecb_dec_ret
+	mov 480(KEYP), KLEN
+	add $240, KEYP
+	cmp $16, LEN
+	jb .Lecb_dec_ret
+	cmp $64, LEN
+	jb .Lecb_dec_loop1
+.align 4
+.Lecb_dec_loop4:
+	movups (INP), STATE1
+	movups 0x10(INP), STATE2
+	movups 0x20(INP), STATE3
+	movups 0x30(INP), STATE4
+	call _aesni_dec4
+	movups STATE1, (OUTP)
+	movups STATE2, 0x10(OUTP)
+	movups STATE3, 0x20(OUTP)
+	movups STATE4, 0x30(OUTP)
+	sub $64, LEN
+	add $64, INP
+	add $64, OUTP
+	cmp $64, LEN
+	jge .Lecb_dec_loop4
+	cmp $16, LEN
+	jb .Lecb_dec_ret
+.align 4
+.Lecb_dec_loop1:
+	movups (INP), STATE1
+	call _aesni_dec1
+	movups STATE1, (OUTP)
+	sub $16, LEN
+	add $16, INP
+	add $16, OUTP
+	cmp $16, LEN
+	jge .Lecb_dec_loop1
+.Lecb_dec_ret:
+	ret
+
+/*
+ * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ *		      size_t len, u8 *iv)
+ */
+ENTRY(aesni_cbc_enc)
+	cmp $16, LEN
+	jb .Lcbc_enc_ret
+	mov 480(KEYP), KLEN
+	movups (IVP), STATE		# load iv as initial state
+.align 4
+.Lcbc_enc_loop:
+	movups (INP), IN		# load input
+	pxor IN, STATE
+	call _aesni_enc1
+	movups STATE, (OUTP)		# store output
+	sub $16, LEN
+	add $16, INP
+	add $16, OUTP
+	cmp $16, LEN
+	jge .Lcbc_enc_loop
+	movups STATE, (IVP)
+.Lcbc_enc_ret:
+	ret
+
+/*
+ * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ *		      size_t len, u8 *iv)
+ */
+ENTRY(aesni_cbc_dec)
+	cmp $16, LEN
+	jb .Lcbc_dec_ret
+	mov 480(KEYP), KLEN
+	add $240, KEYP
+	movups (IVP), IV
+	cmp $64, LEN
+	jb .Lcbc_dec_loop1
+.align 4
+.Lcbc_dec_loop4:
+	movups (INP), IN1
+	movaps IN1, STATE1
+	movups 0x10(INP), IN2
+	movaps IN2, STATE2
+	movups 0x20(INP), IN3
+	movaps IN3, STATE3
+	movups 0x30(INP), IN4
+	movaps IN4, STATE4
+	call _aesni_dec4
+	pxor IV, STATE1
+	pxor IN1, STATE2
+	pxor IN2, STATE3
+	pxor IN3, STATE4
+	movaps IN4, IV
+	movups STATE1, (OUTP)
+	movups STATE2, 0x10(OUTP)
+	movups STATE3, 0x20(OUTP)
+	movups STATE4, 0x30(OUTP)
+	sub $64, LEN
+	add $64, INP
+	add $64, OUTP
+	cmp $64, LEN
+	jge .Lcbc_dec_loop4
+	cmp $16, LEN
+	jb .Lcbc_dec_ret
+.align 4
+.Lcbc_dec_loop1:
+	movups (INP), IN
+	movaps IN, STATE
+	call _aesni_dec1
+	pxor IV, STATE
+	movups STATE, (OUTP)
+	movaps IN, IV
+	sub $16, LEN
+	add $16, INP
+	add $16, OUTP
+	cmp $16, LEN
+	jge .Lcbc_dec_loop1
+	movups IV, (IVP)
+.Lcbc_dec_ret:
+	ret
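
One non-obvious part of aesni_set_key above is the .Ldec_key loop: the decryption schedule is derived from the encryption schedule by reversing the round-key order and running the middle keys through aesimc (InvMixColumns), with the first and last keys swapped straight across. The same computation in plain C, as a sketch; aes_imc() stands in for the aesimc instruction, and nrounds is 10/12/14 for 128/192/256-bit keys:

	#include <string.h>

	/* stand-in for the aesimc instruction (InvMixColumns of one key) */
	static void aes_imc(const unsigned char in[16], unsigned char out[16]);

	static void build_dec_schedule(const unsigned char *ekey,
				       unsigned char *dkey, int nrounds)
	{
		int i;

		/* first and last round keys swap positions unchanged */
		memcpy(dkey + 16 * nrounds, ekey, 16);
		memcpy(dkey, ekey + 16 * nrounds, 16);
		/* middle keys: reversed order plus InvMixColumns */
		for (i = 1; i < nrounds; i++)
			aes_imc(ekey + 16 * i, dkey + 16 * (nrounds - i));
	}
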
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
new file mode 100644
index 000000000000..02af0af65497
--- /dev/null
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -0,0 +1,461 @@
+/*
+ * Support for Intel AES-NI instructions. This file contains glue
+ * code, the real AES implementation is in aesni-intel_asm.S.
+ *
+ * Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/hardirq.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/cryptd.h>
+#include <asm/i387.h>
+#include <asm/aes.h>
+
+struct async_aes_ctx {
+	struct cryptd_ablkcipher *cryptd_tfm;
+};
+
+#define AESNI_ALIGN	16
+#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
+
+asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+			     unsigned int key_len);
+asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
+			  const u8 *in);
+asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
+			  const u8 *in);
+asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len);
+asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len);
+asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
+			      const u8 *in, unsigned int len, u8 *iv);
+
+static inline int kernel_fpu_using(void)
+{
+	if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
+		return 1;
+	return 0;
+}
+
+static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
+{
+	unsigned long addr = (unsigned long)raw_ctx;
+	unsigned long align = AESNI_ALIGN;
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+	return (struct crypto_aes_ctx *)ALIGN(addr, align);
+}
+
+static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
+			      const u8 *in_key, unsigned int key_len)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
+	u32 *flags = &tfm->crt_flags;
+	int err;
+
+	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
+	    key_len != AES_KEYSIZE_256) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	if (kernel_fpu_using())
+		err = crypto_aes_expand_key(ctx, in_key, key_len);
+	else {
+		kernel_fpu_begin();
+		err = aesni_set_key(ctx, in_key, key_len);
+		kernel_fpu_end();
+	}
+
+	return err;
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		       unsigned int key_len)
+{
+	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
+
+	if (kernel_fpu_using())
+		crypto_aes_encrypt_x86(ctx, dst, src);
+	else {
+		kernel_fpu_begin();
+		aesni_enc(ctx, dst, src);
+		kernel_fpu_end();
+	}
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
+
+	if (kernel_fpu_using())
+		crypto_aes_decrypt_x86(ctx, dst, src);
+	else {
+		kernel_fpu_begin();
+		aesni_dec(ctx, dst, src);
+		kernel_fpu_end();
+	}
+}
+
+static struct crypto_alg aesni_alg = {
+	.cra_name		= "aes",
+	.cra_driver_name	= "aes-aesni",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+	.cra_alignmask		= 0,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
+	.cra_u	= {
+		.cipher	= {
+			.cia_min_keysize	= AES_MIN_KEY_SIZE,
+			.cia_max_keysize	= AES_MAX_KEY_SIZE,
+			.cia_setkey		= aes_set_key,
+			.cia_encrypt		= aes_encrypt,
+			.cia_decrypt		= aes_decrypt
+		}
+	}
+};
+
+static int ecb_encrypt(struct blkcipher_desc *desc,
+		       struct scatterlist *dst, struct scatterlist *src,
+		       unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_fpu_begin();
+	while ((nbytes = walk.nbytes)) {
+		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			      nbytes & AES_BLOCK_MASK);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+	kernel_fpu_end();
+
+	return err;
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc,
+		       struct scatterlist *dst, struct scatterlist *src,
+		       unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_fpu_begin();
+	while ((nbytes = walk.nbytes)) {
+		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			      nbytes & AES_BLOCK_MASK);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+	kernel_fpu_end();
+
+	return err;
+}
+
+static struct crypto_alg blk_ecb_alg = {
+	.cra_name		= "__ecb-aes-aesni",
+	.cra_driver_name	= "__driver-ecb-aes-aesni",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aes_set_key,
+			.encrypt	= ecb_encrypt,
+			.decrypt	= ecb_decrypt,
+		},
+	},
+};
+
+static int cbc_encrypt(struct blkcipher_desc *desc,
+		       struct scatterlist *dst, struct scatterlist *src,
+		       unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_fpu_begin();
+	while ((nbytes = walk.nbytes)) {
+		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			      nbytes & AES_BLOCK_MASK, walk.iv);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+	kernel_fpu_end();
+
+	return err;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc,
+		       struct scatterlist *dst, struct scatterlist *src,
+		       unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_fpu_begin();
+	while ((nbytes = walk.nbytes)) {
+		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+			      nbytes & AES_BLOCK_MASK, walk.iv);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+	kernel_fpu_end();
+
+	return err;
+}
+
+static struct crypto_alg blk_cbc_alg = {
+	.cra_name		= "__cbc-aes-aesni",
+	.cra_driver_name	= "__driver-cbc-aes-aesni",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aes_set_key,
+			.encrypt	= cbc_encrypt,
+			.decrypt	= cbc_decrypt,
+		},
+	},
+};
+
+static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int key_len)
+{
+	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
+}
+
+static int ablk_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (kernel_fpu_using()) {
+		struct ablkcipher_request *cryptd_req =
+			ablkcipher_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_ablkcipher_encrypt(cryptd_req);
+	} else {
+		struct blkcipher_desc desc;
+		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+		desc.info = req->info;
+		desc.flags = 0;
+		return crypto_blkcipher_crt(desc.tfm)->encrypt(
+			&desc, req->dst, req->src, req->nbytes);
+	}
+}
+
+static int ablk_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (kernel_fpu_using()) {
+		struct ablkcipher_request *cryptd_req =
+			ablkcipher_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_ablkcipher_decrypt(cryptd_req);
+	} else {
+		struct blkcipher_desc desc;
+		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+		desc.info = req->info;
+		desc.flags = 0;
+		return crypto_blkcipher_crt(desc.tfm)->decrypt(
+			&desc, req->dst, req->src, req->nbytes);
+	}
+}
+
+static void ablk_exit(struct crypto_tfm *tfm)
+{
+	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	cryptd_free_ablkcipher(ctx->cryptd_tfm);
+}
+
+static void ablk_init_common(struct crypto_tfm *tfm,
+			     struct cryptd_ablkcipher *cryptd_tfm)
+{
+	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
+		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
+}
+
+static int ablk_ecb_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_ecb_alg = {
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "ecb-aes-aesni",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
+	.cra_init		= ablk_ecb_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= ablk_set_key,
+			.encrypt	= ablk_encrypt,
+			.decrypt	= ablk_decrypt,
+		},
+	},
+};
+
+static int ablk_cbc_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_cbc_alg = {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-aesni",
+	.cra_priority		= 400,
393 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
394 .cra_blocksize = AES_BLOCK_SIZE,
395 .cra_ctxsize = sizeof(struct async_aes_ctx),
396 .cra_alignmask = 0,
397 .cra_type = &crypto_ablkcipher_type,
398 .cra_module = THIS_MODULE,
399 .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
400 .cra_init = ablk_cbc_init,
401 .cra_exit = ablk_exit,
402 .cra_u = {
403 .ablkcipher = {
404 .min_keysize = AES_MIN_KEY_SIZE,
405 .max_keysize = AES_MAX_KEY_SIZE,
406 .ivsize = AES_BLOCK_SIZE,
407 .setkey = ablk_set_key,
408 .encrypt = ablk_encrypt,
409 .decrypt = ablk_decrypt,
410 },
411 },
412};
413
414static int __init aesni_init(void)
415{
416 int err;
417
418 if (!cpu_has_aes) {
419 printk(KERN_ERR "Intel AES-NI instructions not detected.\n");
420 return -ENODEV;
421 }
422 if ((err = crypto_register_alg(&aesni_alg)))
423 goto aes_err;
424 if ((err = crypto_register_alg(&blk_ecb_alg)))
425 goto blk_ecb_err;
426 if ((err = crypto_register_alg(&blk_cbc_alg)))
427 goto blk_cbc_err;
428 if ((err = crypto_register_alg(&ablk_ecb_alg)))
429 goto ablk_ecb_err;
430 if ((err = crypto_register_alg(&ablk_cbc_alg)))
431 goto ablk_cbc_err;
432
433 return err;
434
435ablk_cbc_err:
436 crypto_unregister_alg(&ablk_ecb_alg);
437ablk_ecb_err:
438 crypto_unregister_alg(&blk_cbc_alg);
439blk_cbc_err:
440 crypto_unregister_alg(&blk_ecb_alg);
441blk_ecb_err:
442 crypto_unregister_alg(&aesni_alg);
443aes_err:
444 return err;
445}
446
447static void __exit aesni_exit(void)
448{
449 crypto_unregister_alg(&ablk_cbc_alg);
450 crypto_unregister_alg(&ablk_ecb_alg);
451 crypto_unregister_alg(&blk_cbc_alg);
452 crypto_unregister_alg(&blk_ecb_alg);
453 crypto_unregister_alg(&aesni_alg);
454}
455
456module_init(aesni_init);
457module_exit(aesni_exit);
458
459MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, optimized with Intel AES-NI instructions");
460MODULE_LICENSE("GPL");
461MODULE_ALIAS("aes");
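
For reference, a minimal usage sketch of the async "cbc(aes)" transform registered above. This is not part of the patch: the all-zero key and IV, the one-block buffer, and the simplified error handling are illustrative assumptions; a real caller would install a completion callback rather than ignore -EINPROGRESS.

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_aesni_cbc_encrypt(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* illustrative key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* illustrative IV */
	u8 buf[AES_BLOCK_SIZE] = { 0 };		/* one plaintext block */
	int err;

	/* Resolves to cbc-aes-aesni (priority 400) when AES-NI is present. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* May return -EINPROGRESS when the request is deferred to cryptd. */
	err = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}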
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/aes.h
new file mode 100644
index 000000000000..80545a1cbe39
--- /dev/null
+++ b/arch/x86/include/asm/aes.h
@@ -0,0 +1,11 @@
1#ifndef ASM_X86_AES_H
2#define ASM_X86_AES_H
3
4#include <linux/crypto.h>
5#include <crypto/aes.h>
6
7void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
8 const u8 *src);
9void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
10 const u8 *src);
11#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7301e60dc4a8..0beba0d1468d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -213,6 +213,7 @@ extern const char * const x86_power_flags[32];
213#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) 213#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
214#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) 214#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
215#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) 215#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
216#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
216#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) 217#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
217#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) 218#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
218#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) 219#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8dde4fcf99c9..74d0e622a515 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -56,6 +56,7 @@ config CRYPTO_BLKCIPHER2
56 tristate 56 tristate
57 select CRYPTO_ALGAPI2 57 select CRYPTO_ALGAPI2
58 select CRYPTO_RNG2 58 select CRYPTO_RNG2
59 select CRYPTO_WORKQUEUE
59 60
60config CRYPTO_HASH 61config CRYPTO_HASH
61 tristate 62 tristate
@@ -75,6 +76,10 @@ config CRYPTO_RNG2
75 tristate 76 tristate
76 select CRYPTO_ALGAPI2 77 select CRYPTO_ALGAPI2
77 78
79config CRYPTO_PCOMP
80 tristate
81 select CRYPTO_ALGAPI2
82
78config CRYPTO_MANAGER 83config CRYPTO_MANAGER
79 tristate "Cryptographic algorithm manager" 84 tristate "Cryptographic algorithm manager"
80 select CRYPTO_MANAGER2 85 select CRYPTO_MANAGER2
@@ -87,6 +92,7 @@ config CRYPTO_MANAGER2
87 select CRYPTO_AEAD2 92 select CRYPTO_AEAD2
88 select CRYPTO_HASH2 93 select CRYPTO_HASH2
89 select CRYPTO_BLKCIPHER2 94 select CRYPTO_BLKCIPHER2
95 select CRYPTO_PCOMP
90 96
91config CRYPTO_GF128MUL 97config CRYPTO_GF128MUL
92 tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" 98 tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -106,11 +112,15 @@ config CRYPTO_NULL
106 help 112 help
107 These are 'Null' algorithms, used by IPsec, which do nothing. 113 These are 'Null' algorithms, used by IPsec, which do nothing.
108 114
115config CRYPTO_WORKQUEUE
116 tristate
117
109config CRYPTO_CRYPTD 118config CRYPTO_CRYPTD
110 tristate "Software async crypto daemon" 119 tristate "Software async crypto daemon"
111 select CRYPTO_BLKCIPHER 120 select CRYPTO_BLKCIPHER
112 select CRYPTO_HASH 121 select CRYPTO_HASH
113 select CRYPTO_MANAGER 122 select CRYPTO_MANAGER
123 select CRYPTO_WORKQUEUE
114 help 124 help
115 This is a generic software asynchronous crypto daemon that 125 This is a generic software asynchronous crypto daemon that
116 converts an arbitrary synchronous software crypto algorithm 126 converts an arbitrary synchronous software crypto algorithm
@@ -470,6 +480,31 @@ config CRYPTO_AES_X86_64
470 480
471 See <http://csrc.nist.gov/encryption/aes/> for more information. 481 See <http://csrc.nist.gov/encryption/aes/> for more information.
472 482
483config CRYPTO_AES_NI_INTEL
484 tristate "AES cipher algorithms (AES-NI)"
485 depends on (X86 || UML_X86) && 64BIT
486 select CRYPTO_AES_X86_64
487 select CRYPTO_CRYPTD
488 select CRYPTO_ALGAPI
489 help
490 Use Intel AES-NI instructions for the AES algorithm.
491
492 AES cipher algorithms (FIPS-197). AES uses the Rijndael
493 algorithm.
494
495 Rijndael appears to be consistently a very good performer in
496 both hardware and software across a wide range of computing
497 environments regardless of its use in feedback or non-feedback
498 modes. Its key setup time is excellent, and its key agility is
499 good. Rijndael's very low memory requirements make it very well
500 suited for restricted-space environments, in which it also
501 demonstrates excellent performance. Rijndael's operations are
502 among the easiest to defend against power and timing attacks.
503
504 AES specifies three key sizes: 128, 192 and 256 bits.
505
506 See <http://csrc.nist.gov/encryption/aes/> for more information.
507
473config CRYPTO_ANUBIS 508config CRYPTO_ANUBIS
474 tristate "Anubis cipher algorithm" 509 tristate "Anubis cipher algorithm"
475 select CRYPTO_ALGAPI 510 select CRYPTO_ALGAPI
@@ -714,6 +749,15 @@ config CRYPTO_DEFLATE
714 749
715 You will most probably want this if using IPSec. 750 You will most probably want this if using IPSec.
716 751
752config CRYPTO_ZLIB
753 tristate "Zlib compression algorithm"
754 select CRYPTO_PCOMP
755 select ZLIB_INFLATE
756 select ZLIB_DEFLATE
757 select NLATTR
758 help
759 This is the zlib compression algorithm.
760
717config CRYPTO_LZO 761config CRYPTO_LZO
718 tristate "LZO compression algorithm" 762 tristate "LZO compression algorithm"
719 select CRYPTO_ALGAPI 763 select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 46b08bf2035f..673d9f7c1bda 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -5,6 +5,8 @@
5obj-$(CONFIG_CRYPTO) += crypto.o 5obj-$(CONFIG_CRYPTO) += crypto.o
6crypto-objs := api.o cipher.o digest.o compress.o 6crypto-objs := api.o cipher.o digest.o compress.o
7 7
8obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
9
8obj-$(CONFIG_CRYPTO_FIPS) += fips.o 10obj-$(CONFIG_CRYPTO_FIPS) += fips.o
9 11
10crypto_algapi-$(CONFIG_PROC_FS) += proc.o 12crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -25,6 +27,8 @@ crypto_hash-objs += ahash.o
25crypto_hash-objs += shash.o 27crypto_hash-objs += shash.o
26obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o 28obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
27 29
30obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
31
28cryptomgr-objs := algboss.o testmgr.o 32cryptomgr-objs := algboss.o testmgr.o
29 33
30obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o 34obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
70obj-$(CONFIG_CRYPTO_SEED) += seed.o 74obj-$(CONFIG_CRYPTO_SEED) += seed.o
71obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o 75obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
72obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o 76obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
77obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
73obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o 78obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
74obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o 79obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
75obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o 80obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 94140b3756fc..e11ce37c7104 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -282,6 +282,25 @@ static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
282 alg->cra_ablkcipher.ivsize)) 282 alg->cra_ablkcipher.ivsize))
283 return alg; 283 return alg;
284 284
285 crypto_mod_put(alg);
286 alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
287 mask & ~CRYPTO_ALG_TESTED);
288 if (IS_ERR(alg))
289 return alg;
290
291 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
292 CRYPTO_ALG_TYPE_GIVCIPHER) {
293 if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
294 crypto_mod_put(alg);
295 alg = ERR_PTR(-ENOENT);
296 }
297 return alg;
298 }
299
300 BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
301 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
302 alg->cra_ablkcipher.ivsize));
303
285 return ERR_PTR(crypto_givcipher_default(alg, type, mask)); 304 return ERR_PTR(crypto_givcipher_default(alg, type, mask));
286} 305}
287 306
diff --git a/crypto/aead.c b/crypto/aead.c
index 3a6f3f52c7c7..d9aa733db164 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -422,6 +422,22 @@ static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
422 if (!alg->cra_aead.ivsize) 422 if (!alg->cra_aead.ivsize)
423 return alg; 423 return alg;
424 424
425 crypto_mod_put(alg);
426 alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
427 mask & ~CRYPTO_ALG_TESTED);
428 if (IS_ERR(alg))
429 return alg;
430
431 if (alg->cra_type == &crypto_aead_type) {
432 if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
433 crypto_mod_put(alg);
434 alg = ERR_PTR(-ENOENT);
435 }
436 return alg;
437 }
438
439 BUG_ON(!alg->cra_aead.ivsize);
440
425 return ERR_PTR(crypto_nivaead_default(alg, type, mask)); 441 return ERR_PTR(crypto_nivaead_default(alg, type, mask));
426} 442}
427 443
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 4601e4267c88..6906f92aeac0 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -10,7 +10,7 @@
10 * 10 *
11 */ 11 */
12 12
13#include <linux/crypto.h> 13#include <crypto/internal/aead.h>
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/init.h> 16#include <linux/init.h>
@@ -206,8 +206,7 @@ static int cryptomgr_test(void *data)
206 u32 type = param->type; 206 u32 type = param->type;
207 int err = 0; 207 int err = 0;
208 208
209 if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & 209 if (type & CRYPTO_ALG_TESTED)
210 CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV))
211 goto skiptest; 210 goto skiptest;
212 211
213 err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED); 212 err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
@@ -223,6 +222,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
223{ 222{
224 struct task_struct *thread; 223 struct task_struct *thread;
225 struct crypto_test_param *param; 224 struct crypto_test_param *param;
225 u32 type;
226 226
227 if (!try_module_get(THIS_MODULE)) 227 if (!try_module_get(THIS_MODULE))
228 goto err; 228 goto err;
@@ -233,7 +233,19 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
233 233
234 memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver)); 234 memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
235 memcpy(param->alg, alg->cra_name, sizeof(param->alg)); 235 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
236 param->type = alg->cra_flags; 236 type = alg->cra_flags;
237
238 /* This piece of crap needs to disappear into per-type test hooks. */
239 if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
240 CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
241 ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
242 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
243 alg->cra_ablkcipher.ivsize)) ||
244 (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
245 alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
246 type |= CRYPTO_ALG_TESTED;
247
248 param->type = type;
237 249
238 thread = kthread_run(cryptomgr_test, param, "cryptomgr_test"); 250 thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
239 if (IS_ERR(thread)) 251 if (IS_ERR(thread))
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 0fac8ffc2fb7..d80ed4c1e009 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -132,9 +132,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
132 */ 132 */
133 if (!memcmp(ctx->rand_data, ctx->last_rand_data, 133 if (!memcmp(ctx->rand_data, ctx->last_rand_data,
134 DEFAULT_BLK_SZ)) { 134 DEFAULT_BLK_SZ)) {
135 if (fips_enabled) {
136 panic("cprng %p Failed repetition check!\n",
137 ctx);
138 }
139
135 printk(KERN_ERR 140 printk(KERN_ERR
136 "ctx %p Failed repetition check!\n", 141 "ctx %p Failed repetition check!\n",
137 ctx); 142 ctx);
143
138 ctx->flags |= PRNG_NEED_RESET; 144 ctx->flags |= PRNG_NEED_RESET;
139 return -EINVAL; 145 return -EINVAL;
140 } 146 }
@@ -338,7 +344,16 @@ static int cprng_init(struct crypto_tfm *tfm)
338 344
339 spin_lock_init(&ctx->prng_lock); 345 spin_lock_init(&ctx->prng_lock);
340 346
341 return reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL); 347 if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
348 return -EINVAL;
349
350 /*
351 * After allocation, always force the user to reset the context
352 * so they don't inadvertently use the insecure default values
353 * without specifying them intentionally.
354 */
355 ctx->flags |= PRNG_NEED_RESET;
356 return 0;
342} 357}
343 358
344static void cprng_exit(struct crypto_tfm *tfm) 359static void cprng_exit(struct crypto_tfm *tfm)
diff --git a/crypto/api.c b/crypto/api.c
index 38a2bc02a98c..314dab96840e 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -255,7 +255,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
255 struct crypto_alg *larval; 255 struct crypto_alg *larval;
256 int ok; 256 int ok;
257 257
258 if (!(mask & CRYPTO_ALG_TESTED)) { 258 if (!((type | mask) & CRYPTO_ALG_TESTED)) {
259 type |= CRYPTO_ALG_TESTED; 259 type |= CRYPTO_ALG_TESTED;
260 mask |= CRYPTO_ALG_TESTED; 260 mask |= CRYPTO_ALG_TESTED;
261 } 261 }
@@ -464,8 +464,8 @@ err:
464} 464}
465EXPORT_SYMBOL_GPL(crypto_alloc_base); 465EXPORT_SYMBOL_GPL(crypto_alloc_base);
466 466
467struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg, 467void *crypto_create_tfm(struct crypto_alg *alg,
468 const struct crypto_type *frontend) 468 const struct crypto_type *frontend)
469{ 469{
470 char *mem; 470 char *mem;
471 struct crypto_tfm *tfm = NULL; 471 struct crypto_tfm *tfm = NULL;
@@ -499,9 +499,9 @@ out_free_tfm:
499 crypto_shoot_alg(alg); 499 crypto_shoot_alg(alg);
500 kfree(mem); 500 kfree(mem);
501out_err: 501out_err:
502 tfm = ERR_PTR(err); 502 mem = ERR_PTR(err);
503out: 503out:
504 return tfm; 504 return mem;
505} 505}
506EXPORT_SYMBOL_GPL(crypto_create_tfm); 506EXPORT_SYMBOL_GPL(crypto_create_tfm);
507 507
@@ -525,12 +525,11 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
525 * 525 *
526 * In case of error the return value is an error pointer. 526 * In case of error the return value is an error pointer.
527 */ 527 */
528struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, 528void *crypto_alloc_tfm(const char *alg_name,
529 const struct crypto_type *frontend, 529 const struct crypto_type *frontend, u32 type, u32 mask)
530 u32 type, u32 mask)
531{ 530{
532 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); 531 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
533 struct crypto_tfm *tfm; 532 void *tfm;
534 int err; 533 int err;
535 534
536 type &= frontend->maskclear; 535 type &= frontend->maskclear;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index d70a41c002df..90d26c91f4e9 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -521,7 +521,7 @@ static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
521 int err; 521 int err;
522 522
523 type = crypto_skcipher_type(type); 523 type = crypto_skcipher_type(type);
524 mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV; 524 mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
525 525
526 alg = crypto_alg_mod_lookup(name, type, mask); 526 alg = crypto_alg_mod_lookup(name, type, mask);
527 if (IS_ERR(alg)) 527 if (IS_ERR(alg))
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 7c37a497b860..ba200b07449d 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -15,6 +15,7 @@
15 15
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <crypto/rng.h> 17#include <crypto/rng.h>
18#include <crypto/crypto_wq.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -133,7 +134,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
133 goto out; 134 goto out;
134 } 135 }
135 136
136 queued = schedule_work(&ctx->postponed); 137 queued = queue_work(kcrypto_wq, &ctx->postponed);
137 BUG_ON(!queued); 138 BUG_ON(!queued);
138 139
139out: 140out:
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d29e06b350ff..d14b22658d7a 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -12,30 +12,31 @@
12 12
13#include <crypto/algapi.h> 13#include <crypto/algapi.h>
14#include <crypto/internal/hash.h> 14#include <crypto/internal/hash.h>
15#include <crypto/cryptd.h>
16#include <crypto/crypto_wq.h>
15#include <linux/err.h> 17#include <linux/err.h>
16#include <linux/init.h> 18#include <linux/init.h>
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/kthread.h>
19#include <linux/list.h> 20#include <linux/list.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/spinlock.h>
26 25
27#define CRYPTD_MAX_QLEN 100 26#define CRYPTD_MAX_CPU_QLEN 100
28 27
29struct cryptd_state { 28struct cryptd_cpu_queue {
30 spinlock_t lock;
31 struct mutex mutex;
32 struct crypto_queue queue; 29 struct crypto_queue queue;
33 struct task_struct *task; 30 struct work_struct work;
31};
32
33struct cryptd_queue {
34 struct cryptd_cpu_queue *cpu_queue;
34}; 35};
35 36
36struct cryptd_instance_ctx { 37struct cryptd_instance_ctx {
37 struct crypto_spawn spawn; 38 struct crypto_spawn spawn;
38 struct cryptd_state *state; 39 struct cryptd_queue *queue;
39}; 40};
40 41
41struct cryptd_blkcipher_ctx { 42struct cryptd_blkcipher_ctx {
@@ -54,11 +55,85 @@ struct cryptd_hash_request_ctx {
54 crypto_completion_t complete; 55 crypto_completion_t complete;
55}; 56};
56 57
57static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm) 58static void cryptd_queue_worker(struct work_struct *work);
59
60static int cryptd_init_queue(struct cryptd_queue *queue,
61 unsigned int max_cpu_qlen)
62{
63 int cpu;
64 struct cryptd_cpu_queue *cpu_queue;
65
66 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
67 if (!queue->cpu_queue)
68 return -ENOMEM;
69 for_each_possible_cpu(cpu) {
70 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
71 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
72 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
73 }
74 return 0;
75}
76
77static void cryptd_fini_queue(struct cryptd_queue *queue)
78{
79 int cpu;
80 struct cryptd_cpu_queue *cpu_queue;
81
82 for_each_possible_cpu(cpu) {
83 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
84 BUG_ON(cpu_queue->queue.qlen);
85 }
86 free_percpu(queue->cpu_queue);
87}
88
89static int cryptd_enqueue_request(struct cryptd_queue *queue,
90 struct crypto_async_request *request)
91{
92 int cpu, err;
93 struct cryptd_cpu_queue *cpu_queue;
94
95 cpu = get_cpu();
96 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
97 err = crypto_enqueue_request(&cpu_queue->queue, request);
98 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
99 put_cpu();
100
101 return err;
102}
103
104/* Called in workqueue context: do one unit of real crypto work (via
105 * req->complete) and reschedule the worker if there is more work to
106 * do. */
107static void cryptd_queue_worker(struct work_struct *work)
108{
109 struct cryptd_cpu_queue *cpu_queue;
110 struct crypto_async_request *req, *backlog;
111
112 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
113 /* Only handle one request at a time to avoid hogging the crypto
114 * workqueue. preempt_disable()/preempt_enable() prevent this
115 * worker from being preempted by cryptd_enqueue_request(). */
116 preempt_disable();
117 backlog = crypto_get_backlog(&cpu_queue->queue);
118 req = crypto_dequeue_request(&cpu_queue->queue);
119 preempt_enable();
120
121 if (!req)
122 return;
123
124 if (backlog)
125 backlog->complete(backlog, -EINPROGRESS);
126 req->complete(req, 0);
127
128 if (cpu_queue->queue.qlen)
129 queue_work(kcrypto_wq, &cpu_queue->work);
130}
131
132static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
58{ 133{
59 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 134 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
60 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); 135 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
61 return ictx->state; 136 return ictx->queue;
62} 137}
63 138
64static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, 139static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -130,19 +205,13 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
130{ 205{
131 struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); 206 struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
132 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 207 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
133 struct cryptd_state *state = 208 struct cryptd_queue *queue;
134 cryptd_get_state(crypto_ablkcipher_tfm(tfm));
135 int err;
136 209
210 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
137 rctx->complete = req->base.complete; 211 rctx->complete = req->base.complete;
138 req->base.complete = complete; 212 req->base.complete = complete;
139 213
140 spin_lock_bh(&state->lock); 214 return cryptd_enqueue_request(queue, &req->base);
141 err = ablkcipher_enqueue_request(&state->queue, req);
142 spin_unlock_bh(&state->lock);
143
144 wake_up_process(state->task);
145 return err;
146} 215}
147 216
148static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) 217static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
@@ -176,21 +245,12 @@ static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
176static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) 245static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
177{ 246{
178 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 247 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
179 struct cryptd_state *state = cryptd_get_state(tfm);
180 int active;
181
182 mutex_lock(&state->mutex);
183 active = ablkcipher_tfm_in_queue(&state->queue,
184 __crypto_ablkcipher_cast(tfm));
185 mutex_unlock(&state->mutex);
186
187 BUG_ON(active);
188 248
189 crypto_free_blkcipher(ctx->child); 249 crypto_free_blkcipher(ctx->child);
190} 250}
191 251
192static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, 252static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
193 struct cryptd_state *state) 253 struct cryptd_queue *queue)
194{ 254{
195 struct crypto_instance *inst; 255 struct crypto_instance *inst;
196 struct cryptd_instance_ctx *ctx; 256 struct cryptd_instance_ctx *ctx;
@@ -213,7 +273,7 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
213 if (err) 273 if (err)
214 goto out_free_inst; 274 goto out_free_inst;
215 275
216 ctx->state = state; 276 ctx->queue = queue;
217 277
218 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 278 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
219 279
@@ -231,7 +291,7 @@ out_free_inst:
231} 291}
232 292
233static struct crypto_instance *cryptd_alloc_blkcipher( 293static struct crypto_instance *cryptd_alloc_blkcipher(
234 struct rtattr **tb, struct cryptd_state *state) 294 struct rtattr **tb, struct cryptd_queue *queue)
235{ 295{
236 struct crypto_instance *inst; 296 struct crypto_instance *inst;
237 struct crypto_alg *alg; 297 struct crypto_alg *alg;
@@ -241,7 +301,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
241 if (IS_ERR(alg)) 301 if (IS_ERR(alg))
242 return ERR_CAST(alg); 302 return ERR_CAST(alg);
243 303
244 inst = cryptd_alloc_instance(alg, state); 304 inst = cryptd_alloc_instance(alg, queue);
245 if (IS_ERR(inst)) 305 if (IS_ERR(inst))
246 goto out_put_alg; 306 goto out_put_alg;
247 307
@@ -289,15 +349,6 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
289static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) 349static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
290{ 350{
291 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 351 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
292 struct cryptd_state *state = cryptd_get_state(tfm);
293 int active;
294
295 mutex_lock(&state->mutex);
296 active = ahash_tfm_in_queue(&state->queue,
297 __crypto_ahash_cast(tfm));
298 mutex_unlock(&state->mutex);
299
300 BUG_ON(active);
301 352
302 crypto_free_hash(ctx->child); 353 crypto_free_hash(ctx->child);
303} 354}
@@ -323,19 +374,13 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
323{ 374{
324 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 375 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
325 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 376 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
326 struct cryptd_state *state = 377 struct cryptd_queue *queue =
327 cryptd_get_state(crypto_ahash_tfm(tfm)); 378 cryptd_get_queue(crypto_ahash_tfm(tfm));
328 int err;
329 379
330 rctx->complete = req->base.complete; 380 rctx->complete = req->base.complete;
331 req->base.complete = complete; 381 req->base.complete = complete;
332 382
333 spin_lock_bh(&state->lock); 383 return cryptd_enqueue_request(queue, &req->base);
334 err = ahash_enqueue_request(&state->queue, req);
335 spin_unlock_bh(&state->lock);
336
337 wake_up_process(state->task);
338 return err;
339} 384}
340 385
341static void cryptd_hash_init(struct crypto_async_request *req_async, int err) 386static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
@@ -468,7 +513,7 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
468} 513}
469 514
470static struct crypto_instance *cryptd_alloc_hash( 515static struct crypto_instance *cryptd_alloc_hash(
471 struct rtattr **tb, struct cryptd_state *state) 516 struct rtattr **tb, struct cryptd_queue *queue)
472{ 517{
473 struct crypto_instance *inst; 518 struct crypto_instance *inst;
474 struct crypto_alg *alg; 519 struct crypto_alg *alg;
@@ -478,7 +523,7 @@ static struct crypto_instance *cryptd_alloc_hash(
478 if (IS_ERR(alg)) 523 if (IS_ERR(alg))
479 return ERR_PTR(PTR_ERR(alg)); 524 return ERR_PTR(PTR_ERR(alg));
480 525
481 inst = cryptd_alloc_instance(alg, state); 526 inst = cryptd_alloc_instance(alg, queue);
482 if (IS_ERR(inst)) 527 if (IS_ERR(inst))
483 goto out_put_alg; 528 goto out_put_alg;
484 529
@@ -502,7 +547,7 @@ out_put_alg:
502 return inst; 547 return inst;
503} 548}
504 549
505static struct cryptd_state state; 550static struct cryptd_queue queue;
506 551
507static struct crypto_instance *cryptd_alloc(struct rtattr **tb) 552static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
508{ 553{
@@ -514,9 +559,9 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
514 559
515 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { 560 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
516 case CRYPTO_ALG_TYPE_BLKCIPHER: 561 case CRYPTO_ALG_TYPE_BLKCIPHER:
517 return cryptd_alloc_blkcipher(tb, &state); 562 return cryptd_alloc_blkcipher(tb, &queue);
518 case CRYPTO_ALG_TYPE_DIGEST: 563 case CRYPTO_ALG_TYPE_DIGEST:
519 return cryptd_alloc_hash(tb, &state); 564 return cryptd_alloc_hash(tb, &queue);
520 } 565 }
521 566
522 return ERR_PTR(-EINVAL); 567 return ERR_PTR(-EINVAL);
@@ -537,82 +582,58 @@ static struct crypto_template cryptd_tmpl = {
537 .module = THIS_MODULE, 582 .module = THIS_MODULE,
538}; 583};
539 584
540static inline int cryptd_create_thread(struct cryptd_state *state, 585struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
541 int (*fn)(void *data), const char *name) 586 u32 type, u32 mask)
542{ 587{
543 spin_lock_init(&state->lock); 588 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
544 mutex_init(&state->mutex); 589 struct crypto_ablkcipher *tfm;
545 crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN); 590
546 591 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
547 state->task = kthread_run(fn, state, name); 592 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
548 if (IS_ERR(state->task)) 593 return ERR_PTR(-EINVAL);
549 return PTR_ERR(state->task); 594 tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
595 if (IS_ERR(tfm))
596 return ERR_CAST(tfm);
597 if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
598 crypto_free_ablkcipher(tfm);
599 return ERR_PTR(-EINVAL);
600 }
550 601
551 return 0; 602 return __cryptd_ablkcipher_cast(tfm);
552} 603}
604EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
553 605
554static inline void cryptd_stop_thread(struct cryptd_state *state) 606struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
555{ 607{
556 BUG_ON(state->queue.qlen); 608 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
557 kthread_stop(state->task); 609 return ctx->child;
558} 610}
611EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
559 612
560static int cryptd_thread(void *data) 613void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
561{ 614{
562 struct cryptd_state *state = data; 615 crypto_free_ablkcipher(&tfm->base);
563 int stop;
564
565 current->flags |= PF_NOFREEZE;
566
567 do {
568 struct crypto_async_request *req, *backlog;
569
570 mutex_lock(&state->mutex);
571 __set_current_state(TASK_INTERRUPTIBLE);
572
573 spin_lock_bh(&state->lock);
574 backlog = crypto_get_backlog(&state->queue);
575 req = crypto_dequeue_request(&state->queue);
576 spin_unlock_bh(&state->lock);
577
578 stop = kthread_should_stop();
579
580 if (stop || req) {
581 __set_current_state(TASK_RUNNING);
582 if (req) {
583 if (backlog)
584 backlog->complete(backlog,
585 -EINPROGRESS);
586 req->complete(req, 0);
587 }
588 }
589
590 mutex_unlock(&state->mutex);
591
592 schedule();
593 } while (!stop);
594
595 return 0;
596} 616}
617EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
597 618
598static int __init cryptd_init(void) 619static int __init cryptd_init(void)
599{ 620{
600 int err; 621 int err;
601 622
602 err = cryptd_create_thread(&state, cryptd_thread, "cryptd"); 623 err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
603 if (err) 624 if (err)
604 return err; 625 return err;
605 626
606 err = crypto_register_template(&cryptd_tmpl); 627 err = crypto_register_template(&cryptd_tmpl);
607 if (err) 628 if (err)
608 kthread_stop(state.task); 629 cryptd_fini_queue(&queue);
609 630
610 return err; 631 return err;
611} 632}
612 633
613static void __exit cryptd_exit(void) 634static void __exit cryptd_exit(void)
614{ 635{
615 cryptd_stop_thread(&state); 636 cryptd_fini_queue(&queue);
616 crypto_unregister_template(&cryptd_tmpl); 637 crypto_unregister_template(&cryptd_tmpl);
617} 638}
618 639
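
A hedged sketch of how a driver consumes the helpers exported above. The "__driver-ecb-aes-aesni" name comes from the aesni-intel glue earlier in this commit; the function and its abbreviated error handling are illustrative only.

#include <crypto/cryptd.h>
#include <linux/err.h>

static int example_cryptd_consumer(void)
{
	struct cryptd_ablkcipher *ctfm;
	struct crypto_blkcipher *child;

	/* Internally formatted as "cryptd(__driver-ecb-aes-aesni)". */
	ctfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* Contexts that may use the FPU directly can bypass the per-CPU
	 * queue and invoke the synchronous child cipher themselves. */
	child = cryptd_ablkcipher_child(ctfm);
	(void)child;

	cryptd_free_ablkcipher(ctfm);
	return 0;
}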
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
new file mode 100644
index 000000000000..fdcf6248f152
--- /dev/null
+++ b/crypto/crypto_wq.c
@@ -0,0 +1,38 @@
1/*
2 * Workqueue for crypto subsystem
3 *
4 * Copyright (c) 2009 Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 */
13
14#include <linux/workqueue.h>
15#include <crypto/algapi.h>
16#include <crypto/crypto_wq.h>
17
18struct workqueue_struct *kcrypto_wq;
19EXPORT_SYMBOL_GPL(kcrypto_wq);
20
21static int __init crypto_wq_init(void)
22{
23 kcrypto_wq = create_workqueue("crypto");
24 if (unlikely(!kcrypto_wq))
25 return -ENOMEM;
26 return 0;
27}
28
29static void __exit crypto_wq_exit(void)
30{
31 destroy_workqueue(kcrypto_wq);
32}
33
34module_init(crypto_wq_init);
35module_exit(crypto_wq_exit);
36
37MODULE_LICENSE("GPL");
38MODULE_DESCRIPTION("Workqueue for crypto subsystem");
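
A hedged sketch of queueing deferred work on the new shared workqueue, matching how chainiv.c above switches from schedule_work() to queue_work(kcrypto_wq, ...); the work item and handler names are illustrative.

#include <crypto/crypto_wq.h>
#include <linux/workqueue.h>

static void example_deferred_fn(struct work_struct *work)
{
	/* Postponed crypto processing would run here, off the
	 * submitter's context. */
}

static DECLARE_WORK(example_deferred_work, example_deferred_fn);

static void example_defer(void)
{
	queue_work(kcrypto_wq, &example_deferred_work);
}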
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index ecbeaa1f17e1..a90d260528d4 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -4,7 +4,7 @@
4 * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org> 4 * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
5 * 5 *
6 * Based on Dr Brian Gladman's (GPL'd) work published at 6 * Based on Dr Brian Gladman's (GPL'd) work published at
7 * http://fp.gladman.plus.com/cryptography_technology/index.htm 7 * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
8 * See the original copyright notice below. 8 * See the original copyright notice below.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
diff --git a/crypto/internal.h b/crypto/internal.h
index 3c19a27a7563..fc76e1f37fc3 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -109,8 +109,10 @@ void crypto_alg_tested(const char *name, int err);
109void crypto_shoot_alg(struct crypto_alg *alg); 109void crypto_shoot_alg(struct crypto_alg *alg);
110struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, 110struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
111 u32 mask); 111 u32 mask);
112struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg, 112void *crypto_create_tfm(struct crypto_alg *alg,
113 const struct crypto_type *frontend); 113 const struct crypto_type *frontend);
114void *crypto_alloc_tfm(const char *alg_name,
115 const struct crypto_type *frontend, u32 type, u32 mask);
114 116
115int crypto_register_instance(struct crypto_template *tmpl, 117int crypto_register_instance(struct crypto_template *tmpl,
116 struct crypto_instance *inst); 118 struct crypto_instance *inst);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
new file mode 100644
index 000000000000..ca9a4af91efe
--- /dev/null
+++ b/crypto/pcompress.c
@@ -0,0 +1,97 @@
1/*
2 * Cryptographic API.
3 *
4 * Partial (de)compression operations.
5 *
6 * Copyright 2008 Sony Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.
19 * If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/crypto.h>
23#include <linux/errno.h>
24#include <linux/module.h>
25#include <linux/seq_file.h>
26#include <linux/string.h>
27
28#include <crypto/compress.h>
29
30#include "internal.h"
31
32
33static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
34{
35 return 0;
36}
37
38static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
39 const struct crypto_type *frontend)
40{
41 return alg->cra_ctxsize;
42}
43
44static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
45 const struct crypto_type *frontend)
46{
47 return 0;
48}
49
50static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
51 __attribute__ ((unused));
52static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
53{
54 seq_printf(m, "type : pcomp\n");
55}
56
57static const struct crypto_type crypto_pcomp_type = {
58 .extsize = crypto_pcomp_extsize,
59 .init = crypto_pcomp_init,
60 .init_tfm = crypto_pcomp_init_tfm,
61#ifdef CONFIG_PROC_FS
62 .show = crypto_pcomp_show,
63#endif
64 .maskclear = ~CRYPTO_ALG_TYPE_MASK,
65 .maskset = CRYPTO_ALG_TYPE_MASK,
66 .type = CRYPTO_ALG_TYPE_PCOMPRESS,
67 .tfmsize = offsetof(struct crypto_pcomp, base),
68};
69
70struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
71 u32 mask)
72{
73 return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
74}
75EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
76
77int crypto_register_pcomp(struct pcomp_alg *alg)
78{
79 struct crypto_alg *base = &alg->base;
80
81 base->cra_type = &crypto_pcomp_type;
82 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
83 base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
84
85 return crypto_register_alg(base);
86}
87EXPORT_SYMBOL_GPL(crypto_register_pcomp);
88
89int crypto_unregister_pcomp(struct pcomp_alg *alg)
90{
91 return crypto_unregister_alg(&alg->base);
92}
93EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
94
95MODULE_LICENSE("GPL");
96MODULE_DESCRIPTION("Partial (de)compression type");
97MODULE_AUTHOR("Sony Corporation");
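
A hedged sketch of the call sequence this new type defines, mirroring what testmgr.c later in this commit exercises; the single-shot buffers and abbreviated error handling are illustrative assumptions, not a definitive consumer.

#include <crypto/compress.h>
#include <linux/err.h>

static int example_pcomp_compress(void *params, unsigned int paramsize,
				  const void *in, unsigned int inlen,
				  void *out, unsigned int outlen)
{
	struct crypto_pcomp *tfm;
	struct comp_request req;
	int err;

	tfm = crypto_alloc_pcomp("zlib", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_compress_setup(tfm, params, paramsize);
	if (!err)
		err = crypto_compress_init(tfm);

	req.next_in = in;
	req.avail_in = inlen;
	req.next_out = out;
	req.avail_out = outlen;

	/* update() may be called repeatedly as input streams in;
	 * final() flushes anything the algorithm still buffers. */
	if (!err)
		err = crypto_compress_update(tfm, &req);
	if (!err)
		err = crypto_compress_final(tfm, &req);

	crypto_free_pcomp(tfm);
	return err;
}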
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index caa3542e6ce8..6349d8339d37 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -2,7 +2,7 @@
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * SHA-256, as specified in 4 * SHA-256, as specified in
5 * http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf 5 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
6 * 6 *
7 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>. 7 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
8 * 8 *
diff --git a/crypto/shash.c b/crypto/shash.c
index d5a2b619c55f..7a659733f94a 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -18,15 +18,10 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20 20
21static const struct crypto_type crypto_shash_type;
22
23static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
24{
25 return container_of(tfm, struct crypto_shash, base);
26}
27
28#include "internal.h" 21#include "internal.h"
29 22
23static const struct crypto_type crypto_shash_type;
24
30static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, 25static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
31 unsigned int keylen) 26 unsigned int keylen)
32{ 27{
@@ -282,8 +277,7 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
282 if (!crypto_mod_get(calg)) 277 if (!crypto_mod_get(calg))
283 return -EAGAIN; 278 return -EAGAIN;
284 279
285 shash = __crypto_shash_cast(crypto_create_tfm( 280 shash = crypto_create_tfm(calg, &crypto_shash_type);
286 calg, &crypto_shash_type));
287 if (IS_ERR(shash)) { 281 if (IS_ERR(shash)) {
288 crypto_mod_put(calg); 282 crypto_mod_put(calg);
289 return PTR_ERR(shash); 283 return PTR_ERR(shash);
@@ -391,8 +385,7 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
391 if (!crypto_mod_get(calg)) 385 if (!crypto_mod_get(calg))
392 return -EAGAIN; 386 return -EAGAIN;
393 387
394 shash = __crypto_shash_cast(crypto_create_tfm( 388 shash = crypto_create_tfm(calg, &crypto_shash_type);
395 calg, &crypto_shash_type));
396 if (IS_ERR(shash)) { 389 if (IS_ERR(shash)) {
397 crypto_mod_put(calg); 390 crypto_mod_put(calg);
398 return PTR_ERR(shash); 391 return PTR_ERR(shash);
@@ -442,8 +435,6 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
442static int crypto_shash_init_tfm(struct crypto_tfm *tfm, 435static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
443 const struct crypto_type *frontend) 436 const struct crypto_type *frontend)
444{ 437{
445 if (frontend->type != CRYPTO_ALG_TYPE_SHASH)
446 return -EINVAL;
447 return 0; 438 return 0;
448} 439}
449 440
@@ -482,8 +473,7 @@ static const struct crypto_type crypto_shash_type = {
482struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, 473struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
483 u32 mask) 474 u32 mask)
484{ 475{
485 return __crypto_shash_cast( 476 return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
486 crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask));
487} 477}
488EXPORT_SYMBOL_GPL(crypto_alloc_shash); 478EXPORT_SYMBOL_GPL(crypto_alloc_shash);
489 479
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 28a45a1e6f42..c3c9124209a1 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -53,7 +53,7 @@ static char *check[] = {
53 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 53 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
54 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", 54 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
55 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", 55 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
56 "lzo", "cts", NULL 56 "lzo", "cts", "zlib", NULL
57}; 57};
58 58
59static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, 59static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
@@ -661,6 +661,10 @@ static void do_test(int m)
661 tcrypt_test("ecb(seed)"); 661 tcrypt_test("ecb(seed)");
662 break; 662 break;
663 663
664 case 44:
665 tcrypt_test("zlib");
666 break;
667
664 case 100: 668 case 100:
665 tcrypt_test("hmac(md5)"); 669 tcrypt_test("hmac(md5)");
666 break; 670 break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a75f11ffb957..b50c3c6b17a2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -72,6 +72,13 @@ struct comp_test_suite {
72 } comp, decomp; 72 } comp, decomp;
73}; 73};
74 74
75struct pcomp_test_suite {
76 struct {
77 struct pcomp_testvec *vecs;
78 unsigned int count;
79 } comp, decomp;
80};
81
75struct hash_test_suite { 82struct hash_test_suite {
76 struct hash_testvec *vecs; 83 struct hash_testvec *vecs;
77 unsigned int count; 84 unsigned int count;
@@ -86,6 +93,7 @@ struct alg_test_desc {
86 struct aead_test_suite aead; 93 struct aead_test_suite aead;
87 struct cipher_test_suite cipher; 94 struct cipher_test_suite cipher;
88 struct comp_test_suite comp; 95 struct comp_test_suite comp;
96 struct pcomp_test_suite pcomp;
89 struct hash_test_suite hash; 97 struct hash_test_suite hash;
90 } suite; 98 } suite;
91}; 99};
@@ -898,6 +906,159 @@ out:
898 return ret; 906 return ret;
899} 907}
900 908
909static int test_pcomp(struct crypto_pcomp *tfm,
910 struct pcomp_testvec *ctemplate,
911 struct pcomp_testvec *dtemplate, int ctcount,
912 int dtcount)
913{
914 const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
915 unsigned int i;
916 char result[COMP_BUF_SIZE];
917 int error;
918
919 for (i = 0; i < ctcount; i++) {
920 struct comp_request req;
921
922 error = crypto_compress_setup(tfm, ctemplate[i].params,
923 ctemplate[i].paramsize);
924 if (error) {
925 pr_err("alg: pcomp: compression setup failed on test "
926 "%d for %s: error=%d\n", i + 1, algo, error);
927 return error;
928 }
929
930 error = crypto_compress_init(tfm);
931 if (error) {
932 pr_err("alg: pcomp: compression init failed on test "
933 "%d for %s: error=%d\n", i + 1, algo, error);
934 return error;
935 }
936
937 memset(result, 0, sizeof(result));
938
939 req.next_in = ctemplate[i].input;
940 req.avail_in = ctemplate[i].inlen / 2;
941 req.next_out = result;
942 req.avail_out = ctemplate[i].outlen / 2;
943
944 error = crypto_compress_update(tfm, &req);
945 if (error && (error != -EAGAIN || req.avail_in)) {
946 pr_err("alg: pcomp: compression update failed on test "
947 "%d for %s: error=%d\n", i + 1, algo, error);
948 return error;
949 }
950
951 /* Add remaining input data */
952 req.avail_in += (ctemplate[i].inlen + 1) / 2;
953
954 error = crypto_compress_update(tfm, &req);
955 if (error && (error != -EAGAIN || req.avail_in)) {
956 pr_err("alg: pcomp: compression update failed on test "
957 "%d for %s: error=%d\n", i + 1, algo, error);
958 return error;
959 }
960
961 /* Provide remaining output space */
962 req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
963
964 error = crypto_compress_final(tfm, &req);
965 if (error) {
966 pr_err("alg: pcomp: compression final failed on test "
967 "%d for %s: error=%d\n", i + 1, algo, error);
968 return error;
969 }
970
971 if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
972 pr_err("alg: pcomp: Compression test %d failed for %s: "
973 "output len = %d (expected %d)\n", i + 1, algo,
974 COMP_BUF_SIZE - req.avail_out,
975 ctemplate[i].outlen);
976 return -EINVAL;
977 }
978
979 if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
980 pr_err("alg: pcomp: Compression test %d failed for "
981 "%s\n", i + 1, algo);
982 hexdump(result, ctemplate[i].outlen);
983 return -EINVAL;
984 }
985 }
986
987 for (i = 0; i < dtcount; i++) {
988 struct comp_request req;
989
990 error = crypto_decompress_setup(tfm, dtemplate[i].params,
991 dtemplate[i].paramsize);
992 if (error) {
993 pr_err("alg: pcomp: decompression setup failed on "
994 "test %d for %s: error=%d\n", i + 1, algo,
995 error);
996 return error;
997 }
998
999 error = crypto_decompress_init(tfm);
1000 if (error) {
1001 pr_err("alg: pcomp: decompression init failed on test "
1002 "%d for %s: error=%d\n", i + 1, algo, error);
1003 return error;
1004 }
1005
1006 memset(result, 0, sizeof(result));
1007
1008 req.next_in = dtemplate[i].input;
1009 req.avail_in = dtemplate[i].inlen / 2;
1010 req.next_out = result;
1011 req.avail_out = dtemplate[i].outlen / 2;
1012
1013 error = crypto_decompress_update(tfm, &req);
1014 if (error && (error != -EAGAIN || req.avail_in)) {
1015 pr_err("alg: pcomp: decompression update failed on "
1016 "test %d for %s: error=%d\n", i + 1, algo,
1017 error);
1018 return error;
1019 }
1020
1021 /* Add remaining input data */
1022 req.avail_in += (dtemplate[i].inlen + 1) / 2;
1023
1024 error = crypto_decompress_update(tfm, &req);
1025 if (error && (error != -EAGAIN || req.avail_in)) {
1026 pr_err("alg: pcomp: decompression update failed on "
1027 "test %d for %s: error=%d\n", i + 1, algo,
1028 error);
1029 return error;
1030 }
1031
1032 /* Provide remaining output space */
1033 req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
1034
1035 error = crypto_decompress_final(tfm, &req);
1036 if (error && (error != -EAGAIN || req.avail_in)) {
1037 pr_err("alg: pcomp: decompression final failed on "
1038 "test %d for %s: error=%d\n", i + 1, algo,
1039 error);
1040 return error;
1041 }
1042
1043 if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
1044 pr_err("alg: pcomp: Decompression test %d failed for "
1045 "%s: output len = %d (expected %d)\n", i + 1,
1046 algo, COMP_BUF_SIZE - req.avail_out,
1047 dtemplate[i].outlen);
1048 return -EINVAL;
1049 }
1050
1051 if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
1052 pr_err("alg: pcomp: Decompression test %d failed for "
1053 "%s\n", i + 1, algo);
1054 hexdump(result, dtemplate[i].outlen);
1055 return -EINVAL;
1056 }
1057 }
1058
1059 return 0;
1060}
1061
901static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, 1062static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
902 u32 type, u32 mask) 1063 u32 type, u32 mask)
903{ 1064{
@@ -1007,6 +1168,28 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1007 return err; 1168 return err;
1008} 1169}
1009 1170
1171static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
1172 u32 type, u32 mask)
1173{
1174 struct crypto_pcomp *tfm;
1175 int err;
1176
1177 tfm = crypto_alloc_pcomp(driver, type, mask);
1178 if (IS_ERR(tfm)) {
1179 pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
1180 driver, PTR_ERR(tfm));
1181 return PTR_ERR(tfm);
1182 }
1183
1184 err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
1185 desc->suite.pcomp.decomp.vecs,
1186 desc->suite.pcomp.comp.count,
1187 desc->suite.pcomp.decomp.count);
1188
1189 crypto_free_pcomp(tfm);
1190 return err;
1191}
1192
1010static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, 1193static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1011 u32 type, u32 mask) 1194 u32 type, u32 mask)
1012{ 1195{
@@ -1835,6 +2018,21 @@ static const struct alg_test_desc alg_test_descs[] = {
1835 } 2018 }
1836 } 2019 }
1837 } 2020 }
2021 }, {
2022 .alg = "zlib",
2023 .test = alg_test_pcomp,
2024 .suite = {
2025 .pcomp = {
2026 .comp = {
2027 .vecs = zlib_comp_tv_template,
2028 .count = ZLIB_COMP_TEST_VECTORS
2029 },
2030 .decomp = {
2031 .vecs = zlib_decomp_tv_template,
2032 .count = ZLIB_DECOMP_TEST_VECTORS
2033 }
2034 }
2035 }
1838 } 2036 }
1839}; 2037};
1840 2038
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 132953e144d3..526f00a9c72f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -15,6 +15,11 @@
15#ifndef _CRYPTO_TESTMGR_H 15#ifndef _CRYPTO_TESTMGR_H
16#define _CRYPTO_TESTMGR_H 16#define _CRYPTO_TESTMGR_H
17 17
18#include <linux/netlink.h>
19#include <linux/zlib.h>
20
21#include <crypto/compress.h>
22
18#define MAX_DIGEST_SIZE 64 23#define MAX_DIGEST_SIZE 64
19#define MAX_TAP 8 24#define MAX_TAP 8
20 25
@@ -8347,10 +8352,19 @@ struct comp_testvec {
8347 char output[COMP_BUF_SIZE]; 8352 char output[COMP_BUF_SIZE];
8348}; 8353};
8349 8354
8355struct pcomp_testvec {
8356 void *params;
8357 unsigned int paramsize;
8358 int inlen, outlen;
8359 char input[COMP_BUF_SIZE];
8360 char output[COMP_BUF_SIZE];
8361};
8362
8350/* 8363/*
8351 * Deflate test vectors (null-terminated strings). 8364 * Deflate test vectors (null-terminated strings).
8352 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. 8365 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
8353 */ 8366 */
8367
8354#define DEFLATE_COMP_TEST_VECTORS 2 8368#define DEFLATE_COMP_TEST_VECTORS 2
8355#define DEFLATE_DECOMP_TEST_VECTORS 2 8369#define DEFLATE_DECOMP_TEST_VECTORS 2
8356 8370
@@ -8426,6 +8440,139 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
8426 }, 8440 },
8427}; 8441};
8428 8442
8443#define ZLIB_COMP_TEST_VECTORS 2
8444#define ZLIB_DECOMP_TEST_VECTORS 2
8445
8446static const struct {
8447 struct nlattr nla;
8448 int val;
8449} deflate_comp_params[] = {
8450 {
8451 .nla = {
8452 .nla_len = NLA_HDRLEN + sizeof(int),
8453 .nla_type = ZLIB_COMP_LEVEL,
8454 },
8455 .val = Z_DEFAULT_COMPRESSION,
8456 }, {
8457 .nla = {
8458 .nla_len = NLA_HDRLEN + sizeof(int),
8459 .nla_type = ZLIB_COMP_METHOD,
8460 },
8461 .val = Z_DEFLATED,
8462 }, {
8463 .nla = {
8464 .nla_len = NLA_HDRLEN + sizeof(int),
8465 .nla_type = ZLIB_COMP_WINDOWBITS,
8466 },
8467 .val = -11,
8468 }, {
8469 .nla = {
8470 .nla_len = NLA_HDRLEN + sizeof(int),
8471 .nla_type = ZLIB_COMP_MEMLEVEL,
8472 },
8473 .val = MAX_MEM_LEVEL,
8474 }, {
8475 .nla = {
8476 .nla_len = NLA_HDRLEN + sizeof(int),
8477 .nla_type = ZLIB_COMP_STRATEGY,
8478 },
8479 .val = Z_DEFAULT_STRATEGY,
8480 }
8481};
8482
8483static const struct {
8484 struct nlattr nla;
8485 int val;
8486} deflate_decomp_params[] = {
8487 {
8488 .nla = {
8489 .nla_len = NLA_HDRLEN + sizeof(int),
8490 .nla_type = ZLIB_DECOMP_WINDOWBITS,
8491 },
8492 .val = -11,
8493 }
8494};
8495
8496static struct pcomp_testvec zlib_comp_tv_template[] = {
8497 {
8498 .params = &deflate_comp_params,
8499 .paramsize = sizeof(deflate_comp_params),
8500 .inlen = 70,
8501 .outlen = 38,
8502 .input = "Join us now and share the software "
8503 "Join us now and share the software ",
8504 .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
8505 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
8506 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
8507 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
8508 "\x71\xbc\x08\x2b\x01\x00",
8509 }, {
8510 .params = &deflate_comp_params,
8511 .paramsize = sizeof(deflate_comp_params),
8512 .inlen = 191,
8513 .outlen = 122,
8514 .input = "This document describes a compression method based on the DEFLATE"
8515 "compression algorithm. This document defines the application of "
8516 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
8517 .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
8518 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
8519 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
8520 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
8521 "\x68\x12\x51\xae\x76\x67\xd6\x27"
8522 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
8523 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
8524 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
8525 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
8526 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
8527 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
8528 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
8529 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
8530 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
8531 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
8532 "\xfa\x02",
8533 },
8534};
8535
8536static struct pcomp_testvec zlib_decomp_tv_template[] = {
8537 {
8538 .params = &deflate_decomp_params,
8539 .paramsize = sizeof(deflate_decomp_params),
8540 .inlen = 122,
8541 .outlen = 191,
8542 .input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
8543 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
8544 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
8545 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
8546 "\x68\x12\x51\xae\x76\x67\xd6\x27"
8547 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
8548 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
8549 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
8550 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
8551 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
8552 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
8553 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
8554 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
8555 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
8556 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
8557 "\xfa\x02",
8558 .output = "This document describes a compression method based on the DEFLATE"
8559 "compression algorithm. This document defines the application of "
8560 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
8561 }, {
8562 .params = &deflate_decomp_params,
8563 .paramsize = sizeof(deflate_decomp_params),
8564 .inlen = 38,
8565 .outlen = 70,
8566 .input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
8567 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
8568 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
8569 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
8570 "\x71\xbc\x08\x2b\x01\x00",
8571 .output = "Join us now and share the software "
8572 "Join us now and share the software ",
8573 },
8574};
8575
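As a sanity check, the vectors above can be reproduced in userspace with stock zlib, using the same parameters the nlattr blobs encode (raw deflate via windowBits -11, MAX_MEM_LEVEL, default level and strategy). A minimal sketch, assuming a standard zlib installation; not part of the patch:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const char *in = "Join us now and share the software "
			 "Join us now and share the software ";
	unsigned char out[256];
	z_stream strm;

	memset(&strm, 0, sizeof(strm));
	/* same parameters as deflate_comp_params above */
	if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -11,
			 MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
		return 1;
	strm.next_in = (unsigned char *)in;
	strm.avail_in = 70;	/* inlen of the first vector */
	strm.next_out = out;
	strm.avail_out = sizeof(out);
	if (deflate(&strm, Z_FINISH) != Z_STREAM_END)
		return 1;
	printf("compressed to %lu bytes (expected 38)\n", strm.total_out);
	return deflateEnd(&strm) == Z_OK ? 0 : 1;
}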
8429/* 8576/*
8430 * LZO test vectors (null-terminated strings). 8577 * LZO test vectors (null-terminated strings).
8431 */ 8578 */
diff --git a/crypto/zlib.c b/crypto/zlib.c
new file mode 100644
index 000000000000..33609bab614e
--- /dev/null
+++ b/crypto/zlib.c
@@ -0,0 +1,378 @@
1/*
2 * Cryptographic API.
3 *
4 * Zlib algorithm
5 *
6 * Copyright 2008 Sony Corporation
7 *
8 * Based on deflate.c, which is
9 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * FIXME: deflate transforms will require up to a total of about 436k of kernel
17 * memory on i386 (390k for compression, the rest for decompression), as the
18 * current zlib kernel code uses a worst case pre-allocation system by default.
19 * This needs to be fixed so that the amount of memory required is properly
20 * related to the winbits and memlevel parameters.
21 */
22
23#define pr_fmt(fmt) "%s: " fmt, __func__
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/zlib.h>
28#include <linux/vmalloc.h>
29#include <linux/interrupt.h>
30#include <linux/mm.h>
31#include <linux/net.h>
32#include <linux/slab.h>
33
34#include <crypto/internal/compress.h>
35
36#include <net/netlink.h>
37
38
39struct zlib_ctx {
40 struct z_stream_s comp_stream;
41 struct z_stream_s decomp_stream;
42 int decomp_windowBits;
43};
44
45
46static void zlib_comp_exit(struct zlib_ctx *ctx)
47{
48 struct z_stream_s *stream = &ctx->comp_stream;
49
50 if (stream->workspace) {
51 zlib_deflateEnd(stream);
52 vfree(stream->workspace);
53 stream->workspace = NULL;
54 }
55}
56
57static void zlib_decomp_exit(struct zlib_ctx *ctx)
58{
59 struct z_stream_s *stream = &ctx->decomp_stream;
60
61 if (stream->workspace) {
62 zlib_inflateEnd(stream);
63 kfree(stream->workspace);
64 stream->workspace = NULL;
65 }
66}
67
68static int zlib_init(struct crypto_tfm *tfm)
69{
70 return 0;
71}
72
73static void zlib_exit(struct crypto_tfm *tfm)
74{
75 struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
76
77 zlib_comp_exit(ctx);
78 zlib_decomp_exit(ctx);
79}
80
81
82static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
83 unsigned int len)
84{
85 struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
86 struct z_stream_s *stream = &ctx->comp_stream;
87 struct nlattr *tb[ZLIB_COMP_MAX + 1];
88 size_t workspacesize;
89 int ret;
90
91 ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
92 if (ret)
93 return ret;
94
95 zlib_comp_exit(ctx);
96
97 workspacesize = zlib_deflate_workspacesize();
98 stream->workspace = vmalloc(workspacesize);
99 if (!stream->workspace)
100 return -ENOMEM;
101
102 memset(stream->workspace, 0, workspacesize);
103 ret = zlib_deflateInit2(stream,
104 tb[ZLIB_COMP_LEVEL]
105 ? nla_get_u32(tb[ZLIB_COMP_LEVEL])
106 : Z_DEFAULT_COMPRESSION,
107 tb[ZLIB_COMP_METHOD]
108 ? nla_get_u32(tb[ZLIB_COMP_METHOD])
109 : Z_DEFLATED,
110 tb[ZLIB_COMP_WINDOWBITS]
111 ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
112 : MAX_WBITS,
113 tb[ZLIB_COMP_MEMLEVEL]
114 ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
115 : DEF_MEM_LEVEL,
116 tb[ZLIB_COMP_STRATEGY]
117 ? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
118 : Z_DEFAULT_STRATEGY);
119 if (ret != Z_OK) {
120 vfree(stream->workspace);
121 stream->workspace = NULL;
122 return -EINVAL;
123 }
124
125 return 0;
126}
127
128static int zlib_compress_init(struct crypto_pcomp *tfm)
129{
130 int ret;
131 struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
132 struct z_stream_s *stream = &dctx->comp_stream;
133
134 ret = zlib_deflateReset(stream);
135 if (ret != Z_OK)
136 return -EINVAL;
137
138 return 0;
139}
140
141static int zlib_compress_update(struct crypto_pcomp *tfm,
142 struct comp_request *req)
143{
144 int ret;
145 struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
146 struct z_stream_s *stream = &dctx->comp_stream;
147
148 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
149 stream->next_in = req->next_in;
150 stream->avail_in = req->avail_in;
151 stream->next_out = req->next_out;
152 stream->avail_out = req->avail_out;
153
154 ret = zlib_deflate(stream, Z_NO_FLUSH);
155 switch (ret) {
156 case Z_OK:
157 break;
158
159 case Z_BUF_ERROR:
160 pr_debug("zlib_deflate could not make progress\n");
161 return -EAGAIN;
162
163 default:
164 pr_debug("zlib_deflate failed %d\n", ret);
165 return -EINVAL;
166 }
167
168 pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
169 stream->avail_in, stream->avail_out,
170 req->avail_in - stream->avail_in,
171 req->avail_out - stream->avail_out);
172 req->next_in = stream->next_in;
173 req->avail_in = stream->avail_in;
174 req->next_out = stream->next_out;
175 req->avail_out = stream->avail_out;
176 return 0;
177}
178
179static int zlib_compress_final(struct crypto_pcomp *tfm,
180 struct comp_request *req)
181{
182 int ret;
183 struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
184 struct z_stream_s *stream = &dctx->comp_stream;
185
186 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
187 stream->next_in = req->next_in;
188 stream->avail_in = req->avail_in;
189 stream->next_out = req->next_out;
190 stream->avail_out = req->avail_out;
191
192 ret = zlib_deflate(stream, Z_FINISH);
193 if (ret != Z_STREAM_END) {
194 pr_debug("zlib_deflate failed %d\n", ret);
195 return -EINVAL;
196 }
197
198 pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
199 stream->avail_in, stream->avail_out,
200 req->avail_in - stream->avail_in,
201 req->avail_out - stream->avail_out);
202 req->next_in = stream->next_in;
203 req->avail_in = stream->avail_in;
204 req->next_out = stream->next_out;
205 req->avail_out = stream->avail_out;
206 return 0;
207}
208
209
210static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params,
211 unsigned int len)
212{
213 struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
214 struct z_stream_s *stream = &ctx->decomp_stream;
215 struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
216 int ret = 0;
217
218 ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
219 if (ret)
220 return ret;
221
222 zlib_decomp_exit(ctx);
223
224 ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
225 ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
226 : DEF_WBITS;
227
228 stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
229 if (!stream->workspace)
230 return -ENOMEM;
231
232 ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
233 if (ret != Z_OK) {
234 kfree(stream->workspace);
235 stream->workspace = NULL;
236 return -EINVAL;
237 }
238
239 return 0;
240}
241
242static int zlib_decompress_init(struct crypto_pcomp *tfm)
243{
244 int ret;
245 struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
246 struct z_stream_s *stream = &dctx->decomp_stream;
247
248 ret = zlib_inflateReset(stream);
249 if (ret != Z_OK)
250 return -EINVAL;
251
252 return 0;
253}
254
255static int zlib_decompress_update(struct crypto_pcomp *tfm,
256 struct comp_request *req)
257{
258 int ret;
259 struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
260 struct z_stream_s *stream = &dctx->decomp_stream;
261
262 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
263 stream->next_in = req->next_in;
264 stream->avail_in = req->avail_in;
265 stream->next_out = req->next_out;
266 stream->avail_out = req->avail_out;
267
268 ret = zlib_inflate(stream, Z_SYNC_FLUSH);
269 switch (ret) {
270 case Z_OK:
271 case Z_STREAM_END:
272 break;
273
274 case Z_BUF_ERROR:
275 pr_debug("zlib_inflate could not make progress\n");
276 return -EAGAIN;
277
278 default:
279 pr_debug("zlib_inflate failed %d\n", ret);
280 return -EINVAL;
281 }
282
283 pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
284 stream->avail_in, stream->avail_out,
285 req->avail_in - stream->avail_in,
286 req->avail_out - stream->avail_out);
287 req->next_in = stream->next_in;
288 req->avail_in = stream->avail_in;
289 req->next_out = stream->next_out;
290 req->avail_out = stream->avail_out;
291 return 0;
292}
293
294static int zlib_decompress_final(struct crypto_pcomp *tfm,
295 struct comp_request *req)
296{
297 int ret;
298 struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
299 struct z_stream_s *stream = &dctx->decomp_stream;
300
301 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
302 stream->next_in = req->next_in;
303 stream->avail_in = req->avail_in;
304 stream->next_out = req->next_out;
305 stream->avail_out = req->avail_out;
306
307 if (dctx->decomp_windowBits < 0) {
308 ret = zlib_inflate(stream, Z_SYNC_FLUSH);
309 /*
310 * Work around a bug in zlib, which sometimes wants to taste an
311 * extra byte when being used in the (undocumented) raw deflate
312 * mode. (From USAGI).
313 */
314 if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
315 const void *saved_next_in = stream->next_in;
316 u8 zerostuff = 0;
317
318 stream->next_in = &zerostuff;
319 stream->avail_in = 1;
320 ret = zlib_inflate(stream, Z_FINISH);
321 stream->next_in = saved_next_in;
322 stream->avail_in = 0;
323 }
324 } else
325 ret = zlib_inflate(stream, Z_FINISH);
326 if (ret != Z_STREAM_END) {
327 pr_debug("zlib_inflate failed %d\n", ret);
328 return -EINVAL;
329 }
330
331 pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n",
332 stream->avail_in, stream->avail_out,
333 req->avail_in - stream->avail_in,
334 req->avail_out - stream->avail_out);
335 req->next_in = stream->next_in;
336 req->avail_in = stream->avail_in;
337 req->next_out = stream->next_out;
338 req->avail_out = stream->avail_out;
339 return 0;
340}
341
342
343static struct pcomp_alg zlib_alg = {
344 .compress_setup = zlib_compress_setup,
345 .compress_init = zlib_compress_init,
346 .compress_update = zlib_compress_update,
347 .compress_final = zlib_compress_final,
348 .decompress_setup = zlib_decompress_setup,
349 .decompress_init = zlib_decompress_init,
350 .decompress_update = zlib_decompress_update,
351 .decompress_final = zlib_decompress_final,
352
353 .base = {
354 .cra_name = "zlib",
355 .cra_flags = CRYPTO_ALG_TYPE_PCOMPRESS,
356 .cra_ctxsize = sizeof(struct zlib_ctx),
357 .cra_module = THIS_MODULE,
358 .cra_init = zlib_init,
359 .cra_exit = zlib_exit,
360 }
361};
362
363static int __init zlib_mod_init(void)
364{
365 return crypto_register_pcomp(&zlib_alg);
366}
367
368static void __exit zlib_mod_fini(void)
369{
370 crypto_unregister_pcomp(&zlib_alg);
371}
372
373module_init(zlib_mod_init);
374module_exit(zlib_mod_fini);
375
376MODULE_LICENSE("GPL");
377MODULE_DESCRIPTION("Zlib Compression Algorithm");
378MODULE_AUTHOR("Sony Corporation");
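For context, a consumer drives this transform through the new pcomp handles rather than the old crypto_comp interface. A minimal sketch, assuming the crypto_alloc_pcomp()/crypto_compress_*() helpers declared by the new <crypto/compress.h> in this series (the header is in the diffstat but not shown here, so the exact signatures are an assumption); 'params' is an nlattr blob built like deflate_comp_params in testmgr.h:

#include <linux/err.h>
#include <crypto/compress.h>

static int zlib_pcomp_example(void *params, unsigned int paramsize,
			      const void *src, unsigned int slen,
			      void *dst, unsigned int dlen)
{
	struct crypto_pcomp *tfm;
	struct comp_request req;
	int ret;

	tfm = crypto_alloc_pcomp("zlib", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_compress_setup(tfm, params, paramsize);
	if (ret)
		goto out;
	ret = crypto_compress_init(tfm);
	if (ret)
		goto out;

	req.next_in = src;
	req.avail_in = slen;
	req.next_out = dst;
	req.avail_out = dlen;

	ret = crypto_compress_update(tfm, &req);
	if (ret >= 0)
		ret = crypto_compress_final(tfm, &req);
out:
	crypto_free_pcomp(tfm);
	return ret;
}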
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8822eca58ffa..5fab6470f4b2 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -20,6 +20,20 @@ config HW_RANDOM
20 20
21 If unsure, say Y. 21 If unsure, say Y.
22 22
23config HW_RANDOM_TIMERIOMEM
24 tristate "Timer IOMEM HW Random Number Generator support"
25 depends on HW_RANDOM && HAS_IOMEM
26 ---help---
 27	  This driver provides kernel-side support for a generic Random
 28	  Number Generator backed by a 'dumb' iomem address that must be
 29	  read no faster than some given rate, for example once a second;
 30	  the default FPGA bitstream on the TS-7800 has such functionality.
31
32 To compile this driver as a module, choose M here: the
33 module will be called timeriomem-rng.
34
35 If unsure, say Y.
36
23config HW_RANDOM_INTEL 37config HW_RANDOM_INTEL
24 tristate "Intel HW Random Number Generator support" 38 tristate "Intel HW Random Number Generator support"
25 depends on HW_RANDOM && (X86 || IA64) && PCI 39 depends on HW_RANDOM && (X86 || IA64) && PCI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b6effb7522c2..e81d21a5f28f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_HW_RANDOM) += rng-core.o 5obj-$(CONFIG_HW_RANDOM) += rng-core.o
6rng-core-y := core.o 6rng-core-y := core.o
7obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
7obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o 8obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
8obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o 9obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
9obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o 10obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
new file mode 100644
index 000000000000..10ad41be5897
--- /dev/null
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -0,0 +1,151 @@
1/*
2 * drivers/char/hw_random/timeriomem-rng.c
3 *
4 * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk>
5 *
6 * Derived from drivers/char/hw_random/omap-rng.c
7 * Copyright 2005 (c) MontaVista Software, Inc.
8 * Author: Deepak Saxena <dsaxena@plexity.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Overview:
15 * This driver is useful for platforms that have an IO range that provides
16 * periodic random data from a single IO memory address. All the platform
 17 * has to do is provide the address and the 'wait time' before new
 18 * data becomes available.
19 *
20 * TODO: add support for reading sizes other than 32bits and masking
21 */
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/platform_device.h>
26#include <linux/hw_random.h>
27#include <linux/io.h>
28#include <linux/timeriomem-rng.h>
29#include <linux/jiffies.h>
30#include <linux/sched.h>
31#include <linux/timer.h>
32#include <linux/completion.h>
33
34static struct timeriomem_rng_data *timeriomem_rng_data;
35
36static void timeriomem_rng_trigger(unsigned long);
37static DEFINE_TIMER(timeriomem_rng_timer, timeriomem_rng_trigger, 0, 0);
38
39/*
 40 * return 1 if we have data, 0 if we have nothing
41 */
42static int timeriomem_rng_data_present(struct hwrng *rng, int wait)
43{
44 if (rng->priv == 0)
45 return 1;
46
47 if (!wait || timeriomem_rng_data->present)
48 return timeriomem_rng_data->present;
49
50 wait_for_completion(&timeriomem_rng_data->completion);
51
52 return 1;
53}
54
55static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data)
56{
57 unsigned long cur;
58 s32 delay;
59
60 *data = readl(timeriomem_rng_data->address);
61
62 if (rng->priv != 0) {
63 cur = jiffies;
64
65 delay = cur - timeriomem_rng_timer.expires;
66 delay = rng->priv - (delay % rng->priv);
67
68 timeriomem_rng_timer.expires = cur + delay;
69 timeriomem_rng_data->present = 0;
70
71 init_completion(&timeriomem_rng_data->completion);
72 add_timer(&timeriomem_rng_timer);
73 }
74
75 return 4;
76}
77
78static void timeriomem_rng_trigger(unsigned long dummy)
79{
80 timeriomem_rng_data->present = 1;
81 complete(&timeriomem_rng_data->completion);
82}
83
84static struct hwrng timeriomem_rng_ops = {
85 .name = "timeriomem",
86 .data_present = timeriomem_rng_data_present,
87 .data_read = timeriomem_rng_data_read,
88 .priv = 0,
89};
90
91static int __init timeriomem_rng_probe(struct platform_device *pdev)
92{
93 int ret;
94
95 timeriomem_rng_data = pdev->dev.platform_data;
96
97 if (timeriomem_rng_data->period != 0
98 && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
99 timeriomem_rng_timer.expires = jiffies;
100
101 timeriomem_rng_ops.priv = usecs_to_jiffies(
102 timeriomem_rng_data->period);
103 }
104 timeriomem_rng_data->present = 1;
105
106 ret = hwrng_register(&timeriomem_rng_ops);
107 if (ret) {
108 dev_err(&pdev->dev, "problem registering\n");
109 return ret;
110 }
111
112 dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
113 timeriomem_rng_data->address,
114 timeriomem_rng_data->period);
115
116 return 0;
117}
118
119static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
120{
121 del_timer_sync(&timeriomem_rng_timer);
122 hwrng_unregister(&timeriomem_rng_ops);
123
124 return 0;
125}
126
127static struct platform_driver timeriomem_rng_driver = {
128 .driver = {
129 .name = "timeriomem_rng",
130 .owner = THIS_MODULE,
131 },
132 .probe = timeriomem_rng_probe,
133 .remove = __devexit_p(timeriomem_rng_remove),
134};
135
136static int __init timeriomem_rng_init(void)
137{
138 return platform_driver_register(&timeriomem_rng_driver);
139}
140
141static void __exit timeriomem_rng_exit(void)
142{
143 platform_driver_unregister(&timeriomem_rng_driver);
144}
145
146module_init(timeriomem_rng_init);
147module_exit(timeriomem_rng_exit);
148
149MODULE_LICENSE("GPL");
150MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
151MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
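Platforms hook this driver up through platform data. A minimal board-code sketch, assuming the struct timeriomem_rng_data layout declared by the new <linux/timeriomem-rng.h> (in the diffstat but not shown here); the fields are inferred from how the driver above dereferences its platform data, and the register address is a placeholder the platform must fill in:

#include <linux/platform_device.h>
#include <linux/timeriomem-rng.h>

static struct timeriomem_rng_data board_rng_data = {
	.address	= NULL,		/* iomem address of the RNG register,
					 * e.g. assigned from ioremap() */
	.period		= 1000000,	/* fresh data at most once a second */
};

static struct platform_device board_rng_device = {
	.name	= "timeriomem_rng",
	.id	= -1,
	.dev	= {
		.platform_data = &board_rng_data,
	},
};

Registering board_rng_device with platform_device_register() then binds it to the probe routine above.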
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e522144cba3a..01afd758072f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -86,7 +86,7 @@ config ZCRYPT_MONOLITHIC
86config CRYPTO_SHA1_S390 86config CRYPTO_SHA1_S390
87 tristate "SHA1 digest algorithm" 87 tristate "SHA1 digest algorithm"
88 depends on S390 88 depends on S390
89 select CRYPTO_ALGAPI 89 select CRYPTO_HASH
90 help 90 help
91 This is the s390 hardware accelerated implementation of the 91 This is the s390 hardware accelerated implementation of the
92 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 92 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
@@ -94,7 +94,7 @@ config CRYPTO_SHA1_S390
94config CRYPTO_SHA256_S390 94config CRYPTO_SHA256_S390
95 tristate "SHA256 digest algorithm" 95 tristate "SHA256 digest algorithm"
96 depends on S390 96 depends on S390
97 select CRYPTO_ALGAPI 97 select CRYPTO_HASH
98 help 98 help
99 This is the s390 hardware accelerated implementation of the 99 This is the s390 hardware accelerated implementation of the
100 SHA256 secure hash standard (DFIPS 180-2). 100 SHA256 secure hash standard (DFIPS 180-2).
@@ -105,7 +105,7 @@ config CRYPTO_SHA256_S390
105config CRYPTO_SHA512_S390 105config CRYPTO_SHA512_S390
106 tristate "SHA384 and SHA512 digest algorithm" 106 tristate "SHA384 and SHA512 digest algorithm"
107 depends on S390 107 depends on S390
108 select CRYPTO_ALGAPI 108 select CRYPTO_HASH
109 help 109 help
110 This is the s390 hardware accelerated implementation of the 110 This is the s390 hardware accelerated implementation of the
111 SHA512 secure hash standard. 111 SHA512 secure hash standard.
@@ -200,4 +200,13 @@ config CRYPTO_DEV_IXP4XX
200 help 200 help
201 Driver for the IXP4xx NPE crypto engine. 201 Driver for the IXP4xx NPE crypto engine.
202 202
203config CRYPTO_DEV_PPC4XX
204	tristate "Driver for the AMCC PPC4xx crypto accelerator"
205 depends on PPC && 4xx
206 select CRYPTO_HASH
207 select CRYPTO_ALGAPI
208 select CRYPTO_BLKCIPHER
209 help
210	  This option enables support for the AMCC PPC4xx crypto accelerator.
211
203endif # CRYPTO_HW 212endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 73557b2968d3..9bf4a2bc8846 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
7obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
new file mode 100644
index 000000000000..aa376e8d5ed5
--- /dev/null
+++ b/drivers/crypto/amcc/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
2crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
new file mode 100644
index 000000000000..61b6e1bec8c6
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -0,0 +1,293 @@
1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * This file implements the Linux crypto algorithms.
18 */
19
20#include <linux/kernel.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock_types.h>
23#include <linux/scatterlist.h>
24#include <linux/crypto.h>
25#include <linux/hash.h>
26#include <crypto/internal/hash.h>
27#include <linux/dma-mapping.h>
28#include <crypto/algapi.h>
29#include <crypto/aes.h>
30#include <crypto/sha.h>
31#include "crypto4xx_reg_def.h"
32#include "crypto4xx_sa.h"
33#include "crypto4xx_core.h"
34
35void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
36 u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
37 u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
38 u32 dir)
39{
40 sa->sa_command_0.w = 0;
41 sa->sa_command_0.bf.save_hash_state = save_h;
42 sa->sa_command_0.bf.save_iv = save_iv;
43 sa->sa_command_0.bf.load_hash_state = ld_h;
44 sa->sa_command_0.bf.load_iv = ld_iv;
45 sa->sa_command_0.bf.hdr_proc = hdr_proc;
46 sa->sa_command_0.bf.hash_alg = h;
47 sa->sa_command_0.bf.cipher_alg = c;
48 sa->sa_command_0.bf.pad_type = pad_type & 3;
49 sa->sa_command_0.bf.extend_pad = pad_type >> 2;
50 sa->sa_command_0.bf.op_group = op_grp;
51 sa->sa_command_0.bf.opcode = op;
52 sa->sa_command_0.bf.dir = dir;
53}
54
55void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
56 u32 cfb, u32 esn, u32 sn_mask, u32 mute,
57 u32 cp_pad, u32 cp_pay, u32 cp_hdr)
58{
59 sa->sa_command_1.w = 0;
60 sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
61 sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
62 sa->sa_command_1.bf.feedback_mode = cfb,
63 sa->sa_command_1.bf.sa_rev = 1;
64 sa->sa_command_1.bf.extended_seq_num = esn;
65 sa->sa_command_1.bf.seq_num_mask = sn_mask;
66 sa->sa_command_1.bf.mutable_bit_proc = mute;
67 sa->sa_command_1.bf.copy_pad = cp_pad;
68 sa->sa_command_1.bf.copy_payload = cp_pay;
69 sa->sa_command_1.bf.copy_hdr = cp_hdr;
70}
71
72int crypto4xx_encrypt(struct ablkcipher_request *req)
73{
74 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
75
76 ctx->direction = DIR_OUTBOUND;
77 ctx->hash_final = 0;
78 ctx->is_hash = 0;
79 ctx->pd_ctl = 0x1;
80
81 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
82 req->nbytes, req->info,
83 get_dynamic_sa_iv_size(ctx));
84}
85
86int crypto4xx_decrypt(struct ablkcipher_request *req)
87{
88 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
89
90 ctx->direction = DIR_INBOUND;
91 ctx->hash_final = 0;
92 ctx->is_hash = 0;
93 ctx->pd_ctl = 1;
94
95 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
96 req->nbytes, req->info,
97 get_dynamic_sa_iv_size(ctx));
98}
99
100/**
101 * AES Functions
102 */
103static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
104 const u8 *key,
105 unsigned int keylen,
106 unsigned char cm,
107 u8 fb)
108{
109 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
110 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
111 struct dynamic_sa_ctl *sa;
112 int rc;
113
114 if (keylen != AES_KEYSIZE_256 &&
115 keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
116 crypto_ablkcipher_set_flags(cipher,
117 CRYPTO_TFM_RES_BAD_KEY_LEN);
118 return -EINVAL;
119 }
120
121 /* Create SA */
122 if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
123 crypto4xx_free_sa(ctx);
124
125 rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
126 if (rc)
127 return rc;
128
129 if (ctx->state_record_dma_addr == 0) {
130 rc = crypto4xx_alloc_state_record(ctx);
131 if (rc) {
132 crypto4xx_free_sa(ctx);
133 return rc;
134 }
135 }
136 /* Setup SA */
137 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
138 ctx->hash_final = 0;
139
140 set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
141 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
142 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
143 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
144 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
145 DIR_INBOUND);
146
147 set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
148 fb, SA_EXTENDED_SN_OFF,
149 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
150 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
151 SA_NOT_COPY_HDR);
152 crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
153 key, keylen);
154 sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
155 sa->sa_command_1.bf.key_len = keylen >> 3;
156 ctx->is_hash = 0;
157 ctx->direction = DIR_INBOUND;
158 memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
159 (void *)&ctx->state_record_dma_addr, 4);
160 ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
161
162 memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
163 sa = (struct dynamic_sa_ctl *) ctx->sa_out;
164 sa->sa_command_0.bf.dir = DIR_OUTBOUND;
165
166 return 0;
167}
168
169int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
170 const u8 *key, unsigned int keylen)
171{
172 return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
173 CRYPTO_FEEDBACK_MODE_NO_FB);
174}
175
176/**
177 * HASH SHA1 Functions
178 */
179static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
180 unsigned int sa_len,
181 unsigned char ha,
182 unsigned char hm)
183{
184 struct crypto_alg *alg = tfm->__crt_alg;
185 struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
186 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
187 struct dynamic_sa_ctl *sa;
188 struct dynamic_sa_hash160 *sa_in;
189 int rc;
190
191 ctx->dev = my_alg->dev;
192 ctx->is_hash = 1;
193 ctx->hash_final = 0;
194
195 /* Create SA */
196 if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
197 crypto4xx_free_sa(ctx);
198
199 rc = crypto4xx_alloc_sa(ctx, sa_len);
200 if (rc)
201 return rc;
202
203 if (ctx->state_record_dma_addr == 0) {
204 crypto4xx_alloc_state_record(ctx);
205 if (!ctx->state_record_dma_addr) {
206 crypto4xx_free_sa(ctx);
207 return -ENOMEM;
208 }
209 }
210
211 tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
212 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
213 set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
214 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
215 SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
216 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
217 SA_OPCODE_HASH, DIR_INBOUND);
218 set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
219 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
220 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
221 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
222 SA_NOT_COPY_HDR);
223 ctx->direction = DIR_INBOUND;
224 sa->sa_contents = SA_HASH160_CONTENTS;
225 sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
226 /* Need to zero hash digest in SA */
227 memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
228 memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
229 sa_in->state_ptr = ctx->state_record_dma_addr;
230 ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
231
232 return 0;
233}
234
235int crypto4xx_hash_init(struct ahash_request *req)
236{
237 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
238 int ds;
239 struct dynamic_sa_ctl *sa;
240
241 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
242 ds = crypto_ahash_digestsize(
243 __crypto_ahash_cast(req->base.tfm));
244 sa->sa_command_0.bf.digest_len = ds >> 2;
245 sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
246 ctx->is_hash = 1;
247 ctx->direction = DIR_INBOUND;
248
249 return 0;
250}
251
252int crypto4xx_hash_update(struct ahash_request *req)
253{
254 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
255
256 ctx->is_hash = 1;
257 ctx->hash_final = 0;
258 ctx->pd_ctl = 0x11;
259 ctx->direction = DIR_INBOUND;
260
261 return crypto4xx_build_pd(&req->base, ctx, req->src,
262 (struct scatterlist *) req->result,
263 req->nbytes, NULL, 0);
264}
265
266int crypto4xx_hash_final(struct ahash_request *req)
267{
268 return 0;
269}
270
271int crypto4xx_hash_digest(struct ahash_request *req)
272{
273 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
274
275 ctx->hash_final = 1;
276 ctx->pd_ctl = 0x11;
277 ctx->direction = DIR_INBOUND;
278
279 return crypto4xx_build_pd(&req->base, ctx, req->src,
280 (struct scatterlist *) req->result,
281 req->nbytes, NULL, 0);
282}
283
284/**
285 * SHA1 Algorithm
286 */
287int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
288{
289 return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
290 SA_HASH_MODE_HASH);
291}
292
293
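For reference, the SA sizing and key-length encoding in crypto4xx_setkey_aes() above work out as follows (a reading aid derived from the code, not part of the patch):

/*
 * keylen (bytes)   SA words: SA_AES128_LEN + (keylen - 16) / 4   key_len = keylen >> 3
 *  16 (AES-128)    SA_AES128_LEN + 0                             2
 *  24 (AES-192)    SA_AES128_LEN + 2                             3
 *  32 (AES-256)    SA_AES128_LEN + 4                             4
 */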
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
new file mode 100644
index 000000000000..4c0dfb2b872e
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -0,0 +1,1310 @@
1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * This file implements AMCC crypto offload Linux device driver for use with
18 * Linux CryptoAPI.
19 */
20
21#include <linux/kernel.h>
22#include <linux/interrupt.h>
23#include <linux/spinlock_types.h>
24#include <linux/random.h>
25#include <linux/scatterlist.h>
26#include <linux/crypto.h>
27#include <linux/dma-mapping.h>
28#include <linux/platform_device.h>
29#include <linux/init.h>
30#include <linux/of_platform.h>
31#include <asm/dcr.h>
32#include <asm/dcr-regs.h>
33#include <asm/cacheflush.h>
34#include <crypto/internal/hash.h>
35#include <crypto/algapi.h>
36#include <crypto/aes.h>
37#include <crypto/sha.h>
38#include "crypto4xx_reg_def.h"
39#include "crypto4xx_core.h"
40#include "crypto4xx_sa.h"
41
42#define PPC4XX_SEC_VERSION_STR "0.5"
43
44/**
45 * PPC4xx Crypto Engine Initialization Routine
46 */
47static void crypto4xx_hw_init(struct crypto4xx_device *dev)
48{
49 union ce_ring_size ring_size;
50 union ce_ring_contol ring_ctrl;
51 union ce_part_ring_size part_ring_size;
52 union ce_io_threshold io_threshold;
53 u32 rand_num;
54 union ce_pe_dma_cfg pe_dma_cfg;
55
56 writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
 57	/* set up PE DMA: reset SG, PDR and PE, then release the reset */
58 pe_dma_cfg.w = 0;
59 pe_dma_cfg.bf.bo_sgpd_en = 1;
60 pe_dma_cfg.bf.bo_data_en = 0;
61 pe_dma_cfg.bf.bo_sa_en = 1;
62 pe_dma_cfg.bf.bo_pd_en = 1;
63 pe_dma_cfg.bf.dynamic_sa_en = 1;
64 pe_dma_cfg.bf.reset_sg = 1;
65 pe_dma_cfg.bf.reset_pdr = 1;
66 pe_dma_cfg.bf.reset_pe = 1;
67 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
 68	/* take PE, SG and PDR out of reset */
69 pe_dma_cfg.bf.pe_mode = 0;
70 pe_dma_cfg.bf.reset_sg = 0;
71 pe_dma_cfg.bf.reset_pdr = 0;
72 pe_dma_cfg.bf.reset_pe = 0;
73 pe_dma_cfg.bf.bo_td_en = 0;
74 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
75 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
76 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
77 writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
78 get_random_bytes(&rand_num, sizeof(rand_num));
79 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
80 get_random_bytes(&rand_num, sizeof(rand_num));
81 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
82 ring_size.w = 0;
83 ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
84 ring_size.bf.ring_size = PPC4XX_NUM_PD;
85 writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
86 ring_ctrl.w = 0;
87 writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
88 writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
89 writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
90 writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
91 part_ring_size.w = 0;
92 part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
93 part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
94 writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
95 writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
96 io_threshold.w = 0;
97 io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
98 io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
99 writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
100 writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
101 writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
102 writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
103 writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
104 writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
105 writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
106 writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
 107	/* enable the PE; leave SG and PDR out of reset */
108 pe_dma_cfg.bf.pe_mode = 1;
109 pe_dma_cfg.bf.reset_sg = 0;
110 pe_dma_cfg.bf.reset_pdr = 0;
111 pe_dma_cfg.bf.reset_pe = 0;
112 pe_dma_cfg.bf.bo_td_en = 0;
113 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
 114	/* clear all pending interrupts */
115 writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
116 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
117 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
118 writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
119 writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
120}
121
122int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
123{
124 ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
125 &ctx->sa_in_dma_addr, GFP_ATOMIC);
126 if (ctx->sa_in == NULL)
127 return -ENOMEM;
128
129 ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
130 &ctx->sa_out_dma_addr, GFP_ATOMIC);
131 if (ctx->sa_out == NULL) {
132 dma_free_coherent(ctx->dev->core_dev->device,
133 ctx->sa_len * 4,
134 ctx->sa_in, ctx->sa_in_dma_addr);
135 return -ENOMEM;
136 }
137
138 memset(ctx->sa_in, 0, size * 4);
139 memset(ctx->sa_out, 0, size * 4);
140 ctx->sa_len = size;
141
142 return 0;
143}
144
145void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
146{
147 if (ctx->sa_in != NULL)
148 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
149 ctx->sa_in, ctx->sa_in_dma_addr);
150 if (ctx->sa_out != NULL)
151 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
152 ctx->sa_out, ctx->sa_out_dma_addr);
153
154 ctx->sa_in_dma_addr = 0;
155 ctx->sa_out_dma_addr = 0;
156 ctx->sa_len = 0;
157}
158
159u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
160{
161 ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
162 sizeof(struct sa_state_record),
163 &ctx->state_record_dma_addr, GFP_ATOMIC);
164 if (!ctx->state_record_dma_addr)
165 return -ENOMEM;
166 memset(ctx->state_record, 0, sizeof(struct sa_state_record));
167
168 return 0;
169}
170
171void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
172{
173 if (ctx->state_record != NULL)
174 dma_free_coherent(ctx->dev->core_dev->device,
175 sizeof(struct sa_state_record),
176 ctx->state_record,
177 ctx->state_record_dma_addr);
178 ctx->state_record_dma_addr = 0;
179}
180
181/**
 182 * alloc memory for the packet descriptor ring, the pd_uinfo array and
 183 * the shadow SA and state record pools used by the ring entries;
 184 * pdr_head and pdr_tail index into this ring
185 */
186static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
187{
188 int i;
189 struct pd_uinfo *pd_uinfo;
190 dev->pdr = dma_alloc_coherent(dev->core_dev->device,
191 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
192 &dev->pdr_pa, GFP_ATOMIC);
193 if (!dev->pdr)
194 return -ENOMEM;
195
196 dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
197 GFP_KERNEL);
198 if (!dev->pdr_uinfo) {
199 dma_free_coherent(dev->core_dev->device,
200 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
201 dev->pdr,
202 dev->pdr_pa);
203 return -ENOMEM;
204 }
205 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
206 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
207 256 * PPC4XX_NUM_PD,
208 &dev->shadow_sa_pool_pa,
209 GFP_ATOMIC);
210 if (!dev->shadow_sa_pool)
211 return -ENOMEM;
212
213 dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
214 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
215 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
216 if (!dev->shadow_sr_pool)
217 return -ENOMEM;
218 for (i = 0; i < PPC4XX_NUM_PD; i++) {
219 pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
220 sizeof(struct pd_uinfo) * i);
221
222 /* alloc 256 bytes which is enough for any kind of dynamic sa */
223 pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
224 pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
225
226 /* alloc state record */
227 pd_uinfo->sr_va = dev->shadow_sr_pool +
228 sizeof(struct sa_state_record) * i;
229 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
230 sizeof(struct sa_state_record) * i;
231 }
232
233 return 0;
234}
235
236static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
237{
238 if (dev->pdr != NULL)
239 dma_free_coherent(dev->core_dev->device,
240 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
241 dev->pdr, dev->pdr_pa);
242 if (dev->shadow_sa_pool)
243 dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
244 dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
245 if (dev->shadow_sr_pool)
246 dma_free_coherent(dev->core_dev->device,
247 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
248 dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
249
250 kfree(dev->pdr_uinfo);
251}
252
253static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
254{
255 u32 retval;
256 u32 tmp;
257
258 retval = dev->pdr_head;
259 tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
260
261 if (tmp == dev->pdr_tail)
262 return ERING_WAS_FULL;
263
264 dev->pdr_head = tmp;
265
266 return retval;
267}
268
269static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
270{
271 struct pd_uinfo *pd_uinfo;
272 unsigned long flags;
273
274 pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
275 sizeof(struct pd_uinfo) * idx);
276 spin_lock_irqsave(&dev->core_dev->lock, flags);
277 if (dev->pdr_tail != PPC4XX_LAST_PD)
278 dev->pdr_tail++;
279 else
280 dev->pdr_tail = 0;
281 pd_uinfo->state = PD_ENTRY_FREE;
282 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
283
284 return 0;
285}
286
287static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
288 dma_addr_t *pd_dma, u32 idx)
289{
290 *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
291
292 return dev->pdr + sizeof(struct ce_pd) * idx;
293}
294
295/**
296 * alloc memory for the gather ring
297 * no need to alloc buf for the ring
298 * gdr_tail, gdr_head and gdr_count are initialized by this function
299 */
300static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
301{
302 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
303 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
304 &dev->gdr_pa, GFP_ATOMIC);
305 if (!dev->gdr)
306 return -ENOMEM;
307
308 memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
309
310 return 0;
311}
312
313static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
314{
315 dma_free_coherent(dev->core_dev->device,
316 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
317 dev->gdr, dev->gdr_pa);
318}
319
320/*
 321 * when this function is called,
 322 * preemption or interrupts must be disabled
323 */
324u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
325{
326 u32 retval;
327 u32 tmp;
328 if (n >= PPC4XX_NUM_GD)
329 return ERING_WAS_FULL;
330
331 retval = dev->gdr_head;
332 tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
333 if (dev->gdr_head > dev->gdr_tail) {
334 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
335 return ERING_WAS_FULL;
336 } else if (dev->gdr_head < dev->gdr_tail) {
337 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
338 return ERING_WAS_FULL;
339 }
340 dev->gdr_head = tmp;
341
342 return retval;
343}
344
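/*
 * Reading aid for crypto4xx_get_n_gd() above (not part of the patch):
 * head == tail means the ring is empty and [tail, head) holds the
 * in-flight descriptors, so the advanced head must not land inside
 * that window.  Example with PPC4XX_NUM_GD = 8: head = 6, tail = 2,
 * n = 3 gives tmp = (6 + 3) % 8 = 1; tmp < tail, so the request fits,
 * head becomes 1 and entries 6, 7 and 0 are handed out.
 */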
345static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
346{
347 unsigned long flags;
348
349 spin_lock_irqsave(&dev->core_dev->lock, flags);
350 if (dev->gdr_tail == dev->gdr_head) {
351 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
352 return 0;
353 }
354
355 if (dev->gdr_tail != PPC4XX_LAST_GD)
356 dev->gdr_tail++;
357 else
358 dev->gdr_tail = 0;
359
360 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
361
362 return 0;
363}
364
365static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
366 dma_addr_t *gd_dma, u32 idx)
367{
368 *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
369
370 return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
371}
372
373/**
374 * alloc memory for the scatter ring
375 * need to alloc buf for the ring
376 * sdr_tail, sdr_head and sdr_count are initialized by this function
377 */
378static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
379{
380 int i;
381 struct ce_sd *sd_array;
382
383 /* alloc memory for scatter descriptor ring */
384 dev->sdr = dma_alloc_coherent(dev->core_dev->device,
385 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
386 &dev->sdr_pa, GFP_ATOMIC);
387 if (!dev->sdr)
388 return -ENOMEM;
389
390 dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
391 dev->scatter_buffer_va =
392 dma_alloc_coherent(dev->core_dev->device,
393 dev->scatter_buffer_size * PPC4XX_NUM_SD,
394 &dev->scatter_buffer_pa, GFP_ATOMIC);
395 if (!dev->scatter_buffer_va) {
396 dma_free_coherent(dev->core_dev->device,
397 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
398 dev->sdr, dev->sdr_pa);
399 return -ENOMEM;
400 }
401
402 sd_array = dev->sdr;
403
404 for (i = 0; i < PPC4XX_NUM_SD; i++) {
405 sd_array[i].ptr = dev->scatter_buffer_pa +
406 dev->scatter_buffer_size * i;
407 }
408
409 return 0;
410}
411
412static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
413{
414 if (dev->sdr != NULL)
415 dma_free_coherent(dev->core_dev->device,
416 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
417 dev->sdr, dev->sdr_pa);
418
419 if (dev->scatter_buffer_va != NULL)
420 dma_free_coherent(dev->core_dev->device,
421 dev->scatter_buffer_size * PPC4XX_NUM_SD,
422 dev->scatter_buffer_va,
423 dev->scatter_buffer_pa);
424}
425
426/*
 427 * when this function is called,
 428 * preemption or interrupts must be disabled
429 */
430static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
431{
432 u32 retval;
433 u32 tmp;
434
435 if (n >= PPC4XX_NUM_SD)
436 return ERING_WAS_FULL;
437
438 retval = dev->sdr_head;
439 tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
 440	if (dev->sdr_head > dev->sdr_tail) {
441 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
442 return ERING_WAS_FULL;
443 } else if (dev->sdr_head < dev->sdr_tail) {
444 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
445 return ERING_WAS_FULL;
 446	} /* the head == tail (empty) case is already taken care of */
447 dev->sdr_head = tmp;
448
449 return retval;
450}
451
452static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
453{
454 unsigned long flags;
455
456 spin_lock_irqsave(&dev->core_dev->lock, flags);
457 if (dev->sdr_tail == dev->sdr_head) {
458 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
459 return 0;
460 }
461 if (dev->sdr_tail != PPC4XX_LAST_SD)
462 dev->sdr_tail++;
463 else
464 dev->sdr_tail = 0;
465 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
466
467 return 0;
468}
469
470static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
471 dma_addr_t *sd_dma, u32 idx)
472{
473 *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
474
475 return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
476}
477
478static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
479 dma_addr_t *addr, u32 *length,
480 u32 *idx, u32 *offset, u32 *nbytes)
481{
482 u32 len;
483
484 if (*length > dev->scatter_buffer_size) {
485 memcpy(phys_to_virt(*addr),
486 dev->scatter_buffer_va +
487 *idx * dev->scatter_buffer_size + *offset,
488 dev->scatter_buffer_size);
489 *offset = 0;
490 *length -= dev->scatter_buffer_size;
491 *nbytes -= dev->scatter_buffer_size;
492 if (*idx == PPC4XX_LAST_SD)
493 *idx = 0;
494 else
495 (*idx)++;
496 *addr = *addr + dev->scatter_buffer_size;
497 return 1;
498 } else if (*length < dev->scatter_buffer_size) {
499 memcpy(phys_to_virt(*addr),
500 dev->scatter_buffer_va +
501 *idx * dev->scatter_buffer_size + *offset, *length);
502 if ((*offset + *length) == dev->scatter_buffer_size) {
503 if (*idx == PPC4XX_LAST_SD)
504 *idx = 0;
505 else
506 (*idx)++;
507 *nbytes -= *length;
508 *offset = 0;
509 } else {
510 *nbytes -= *length;
511 *offset += *length;
512 }
513
514 return 0;
515 } else {
516 len = (*nbytes <= dev->scatter_buffer_size) ?
517 (*nbytes) : dev->scatter_buffer_size;
518 memcpy(phys_to_virt(*addr),
519 dev->scatter_buffer_va +
520 *idx * dev->scatter_buffer_size + *offset,
521 len);
522 *offset = 0;
523 *nbytes -= len;
524
525 if (*idx == PPC4XX_LAST_SD)
526 *idx = 0;
527 else
528 (*idx)++;
529
530 return 0;
531 }
532}
533
534static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
535 struct ce_pd *pd,
536 struct pd_uinfo *pd_uinfo,
537 u32 nbytes,
538 struct scatterlist *dst)
539{
540 dma_addr_t addr;
541 u32 this_sd;
542 u32 offset;
543 u32 len;
544 u32 i;
545 u32 sg_len;
546 struct scatterlist *sg;
547
548 this_sd = pd_uinfo->first_sd;
549 offset = 0;
550 i = 0;
551
552 while (nbytes) {
553 sg = &dst[i];
554 sg_len = sg->length;
555 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
556 sg->offset, sg->length, DMA_TO_DEVICE);
557
558 if (offset == 0) {
559 len = (nbytes <= sg->length) ? nbytes : sg->length;
560 while (crypto4xx_fill_one_page(dev, &addr, &len,
561 &this_sd, &offset, &nbytes))
562 ;
563 if (!nbytes)
564 return;
565 i++;
566 } else {
567 len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
568 nbytes : (dev->scatter_buffer_size - offset);
569 len = (sg->length < len) ? sg->length : len;
570 while (crypto4xx_fill_one_page(dev, &addr, &len,
571 &this_sd, &offset, &nbytes))
572 ;
573 if (!nbytes)
574 return;
575 sg_len -= len;
576 if (sg_len) {
577 addr += len;
578 while (crypto4xx_fill_one_page(dev, &addr,
579 &sg_len, &this_sd, &offset, &nbytes))
580 ;
581 }
582 i++;
583 }
584 }
585}
586
587static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
588 struct crypto4xx_ctx *ctx)
589{
590 struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
591 struct sa_state_record *state_record =
592 (struct sa_state_record *) pd_uinfo->sr_va;
593
594 if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
595 memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
596 SA_HASH_ALG_SHA1_DIGEST_SIZE);
597 }
598
599 return 0;
600}
601
602static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
603 struct pd_uinfo *pd_uinfo)
604{
605 int i;
606 if (pd_uinfo->num_gd) {
607 for (i = 0; i < pd_uinfo->num_gd; i++)
608 crypto4xx_put_gd_to_gdr(dev);
609 pd_uinfo->first_gd = 0xffffffff;
610 pd_uinfo->num_gd = 0;
611 }
612 if (pd_uinfo->num_sd) {
613 for (i = 0; i < pd_uinfo->num_sd; i++)
614 crypto4xx_put_sd_to_sdr(dev);
615
616 pd_uinfo->first_sd = 0xffffffff;
617 pd_uinfo->num_sd = 0;
618 }
619}
620
621static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
622 struct pd_uinfo *pd_uinfo,
623 struct ce_pd *pd)
624{
625 struct crypto4xx_ctx *ctx;
626 struct ablkcipher_request *ablk_req;
627 struct scatterlist *dst;
628 dma_addr_t addr;
629
630 ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
631 ctx = crypto_tfm_ctx(ablk_req->base.tfm);
632
633 if (pd_uinfo->using_sd) {
634 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
635 ablk_req->dst);
636 } else {
637 dst = pd_uinfo->dest_va;
638 addr = dma_map_page(dev->core_dev->device, sg_page(dst),
639 dst->offset, dst->length, DMA_FROM_DEVICE);
640 }
641 crypto4xx_ret_sg_desc(dev, pd_uinfo);
642 if (ablk_req->base.complete != NULL)
643 ablk_req->base.complete(&ablk_req->base, 0);
644
645 return 0;
646}
647
648static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
649 struct pd_uinfo *pd_uinfo)
650{
651 struct crypto4xx_ctx *ctx;
652 struct ahash_request *ahash_req;
653
654 ahash_req = ahash_request_cast(pd_uinfo->async_req);
655 ctx = crypto_tfm_ctx(ahash_req->base.tfm);
656
657 crypto4xx_copy_digest_to_dst(pd_uinfo,
658 crypto_tfm_ctx(ahash_req->base.tfm));
659 crypto4xx_ret_sg_desc(dev, pd_uinfo);
 660	/* call the user-provided callback function */
661 if (ahash_req->base.complete != NULL)
662 ahash_req->base.complete(&ahash_req->base, 0);
663
664 return 0;
665}
666
667static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
668{
669 struct ce_pd *pd;
670 struct pd_uinfo *pd_uinfo;
671
672 pd = dev->pdr + sizeof(struct ce_pd)*idx;
673 pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
674 if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
675 CRYPTO_ALG_TYPE_ABLKCIPHER)
676 return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
677 else
678 return crypto4xx_ahash_done(dev, pd_uinfo);
679}
680
681/**
 682 * Note: Only use this function to copy items that are word aligned.
683 */
684void crypto4xx_memcpy_le(unsigned int *dst,
685 const unsigned char *buf,
686 int len)
687{
688 u8 *tmp;
689 for (; len >= 4; buf += 4, len -= 4)
690 *dst++ = cpu_to_le32(*(unsigned int *) buf);
691
692 tmp = (u8 *)dst;
693 switch (len) {
694 case 3:
695 *tmp++ = 0;
696 *tmp++ = *(buf+2);
697 *tmp++ = *(buf+1);
698 *tmp++ = *buf;
699 break;
700 case 2:
701 *tmp++ = 0;
702 *tmp++ = 0;
703 *tmp++ = *(buf+1);
704 *tmp++ = *buf;
705 break;
706 case 1:
707 *tmp++ = 0;
708 *tmp++ = 0;
709 *tmp++ = 0;
710 *tmp++ = *buf;
711 break;
712 default:
713 break;
714 }
715}
716
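/*
 * Byte-order note for crypto4xx_memcpy_le() above (a reading aid, not
 * part of the patch): full words are stored via cpu_to_le32(), and a
 * 3-byte tail { b0, b1, b2 } lands in the final word as the bytes
 * { 0x00, b2, b1, b0 }, i.e. zero-padded and byte-swapped to match.
 */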
717static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
718{
719 crypto4xx_destroy_pdr(core_dev->dev);
720 crypto4xx_destroy_gdr(core_dev->dev);
721 crypto4xx_destroy_sdr(core_dev->dev);
722 dev_set_drvdata(core_dev->device, NULL);
723 iounmap(core_dev->dev->ce_base);
724 kfree(core_dev->dev);
725 kfree(core_dev);
726}
727
728void crypto4xx_return_pd(struct crypto4xx_device *dev,
729 u32 pd_entry, struct ce_pd *pd,
730 struct pd_uinfo *pd_uinfo)
731{
732 /* irq should be already disabled */
733 dev->pdr_head = pd_entry;
734 pd->pd_ctl.w = 0;
735 pd->pd_ctl_len.w = 0;
736 pd_uinfo->state = PD_ENTRY_FREE;
737}
738
739/*
740 * derive number of elements in scatterlist
 741 * Shamelessly copied from talitos.c
742 */
743static int get_sg_count(struct scatterlist *sg_list, int nbytes)
744{
745 struct scatterlist *sg = sg_list;
746 int sg_nents = 0;
747
748 while (nbytes) {
749 sg_nents++;
750 if (sg->length > nbytes)
751 break;
752 nbytes -= sg->length;
753 sg = sg_next(sg);
754 }
755
756 return sg_nents;
757}
758
759static u32 get_next_gd(u32 current)
760{
761 if (current != PPC4XX_LAST_GD)
762 return current + 1;
763 else
764 return 0;
765}
766
767static u32 get_next_sd(u32 current)
768{
769 if (current != PPC4XX_LAST_SD)
770 return current + 1;
771 else
772 return 0;
773}
774
775u32 crypto4xx_build_pd(struct crypto_async_request *req,
776 struct crypto4xx_ctx *ctx,
777 struct scatterlist *src,
778 struct scatterlist *dst,
779 unsigned int datalen,
780 void *iv, u32 iv_len)
781{
782 struct crypto4xx_device *dev = ctx->dev;
783 dma_addr_t addr, pd_dma, sd_dma, gd_dma;
784 struct dynamic_sa_ctl *sa;
785 struct scatterlist *sg;
786 struct ce_gd *gd;
787 struct ce_pd *pd;
788 u32 num_gd, num_sd;
789 u32 fst_gd = 0xffffffff;
790 u32 fst_sd = 0xffffffff;
791 u32 pd_entry;
792 unsigned long flags;
793 struct pd_uinfo *pd_uinfo = NULL;
794 unsigned int nbytes = datalen, idx;
795 unsigned int ivlen = 0;
796 u32 gd_idx = 0;
797
 798	/* figure out how many GDs are needed */
799 num_gd = get_sg_count(src, datalen);
800 if (num_gd == 1)
801 num_gd = 0;
802
 803	/* figure out how many SDs are needed */
804 if (sg_is_last(dst) || ctx->is_hash) {
805 num_sd = 0;
806 } else {
807 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
808 num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
809 if (datalen % PPC4XX_SD_BUFFER_SIZE)
810 num_sd++;
811 } else {
812 num_sd = 1;
813 }
814 }
815
816 /*
 817	 * The following section of code needs to be protected.
 818	 * The gather ring and scatter ring entries must be consecutive;
 819	 * if we run out of any kind of descriptor, the descriptors
 820	 * already claimed must be returned to their original place.
821 */
822 spin_lock_irqsave(&dev->core_dev->lock, flags);
823 if (num_gd) {
824 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
825 if (fst_gd == ERING_WAS_FULL) {
826 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
827 return -EAGAIN;
828 }
829 }
830 if (num_sd) {
831 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
832 if (fst_sd == ERING_WAS_FULL) {
833 if (num_gd)
834 dev->gdr_head = fst_gd;
835 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
836 return -EAGAIN;
837 }
838 }
839 pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
840 if (pd_entry == ERING_WAS_FULL) {
841 if (num_gd)
842 dev->gdr_head = fst_gd;
843 if (num_sd)
844 dev->sdr_head = fst_sd;
845 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
846 return -EAGAIN;
847 }
848 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
849
850 pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
851 sizeof(struct pd_uinfo) * pd_entry);
852 pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
853 pd_uinfo->async_req = req;
854 pd_uinfo->num_gd = num_gd;
855 pd_uinfo->num_sd = num_sd;
856
857 if (iv_len || ctx->is_hash) {
858 ivlen = iv_len;
859 pd->sa = pd_uinfo->sa_pa;
860 sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
861 if (ctx->direction == DIR_INBOUND)
862 memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
863 else
864 memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
865
866 memcpy((void *) sa + ctx->offset_to_sr_ptr,
867 &pd_uinfo->sr_pa, 4);
868
869 if (iv_len)
870 crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
871 } else {
872 if (ctx->direction == DIR_INBOUND) {
873 pd->sa = ctx->sa_in_dma_addr;
874 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
875 } else {
876 pd->sa = ctx->sa_out_dma_addr;
877 sa = (struct dynamic_sa_ctl *) ctx->sa_out;
878 }
879 }
880 pd->sa_len = ctx->sa_len;
881 if (num_gd) {
882 /* get first gd we are going to use */
883 gd_idx = fst_gd;
884 pd_uinfo->first_gd = fst_gd;
885 pd_uinfo->num_gd = num_gd;
886 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
887 pd->src = gd_dma;
888 /* enable gather */
889 sa->sa_command_0.bf.gather = 1;
890 idx = 0;
891 src = &src[0];
 892		/* walk the sg list and set up the gather array */
893 while (nbytes) {
894 sg = &src[idx];
895 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
896 sg->offset, sg->length, DMA_TO_DEVICE);
897 gd->ptr = addr;
898 gd->ctl_len.len = sg->length;
899 gd->ctl_len.done = 0;
900 gd->ctl_len.ready = 1;
901 if (sg->length >= nbytes)
902 break;
903 nbytes -= sg->length;
904 gd_idx = get_next_gd(gd_idx);
905 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
906 idx++;
907 }
908 } else {
909 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
910 src->offset, src->length, DMA_TO_DEVICE);
911 /*
912 * Disable gather in sa command
913 */
914 sa->sa_command_0.bf.gather = 0;
915 /*
916 * Indicate gather array is not used
917 */
918 pd_uinfo->first_gd = 0xffffffff;
919 pd_uinfo->num_gd = 0;
920 }
921 if (ctx->is_hash || sg_is_last(dst)) {
 922		/*
 923		 * The application gave us dst as one contiguous piece of memory,
 924		 * so there is no need to use the scatter ring.
 925		 * In the is_hash case, the ICV is always at the end of the src data.
 926		 */
927 pd_uinfo->using_sd = 0;
928 pd_uinfo->first_sd = 0xffffffff;
929 pd_uinfo->num_sd = 0;
930 pd_uinfo->dest_va = dst;
931 sa->sa_command_0.bf.scatter = 0;
932 if (ctx->is_hash)
933 pd->dest = virt_to_phys((void *)dst);
934 else
935 pd->dest = (u32)dma_map_page(dev->core_dev->device,
936 sg_page(dst), dst->offset,
937 dst->length, DMA_TO_DEVICE);
938 } else {
939 struct ce_sd *sd = NULL;
940 u32 sd_idx = fst_sd;
941 nbytes = datalen;
942 sa->sa_command_0.bf.scatter = 1;
943 pd_uinfo->using_sd = 1;
944 pd_uinfo->dest_va = dst;
945 pd_uinfo->first_sd = fst_sd;
946 pd_uinfo->num_sd = num_sd;
947 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
948 pd->dest = sd_dma;
 949		/* set up the scatter descriptor */
950 sd->ctl.done = 0;
951 sd->ctl.rdy = 1;
 952		/* sd->ptr should be set up by the sd_init routine */
953 idx = 0;
954 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
955 nbytes -= PPC4XX_SD_BUFFER_SIZE;
956 else
957 nbytes = 0;
958 while (nbytes) {
959 sd_idx = get_next_sd(sd_idx);
960 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 961			/* set up the scatter descriptor */
962 sd->ctl.done = 0;
963 sd->ctl.rdy = 1;
964 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
965 nbytes -= PPC4XX_SD_BUFFER_SIZE;
966 else
 967				/*
 968				 * An SD entry can hold PPC4XX_SD_BUFFER_SIZE
 969				 * bytes, which is more than nbytes, so we are done.
 970				 */
971 nbytes = 0;
972 }
973 }
974
975 sa->sa_command_1.bf.hash_crypto_offset = 0;
976 pd->pd_ctl.w = ctx->pd_ctl;
977 pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
978 pd_uinfo->state = PD_ENTRY_INUSE;
979 wmb();
 980	/* write any value to push the engine to read a PD */
981 writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
982 return -EINPROGRESS;
983}
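
crypto4xx_build_pd() above reserves up to three kinds of descriptors (gather, scatter, packet) under one spinlock, and on any failure rolls the already-advanced ring heads back before dropping the lock, so the reservations stay consecutive. A condensed sketch of that reserve-or-rollback pattern, where every name (struct ring, ring_get_n, RING_FULL, ring_lock) is a hypothetical stand-in rather than a driver symbol:

	#define RING_FULL	0xffffffff

	struct ring {
		u32 head;
		u32 tail;
		u32 size;
	};

	static DEFINE_SPINLOCK(ring_lock);

	/* advance head by n slots if they are free, else report full */
	static u32 ring_get_n(struct ring *r, u32 n)
	{
		u32 first = r->head;

		if ((r->size + r->tail - r->head - 1) % r->size < n)
			return RING_FULL;
		r->head = (r->head + n) % r->size;
		return first;
	}

	static int reserve_descriptors(struct ring *gd, struct ring *sd,
				       u32 num_gd, u32 num_sd)
	{
		unsigned long flags;
		u32 gd_head;

		spin_lock_irqsave(&ring_lock, flags);
		gd_head = gd->head;
		if (num_gd && ring_get_n(gd, num_gd) == RING_FULL)
			goto out_busy;
		if (num_sd && ring_get_n(sd, num_sd) == RING_FULL) {
			gd->head = gd_head;	/* undo the gather reservation */
			goto out_busy;
		}
		spin_unlock_irqrestore(&ring_lock, flags);
		return 0;

	out_busy:
		spin_unlock_irqrestore(&ring_lock, flags);
		return -EAGAIN;
	}

The rollback is just a head-pointer restore, which is only safe because nothing else can touch the rings while the lock is held.
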
984
985/**
986 * Algorithm Registration Functions
987 */
988static int crypto4xx_alg_init(struct crypto_tfm *tfm)
989{
990 struct crypto_alg *alg = tfm->__crt_alg;
991 struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
992 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
993
994 ctx->dev = amcc_alg->dev;
995 ctx->sa_in = NULL;
996 ctx->sa_out = NULL;
997 ctx->sa_in_dma_addr = 0;
998 ctx->sa_out_dma_addr = 0;
999 ctx->sa_len = 0;
1000
1001 if (alg->cra_type == &crypto_ablkcipher_type)
1002 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
1003 else if (alg->cra_type == &crypto_ahash_type)
1004 tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
1005
1006 return 0;
1007}
1008
1009static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1010{
1011 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
1012
1013 crypto4xx_free_sa(ctx);
1014 crypto4xx_free_state_record(ctx);
1015}
1016
1017int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1018 struct crypto_alg *crypto_alg, int array_size)
1019{
1020 struct crypto4xx_alg *alg;
1021 int i;
1022 int rc = 0;
1023
1024 for (i = 0; i < array_size; i++) {
1025 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1026 if (!alg)
1027 return -ENOMEM;
1028
1029 alg->alg = crypto_alg[i];
1030 INIT_LIST_HEAD(&alg->alg.cra_list);
1031 if (alg->alg.cra_init == NULL)
1032 alg->alg.cra_init = crypto4xx_alg_init;
1033 if (alg->alg.cra_exit == NULL)
1034 alg->alg.cra_exit = crypto4xx_alg_exit;
1035 alg->dev = sec_dev;
1036 rc = crypto_register_alg(&alg->alg);
1037		if (rc) {
1038			/* not yet on alg_list, so just free it */
1039			kfree(alg);
1040 } else {
1041 list_add_tail(&alg->entry, &sec_dev->alg_list);
1042 }
1043 }
1044
1045 return 0;
1046}
1047
1048static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1049{
1050 struct crypto4xx_alg *alg, *tmp;
1051
1052 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1053 list_del(&alg->entry);
1054 crypto_unregister_alg(&alg->alg);
1055 kfree(alg);
1056 }
1057}
1058
1059static void crypto4xx_bh_tasklet_cb(unsigned long data)
1060{
1061 struct device *dev = (struct device *)data;
1062 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1063 struct pd_uinfo *pd_uinfo;
1064 struct ce_pd *pd;
1065 u32 tail;
1066
1067 while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
1068 tail = core_dev->dev->pdr_tail;
1069 pd_uinfo = core_dev->dev->pdr_uinfo +
1070 sizeof(struct pd_uinfo)*tail;
1071 pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
1072 if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
1073 pd->pd_ctl.bf.pe_done &&
1074 !pd->pd_ctl.bf.host_ready) {
1075 pd->pd_ctl.bf.pe_done = 0;
1076 crypto4xx_pd_done(core_dev->dev, tail);
1077 crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1078 pd_uinfo->state = PD_ENTRY_FREE;
1079 } else {
1080 /* if tail not done, break */
1081 break;
1082 }
1083 }
1084}
1085
1086/**
1087 * Top half of the ISR.
1088 */
1089static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1090{
1091 struct device *dev = (struct device *)data;
1092 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1093
1094 if (core_dev->dev->ce_base == 0)
1095 return 0;
1096
1097 writel(PPC4XX_INTERRUPT_CLR,
1098 core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1099 tasklet_schedule(&core_dev->tasklet);
1100
1101 return IRQ_HANDLED;
1102}
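
The handler above is a classic top-half/bottom-half split: the hard interrupt only clears the interrupt condition and schedules the tasklet, while crypto4xx_bh_tasklet_cb() drains the completed packet descriptors later in softirq context. A minimal sketch of the same pattern, with my_base, MY_INT_CLR and MY_INT_CLR_REG as illustrative stand-ins:

	static void __iomem *my_base;	/* mapped device registers */

	static void my_bh(unsigned long data)
	{
		/* heavy completion processing runs here, outside hard-IRQ context */
	}
	static DECLARE_TASKLET(my_tasklet, my_bh, 0);

	static irqreturn_t my_top_half(int irq, void *dev_id)
	{
		/* ack the device first so it can raise the next interrupt */
		writel(MY_INT_CLR, my_base + MY_INT_CLR_REG);
		tasklet_schedule(&my_tasklet);
		return IRQ_HANDLED;
	}
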
1103
1104/**
1105 * Supported Crypto Algorithms
1106 */
1107struct crypto_alg crypto4xx_alg[] = {
1108 /* Crypto AES modes */
1109 {
1110 .cra_name = "cbc(aes)",
1111 .cra_driver_name = "cbc-aes-ppc4xx",
1112 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1113 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1114 .cra_blocksize = AES_BLOCK_SIZE,
1115 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1116 .cra_alignmask = 0,
1117 .cra_type = &crypto_ablkcipher_type,
1118 .cra_module = THIS_MODULE,
1119 .cra_u = {
1120 .ablkcipher = {
1121 .min_keysize = AES_MIN_KEY_SIZE,
1122 .max_keysize = AES_MAX_KEY_SIZE,
1123 .ivsize = AES_IV_SIZE,
1124 .setkey = crypto4xx_setkey_aes_cbc,
1125 .encrypt = crypto4xx_encrypt,
1126 .decrypt = crypto4xx_decrypt,
1127 }
1128 }
1129 },
1130 /* Hash SHA1 */
1131 {
1132 .cra_name = "sha1",
1133 .cra_driver_name = "sha1-ppc4xx",
1134 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1135 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
1136 .cra_blocksize = SHA1_BLOCK_SIZE,
1137 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1138 .cra_alignmask = 0,
1139 .cra_type = &crypto_ahash_type,
1140 .cra_init = crypto4xx_sha1_alg_init,
1141 .cra_module = THIS_MODULE,
1142 .cra_u = {
1143 .ahash = {
1144 .digestsize = SHA1_DIGEST_SIZE,
1145 .init = crypto4xx_hash_init,
1146 .update = crypto4xx_hash_update,
1147 .final = crypto4xx_hash_final,
1148 .digest = crypto4xx_hash_digest,
1149 }
1150 }
1151 },
1152};
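
A consumer never asks for this driver by name; allocating "cbc(aes)" through the kernel crypto API selects the highest-priority registered implementation, which on these SoCs is cbc-aes-ppc4xx at priority 300. A minimal sketch of driving the asynchronous cipher from kernel code (encrypt_one_block and tcrypt_result are illustrative names; the key is assumed to be AES-128):

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/completion.h>

	struct tcrypt_result {
		struct completion completion;
		int err;
	};

	static void op_done(struct crypto_async_request *req, int err)
	{
		struct tcrypt_result *res = req->data;

		if (err == -EINPROGRESS)
			return;
		res->err = err;
		complete(&res->completion);
	}

	static int encrypt_one_block(u8 *buf, unsigned int len,
				     const u8 *key, u8 *iv)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		struct tcrypt_result res;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, 16);
		if (ret)
			goto out_free_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		init_completion(&res.completion);
		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						op_done, &res);
		sg_init_one(&sg, buf, len);
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_ablkcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			/* async completion, as crypto4xx_build_pd() returns -EINPROGRESS */
			wait_for_completion(&res.completion);
			ret = res.err;
		}

		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}
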
1153
1154/**
1155 * Module Initialization Routine
1156 */
1157static int __init crypto4xx_probe(struct of_device *ofdev,
1158 const struct of_device_id *match)
1159{
1160 int rc;
1161 struct resource res;
1162 struct device *dev = &ofdev->dev;
1163 struct crypto4xx_core_device *core_dev;
1164
1165 rc = of_address_to_resource(ofdev->node, 0, &res);
1166 if (rc)
1167 return -ENODEV;
1168
1169 if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1170 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1171 mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1172 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1173 mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1174 } else if (of_find_compatible_node(NULL, NULL,
1175 "amcc,ppc405ex-crypto")) {
1176 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1177 mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1178 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1179 mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1180 } else if (of_find_compatible_node(NULL, NULL,
1181 "amcc,ppc460sx-crypto")) {
1182 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1183 mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1184 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1185 mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1186 } else {
 1187		printk(KERN_ERR "Crypto function not supported!\n");
1188 return -EINVAL;
1189 }
1190
1191 core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1192 if (!core_dev)
1193 return -ENOMEM;
1194
1195 dev_set_drvdata(dev, core_dev);
1196 core_dev->ofdev = ofdev;
1197 core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1198 if (!core_dev->dev)
1199 goto err_alloc_dev;
1200
1201 core_dev->dev->core_dev = core_dev;
1202 core_dev->device = dev;
1203 spin_lock_init(&core_dev->lock);
1204 INIT_LIST_HEAD(&core_dev->dev->alg_list);
1205 rc = crypto4xx_build_pdr(core_dev->dev);
1206 if (rc)
1207 goto err_build_pdr;
1208
1209 rc = crypto4xx_build_gdr(core_dev->dev);
1210 if (rc)
1211 goto err_build_gdr;
1212
1213 rc = crypto4xx_build_sdr(core_dev->dev);
1214 if (rc)
1215 goto err_build_sdr;
1216
1217 /* Init tasklet for bottom half processing */
1218 tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1219 (unsigned long) dev);
1220
1221 /* Register for Crypto isr, Crypto Engine IRQ */
1222 core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
1223 rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
1224 core_dev->dev->name, dev);
1225 if (rc)
1226 goto err_request_irq;
1227
1228 core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
1229 if (!core_dev->dev->ce_base) {
1230 dev_err(dev, "failed to of_iomap\n");
1231 goto err_iomap;
1232 }
1233
 1234	/* the PDR, RDR, GDR and SDR must be set up before this */
1235 crypto4xx_hw_init(core_dev->dev);
1236
1237 /* Register security algorithms with Linux CryptoAPI */
1238 rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1239 ARRAY_SIZE(crypto4xx_alg));
1240 if (rc)
1241 goto err_start_dev;
1242
1243 return 0;
1244
1245err_start_dev:
1246 iounmap(core_dev->dev->ce_base);
1247err_iomap:
1248 free_irq(core_dev->irq, dev);
1249 irq_dispose_mapping(core_dev->irq);
1250 tasklet_kill(&core_dev->tasklet);
1251err_request_irq:
1252 crypto4xx_destroy_sdr(core_dev->dev);
1253err_build_sdr:
1254 crypto4xx_destroy_gdr(core_dev->dev);
1255err_build_gdr:
1256 crypto4xx_destroy_pdr(core_dev->dev);
1257err_build_pdr:
1258 kfree(core_dev->dev);
1259err_alloc_dev:
1260 kfree(core_dev);
1261
1262 return rc;
1263}
1264
1265static int __exit crypto4xx_remove(struct of_device *ofdev)
1266{
1267 struct device *dev = &ofdev->dev;
1268 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1269
1270 free_irq(core_dev->irq, dev);
1271 irq_dispose_mapping(core_dev->irq);
1272
1273 tasklet_kill(&core_dev->tasklet);
1274 /* Un-register with Linux CryptoAPI */
1275 crypto4xx_unregister_alg(core_dev->dev);
1276 /* Free all allocated memory */
1277 crypto4xx_stop_all(core_dev);
1278
1279 return 0;
1280}
1281
1282static struct of_device_id crypto4xx_match[] = {
1283 { .compatible = "amcc,ppc4xx-crypto",},
1284 { },
1285};
1286
1287static struct of_platform_driver crypto4xx_driver = {
1288 .name = "crypto4xx",
1289 .match_table = crypto4xx_match,
1290 .probe = crypto4xx_probe,
1291 .remove = crypto4xx_remove,
1292};
1293
1294static int __init crypto4xx_init(void)
1295{
1296 return of_register_platform_driver(&crypto4xx_driver);
1297}
1298
1299static void __exit crypto4xx_exit(void)
1300{
1301 of_unregister_platform_driver(&crypto4xx_driver);
1302}
1303
1304module_init(crypto4xx_init);
1305module_exit(crypto4xx_exit);
1306
1307MODULE_LICENSE("GPL");
1308MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1309MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
1310
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
new file mode 100644
index 000000000000..1ef103449364
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -0,0 +1,177 @@
1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
 17 * This is the header file for the AMCC crypto offload Linux device driver
 18 * for use with the Linux CryptoAPI.
19
20 */
21
22#ifndef __CRYPTO4XX_CORE_H__
23#define __CRYPTO4XX_CORE_H__
24
25#define PPC460SX_SDR0_SRST 0x201
26#define PPC405EX_SDR0_SRST 0x200
27#define PPC460EX_SDR0_SRST 0x201
28#define PPC460EX_CE_RESET 0x08000000
29#define PPC460SX_CE_RESET 0x20000000
30#define PPC405EX_CE_RESET 0x00000008
31
32#define CRYPTO4XX_CRYPTO_PRIORITY 300
33#define PPC4XX_LAST_PD 63
34#define PPC4XX_NUM_PD 64
35#define PPC4XX_LAST_GD 1023
36#define PPC4XX_NUM_GD 1024
37#define PPC4XX_LAST_SD 63
38#define PPC4XX_NUM_SD 64
39#define PPC4XX_SD_BUFFER_SIZE 2048
40
41#define PD_ENTRY_INUSE 1
42#define PD_ENTRY_FREE 0
43#define ERING_WAS_FULL 0xffffffff
44
45struct crypto4xx_device;
46
47struct pd_uinfo {
48 struct crypto4xx_device *dev;
49 u32 state;
50 u32 using_sd;
 51	u32 first_gd;	/* first gather descriptor
 52				used by this packet */
 53	u32 num_gd;	/* number of gather descriptors
 54				used by this packet */
 55	u32 first_sd;	/* first scatter descriptor
 56				used by this packet */
 57	u32 num_sd;	/* number of scatter descriptors
 58				used by this packet */
59 void *sa_va; /* shadow sa, when using cp from ctx->sa */
60 u32 sa_pa;
61 void *sr_va; /* state record for shadow sa */
62 u32 sr_pa;
63 struct scatterlist *dest_va;
64 struct crypto_async_request *async_req; /* base crypto request
65 for this packet */
66};
67
68struct crypto4xx_device {
69 struct crypto4xx_core_device *core_dev;
70 char *name;
71 u64 ce_phy_address;
72 void __iomem *ce_base;
73
74 void *pdr; /* base address of packet
75 descriptor ring */
76 dma_addr_t pdr_pa; /* physical address used to
77 program ce pdr_base_register */
78 void *gdr; /* gather descriptor ring */
79 dma_addr_t gdr_pa; /* physical address used to
80 program ce gdr_base_register */
81 void *sdr; /* scatter descriptor ring */
82 dma_addr_t sdr_pa; /* physical address used to
83 program ce sdr_base_register */
84 void *scatter_buffer_va;
85 dma_addr_t scatter_buffer_pa;
86 u32 scatter_buffer_size;
87
88 void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
89 dma_addr_t shadow_sa_pool_pa;
90 void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
91 dma_addr_t shadow_sr_pool_pa;
92 u32 pdr_tail;
93 u32 pdr_head;
94 u32 gdr_tail;
95 u32 gdr_head;
96 u32 sdr_tail;
97 u32 sdr_head;
98 void *pdr_uinfo;
 99	struct list_head alg_list;	/* List of algorithms supported
 100					   by this device */
101};
102
103struct crypto4xx_core_device {
104 struct device *device;
105 struct of_device *ofdev;
106 struct crypto4xx_device *dev;
107 u32 int_status;
108 u32 irq;
109 struct tasklet_struct tasklet;
110 spinlock_t lock;
111};
112
113struct crypto4xx_ctx {
114 struct crypto4xx_device *dev;
115 void *sa_in;
116 dma_addr_t sa_in_dma_addr;
117 void *sa_out;
118 dma_addr_t sa_out_dma_addr;
119 void *state_record;
120 dma_addr_t state_record_dma_addr;
121 u32 sa_len;
122 u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
123 u32 direction;
124 u32 next_hdr;
125 u32 save_iv;
126 u32 pd_ctl_len;
127 u32 pd_ctl;
128 u32 bypass;
129 u32 is_hash;
130 u32 hash_final;
131};
132
133struct crypto4xx_req_ctx {
 134	struct crypto4xx_device *dev;	/* Device to which the
 135					   operation is sent */
136 void *sa;
137 u32 sa_dma_addr;
138 u16 sa_len;
139};
140
141struct crypto4xx_alg {
142 struct list_head entry;
143 struct crypto_alg alg;
144 struct crypto4xx_device *dev;
145};
146
147#define crypto_alg_to_crypto4xx_alg(x) \
148 container_of(x, struct crypto4xx_alg, alg)
149
150extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
151extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
152extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
153 struct crypto4xx_ctx *rctx);
154extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
155extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
156extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
157extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
158extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
159extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
160extern void crypto4xx_memcpy_le(unsigned int *dst,
161 const unsigned char *buf, int len);
162extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
163 struct crypto4xx_ctx *ctx,
164 struct scatterlist *src,
165 struct scatterlist *dst,
166 unsigned int datalen,
167 void *iv, u32 iv_len);
168extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
169 const u8 *key, unsigned int keylen);
170extern int crypto4xx_encrypt(struct ablkcipher_request *req);
171extern int crypto4xx_decrypt(struct ablkcipher_request *req);
172extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
173extern int crypto4xx_hash_digest(struct ahash_request *req);
174extern int crypto4xx_hash_final(struct ahash_request *req);
175extern int crypto4xx_hash_update(struct ahash_request *req);
176extern int crypto4xx_hash_init(struct ahash_request *req);
177#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
new file mode 100644
index 000000000000..7d4edb002619
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -0,0 +1,284 @@
1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
 17 * This file defines the register set for the security subsystem.
18 */
19
20#ifndef __CRYPTO4XX_REG_DEF_H__
21#define __CRYPTO4XX_REG_DEF_H__
22
23/* CRYPTO4XX Register offset */
24#define CRYPTO4XX_DESCRIPTOR 0x00000000
25#define CRYPTO4XX_CTRL_STAT 0x00000000
26#define CRYPTO4XX_SOURCE 0x00000004
27#define CRYPTO4XX_DEST 0x00000008
28#define CRYPTO4XX_SA 0x0000000C
29#define CRYPTO4XX_SA_LENGTH 0x00000010
30#define CRYPTO4XX_LENGTH 0x00000014
31
32#define CRYPTO4XX_PE_DMA_CFG 0x00000040
33#define CRYPTO4XX_PE_DMA_STAT 0x00000044
34#define CRYPTO4XX_PDR_BASE 0x00000048
35#define CRYPTO4XX_RDR_BASE 0x0000004c
36#define CRYPTO4XX_RING_SIZE 0x00000050
37#define CRYPTO4XX_RING_CTRL 0x00000054
38#define CRYPTO4XX_INT_RING_STAT 0x00000058
39#define CRYPTO4XX_EXT_RING_STAT 0x0000005c
40#define CRYPTO4XX_IO_THRESHOLD 0x00000060
41#define CRYPTO4XX_GATH_RING_BASE 0x00000064
42#define CRYPTO4XX_SCAT_RING_BASE 0x00000068
43#define CRYPTO4XX_PART_RING_SIZE 0x0000006c
44#define CRYPTO4XX_PART_RING_CFG 0x00000070
45
46#define CRYPTO4XX_PDR_BASE_UADDR 0x00000080
47#define CRYPTO4XX_RDR_BASE_UADDR 0x00000084
48#define CRYPTO4XX_PKT_SRC_UADDR 0x00000088
49#define CRYPTO4XX_PKT_DEST_UADDR 0x0000008c
50#define CRYPTO4XX_SA_UADDR 0x00000090
51#define CRYPTO4XX_GATH_RING_BASE_UADDR 0x000000A0
52#define CRYPTO4XX_SCAT_RING_BASE_UADDR 0x000000A4
53
54#define CRYPTO4XX_SEQ_RD 0x00000408
55#define CRYPTO4XX_SEQ_MASK_RD 0x0000040C
56
57#define CRYPTO4XX_SA_CMD_0 0x00010600
58#define CRYPTO4XX_SA_CMD_1 0x00010604
59
60#define CRYPTO4XX_STATE_PTR 0x000106dc
61#define CRYPTO4XX_STATE_IV 0x00010700
62#define CRYPTO4XX_STATE_HASH_BYTE_CNT_0 0x00010710
63#define CRYPTO4XX_STATE_HASH_BYTE_CNT_1 0x00010714
64
65#define CRYPTO4XX_STATE_IDIGEST_0 0x00010718
66#define CRYPTO4XX_STATE_IDIGEST_1 0x0001071c
67
68#define CRYPTO4XX_DATA_IN 0x00018000
69#define CRYPTO4XX_DATA_OUT 0x0001c000
70
71#define CRYPTO4XX_INT_UNMASK_STAT 0x000500a0
72#define CRYPTO4XX_INT_MASK_STAT 0x000500a4
73#define CRYPTO4XX_INT_CLR 0x000500a4
74#define CRYPTO4XX_INT_EN 0x000500a8
75
76#define CRYPTO4XX_INT_PKA 0x00000002
77#define CRYPTO4XX_INT_PDR_DONE 0x00008000
78#define CRYPTO4XX_INT_MA_WR_ERR 0x00020000
79#define CRYPTO4XX_INT_MA_RD_ERR 0x00010000
80#define CRYPTO4XX_INT_PE_ERR 0x00000200
81#define CRYPTO4XX_INT_USER_DMA_ERR 0x00000040
82#define CRYPTO4XX_INT_SLAVE_ERR 0x00000010
83#define CRYPTO4XX_INT_MASTER_ERR 0x00000008
84#define CRYPTO4XX_INT_ERROR 0x00030258
85
86#define CRYPTO4XX_INT_CFG 0x000500ac
87#define CRYPTO4XX_INT_DESCR_RD 0x000500b0
88#define CRYPTO4XX_INT_DESCR_CNT 0x000500b4
89#define CRYPTO4XX_INT_TIMEOUT_CNT 0x000500b8
90
91#define CRYPTO4XX_DEVICE_CTRL 0x00060080
92#define CRYPTO4XX_DEVICE_ID 0x00060084
93#define CRYPTO4XX_DEVICE_INFO 0x00060088
94#define CRYPTO4XX_DMA_USER_SRC 0x00060094
95#define CRYPTO4XX_DMA_USER_DEST 0x00060098
96#define CRYPTO4XX_DMA_USER_CMD 0x0006009C
97
98#define CRYPTO4XX_DMA_CFG 0x000600d4
99#define CRYPTO4XX_BYTE_ORDER_CFG 0x000600d8
100#define CRYPTO4XX_ENDIAN_CFG 0x000600d8
101
102#define CRYPTO4XX_PRNG_STAT 0x00070000
103#define CRYPTO4XX_PRNG_CTRL 0x00070004
104#define CRYPTO4XX_PRNG_SEED_L 0x00070008
105#define CRYPTO4XX_PRNG_SEED_H 0x0007000c
106
107#define CRYPTO4XX_PRNG_RES_0 0x00070020
108#define CRYPTO4XX_PRNG_RES_1 0x00070024
109#define CRYPTO4XX_PRNG_RES_2 0x00070028
110#define CRYPTO4XX_PRNG_RES_3 0x0007002C
111
112#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
113#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
114
115/**
 116 * Initialize crypto engine registers and memory bases.
117 */
118#define PPC4XX_PDR_POLL 0x3ff
119#define PPC4XX_OUTPUT_THRESHOLD 2
120#define PPC4XX_INPUT_THRESHOLD 2
121#define PPC4XX_PD_SIZE 6
122#define PPC4XX_CTX_DONE_INT 0x2000
123#define PPC4XX_PD_DONE_INT 0x8000
124#define PPC4XX_BYTE_ORDER 0x22222
125#define PPC4XX_INTERRUPT_CLR 0x3ffff
126#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
127#define PPC4XX_DC_3DES_EN 1
128#define PPC4XX_INT_DESCR_CNT 4
129#define PPC4XX_INT_TIMEOUT_CNT 0
130#define PPC4XX_INT_CFG 1
131/**
 132 * All of the following defines are ad hoc.
133 */
134#define PPC4XX_RING_RETRY 100
135#define PPC4XX_RING_POLL 100
136#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
137#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
138
139/**
 140 * Generic Security Association (SA) with all possible fields. These are
 141 * unlikely to be used except for reference purposes. The structure format
 142 * cannot be changed, as the hardware expects the layout as defined.
 143 * Fields can be removed or reduced, but the ordering cannot be changed.
144 */
145#define CRYPTO4XX_DMA_CFG_OFFSET 0x40
146union ce_pe_dma_cfg {
147 struct {
148 u32 rsv:7;
149 u32 dir_host:1;
150 u32 rsv1:2;
151 u32 bo_td_en:1;
152 u32 dis_pdr_upd:1;
153 u32 bo_sgpd_en:1;
154 u32 bo_data_en:1;
155 u32 bo_sa_en:1;
156 u32 bo_pd_en:1;
157 u32 rsv2:4;
158 u32 dynamic_sa_en:1;
159 u32 pdr_mode:2;
160 u32 pe_mode:1;
161 u32 rsv3:5;
162 u32 reset_sg:1;
163 u32 reset_pdr:1;
164 u32 reset_pe:1;
165 } bf;
166 u32 w;
167} __attribute__((packed));
168
169#define CRYPTO4XX_PDR_BASE_OFFSET 0x48
170#define CRYPTO4XX_RDR_BASE_OFFSET 0x4c
171#define CRYPTO4XX_RING_SIZE_OFFSET 0x50
172union ce_ring_size {
173 struct {
174 u32 ring_offset:16;
175 u32 rsv:6;
176 u32 ring_size:10;
177 } bf;
178 u32 w;
179} __attribute__((packed));
180
181#define CRYPTO4XX_RING_CONTROL_OFFSET 0x54
182union ce_ring_contol {
183 struct {
184 u32 continuous:1;
185 u32 rsv:5;
186 u32 ring_retry_divisor:10;
187 u32 rsv1:4;
188 u32 ring_poll_divisor:10;
189 } bf;
190 u32 w;
191} __attribute__((packed));
192
193#define CRYPTO4XX_IO_THRESHOLD_OFFSET 0x60
194union ce_io_threshold {
195 struct {
196 u32 rsv:6;
197 u32 output_threshold:10;
198 u32 rsv1:6;
199 u32 input_threshold:10;
200 } bf;
201 u32 w;
202} __attribute__((packed));
203
204#define CRYPTO4XX_GATHER_RING_BASE_OFFSET 0x64
205#define CRYPTO4XX_SCATTER_RING_BASE_OFFSET 0x68
206
207union ce_part_ring_size {
208 struct {
209 u32 sdr_size:16;
210 u32 gdr_size:16;
211 } bf;
212 u32 w;
213} __attribute__((packed));
214
215#define MAX_BURST_SIZE_32 0
216#define MAX_BURST_SIZE_64 1
217#define MAX_BURST_SIZE_128 2
218#define MAX_BURST_SIZE_256 3
219
220/* gather descriptor control length */
221struct gd_ctl_len {
222 u32 len:16;
223 u32 rsv:14;
224 u32 done:1;
225 u32 ready:1;
226} __attribute__((packed));
227
228struct ce_gd {
229 u32 ptr;
230 struct gd_ctl_len ctl_len;
231} __attribute__((packed));
232
233struct sd_ctl {
234 u32 ctl:30;
235 u32 done:1;
236 u32 rdy:1;
237} __attribute__((packed));
238
239struct ce_sd {
240 u32 ptr;
241 struct sd_ctl ctl;
242} __attribute__((packed));
243
244#define PD_PAD_CTL_32 0x10
245#define PD_PAD_CTL_64 0x20
246#define PD_PAD_CTL_128 0x40
247#define PD_PAD_CTL_256 0x80
248union ce_pd_ctl {
249 struct {
250 u32 pd_pad_ctl:8;
251 u32 status:8;
252 u32 next_hdr:8;
253 u32 rsv:2;
254 u32 cached_sa:1;
255 u32 hash_final:1;
256 u32 init_arc4:1;
257 u32 rsv1:1;
258 u32 pe_done:1;
259 u32 host_ready:1;
260 } bf;
261 u32 w;
262} __attribute__((packed));
263
264union ce_pd_ctl_len {
265 struct {
266 u32 bypass:8;
267 u32 pe_done:1;
268 u32 host_ready:1;
269 u32 rsv:2;
270 u32 pkt_len:20;
271 } bf;
272 u32 w;
273} __attribute__((packed));
274
275struct ce_pd {
276 union ce_pd_ctl pd_ctl;
277 u32 src;
278 u32 dest;
279 u32 sa; /* get from ctx->sa_dma_addr */
280 u32 sa_len; /* only if dynamic sa is used */
281 union ce_pd_ctl_len pd_ctl_len;
282
283} __attribute__((packed));
284#endif
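
Each control word above is exposed both as a raw u32 (.w), used when the whole word is programmed at once (as crypto4xx_build_pd() does with pd->pd_ctl.w), and as named bitfields (.bf), used for field-level tests like the completion check in the bottom-half tasklet. A small sketch against ce_pd, using only names from this header; note that the bitfield layouts assume the PPC4xx's big-endian bit ordering:

	/* true once the engine has finished this in-memory descriptor */
	static bool pd_is_complete(struct ce_pd *pd)
	{
		return pd->pd_ctl.bf.pe_done && !pd->pd_ctl.bf.host_ready;
	}

	/* clear the done bit so the entry can be recycled by software */
	static void pd_ack(struct ce_pd *pd)
	{
		pd->pd_ctl.bf.pe_done = 0;
	}
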
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
new file mode 100644
index 000000000000..466fd94cd4a3
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -0,0 +1,108 @@
1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * @file crypto4xx_sa.c
18 *
 19 * This file implements the security
 20 * association (SA) format.
21 */
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/mod_devicetable.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock_types.h>
28#include <linux/highmem.h>
29#include <linux/scatterlist.h>
30#include <linux/crypto.h>
31#include <crypto/algapi.h>
32#include <crypto/des.h>
33#include "crypto4xx_reg_def.h"
34#include "crypto4xx_sa.h"
35#include "crypto4xx_core.h"
36
37u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx)
38{
39 u32 offset;
40 union dynamic_sa_contents cts;
41
42 if (ctx->direction == DIR_INBOUND)
43 cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
44 else
45 cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
46 offset = cts.bf.key_size
47 + cts.bf.inner_size
48 + cts.bf.outer_size
49 + cts.bf.spi
50 + cts.bf.seq_num0
51 + cts.bf.seq_num1
52 + cts.bf.seq_num_mask0
53 + cts.bf.seq_num_mask1
54 + cts.bf.seq_num_mask2
55 + cts.bf.seq_num_mask3;
56
57 return sizeof(struct dynamic_sa_ctl) + offset * 4;
58}
59
60u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
61{
62 u32 offset;
63 union dynamic_sa_contents cts;
64
65 if (ctx->direction == DIR_INBOUND)
66 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
67 else
68 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
69 offset = cts.bf.key_size
70 + cts.bf.inner_size
71 + cts.bf.outer_size
72 + cts.bf.spi
73 + cts.bf.seq_num0
74 + cts.bf.seq_num1
75 + cts.bf.seq_num_mask0
76 + cts.bf.seq_num_mask1
77 + cts.bf.seq_num_mask2
78 + cts.bf.seq_num_mask3
79 + cts.bf.iv0
80 + cts.bf.iv1
81 + cts.bf.iv2
82 + cts.bf.iv3;
83
84 return sizeof(struct dynamic_sa_ctl) + offset * 4;
85}
86
87u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
88{
89 union dynamic_sa_contents cts;
90
91 if (ctx->direction == DIR_INBOUND)
92 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
93 else
94 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
95 return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
96}
97
98u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
99{
100 union dynamic_sa_contents cts;
101
102 if (ctx->direction == DIR_INBOUND)
103 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
104 else
105 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
106
107 return sizeof(struct dynamic_sa_ctl);
108}
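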
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
new file mode 100644
index 000000000000..4b83ed7e5570
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -0,0 +1,243 @@
1/**
2 * AMCC SoC PPC4xx Crypto Driver
3 *
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
 17 * This file defines the security
 18 * association (SA) format.
19 */
20
21#ifndef __CRYPTO4XX_SA_H__
22#define __CRYPTO4XX_SA_H__
23
24#define AES_IV_SIZE 16
25
26/**
27 * Contents of Dynamic Security Association (SA) with all possible fields
28 */
29union dynamic_sa_contents {
30 struct {
31 u32 arc4_state_ptr:1;
32 u32 arc4_ij_ptr:1;
33 u32 state_ptr:1;
34 u32 iv3:1;
35 u32 iv2:1;
36 u32 iv1:1;
37 u32 iv0:1;
38 u32 seq_num_mask3:1;
39 u32 seq_num_mask2:1;
40 u32 seq_num_mask1:1;
41 u32 seq_num_mask0:1;
42 u32 seq_num1:1;
43 u32 seq_num0:1;
44 u32 spi:1;
45 u32 outer_size:5;
46 u32 inner_size:5;
47 u32 key_size:4;
48 u32 cmd_size:4;
49 } bf;
50 u32 w;
51} __attribute__((packed));
52
53#define DIR_OUTBOUND 0
54#define DIR_INBOUND 1
55#define SA_OP_GROUP_BASIC 0
56#define SA_OPCODE_ENCRYPT 0
57#define SA_OPCODE_DECRYPT 0
58#define SA_OPCODE_HASH 3
59#define SA_CIPHER_ALG_DES 0
60#define SA_CIPHER_ALG_3DES 1
61#define SA_CIPHER_ALG_ARC4 2
62#define SA_CIPHER_ALG_AES 3
63#define SA_CIPHER_ALG_KASUMI 4
64#define SA_CIPHER_ALG_NULL 15
65
66#define SA_HASH_ALG_MD5 0
67#define SA_HASH_ALG_SHA1 1
68#define SA_HASH_ALG_NULL 15
69#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
70
71#define SA_LOAD_HASH_FROM_SA 0
72#define SA_LOAD_HASH_FROM_STATE 2
73#define SA_NOT_LOAD_HASH 3
74#define SA_LOAD_IV_FROM_SA 0
75#define SA_LOAD_IV_FROM_INPUT 1
76#define SA_LOAD_IV_FROM_STATE 2
77#define SA_LOAD_IV_GEN_IV 3
78
79#define SA_PAD_TYPE_CONSTANT 2
80#define SA_PAD_TYPE_ZERO 3
81#define SA_PAD_TYPE_TLS 5
82#define SA_PAD_TYPE_DTLS 5
83#define SA_NOT_SAVE_HASH 0
84#define SA_SAVE_HASH 1
85#define SA_NOT_SAVE_IV 0
86#define SA_SAVE_IV 1
87#define SA_HEADER_PROC 1
88#define SA_NO_HEADER_PROC 0
89
90union sa_command_0 {
91 struct {
92 u32 scatter:1;
93 u32 gather:1;
94 u32 save_hash_state:1;
95 u32 save_iv:1;
96 u32 load_hash_state:2;
97 u32 load_iv:2;
98 u32 digest_len:4;
99 u32 hdr_proc:1;
100 u32 extend_pad:1;
101 u32 stream_cipher_pad:1;
102 u32 rsv:1;
103 u32 hash_alg:4;
104 u32 cipher_alg:4;
105 u32 pad_type:2;
106 u32 op_group:2;
107 u32 dir:1;
108 u32 opcode:3;
109 } bf;
110 u32 w;
111} __attribute__((packed));
112
113#define CRYPTO_MODE_ECB 0
114#define CRYPTO_MODE_CBC 1
115
116#define CRYPTO_FEEDBACK_MODE_NO_FB 0
117#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
118#define CRYPTO_FEEDBACK_MODE_8BIT_CFB 1
119#define CRYPTO_FEEDBACK_MODE_1BIT_CFB 2
120#define CRYPTO_FEEDBACK_MODE_128BIT_CFB 3
121
122#define SA_AES_KEY_LEN_128 2
123#define SA_AES_KEY_LEN_192 3
124#define SA_AES_KEY_LEN_256 4
125
126#define SA_REV2 1
127/**
128 * The following defines are bits of sa_command_1.
129 * In basic hash mode this bit selects plain hash or HMAC.
130 * In IPsec mode, this bit controls muting.
131 */
132#define SA_HASH_MODE_HASH 0
133#define SA_HASH_MODE_HMAC 1
134#define SA_MC_ENABLE 0
135#define SA_MC_DISABLE 1
136#define SA_NOT_COPY_HDR 0
137#define SA_COPY_HDR 1
138#define SA_NOT_COPY_PAD 0
139#define SA_COPY_PAD 1
140#define SA_NOT_COPY_PAYLOAD 0
141#define SA_COPY_PAYLOAD 1
142#define SA_EXTENDED_SN_OFF 0
143#define SA_EXTENDED_SN_ON 1
144#define SA_SEQ_MASK_OFF 0
145#define SA_SEQ_MASK_ON 1
146
147union sa_command_1 {
148 struct {
149 u32 crypto_mode31:1;
150 u32 save_arc4_state:1;
151 u32 arc4_stateful:1;
152 u32 key_len:5;
153 u32 hash_crypto_offset:8;
154 u32 sa_rev:2;
155 u32 byte_offset:1;
156 u32 hmac_muting:1;
157 u32 feedback_mode:2;
158 u32 crypto_mode9_8:2;
159 u32 extended_seq_num:1;
160 u32 seq_num_mask:1;
161 u32 mutable_bit_proc:1;
162 u32 ip_version:1;
163 u32 copy_pad:1;
164 u32 copy_payload:1;
165 u32 copy_hdr:1;
166 u32 rsv1:1;
167 } bf;
168 u32 w;
169} __attribute__((packed));
170
171struct dynamic_sa_ctl {
172 u32 sa_contents;
173 union sa_command_0 sa_command_0;
174 union sa_command_1 sa_command_1;
175} __attribute__((packed));
176
177/**
178 * State Record for Security Association (SA)
179 */
180struct sa_state_record {
181 u32 save_iv[4];
182 u32 save_hash_byte_cnt[2];
183 u32 save_digest[16];
184} __attribute__((packed));
185
186/**
187 * Security Association (SA) for AES128
188 *
189 */
190struct dynamic_sa_aes128 {
191 struct dynamic_sa_ctl ctrl;
192 u32 key[4];
 193	u32 iv[4];	/* for CBC, OFB, and CFB modes */
194 u32 state_ptr;
195 u32 reserved;
196} __attribute__((packed));
197
198#define SA_AES128_LEN (sizeof(struct dynamic_sa_aes128)/4)
199#define SA_AES128_CONTENTS 0x3e000042
200
201/*
202 * Security Association (SA) for AES192
203 */
204struct dynamic_sa_aes192 {
205 struct dynamic_sa_ctl ctrl;
206 u32 key[6];
 207	u32 iv[4];	/* for CBC, OFB, and CFB modes */
208 u32 state_ptr;
209 u32 reserved;
210} __attribute__((packed));
211
212#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
213#define SA_AES192_CONTENTS 0x3e000062
214
215/**
216 * Security Association (SA) for AES256
217 */
218struct dynamic_sa_aes256 {
219 struct dynamic_sa_ctl ctrl;
220 u32 key[8];
 221	u32 iv[4];	/* for CBC, OFB, and CFB modes */
222 u32 state_ptr;
223 u32 reserved;
224} __attribute__((packed));
225
226#define SA_AES256_LEN (sizeof(struct dynamic_sa_aes256)/4)
227#define SA_AES256_CONTENTS 0x3e000082
228#define SA_AES_CONTENTS 0x3e000002
229
230/**
231 * Security Association (SA) for HASH160: HMAC-SHA1
232 */
233struct dynamic_sa_hash160 {
234 struct dynamic_sa_ctl ctrl;
235 u32 inner_digest[5];
236 u32 outer_digest[5];
237 u32 state_ptr;
238 u32 reserved;
239} __attribute__((packed));
240#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
241#define SA_HASH160_CONTENTS 0x2000a502
242
243#endif
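
The *_CONTENTS constants are just union dynamic_sa_contents packed into a single word. A worked decode of SA_AES128_CONTENTS (0x3e000042), assuming the big-endian bitfield layout PPC4xx uses (first-declared field in the most significant bits):

	/*
	 * 0x3e000042 = 0011 1110 0000 0000 0000 0000 0100 0010
	 *
	 *   cmd_size         = 2  (the two sa_command words)
	 *   key_size         = 4  -> 4 key words = 128-bit AES key
	 *   iv0..iv3         = 1  -> 4 IV words present
	 *   state_ptr        = 1  -> state-record pointer present
	 *   all other fields = 0
	 *
	 * get_dynamic_sa_offset_state_ptr_field() in crypto4xx_sa.c then
	 * computes sizeof(struct dynamic_sa_ctl) + (4 + 4) * 4 = 12 + 32
	 * = 44 bytes, which is exactly where state_ptr sits in
	 * struct dynamic_sa_aes128 (12-byte ctrl + 16-byte key + 16-byte iv).
	 */
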
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 656a4c66a568..7524ba3b6f3c 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -17,10 +17,14 @@
17#define AES_MAX_KEYLENGTH (15 * 16) 17#define AES_MAX_KEYLENGTH (15 * 16)
18#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32)) 18#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
19 19
20/*
21 * Please ensure that the first two fields are 16-byte aligned
22 * relative to the start of the structure, i.e., don't move them!
23 */
20struct crypto_aes_ctx { 24struct crypto_aes_ctx {
21 u32 key_length;
22 u32 key_enc[AES_MAX_KEYLENGTH_U32]; 25 u32 key_enc[AES_MAX_KEYLENGTH_U32];
23 u32 key_dec[AES_MAX_KEYLENGTH_U32]; 26 u32 key_dec[AES_MAX_KEYLENGTH_U32];
27 u32 key_length;
24}; 28};
25 29
26extern const u32 crypto_ft_tab[4][256]; 30extern const u32 crypto_ft_tab[4][256];
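
The reordering above serves the AES-NI code added elsewhere in this series: with key_length moved to the end, key_enc and key_dec start at offset 0 and can be kept 16-byte aligned, which the SSE movaps loads in the assembler require. A hedged sketch of how a glue layer can guarantee that alignment inside a tfm context (AESNI_ALIGN and aes_ctx() are modeled on the aesni-intel glue, not quoted from it):

	#define AESNI_ALIGN	16

	static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
	{
		/* round the context pointer up so key_enc/key_dec are
		 * 16-byte aligned; the slack comes from an enlarged
		 * cra_ctxsize */
		return PTR_ALIGN(raw_ctx, AESNI_ALIGN);
	}
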
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
new file mode 100644
index 000000000000..86163ef24219
--- /dev/null
+++ b/include/crypto/compress.h
@@ -0,0 +1,145 @@
1/*
2 * Compress: Compression algorithms under the cryptographic API.
3 *
4 * Copyright 2008 Sony Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program.
17 * If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#ifndef _CRYPTO_COMPRESS_H
21#define _CRYPTO_COMPRESS_H
22
23#include <linux/crypto.h>
24
25
26struct comp_request {
27 const void *next_in; /* next input byte */
28 void *next_out; /* next output byte */
29 unsigned int avail_in; /* bytes available at next_in */
30 unsigned int avail_out; /* bytes available at next_out */
31};
32
33enum zlib_comp_params {
34 ZLIB_COMP_LEVEL = 1, /* e.g. Z_DEFAULT_COMPRESSION */
35 ZLIB_COMP_METHOD, /* e.g. Z_DEFLATED */
36 ZLIB_COMP_WINDOWBITS, /* e.g. MAX_WBITS */
37 ZLIB_COMP_MEMLEVEL, /* e.g. DEF_MEM_LEVEL */
38 ZLIB_COMP_STRATEGY, /* e.g. Z_DEFAULT_STRATEGY */
39 __ZLIB_COMP_MAX,
40};
41
42#define ZLIB_COMP_MAX (__ZLIB_COMP_MAX - 1)
43
44
45enum zlib_decomp_params {
46 ZLIB_DECOMP_WINDOWBITS = 1, /* e.g. DEF_WBITS */
47 __ZLIB_DECOMP_MAX,
48};
49
50#define ZLIB_DECOMP_MAX (__ZLIB_DECOMP_MAX - 1)
51
52
53struct crypto_pcomp {
54 struct crypto_tfm base;
55};
56
57struct pcomp_alg {
58 int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
59 unsigned int len);
60 int (*compress_init)(struct crypto_pcomp *tfm);
61 int (*compress_update)(struct crypto_pcomp *tfm,
62 struct comp_request *req);
63 int (*compress_final)(struct crypto_pcomp *tfm,
64 struct comp_request *req);
65 int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
66 unsigned int len);
67 int (*decompress_init)(struct crypto_pcomp *tfm);
68 int (*decompress_update)(struct crypto_pcomp *tfm,
69 struct comp_request *req);
70 int (*decompress_final)(struct crypto_pcomp *tfm,
71 struct comp_request *req);
72
73 struct crypto_alg base;
74};
75
76extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
77 u32 mask);
78
79static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm)
80{
81 return &tfm->base;
82}
83
84static inline void crypto_free_pcomp(struct crypto_pcomp *tfm)
85{
86 crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm));
87}
88
89static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg)
90{
91 return container_of(alg, struct pcomp_alg, base);
92}
93
94static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
95{
96 return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg);
97}
98
99static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
100 void *params, unsigned int len)
101{
102 return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
103}
104
105static inline int crypto_compress_init(struct crypto_pcomp *tfm)
106{
107 return crypto_pcomp_alg(tfm)->compress_init(tfm);
108}
109
110static inline int crypto_compress_update(struct crypto_pcomp *tfm,
111 struct comp_request *req)
112{
113 return crypto_pcomp_alg(tfm)->compress_update(tfm, req);
114}
115
116static inline int crypto_compress_final(struct crypto_pcomp *tfm,
117 struct comp_request *req)
118{
119 return crypto_pcomp_alg(tfm)->compress_final(tfm, req);
120}
121
122static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
123 void *params, unsigned int len)
124{
125 return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
126}
127
128static inline int crypto_decompress_init(struct crypto_pcomp *tfm)
129{
130 return crypto_pcomp_alg(tfm)->decompress_init(tfm);
131}
132
133static inline int crypto_decompress_update(struct crypto_pcomp *tfm,
134 struct comp_request *req)
135{
136 return crypto_pcomp_alg(tfm)->decompress_update(tfm, req);
137}
138
139static inline int crypto_decompress_final(struct crypto_pcomp *tfm,
140 struct comp_request *req)
141{
142 return crypto_pcomp_alg(tfm)->decompress_final(tfm, req);
143}
144
145#endif /* _CRYPTO_COMPRESS_H */
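
The point of pcomp over the old crypto_comp interface is streaming: update() may be called repeatedly as input arrives and output space becomes available. A hedged sketch of the call sequence; the params blob is algorithm-specific (for the zlib driver added by this series it is a netlink-attribute-encoded list, which is why nlattr moves to lib/ below), and the return convention of update()/final() (bytes produced, or a negative errno) is assumed from that driver:

	static int pcomp_compress_buf(void *params, unsigned int paramsize,
				      const void *src, unsigned int slen,
				      void *dst, unsigned int dlen)
	{
		struct crypto_pcomp *tfm;
		struct comp_request creq;
		int err;

		tfm = crypto_alloc_pcomp("zlib", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_compress_setup(tfm, params, paramsize);
		if (err)
			goto out;
		err = crypto_compress_init(tfm);
		if (err)
			goto out;

		creq.next_in   = src;
		creq.avail_in  = slen;
		creq.next_out  = dst;
		creq.avail_out = dlen;

		/* update() consumes input and produces output incrementally */
		do {
			err = crypto_compress_update(tfm, &creq);
		} while (err >= 0 && creq.avail_in);
		if (err >= 0)
			err = crypto_compress_final(tfm, &creq);
	out:
		crypto_free_pcomp(tfm);
		return err < 0 ? err : 0;
	}
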
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
new file mode 100644
index 000000000000..55fa7bbdbc71
--- /dev/null
+++ b/include/crypto/cryptd.h
@@ -0,0 +1,27 @@
1/*
2 * Software async crypto daemon
3 */
4
5#ifndef _CRYPTO_CRYPT_H
6#define _CRYPTO_CRYPT_H
7
8#include <linux/crypto.h>
9#include <linux/kernel.h>
10
11struct cryptd_ablkcipher {
12 struct crypto_ablkcipher base;
13};
14
15static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
16 struct crypto_ablkcipher *tfm)
17{
18 return (struct cryptd_ablkcipher *)tfm;
19}
20
21/* alg_name should be algorithm to be cryptd-ed */
22struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
23 u32 type, u32 mask);
24struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
25void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
26
27#endif
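
cryptd wraps a synchronous blkcipher in an asynchronous instance whose requests run on the kcrypto_wq workqueue; the AES-NI glue in this series uses it so FPU-dependent work can be deferred out of contexts where the FPU is unavailable. A hedged usage sketch (the wrapped algorithm name is illustrative, and key/keylen are assumed to be in scope):

	struct cryptd_ablkcipher *ctfm;

	/* wrap the synchronous cipher in a workqueue-backed async instance */
	ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* program keys on the underlying synchronous tfm ... */
	crypto_blkcipher_setkey(cryptd_ablkcipher_child(ctfm), key, keylen);

	/* ... and submit requests through the async front end, &ctfm->base */

	cryptd_free_ablkcipher(ctfm);
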
diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h
new file mode 100644
index 000000000000..a7d252daf91b
--- /dev/null
+++ b/include/crypto/crypto_wq.h
@@ -0,0 +1,7 @@
1#ifndef CRYPTO_WQ_H
2#define CRYPTO_WQ_H
3
4#include <linux/workqueue.h>
5
6extern struct workqueue_struct *kcrypto_wq;
7#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d797e119e3d5..d56bb71617c3 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -231,6 +231,11 @@ static inline unsigned int crypto_shash_alignmask(
231 return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); 231 return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
232} 232}
233 233
234static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
235{
236 return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
237}
238
234static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) 239static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
235{ 240{
236 return container_of(alg, struct shash_alg, base); 241 return container_of(alg, struct shash_alg, base);
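
The new accessor matters for HMAC-style code, where the ipad/opad buffers are sized by the underlying hash's block size (64 bytes for SHA-1) rather than its digest size. A minimal sketch:

	static int sha1_block_size(void)
	{
		struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
		int bs;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		bs = crypto_shash_blocksize(tfm);	/* 64 for SHA-1 */
		crypto_free_shash(tfm);
		return bs;
	}
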
diff --git a/include/crypto/internal/compress.h b/include/crypto/internal/compress.h
new file mode 100644
index 000000000000..178a888d1d93
--- /dev/null
+++ b/include/crypto/internal/compress.h
@@ -0,0 +1,28 @@
1/*
2 * Compress: Compression algorithms under the cryptographic API.
3 *
4 * Copyright 2008 Sony Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program.
17 * If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#ifndef _CRYPTO_INTERNAL_COMPRESS_H
21#define _CRYPTO_INTERNAL_COMPRESS_H
22
23#include <crypto/compress.h>
24
25extern int crypto_register_pcomp(struct pcomp_alg *alg);
26extern int crypto_unregister_pcomp(struct pcomp_alg *alg);
27
28#endif /* _CRYPTO_INTERNAL_COMPRESS_H */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 1f2e9020acc6..ec29fa268b94 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -40,6 +40,7 @@
40#define CRYPTO_ALG_TYPE_SHASH 0x00000009 40#define CRYPTO_ALG_TYPE_SHASH 0x00000009
41#define CRYPTO_ALG_TYPE_AHASH 0x0000000a 41#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
42#define CRYPTO_ALG_TYPE_RNG 0x0000000c 42#define CRYPTO_ALG_TYPE_RNG 0x0000000c
43#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
43 44
44#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 45#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
45#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c 46#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -548,9 +549,6 @@ struct crypto_attr_u32 {
548 * Transform user interface. 549 * Transform user interface.
549 */ 550 */
550 551
551struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
552 const struct crypto_type *frontend,
553 u32 type, u32 mask);
554struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 552struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
555void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); 553void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
556 554
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
new file mode 100644
index 000000000000..dd253177f65f
--- /dev/null
+++ b/include/linux/timeriomem-rng.h
@@ -0,0 +1,21 @@
1/*
2 * linux/include/linux/timeriomem-rng.h
3 *
4 * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/completion.h>
12
13struct timeriomem_rng_data {
14 struct completion completion;
15 unsigned int present:1;
16
17 u32 __iomem *address;
18
19 /* measures in usecs */
20 unsigned int period;
21};
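
Board code hands this structure to the new timeriomem-rng driver as platform data. A hedged sketch: the "timeriomem_rng" device name and the expectation that board code supplies an ioremap()ed register address follow the driver added by this series, and the my_* names and MY_RNG_PHYS are illustrative:

	static struct timeriomem_rng_data my_rng_data = {
		.period	= 1000000,	/* entropy register refreshes ~1/sec */
		/* .address is filled in with an ioremap()ed register below */
	};

	static struct platform_device my_rng_device = {
		.name	= "timeriomem_rng",
		.id	= -1,
		.dev	= {
			.platform_data	= &my_rng_data,
		},
	};

	static int __init my_board_rng_init(void)
	{
		my_rng_data.address = ioremap(MY_RNG_PHYS, 4);
		if (!my_rng_data.address)
			return -ENOMEM;
		return platform_device_register(&my_rng_device);
	}
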
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24b9083..cea9e30a88ff 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -174,4 +174,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
174 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 174 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
175 depends on EXPERIMENTAL && BROKEN 175 depends on EXPERIMENTAL && BROKEN
176 176
177#
178# Netlink attribute parsing support is select'ed if needed
179#
180config NLATTR
181 bool
182
177endmenu 183endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..b2c09da02cae 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -84,6 +84,8 @@ obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
84 84
85obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o 85obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o
86 86
87obj-$(CONFIG_NLATTR) += nlattr.o
88
87hostprogs-y := gen_crc32table 89hostprogs-y := gen_crc32table
88clean-files := crc32table.h 90clean-files := crc32table.h
89 91
diff --git a/net/netlink/attr.c b/lib/nlattr.c
index 56c3ce7fe29a..80009a24e21d 100644
--- a/net/netlink/attr.c
+++ b/lib/nlattr.c
@@ -281,6 +281,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
281 return d; 281 return d;
282} 282}
283 283
284#ifdef CONFIG_NET
284/** 285/**
285 * __nla_reserve - reserve room for attribute on the skb 286 * __nla_reserve - reserve room for attribute on the skb
286 * @skb: socket buffer to reserve room on 287 * @skb: socket buffer to reserve room on
@@ -305,6 +306,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
305 306
306 return nla; 307 return nla;
307} 308}
309EXPORT_SYMBOL(__nla_reserve);
308 310
309/** 311/**
310 * __nla_reserve_nohdr - reserve room for attribute without header 312 * __nla_reserve_nohdr - reserve room for attribute without header
@@ -325,6 +327,7 @@ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
325 327
326 return start; 328 return start;
327} 329}
330EXPORT_SYMBOL(__nla_reserve_nohdr);
328 331
329/** 332/**
330 * nla_reserve - reserve room for attribute on the skb 333 * nla_reserve - reserve room for attribute on the skb
@@ -345,6 +348,7 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
345 348
346 return __nla_reserve(skb, attrtype, attrlen); 349 return __nla_reserve(skb, attrtype, attrlen);
347} 350}
351EXPORT_SYMBOL(nla_reserve);
348 352
349/** 353/**
350 * nla_reserve_nohdr - reserve room for attribute without header 354 * nla_reserve_nohdr - reserve room for attribute without header
@@ -363,6 +367,7 @@ void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
363 367
364 return __nla_reserve_nohdr(skb, attrlen); 368 return __nla_reserve_nohdr(skb, attrlen);
365} 369}
370EXPORT_SYMBOL(nla_reserve_nohdr);
366 371
367/** 372/**
368 * __nla_put - Add a netlink attribute to a socket buffer 373 * __nla_put - Add a netlink attribute to a socket buffer
@@ -382,6 +387,7 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
382 nla = __nla_reserve(skb, attrtype, attrlen); 387 nla = __nla_reserve(skb, attrtype, attrlen);
383 memcpy(nla_data(nla), data, attrlen); 388 memcpy(nla_data(nla), data, attrlen);
384} 389}
390EXPORT_SYMBOL(__nla_put);
385 391
386/** 392/**
387 * __nla_put_nohdr - Add a netlink attribute without header 393 * __nla_put_nohdr - Add a netlink attribute without header
@@ -399,6 +405,7 @@ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
399 start = __nla_reserve_nohdr(skb, attrlen); 405 start = __nla_reserve_nohdr(skb, attrlen);
400 memcpy(start, data, attrlen); 406 memcpy(start, data, attrlen);
401} 407}
408EXPORT_SYMBOL(__nla_put_nohdr);
402 409
403/** 410/**
404 * nla_put - Add a netlink attribute to a socket buffer 411 * nla_put - Add a netlink attribute to a socket buffer
@@ -418,6 +425,7 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
418 __nla_put(skb, attrtype, attrlen, data); 425 __nla_put(skb, attrtype, attrlen, data);
419 return 0; 426 return 0;
420} 427}
428EXPORT_SYMBOL(nla_put);
421 429
422/** 430/**
423 * nla_put_nohdr - Add a netlink attribute without header 431 * nla_put_nohdr - Add a netlink attribute without header
@@ -436,6 +444,7 @@ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
436 __nla_put_nohdr(skb, attrlen, data); 444 __nla_put_nohdr(skb, attrlen, data);
437 return 0; 445 return 0;
438} 446}
447EXPORT_SYMBOL(nla_put_nohdr);
439 448
440/** 449/**
441 * nla_append - Add a netlink attribute without header or padding 450 * nla_append - Add a netlink attribute without header or padding
@@ -454,20 +463,13 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
454 memcpy(skb_put(skb, attrlen), data, attrlen); 463 memcpy(skb_put(skb, attrlen), data, attrlen);
455 return 0; 464 return 0;
456} 465}
466EXPORT_SYMBOL(nla_append);
467#endif
457 468
458EXPORT_SYMBOL(nla_validate); 469EXPORT_SYMBOL(nla_validate);
459EXPORT_SYMBOL(nla_parse); 470EXPORT_SYMBOL(nla_parse);
460EXPORT_SYMBOL(nla_find); 471EXPORT_SYMBOL(nla_find);
461EXPORT_SYMBOL(nla_strlcpy); 472EXPORT_SYMBOL(nla_strlcpy);
462EXPORT_SYMBOL(__nla_reserve);
463EXPORT_SYMBOL(__nla_reserve_nohdr);
464EXPORT_SYMBOL(nla_reserve);
465EXPORT_SYMBOL(nla_reserve_nohdr);
466EXPORT_SYMBOL(__nla_put);
467EXPORT_SYMBOL(__nla_put_nohdr);
468EXPORT_SYMBOL(nla_put);
469EXPORT_SYMBOL(nla_put_nohdr);
470EXPORT_SYMBOL(nla_memcpy); 473EXPORT_SYMBOL(nla_memcpy);
471EXPORT_SYMBOL(nla_memcmp); 474EXPORT_SYMBOL(nla_memcmp);
472EXPORT_SYMBOL(nla_strcmp); 475EXPORT_SYMBOL(nla_strcmp);
473EXPORT_SYMBOL(nla_append);
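
After the move, the parsing half of nlattr is available to any code that selects NLATTR, while the skb-based constructors above are fenced by CONFIG_NET. A minimal sketch of the always-built side, validating and indexing a flat attribute stream (the attribute numbering and policy are illustrative):

	enum { MY_ATTR_UNSPEC, MY_ATTR_MTU, MY_ATTR_NAME, __MY_ATTR_MAX };
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_MTU]  = { .type = NLA_U32 },
		[MY_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
	};

	static int parse_blob(struct nlattr *head, int len)
	{
		struct nlattr *tb[MY_ATTR_MAX + 1];
		int err;

		/* validate and index the attributes in one pass */
		err = nla_parse(tb, MY_ATTR_MAX, head, len, my_policy);
		if (err)
			return err;

		if (tb[MY_ATTR_MTU])
			pr_debug("mtu=%u\n", nla_get_u32(tb[MY_ATTR_MTU]));
		return 0;
	}
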
diff --git a/net/Kconfig b/net/Kconfig
index cdb8fdef6c4a..eab40a481356 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig NET 5menuconfig NET
6 bool "Networking support" 6 bool "Networking support"
7 select NLATTR
7 ---help--- 8 ---help---
8 Unless you really know what you are doing, you should say Y here. 9 Unless you really know what you are doing, you should say Y here.
9 The reason is that some programs need kernel networking support even 10 The reason is that some programs need kernel networking support even
diff --git a/net/netlink/Makefile b/net/netlink/Makefile
index e3589c2de49e..bdd6ddf4e95b 100644
--- a/net/netlink/Makefile
+++ b/net/netlink/Makefile
@@ -2,4 +2,4 @@
2# Makefile for the netlink driver. 2# Makefile for the netlink driver.
3# 3#
4 4
5obj-y := af_netlink.o attr.o genetlink.o 5obj-y := af_netlink.o genetlink.o