author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-03-10 04:47:46 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>    2015-03-12 06:13:35 -0400
commit    006d0624fa0d71787448cacee0195bf20f2d47c8 (patch)
tree      3a5ac372472888966fa2ec9fe1d5683c17fe4f02 /arch/arm
parent    864cbeed4ab22de8c4d9a49101e9fd63c6f7fda2 (diff)
crypto: arm - add support for SHA-224/256 using ARMv8 Crypto Extensions
This implements the SHA-224/256 secure hash algorithm using the AArch32
versions of the ARMv8 Crypto Extensions for SHA2.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/crypto/Kconfig           9
-rw-r--r--  arch/arm/crypto/Makefile          2
-rw-r--r--  arch/arm/crypto/sha2-ce-core.S    134
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c    203
4 files changed, 348 insertions, 0 deletions
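
The new drivers register with the kernel's shash API under the generic names "sha224" and "sha256" (driver names "sha224-ce" and "sha256-ce", priority 200), so existing crypto API users pick them up automatically once the module loads on a CPU that advertises HWCAP2_SHA2. As a minimal sketch of how a kernel-side caller might exercise the result (the function example_sha256 below is hypothetical and not part of this patch):

#include <crypto/hash.h>
#include <linux/err.h>

/* Illustrative sketch only: hash a buffer with the highest-priority
 * "sha256" implementation, which resolves to "sha256-ce" on hardware
 * with the ARMv8 Crypto Extensions once this module is loaded.
 */
static int example_sha256(const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;
                err = crypto_shash_digest(desc, data, len, digest);
        }

        crypto_free_shash(tfm);
        return err;
}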
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index d7bc10beb8ac..9c1478e55a40 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -37,6 +37,15 @@ config CRYPTO_SHA1_ARM_CE
           SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
           using special ARMv8 Crypto Extensions.
 
+config CRYPTO_SHA2_ARM_CE
+        tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
+        depends on KERNEL_MODE_NEON
+        select CRYPTO_SHA256
+        select CRYPTO_HASH
+        help
+          SHA-256 secure hash standard (DFIPS 180-2) implemented
+          using special ARMv8 Crypto Extensions.
+
 config CRYPTO_SHA512_ARM_NEON
         tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
         depends on KERNEL_MODE_NEON
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index d92d05ba646e..4ea9f96c2782 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
 obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
+obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
 
 aes-arm-y := aes-armv4.o aes_glue.o
 aes-arm-bs-y := aesbs-core.o aesbs-glue.o
@@ -15,6 +16,7 @@ sha1-arm-y := sha1-armv4-large.o sha1_glue.o
 sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
 sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
 sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
+sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
 
 quiet_cmd_perl = PERL $@
       cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/sha2-ce-core.S b/arch/arm/crypto/sha2-ce-core.S
new file mode 100644
index 000000000000..96af09fe957b
--- /dev/null
+++ b/arch/arm/crypto/sha2-ce-core.S
@@ -0,0 +1,134 @@
+/*
+ * sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+        .text
+        .fpu            crypto-neon-fp-armv8
+
+        k0              .req    q7
+        k1              .req    q8
+        rk              .req    r3
+
+        ta0             .req    q9
+        ta1             .req    q10
+        tb0             .req    q10
+        tb1             .req    q9
+
+        dga             .req    q11
+        dgb             .req    q12
+
+        dg0             .req    q13
+        dg1             .req    q14
+        dg2             .req    q15
+
+        .macro          add_only, ev, s0
+        vmov            dg2, dg0
+        .ifnb           \s0
+        vld1.32         {k\ev}, [rk, :128]!
+        .endif
+        sha256h.32      dg0, dg1, tb\ev
+        sha256h2.32     dg1, dg2, tb\ev
+        .ifnb           \s0
+        vadd.u32        ta\ev, q\s0, k\ev
+        .endif
+        .endm
+
+        .macro          add_update, ev, s0, s1, s2, s3
+        sha256su0.32    q\s0, q\s1
+        add_only        \ev, \s1
+        sha256su1.32    q\s0, q\s2, q\s3
+        .endm
+
+        .align          6
+.Lsha256_rcon:
+        .word           0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+        .word           0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+        .word           0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+        .word           0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+        .word           0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+        .word           0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+        .word           0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+        .word           0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+        .word           0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+        .word           0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+        .word           0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+        .word           0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+        .word           0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+        .word           0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+        .word           0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+        .word           0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+        /*
+         * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+         *                        u8 *head);
+         */
+ENTRY(sha2_ce_transform)
+        /* load state */
+        vld1.32         {dga-dgb}, [r2]
+
+        /* load partial input (if supplied) */
+        teq             r3, #0
+        beq             0f
+        vld1.32         {q0-q1}, [r3]!
+        vld1.32         {q2-q3}, [r3]
+        teq             r0, #0
+        b               1f
+
+        /* load input */
+0:      vld1.32         {q0-q1}, [r1]!
+        vld1.32         {q2-q3}, [r1]!
+        subs            r0, r0, #1
+
+1:
+#ifndef CONFIG_CPU_BIG_ENDIAN
+        vrev32.8        q0, q0
+        vrev32.8        q1, q1
+        vrev32.8        q2, q2
+        vrev32.8        q3, q3
+#endif
+
+        /* load first round constant */
+        adr             rk, .Lsha256_rcon
+        vld1.32         {k0}, [rk, :128]!
+
+        vadd.u32        ta0, q0, k0
+        vmov            dg0, dga
+        vmov            dg1, dgb
+
+        add_update      1, 0, 1, 2, 3
+        add_update      0, 1, 2, 3, 0
+        add_update      1, 2, 3, 0, 1
+        add_update      0, 3, 0, 1, 2
+        add_update      1, 0, 1, 2, 3
+        add_update      0, 1, 2, 3, 0
+        add_update      1, 2, 3, 0, 1
+        add_update      0, 3, 0, 1, 2
+        add_update      1, 0, 1, 2, 3
+        add_update      0, 1, 2, 3, 0
+        add_update      1, 2, 3, 0, 1
+        add_update      0, 3, 0, 1, 2
+
+        add_only        1, 1
+        add_only        0, 2
+        add_only        1, 3
+        add_only        0
+
+        /* update state */
+        vadd.u32        dga, dga, dg0
+        vadd.u32        dgb, dgb, dg1
+        bne             0b
+
+        /* store new state */
+        vst1.32         {dga-dgb}, [r2]
+        bx              lr
+ENDPROC(sha2_ce_transform)
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
new file mode 100644
index 000000000000..9ffe8ad27402
--- /dev/null
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -0,0 +1,203 @@
+/*
+ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+                                  u8 *head);
+
+static int sha224_init(struct shash_desc *desc)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+
+        *sctx = (struct sha256_state){
+                .state = {
+                        SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+                        SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+                }
+        };
+        return 0;
+}
+
+static int sha256_init(struct shash_desc *desc)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+
+        *sctx = (struct sha256_state){
+                .state = {
+                        SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+                        SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+                }
+        };
+        return 0;
+}
+
+static int sha2_update(struct shash_desc *desc, const u8 *data,
+                       unsigned int len)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+        unsigned int partial;
+
+        if (!may_use_simd())
+                return crypto_sha256_update(desc, data, len);
+
+        partial = sctx->count % SHA256_BLOCK_SIZE;
+        sctx->count += len;
+
+        if ((partial + len) >= SHA256_BLOCK_SIZE) {
+                int blocks;
+
+                if (partial) {
+                        int p = SHA256_BLOCK_SIZE - partial;
+
+                        memcpy(sctx->buf + partial, data, p);
+                        data += p;
+                        len -= p;
+                }
+
+                blocks = len / SHA256_BLOCK_SIZE;
+                len %= SHA256_BLOCK_SIZE;
+
+                kernel_neon_begin();
+                sha2_ce_transform(blocks, data, sctx->state,
+                                  partial ? sctx->buf : NULL);
+                kernel_neon_end();
+
+                data += blocks * SHA256_BLOCK_SIZE;
+                partial = 0;
+        }
+        if (len)
+                memcpy(sctx->buf + partial, data, len);
+        return 0;
+}
+
+static void sha2_final(struct shash_desc *desc)
+{
+        static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+        __be64 bits = cpu_to_be64(sctx->count << 3);
+        u32 padlen = SHA256_BLOCK_SIZE
+                     - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
+
+        sha2_update(desc, padding, padlen);
+        sha2_update(desc, (const u8 *)&bits, sizeof(bits));
+}
+
+static int sha224_final(struct shash_desc *desc, u8 *out)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+        __be32 *dst = (__be32 *)out;
+        int i;
+
+        sha2_final(desc);
+
+        for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+                put_unaligned_be32(sctx->state[i], dst++);
+
+        *sctx = (struct sha256_state){};
+        return 0;
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+        __be32 *dst = (__be32 *)out;
+        int i;
+
+        sha2_final(desc);
+
+        for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+                put_unaligned_be32(sctx->state[i], dst++);
+
+        *sctx = (struct sha256_state){};
+        return 0;
+}
+
+static int sha2_export(struct shash_desc *desc, void *out)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+        struct sha256_state *dst = out;
+
+        *dst = *sctx;
+        return 0;
+}
+
+static int sha2_import(struct shash_desc *desc, const void *in)
+{
+        struct sha256_state *sctx = shash_desc_ctx(desc);
+        struct sha256_state const *src = in;
+
+        *sctx = *src;
+        return 0;
+}
+
+static struct shash_alg algs[] = { {
+        .init                   = sha224_init,
+        .update                 = sha2_update,
+        .final                  = sha224_final,
+        .export                 = sha2_export,
+        .import                 = sha2_import,
+        .descsize               = sizeof(struct sha256_state),
+        .digestsize             = SHA224_DIGEST_SIZE,
+        .statesize              = sizeof(struct sha256_state),
+        .base                   = {
+                .cra_name               = "sha224",
+                .cra_driver_name        = "sha224-ce",
+                .cra_priority           = 200,
+                .cra_flags              = CRYPTO_ALG_TYPE_SHASH,
+                .cra_blocksize          = SHA256_BLOCK_SIZE,
+                .cra_module             = THIS_MODULE,
+        }
+}, {
+        .init                   = sha256_init,
+        .update                 = sha2_update,
+        .final                  = sha256_final,
+        .export                 = sha2_export,
+        .import                 = sha2_import,
+        .descsize               = sizeof(struct sha256_state),
+        .digestsize             = SHA256_DIGEST_SIZE,
+        .statesize              = sizeof(struct sha256_state),
+        .base                   = {
+                .cra_name               = "sha256",
+                .cra_driver_name        = "sha256-ce",
+                .cra_priority           = 200,
+                .cra_flags              = CRYPTO_ALG_TYPE_SHASH,
+                .cra_blocksize          = SHA256_BLOCK_SIZE,
+                .cra_module             = THIS_MODULE,
+        }
+} };
+
+static int __init sha2_ce_mod_init(void)
+{
+        if (!(elf_hwcap2 & HWCAP2_SHA2))
+                return -ENODEV;
+        return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha2_ce_mod_fini(void)
+{
+        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha2_ce_mod_init);
+module_exit(sha2_ce_mod_fini);