author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-03-20 10:35:40 -0400
committer	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-05-14 13:04:01 -0400
commit		6ba6c74dfc6bcf43312ef572592f7d4ebb3aedfa (patch)
tree		ac3adf772f009e3604b746232ccddd3983a68ac9 /arch/arm64
parent		2c98833a42cd194ba0f537cd21917e15e5593715 (diff)
arm64/crypto: SHA-224/SHA-256 using ARMv8 Crypto Extensions
This patch adds support for the SHA-224 and SHA-256 Secure Hash
Algorithms for CPUs that have support for the SHA-2 part of the
ARM v8 Crypto Extensions.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/crypto/Kconfig        |   5
-rw-r--r--  arch/arm64/crypto/Makefile       |   3
-rw-r--r--  arch/arm64/crypto/sha2-ce-core.S | 156
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 255
4 files changed, 419 insertions(+), 0 deletions(-)
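A quick way to exercise the new driver is the AF_ALG userspace interface. The sketch below is illustrative only: it is not part of this patch and assumes CONFIG_CRYPTO_USER_API_HASH is enabled. With this module loaded on a CPU that implements the SHA-2 extension, "sha256" resolves to sha256-ce by priority.

/* AF_ALG smoke test: hash "abc" with whatever "sha256" resolves to */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfm, op, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	op = accept(tfm, NULL, 0);
	if (op < 0)
		return 1;

	write(op, "abc", 3);			/* message to hash */
	read(op, digest, sizeof(digest));

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');				/* expect ba7816bf...f20015ad */

	close(op);
	close(tfm);
	return 0;
}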
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 7956881b5986..eb1e99770c21 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -13,4 +13,9 @@ config CRYPTO_SHA1_ARM64_CE
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_HASH
 
+config CRYPTO_SHA2_ARM64_CE
+	tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_HASH
+
 endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 0ed3caaec81b..0b3885a60d43 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -10,3 +10,6 @@
 
 obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
 sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
+sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
new file mode 100644
index 000000000000..7f29fc031ea8
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -0,0 +1,156 @@
/*
 * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
 *
 * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch		armv8-a+crypto

	dga		.req	q20
	dgav		.req	v20
	dgb		.req	q21
	dgbv		.req	v21

	t0		.req	v22
	t1		.req	v23

	dg0q		.req	q24
	dg0v		.req	v24
	dg1q		.req	q25
	dg1v		.req	v25
	dg2q		.req	q26
	dg2v		.req	v26

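	/*
	 * add_only/add_update each cover four rounds. The \ev argument
	 * ping-pongs the round-constant sum between t0 and t1, so the
	 * add for the next four rounds can issue while the current
	 * sha256h/sha256h2 pair is still executing.
	 */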
	.macro		add_only, ev, rc, s0
	mov		dg2v.16b, dg0v.16b
	.ifeq		\ev
	add		t1.4s, v\s0\().4s, \rc\().4s
	sha256h		dg0q, dg1q, t0.4s
	sha256h2	dg1q, dg2q, t0.4s
	.else
	.ifnb		\s0
	add		t0.4s, v\s0\().4s, \rc\().4s
	.endif
	sha256h		dg0q, dg1q, t1.4s
	sha256h2	dg1q, dg2q, t1.4s
	.endif
	.endm

	.macro		add_update, ev, rc, s0, s1, s2, s3
	sha256su0	v\s0\().4s, v\s1\().4s
	add_only	\ev, \rc, \s1
	sha256su1	v\s0\().4s, v\s2\().4s, v\s3\().4s
	.endm

	/*
	 * The SHA-256 round constants
	 */
	.align		4
.Lsha2_rcon:
	.word		0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
	.word		0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
	.word		0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
	.word		0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
	.word		0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
	.word		0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
	.word		0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
	.word		0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
	.word		0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
	.word		0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
	.word		0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
	.word		0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
	.word		0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
	.word		0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
	.word		0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
	.word		0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2

	/*
	 * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
	 *			  u8 *head, long bytes)
	 */
ENTRY(sha2_ce_transform)
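	/*
	 * Per the AAPCS64 calling convention: w0 = number of blocks,
	 * x1 = input, x2 = 8-word SHA-256 state, x3 = optional partial
	 * block (head), x4 = total byte count, or 0 if padding is
	 * handled by the caller.
	 */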
	/* load round constants */
	adr		x8, .Lsha2_rcon
	ld1		{ v0.4s- v3.4s}, [x8], #64
	ld1		{ v4.4s- v7.4s}, [x8], #64
	ld1		{ v8.4s-v11.4s}, [x8], #64
	ld1		{v12.4s-v15.4s}, [x8]

	/* load state */
	ldp		dga, dgb, [x2]

	/* load partial input (if supplied) */
	cbz		x3, 0f
	ld1		{v16.4s-v19.4s}, [x3]
	b		1f

	/* load input */
0:	ld1		{v16.4s-v19.4s}, [x1], #64
	sub		w0, w0, #1

1:
CPU_LE(	rev32		v16.16b, v16.16b	)
CPU_LE(	rev32		v17.16b, v17.16b	)
CPU_LE(	rev32		v18.16b, v18.16b	)
CPU_LE(	rev32		v19.16b, v19.16b	)

2:	add		t0.4s, v16.4s, v0.4s
	mov		dg0v.16b, dgav.16b
	mov		dg1v.16b, dgbv.16b

	add_update	0,  v1, 16, 17, 18, 19
	add_update	1,  v2, 17, 18, 19, 16
	add_update	0,  v3, 18, 19, 16, 17
	add_update	1,  v4, 19, 16, 17, 18

	add_update	0,  v5, 16, 17, 18, 19
	add_update	1,  v6, 17, 18, 19, 16
	add_update	0,  v7, 18, 19, 16, 17
	add_update	1,  v8, 19, 16, 17, 18

	add_update	0,  v9, 16, 17, 18, 19
	add_update	1, v10, 17, 18, 19, 16
	add_update	0, v11, 18, 19, 16, 17
	add_update	1, v12, 19, 16, 17, 18

	add_only	0, v13, 17
	add_only	1, v14, 18
	add_only	0, v15, 19
	add_only	1

	/* update state */
	add		dgav.4s, dgav.4s, dg0v.4s
	add		dgbv.4s, dgbv.4s, dg1v.4s

	/* handled all input blocks? */
	cbnz		w0, 0b

	/*
	 * Final block: add padding and total bit count.
	 * Skip if we have no total byte count in x4. In that case, the
	 * input size was not a round multiple of the block size, and
	 * the padding is handled by the C code.
	 */
	cbz		x4, 3f
	movi		v17.2d, #0
	mov		x8, #0x80000000
	movi		v18.2d, #0
	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
	fmov		d16, x8
	mov		x4, #0
	mov		v19.d[0], xzr
	mov		v19.d[1], x7
	b		2b

	/* store new state */
3:	stp		dga, dgb, [x2]
	ret
ENDPROC(sha2_ce_transform)
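For readers unfamiliar with the SHA-2 instructions: sha256su0/sha256su1 together evaluate the message-schedule recurrence four lanes at a time, and each sha256h/sha256h2 pair applies four rounds to the two halves of the state. A scalar C sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdint.h>

static uint32_t ror32(uint32_t x, int n)
{
	return (x >> n) | (x << (32 - n));
}

/* one message schedule word: one lane of a sha256su0/sha256su1 pair */
static uint32_t sha256_schedule(const uint32_t w[], int i)
{
	uint32_t s0 = ror32(w[i - 15], 7) ^ ror32(w[i - 15], 18) ^ (w[i - 15] >> 3);
	uint32_t s1 = ror32(w[i - 2], 17) ^ ror32(w[i - 2], 19) ^ (w[i - 2] >> 10);

	return w[i - 16] + s0 + w[i - 7] + s1;
}

/*
 * One round, taking the precomputed w + k sum, i.e. what the
 * "add t0.4s/t1.4s" in the macros above feeds into sha256h/sha256h2.
 */
static void sha256_round(uint32_t st[8], uint32_t wk)
{
	uint32_t e = st[4];
	uint32_t S1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
	uint32_t ch = (e & st[5]) ^ (~e & st[6]);
	uint32_t t1 = st[7] + S1 + ch + wk;
	uint32_t S0 = ror32(st[0], 2) ^ ror32(st[0], 13) ^ ror32(st[0], 22);
	uint32_t maj = (st[0] & st[1]) ^ (st[0] & st[2]) ^ (st[1] & st[2]);
	int i;

	for (i = 7; i > 0; i--)		/* h = g, g = f, ..., b = a */
		st[i] = st[i - 1];
	st[4] += t1;			/* e = d + t1 */
	st[0] = t1 + S0 + maj;		/* a = t1 + t2 */
}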
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
new file mode 100644
index 000000000000..c294e67d3925
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -0,0 +1,255 @@
/*
 * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
 *
 * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
				 u8 *head, long bytes);

static int sha224_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = {
			SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
			SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
		}
	};
	return 0;
}

static int sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = {
			SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
		}
	};
	return 0;
}

static int sha2_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	sctx->count += len;

	if ((partial + len) >= SHA256_BLOCK_SIZE) {
		int blocks;

		if (partial) {
			int p = SHA256_BLOCK_SIZE - partial;

			memcpy(sctx->buf + partial, data, p);
			data += p;
			len -= p;
		}

		blocks = len / SHA256_BLOCK_SIZE;
		len %= SHA256_BLOCK_SIZE;

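		/*
		 * The transform uses only the first 28 NEON registers
		 * (v0-v27), so only those need to be preserved across
		 * the call.
		 */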
		kernel_neon_begin_partial(28);
		sha2_ce_transform(blocks, data, sctx->state,
				  partial ? sctx->buf : NULL, 0);
		kernel_neon_end();

		data += blocks * SHA256_BLOCK_SIZE;
		partial = 0;
	}
	if (len)
		memcpy(sctx->buf + partial, data, len);
	return 0;
}

static void sha2_final(struct shash_desc *desc)
{
	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };

	struct sha256_state *sctx = shash_desc_ctx(desc);
	__be64 bits = cpu_to_be64(sctx->count << 3);
	u32 padlen = SHA256_BLOCK_SIZE
		     - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
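	/*
	 * padlen ends up in [1, SHA256_BLOCK_SIZE], so the leading 0x80
	 * byte is always emitted and count + padlen + sizeof(bits) is a
	 * multiple of the block size.
	 */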

	sha2_update(desc, padding, padlen);
	sha2_update(desc, (const u8 *)&bits, sizeof(bits));
}

static int sha224_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	int i;

	sha2_final(desc);

	for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
		put_unaligned_be32(sctx->state[i], dst++);

	*sctx = (struct sha256_state){};
	return 0;
}

static int sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	int i;

	sha2_final(desc);

	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
		put_unaligned_be32(sctx->state[i], dst++);

	*sctx = (struct sha256_state){};
	return 0;
}

static void sha2_finup(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	int blocks;

	if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
		sha2_update(desc, data, len);
		sha2_final(desc);
		return;
	}

	/*
	 * Use a fast path if the input is a multiple of 64 bytes. In
	 * this case, there is no need to copy data around, and we can
	 * perform the entire digest calculation in a single invocation
	 * of sha2_ce_transform()
	 */
	blocks = len / SHA256_BLOCK_SIZE;

	kernel_neon_begin_partial(28);
	sha2_ce_transform(blocks, data, sctx->state, NULL, len);
	kernel_neon_end();
	data += blocks * SHA256_BLOCK_SIZE;
}

static int sha224_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	int i;

	sha2_finup(desc, data, len);

	for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
		put_unaligned_be32(sctx->state[i], dst++);

	*sctx = (struct sha256_state){};
	return 0;
}

static int sha256_finup(struct shash_desc *desc, const u8 *data,
			unsigned int len, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	int i;

	sha2_finup(desc, data, len);

	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
		put_unaligned_be32(sctx->state[i], dst++);

	*sctx = (struct sha256_state){};
	return 0;
}

187
188static int sha2_export(struct shash_desc *desc, void *out)
189{
190 struct sha256_state *sctx = shash_desc_ctx(desc);
191 struct sha256_state *dst = out;
192
193 *dst = *sctx;
194 return 0;
195}
196
197static int sha2_import(struct shash_desc *desc, const void *in)
198{
199 struct sha256_state *sctx = shash_desc_ctx(desc);
200 struct sha256_state const *src = in;
201
202 *sctx = *src;
203 return 0;
204}
205
static struct shash_alg algs[] = { {
	.init			= sha224_init,
	.update			= sha2_update,
	.final			= sha224_final,
	.finup			= sha224_finup,
	.export			= sha2_export,
	.import			= sha2_import,
	.descsize		= sizeof(struct sha256_state),
	.digestsize		= SHA224_DIGEST_SIZE,
	.statesize		= sizeof(struct sha256_state),
	.base			= {
		.cra_name		= "sha224",
		.cra_driver_name	= "sha224-ce",
		.cra_priority		= 200,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
}, {
	.init			= sha256_init,
	.update			= sha2_update,
	.final			= sha256_final,
	.finup			= sha256_finup,
	.export			= sha2_export,
	.import			= sha2_import,
	.descsize		= sizeof(struct sha256_state),
	.digestsize		= SHA256_DIGEST_SIZE,
	.statesize		= sizeof(struct sha256_state),
	.base			= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-ce",
		.cra_priority		= 200,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
} };

static int __init sha2_ce_mod_init(void)
{
	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}

static void __exit sha2_ce_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_cpu_feature_match(SHA2, sha2_ce_mod_init);
module_exit(sha2_ce_mod_fini);
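Consumers do not call this driver directly; they allocate a generic "sha256" transform, and the crypto core selects sha256-ce over the generic C implementation because of the higher cra_priority of 200. A minimal in-kernel sketch of a one-shot digest via the shash API (hypothetical consumer, not part of this patch; demo_sha256() is an invented name, and the desc->flags assignment reflects the API as of this kernel era):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int demo_sha256(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the descriptor carries the per-request state (descsize above) */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_digest(desc, data, len, digest);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}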