author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-02-10 05:26:29 -0500
committer	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2014-05-14 13:04:15 -0400
commit		a3fd82105b9d149033984bf018f473140f5b94bc (patch)
tree		5e2cbccc0cf1833649b7a4df6faff937d7fa73da /arch/arm64
parent		317f2f750d708d684bddd8cb14827ec2efee4b1c (diff)
arm64/crypto: AES in CCM mode using ARMv8 Crypto Extensions
This patch adds support for the AES-CCM encryption algorithm for CPUs that
have support for the AES part of the ARM v8 Crypto Extensions.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/crypto/Kconfig		7
-rw-r--r--	arch/arm64/crypto/Makefile		3
-rw-r--r--	arch/arm64/crypto/aes-ce-ccm-core.S	222
-rw-r--r--	arch/arm64/crypto/aes-ce-ccm-glue.c	297
4 files changed, 529 insertions, 0 deletions
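
Note (not part of the patch): kernel users reach this driver through the generic crypto API by name, not by calling the ce_aes_ccm_* routines directly. A minimal sketch follows, assuming the AEAD interface of this kernel era (aead_request_set_assoc() was later replaced by aead_request_set_ad()); ccm_example() is a hypothetical helper, and error handling and async completion are elided.

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    /*
     * Illustrative only: encrypt one buffer with the "ccm(aes)" AEAD.
     * On CPUs with the AES instructions the crypto core will pick this
     * driver ("ccm-aes-ce", priority 300). dst must have room for the
     * ciphertext plus the authentication tag.
     */
    static int ccm_example(const u8 *key, unsigned int keylen, u8 *iv,
                           struct scatterlist *assoc, unsigned int alen,
                           struct scatterlist *sg, unsigned int len)
    {
            struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
            struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

            crypto_aead_setkey(tfm, key, keylen);
            crypto_aead_setauthsize(tfm, 8);        /* even, 4..16 */
            /* iv[0] must hold L - 1 (see ccm_init_mac in the glue code) */
            aead_request_set_crypt(req, sg, sg, len, iv);
            aead_request_set_assoc(req, assoc, alen);      /* pre-4.2 API */
            return crypto_aead_encrypt(req);        /* tag appended to dst */
    }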
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 9ba32c0da871..8fffd5af65ef 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -29,4 +29,11 @@ config CRYPTO_AES_ARM64_CE
 	select CRYPTO_ALGAPI
 	select CRYPTO_AES
 
+config CRYPTO_AES_ARM64_CE_CCM
+	tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	select CRYPTO_AEAD
+
 endif
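
Note (not part of the patch): since the new symbol is a tristate, the driver can be built in (y) or as the aes-ce-ccm.ko module (m). An example config fragment for a module build:

    CONFIG_CRYPTO_AES_ARM64_CE_CCM=m

On arm64, KERNEL_MODE_NEON is enabled by default, so the dependency is normally already satisfied.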
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 908abd9242b1..311287d68078 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -19,3 +19,6 @@ ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
 
 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
 CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
+aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
new file mode 100644
index 000000000000..432e4841cd81
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -0,0 +1,222 @@
+/*
+ * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+        .text
+        .arch   armv8-a+crypto
+
+        /*
+         * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+         *                           u32 *macp, u8 const rk[], u32 rounds);
+         */
+ENTRY(ce_aes_ccm_auth_data)
+        ldr     w8, [x3]                        /* leftover from prev round? */
+        ld1     {v0.2d}, [x0]                   /* load mac */
+        cbz     w8, 1f
+        sub     w8, w8, #16
+        eor     v1.16b, v1.16b, v1.16b
+0:      ldrb    w7, [x1], #1                    /* get 1 byte of input */
+        subs    w2, w2, #1
+        add     w8, w8, #1
+        ins     v1.b[0], w7
+        ext     v1.16b, v1.16b, v1.16b, #1      /* rotate in the input bytes */
+        beq     8f                              /* out of input? */
+        cbnz    w8, 0b
+        eor     v0.16b, v0.16b, v1.16b
+1:      ld1     {v3.2d}, [x4]                   /* load first round key */
+        prfm    pldl1strm, [x1]
+        cmp     w5, #12                         /* which key size? */
+        add     x6, x4, #16
+        sub     w7, w5, #2                      /* modified # of rounds */
+        bmi     2f
+        bne     5f
+        mov     v5.16b, v3.16b
+        b       4f
+2:      mov     v4.16b, v3.16b
+        ld1     {v5.2d}, [x6], #16              /* load 2nd round key */
+3:      aese    v0.16b, v4.16b
+        aesmc   v0.16b, v0.16b
+4:      ld1     {v3.2d}, [x6], #16              /* load next round key */
+        aese    v0.16b, v5.16b
+        aesmc   v0.16b, v0.16b
+5:      ld1     {v4.2d}, [x6], #16              /* load next round key */
+        subs    w7, w7, #3
+        aese    v0.16b, v3.16b
+        aesmc   v0.16b, v0.16b
+        ld1     {v5.2d}, [x6], #16              /* load next round key */
+        bpl     3b
+        aese    v0.16b, v4.16b
+        subs    w2, w2, #16                     /* last data? */
+        eor     v0.16b, v0.16b, v5.16b          /* final round */
+        bmi     6f
+        ld1     {v1.16b}, [x1], #16             /* load next input block */
+        eor     v0.16b, v0.16b, v1.16b          /* xor with mac */
+        bne     1b
+6:      st1     {v0.2d}, [x0]                   /* store mac */
+        beq     10f
+        adds    w2, w2, #16
+        beq     10f
+        mov     w8, w2
+7:      ldrb    w7, [x1], #1
+        umov    w6, v0.b[0]
+        eor     w6, w6, w7
+        strb    w6, [x0], #1
+        subs    w2, w2, #1
+        beq     10f
+        ext     v0.16b, v0.16b, v0.16b, #1      /* rotate out the mac bytes */
+        b       7b
+8:      mov     w7, w8
+        add     w8, w8, #16
+9:      ext     v1.16b, v1.16b, v1.16b, #1
+        adds    w7, w7, #1
+        bne     9b
+        eor     v0.16b, v0.16b, v1.16b
+        st1     {v0.2d}, [x0]
+10:     str     w8, [x3]
+        ret
+ENDPROC(ce_aes_ccm_auth_data)
+
+        /*
+         * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
+         *                       u32 rounds);
+         */
+ENTRY(ce_aes_ccm_final)
+        ld1     {v3.2d}, [x2], #16              /* load first round key */
+        ld1     {v0.2d}, [x0]                   /* load mac */
+        cmp     w3, #12                         /* which key size? */
+        sub     w3, w3, #2                      /* modified # of rounds */
+        ld1     {v1.2d}, [x1]                   /* load 1st ctriv */
+        bmi     0f
+        bne     3f
+        mov     v5.16b, v3.16b
+        b       2f
+0:      mov     v4.16b, v3.16b
+1:      ld1     {v5.2d}, [x2], #16              /* load next round key */
+        aese    v0.16b, v4.16b
+        aese    v1.16b, v4.16b
+        aesmc   v0.16b, v0.16b
+        aesmc   v1.16b, v1.16b
+2:      ld1     {v3.2d}, [x2], #16              /* load next round key */
+        aese    v0.16b, v5.16b
+        aese    v1.16b, v5.16b
+        aesmc   v0.16b, v0.16b
+        aesmc   v1.16b, v1.16b
+3:      ld1     {v4.2d}, [x2], #16              /* load next round key */
+        subs    w3, w3, #3
+        aese    v0.16b, v3.16b
+        aese    v1.16b, v3.16b
+        aesmc   v0.16b, v0.16b
+        aesmc   v1.16b, v1.16b
+        bpl     1b
+        aese    v0.16b, v4.16b
+        aese    v1.16b, v4.16b
+        /* final round key cancels out */
+        eor     v0.16b, v0.16b, v1.16b          /* en-/decrypt the mac */
+        st1     {v0.2d}, [x0]                   /* store result */
+        ret
+ENDPROC(ce_aes_ccm_final)
+
+        .macro  aes_ccm_do_crypt,enc
+        ldr     x8, [x6, #8]                    /* load lower ctr */
+        ld1     {v0.2d}, [x5]                   /* load mac */
+        rev     x8, x8                          /* keep swabbed ctr in reg */
+0:      /* outer loop */
+        ld1     {v1.1d}, [x6]                   /* load upper ctr */
+        prfm    pldl1strm, [x1]
+        add     x8, x8, #1
+        rev     x9, x8
+        cmp     w4, #12                         /* which key size? */
+        sub     w7, w4, #2                      /* get modified # of rounds */
+        ins     v1.d[1], x9                     /* no carry in lower ctr */
+        ld1     {v3.2d}, [x3]                   /* load first round key */
+        add     x10, x3, #16
+        bmi     1f
+        bne     4f
+        mov     v5.16b, v3.16b
+        b       3f
+1:      mov     v4.16b, v3.16b
+        ld1     {v5.2d}, [x10], #16             /* load 2nd round key */
+2:      /* inner loop: 3 rounds, 2x interleaved */
+        aese    v0.16b, v4.16b
+        aese    v1.16b, v4.16b
+        aesmc   v0.16b, v0.16b
+        aesmc   v1.16b, v1.16b
+3:      ld1     {v3.2d}, [x10], #16             /* load next round key */
+        aese    v0.16b, v5.16b
+        aese    v1.16b, v5.16b
+        aesmc   v0.16b, v0.16b
+        aesmc   v1.16b, v1.16b
+4:      ld1     {v4.2d}, [x10], #16             /* load next round key */
+        subs    w7, w7, #3
+        aese    v0.16b, v3.16b
+        aese    v1.16b, v3.16b
+        aesmc   v0.16b, v0.16b
+        aesmc   v1.16b, v1.16b
+        ld1     {v5.2d}, [x10], #16             /* load next round key */
+        bpl     2b
+        aese    v0.16b, v4.16b
+        aese    v1.16b, v4.16b
+        subs    w2, w2, #16
+        bmi     6f                              /* partial block? */
+        ld1     {v2.16b}, [x1], #16             /* load next input block */
+        .if     \enc == 1
+        eor     v2.16b, v2.16b, v5.16b          /* final round enc+mac */
+        eor     v1.16b, v1.16b, v2.16b          /* xor with crypted ctr */
+        .else
+        eor     v2.16b, v2.16b, v1.16b          /* xor with crypted ctr */
+        eor     v1.16b, v2.16b, v5.16b          /* final round enc */
+        .endif
+        eor     v0.16b, v0.16b, v2.16b          /* xor mac with pt ^ rk[last] */
+        st1     {v1.16b}, [x0], #16             /* write output block */
+        bne     0b
+        rev     x8, x8
+        st1     {v0.2d}, [x5]                   /* store mac */
+        str     x8, [x6, #8]                    /* store lsb end of ctr (BE) */
+5:      ret
+
+6:      eor     v0.16b, v0.16b, v5.16b          /* final round mac */
+        eor     v1.16b, v1.16b, v5.16b          /* final round enc */
+        st1     {v0.2d}, [x5]                   /* store mac */
+        add     w2, w2, #16                     /* process partial tail block */
+7:      ldrb    w9, [x1], #1                    /* get 1 byte of input */
+        umov    w6, v1.b[0]                     /* get top crypted ctr byte */
+        umov    w7, v0.b[0]                     /* get top mac byte */
+        .if     \enc == 1
+        eor     w7, w7, w9
+        eor     w9, w9, w6
+        .else
+        eor     w9, w9, w6
+        eor     w7, w7, w9
+        .endif
+        strb    w9, [x0], #1                    /* store out byte */
+        strb    w7, [x5], #1                    /* store mac byte */
+        subs    w2, w2, #1
+        beq     5b
+        ext     v0.16b, v0.16b, v0.16b, #1      /* shift out mac byte */
+        ext     v1.16b, v1.16b, v1.16b, #1      /* shift out ctr byte */
+        b       7b
+        .endm
+
+        /*
+         * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+         *                         u8 const rk[], u32 rounds, u8 mac[],
+         *                         u8 ctr[]);
+         * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+         *                         u8 const rk[], u32 rounds, u8 mac[],
+         *                         u8 ctr[]);
+         */
+ENTRY(ce_aes_ccm_encrypt)
+        aes_ccm_do_crypt        1
+ENDPROC(ce_aes_ccm_encrypt)
+
+ENTRY(ce_aes_ccm_decrypt)
+        aes_ccm_do_crypt        0
+ENDPROC(ce_aes_ccm_decrypt)
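
Note (not part of the patch): the *macp bookkeeping in ce_aes_ccm_auth_data above is easier to follow in C. The sketch below is a behavioural model only, not kernel code; encrypt_block stands in for the aese/aesmc round sequence performed on v0 with the expanded key.

    /*
     * Behavioural C model of ce_aes_ccm_auth_data() (illustration only).
     * *macp counts how many bytes of the current CBC-MAC block have been
     * absorbed, so a partial block can be resumed by the next call, just
     * like the w8/[x3] bookkeeping in the assembly above.
     */
    typedef void (*aes_block_fn)(unsigned char block[16]);

    static void ccm_auth_model(unsigned char mac[16], const unsigned char *in,
                               unsigned int abytes, unsigned int *macp,
                               aes_block_fn encrypt_block)
    {
            while (abytes--) {
                    mac[(*macp)++] ^= *in++;        /* absorb one byte */
                    if (*macp == 16) {              /* block complete: */
                            encrypt_block(mac);     /* mac = E_K(mac ^ block) */
                            *macp = 0;
                    }
            }
    }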
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
new file mode 100644
index 000000000000..9e6cdde9b43d
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -0,0 +1,297 @@
+/*
+ * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+        /*
+         * # of rounds specified by AES:
+         * 128 bit key          10 rounds
+         * 192 bit key          12 rounds
+         * 256 bit key          14 rounds
+         * => n byte key        => 6 + (n/4) rounds
+         */
+        return 6 + ctx->key_length / 4;
+}
+
+asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+                                     u32 *macp, u32 const rk[], u32 rounds);
+
+asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+                                   u32 const rk[], u32 rounds, u8 mac[],
+                                   u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+                                   u32 const rk[], u32 rounds, u8 mac[],
+                                   u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
+                                 u32 rounds);
+
+static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
+                      unsigned int key_len)
+{
+        struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
+        int ret;
+
+        ret = crypto_aes_expand_key(ctx, in_key, key_len);
+        if (!ret)
+                return 0;
+
+        tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+        return -EINVAL;
+}
+
+static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+        if ((authsize & 1) || authsize < 4)
+                return -EINVAL;
+        return 0;
+}
+
+static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
+{
+        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+        __be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
+        u32 l = req->iv[0] + 1;
+
+        /* verify that CCM dimension 'L' is set correctly in the IV */
+        if (l < 2 || l > 8)
+                return -EINVAL;
+
+        /* verify that msglen can in fact be represented in L bytes */
+        if (l < 4 && msglen >> (8 * l))
+                return -EOVERFLOW;
+
+        /*
+         * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
+         * uses a u32 type to represent msglen so the top 4 bytes are always 0.
+         */
+        n[0] = 0;
+        n[1] = cpu_to_be32(msglen);
+
+        memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
+
+        /*
+         * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
+         * - bits 0..2  : max # of bytes required to represent msglen, minus 1
+         *                (already set by caller)
+         * - bits 3..5  : size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
+         * - bit 6      : indicates presence of authenticate-only data
+         */
+        maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
+        if (req->assoclen)
+                maciv[0] |= 0x40;
+
+        memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
+        return 0;
+}
+
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
+{
+        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+        struct __packed { __be16 l; __be32 h; u16 len; } ltag;
+        struct scatter_walk walk;
+        u32 len = req->assoclen;
+        u32 macp = 0;
+
+        /* prepend the AAD with a length tag */
+        if (len < 0xff00) {
+                ltag.l = cpu_to_be16(len);
+                ltag.len = 2;
+        } else {
+                ltag.l = cpu_to_be16(0xfffe);
+                put_unaligned_be32(len, &ltag.h);
+                ltag.len = 6;
+        }
+
+        ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
+                             num_rounds(ctx));
+        scatterwalk_start(&walk, req->assoc);
+
+        do {
+                u32 n = scatterwalk_clamp(&walk, len);
+                u8 *p;
+
+                if (!n) {
+                        scatterwalk_start(&walk, sg_next(walk.sg));
+                        n = scatterwalk_clamp(&walk, len);
+                }
+                p = scatterwalk_map(&walk);
+                ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
+                                     num_rounds(ctx));
+                len -= n;
+
+                scatterwalk_unmap(p);
+                scatterwalk_advance(&walk, n);
+                scatterwalk_done(&walk, 0, len);
+        } while (len);
+}
+
+static int ccm_encrypt(struct aead_request *req)
+{
+        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+        struct blkcipher_desc desc = { .info = req->iv };
+        struct blkcipher_walk walk;
+        u8 __aligned(8) mac[AES_BLOCK_SIZE];
+        u8 buf[AES_BLOCK_SIZE];
+        u32 len = req->cryptlen;
+        int err;
+
+        err = ccm_init_mac(req, mac, len);
+        if (err)
+                return err;
+
+        kernel_neon_begin_partial(6);
+
+        if (req->assoclen)
+                ccm_calculate_auth_mac(req, mac);
+
+        /* preserve the original iv for the final round */
+        memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+        blkcipher_walk_init(&walk, req->dst, req->src, len);
+        err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+                                             AES_BLOCK_SIZE);
+
+        while (walk.nbytes) {
+                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+                if (walk.nbytes == len)
+                        tail = 0;
+
+                ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                   walk.nbytes - tail, ctx->key_enc,
+                                   num_rounds(ctx), mac, walk.iv);
+
+                len -= walk.nbytes - tail;
+                err = blkcipher_walk_done(&desc, &walk, tail);
+        }
+        if (!err)
+                ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+        kernel_neon_end();
+
+        if (err)
+                return err;
+
+        /* copy authtag to end of dst */
+        scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+                                 crypto_aead_authsize(aead), 1);
+
+        return 0;
+}
+
+static int ccm_decrypt(struct aead_request *req)
+{
+        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+        struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+        unsigned int authsize = crypto_aead_authsize(aead);
+        struct blkcipher_desc desc = { .info = req->iv };
+        struct blkcipher_walk walk;
+        u8 __aligned(8) mac[AES_BLOCK_SIZE];
+        u8 buf[AES_BLOCK_SIZE];
+        u32 len = req->cryptlen - authsize;
+        int err;
+
+        err = ccm_init_mac(req, mac, len);
+        if (err)
+                return err;
+
+        kernel_neon_begin_partial(6);
+
+        if (req->assoclen)
+                ccm_calculate_auth_mac(req, mac);
+
+        /* preserve the original iv for the final round */
+        memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+        blkcipher_walk_init(&walk, req->dst, req->src, len);
+        err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+                                             AES_BLOCK_SIZE);
+
+        while (walk.nbytes) {
+                u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+                if (walk.nbytes == len)
+                        tail = 0;
+
+                ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+                                   walk.nbytes - tail, ctx->key_enc,
+                                   num_rounds(ctx), mac, walk.iv);
+
+                len -= walk.nbytes - tail;
+                err = blkcipher_walk_done(&desc, &walk, tail);
+        }
+        if (!err)
+                ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+        kernel_neon_end();
+
+        if (err)
+                return err;
+
+        /* compare calculated auth tag with the stored one */
+        scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+                                 authsize, 0);
+
+        if (memcmp(mac, buf, authsize))
+                return -EBADMSG;
+        return 0;
+}
+
+static struct crypto_alg ccm_aes_alg = {
+        .cra_name               = "ccm(aes)",
+        .cra_driver_name        = "ccm-aes-ce",
+        .cra_priority           = 300,
+        .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
+        .cra_blocksize          = 1,
+        .cra_ctxsize            = sizeof(struct crypto_aes_ctx),
+        .cra_alignmask          = 7,
+        .cra_type               = &crypto_aead_type,
+        .cra_module             = THIS_MODULE,
+        .cra_aead = {
+                .ivsize         = AES_BLOCK_SIZE,
+                .maxauthsize    = AES_BLOCK_SIZE,
+                .setkey         = ccm_setkey,
+                .setauthsize    = ccm_setauthsize,
+                .encrypt        = ccm_encrypt,
+                .decrypt        = ccm_decrypt,
+        }
+};
+
+static int __init aes_mod_init(void)
+{
+        if (!(elf_hwcap & HWCAP_AES))
+                return -ENODEV;
+        return crypto_register_alg(&ccm_aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+        crypto_unregister_alg(&ccm_aes_alg);
+}
+
+module_init(aes_mod_init);
+module_exit(aes_mod_exit);
+
+MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ccm(aes)");
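
Note (not part of the patch): to make the flags-byte arithmetic in ccm_init_mac() concrete, here is a minimal standalone sketch of the B0 block for the common case iv[0] = 3, i.e. L = 4 length bytes and an 11-byte nonce; build_b0_example() and its fixed offsets are illustrative assumptions.

    #include <stdint.h>
    #include <string.h>

    /*
     * Worked example of the B0 block built by ccm_init_mac() above
     * (illustration only, userspace C). With iv[0] = 3 => L = 4, the
     * flags byte and nonce occupy bytes 0..11 and the message length
     * the last 4 bytes, big-endian.
     */
    static void build_b0_example(uint8_t b0[16], const uint8_t iv[16],
                                 uint32_t msglen, unsigned int authsize,
                                 int have_assoc)
    {
            uint32_t l = iv[0] + 1;         /* e.g. 3 + 1 = 4 length bytes */

            memcpy(b0, iv, 16 - l);         /* flags byte + nonce */
            b0[12] = msglen >> 24;          /* big-endian msglen, L = 4 */
            b0[13] = msglen >> 16;
            b0[14] = msglen >> 8;
            b0[15] = msglen;
            /* bits 3..5 hold (authsize - 2) / 2; << 2 places it at bit 3 */
            b0[0] |= (authsize - 2) << 2;
            if (have_assoc)
                    b0[0] |= 0x40;          /* AAD present */
    }

For authsize 8 this sets bits 3..5 of byte 0 to (8 - 2) / 2 = 3, matching the "1 => 4 bytes, 2 => 6 bytes, etc" encoding described in the comment in ccm_init_mac().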