author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2015-03-10 04:47:47 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-03-12 06:13:36 -0400
commit		86464859cc77ecfd989ad5c912bef167b1128b0b (patch)
tree		12f2d62b6d092a1c30f1e5f93a9c416be3796063 /arch/arm
parent		006d0624fa0d71787448cacee0195bf20f2d47c8 (diff)
crypto: arm - AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions
This implements the ECB, CBC, CTR and XTS asynchronous block ciphers
using the AArch32 versions of the ARMv8 Crypto Extensions for AES.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/crypto/Kconfig		    9
-rw-r--r--	arch/arm/crypto/Makefile	    2
-rw-r--r--	arch/arm/crypto/aes-ce-core.S	  518
-rw-r--r--	arch/arm/crypto/aes-ce-glue.c	  520
4 files changed, 1049 insertions, 0 deletions
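The AArch32 Crypto Extensions expose AES as instruction pairs: aese performs AddRoundKey, SubBytes and ShiftRows in one step, and aesmc performs MixColumns, which is exactly how the enc_round/enc_fround macros in aes-ce-core.S below are structured. As a rough illustration only (not part of the patch), the same round structure written with the ACLE crypto intrinsics; assumes a compiler targeting the crypto extensions, e.g. -mfpu=crypto-neon-fp-armv8 on AArch32 or -march=armv8-a+crypto on AArch64:

	#include <arm_neon.h>

	/* One inner round, as in the enc_round macro:
	 * aese = AddRoundKey + SubBytes + ShiftRows, aesmc = MixColumns. */
	static uint8x16_t enc_round_model(uint8x16_t state, uint8x16_t rk)
	{
		return vaesmcq_u8(vaeseq_u8(state, rk));
	}

	/* The final rounds, as in the enc_fround macro: one full round, then
	 * aese without MixColumns, then xor with the last round key. */
	static uint8x16_t enc_fround_model(uint8x16_t st, uint8x16_t k1,
					   uint8x16_t k2, uint8x16_t k3)
	{
		st = vaesmcq_u8(vaeseq_u8(st, k1));
		st = vaeseq_u8(st, k2);
		return veorq_u8(st, k3);
	}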
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 9c1478e55a40..63588bdf3b5d 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -101,4 +101,13 @@ config CRYPTO_AES_ARM_BS
	  This implementation does not rely on any lookup tables so it is
	  believed to be invulnerable to cache timing attacks.

config CRYPTO_AES_ARM_CE
	tristate "Accelerated AES using ARMv8 Crypto Extensions"
	depends on KERNEL_MODE_NEON
	select CRYPTO_ALGAPI
	select CRYPTO_ABLK_HELPER
	help
	  Use an implementation of AES in ECB, CBC, CTR and XTS modes that
	  uses ARMv8 Crypto Extensions

endif
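The new symbol only needs KERNEL_MODE_NEON, as shown above. A minimal .config fragment for trying it out (CONFIG_CRYPTO_TEST, i.e. tcrypt, is only a suggestion for quick verification, not required by the patch):

	CONFIG_KERNEL_MODE_NEON=y
	CONFIG_CRYPTO_AES_ARM_CE=m
	CONFIG_CRYPTO_TEST=m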
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 4ea9f96c2782..2514c420e8d3 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -4,6 +4,7 @@

obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
@@ -17,6 +18,7 @@ sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o

quiet_cmd_perl = PERL $@
      cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
new file mode 100644
index 000000000000..8cfa468ee570
--- /dev/null
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -0,0 +1,518 @@
/*
 * aes-ce-core.S - AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.fpu		crypto-neon-fp-armv8
	.align		3

	.macro		enc_round, state, key
	aese.8		\state, \key
	aesmc.8		\state, \state
	.endm

	.macro		dec_round, state, key
	aesd.8		\state, \key
	aesimc.8	\state, \state
	.endm

	.macro		enc_dround, key1, key2
	enc_round	q0, \key1
	enc_round	q0, \key2
	.endm

	.macro		dec_dround, key1, key2
	dec_round	q0, \key1
	dec_round	q0, \key2
	.endm

	.macro		enc_fround, key1, key2, key3
	enc_round	q0, \key1
	aese.8		q0, \key2
	veor		q0, q0, \key3
	.endm

	.macro		dec_fround, key1, key2, key3
	dec_round	q0, \key1
	aesd.8		q0, \key2
	veor		q0, q0, \key3
	.endm

	.macro		enc_dround_3x, key1, key2
	enc_round	q0, \key1
	enc_round	q1, \key1
	enc_round	q2, \key1
	enc_round	q0, \key2
	enc_round	q1, \key2
	enc_round	q2, \key2
	.endm

	.macro		dec_dround_3x, key1, key2
	dec_round	q0, \key1
	dec_round	q1, \key1
	dec_round	q2, \key1
	dec_round	q0, \key2
	dec_round	q1, \key2
	dec_round	q2, \key2
	.endm

	.macro		enc_fround_3x, key1, key2, key3
	enc_round	q0, \key1
	enc_round	q1, \key1
	enc_round	q2, \key1
	aese.8		q0, \key2
	aese.8		q1, \key2
	aese.8		q2, \key2
	veor		q0, q0, \key3
	veor		q1, q1, \key3
	veor		q2, q2, \key3
	.endm

	.macro		dec_fround_3x, key1, key2, key3
	dec_round	q0, \key1
	dec_round	q1, \key1
	dec_round	q2, \key1
	aesd.8		q0, \key2
	aesd.8		q1, \key2
	aesd.8		q2, \key2
	veor		q0, q0, \key3
	veor		q1, q1, \key3
	veor		q2, q2, \key3
	.endm

	.macro		do_block, dround, fround
	cmp		r3, #12			@ which key size?
	vld1.8		{q10-q11}, [ip]!
	\dround		q8, q9
	vld1.8		{q12-q13}, [ip]!
	\dround		q10, q11
	vld1.8		{q10-q11}, [ip]!
	\dround		q12, q13
	vld1.8		{q12-q13}, [ip]!
	\dround		q10, q11
	blo		0f			@ AES-128: 10 rounds
	vld1.8		{q10-q11}, [ip]!
	beq		1f			@ AES-192: 12 rounds
	\dround		q12, q13
	vld1.8		{q12-q13}, [ip]
	\dround		q10, q11
0:	\fround		q12, q13, q14
	bx		lr

1:	\dround		q12, q13
	\fround		q10, q11, q14
	bx		lr
	.endm

	/*
	 * Internal, non-AAPCS compliant functions that implement the core AES
	 * transforms. These should preserve all registers except q0 - q2 and ip
	 * Arguments:
	 *   q0        : first in/output block
	 *   q1        : second in/output block (_3x version only)
	 *   q2        : third in/output block (_3x version only)
	 *   q8        : first round key
	 *   q9        : second round key
	 *   ip        : address of 3rd round key
	 *   q14       : final round key
	 *   r3        : number of rounds
	 */
	.align		6
aes_encrypt:
	add		ip, r2, #32		@ 3rd round key
.Laes_encrypt_tweak:
	do_block	enc_dround, enc_fround
ENDPROC(aes_encrypt)

	.align		6
aes_decrypt:
	add		ip, r2, #32		@ 3rd round key
	do_block	dec_dround, dec_fround
ENDPROC(aes_decrypt)

	.align		6
aes_encrypt_3x:
	add		ip, r2, #32		@ 3rd round key
	do_block	enc_dround_3x, enc_fround_3x
ENDPROC(aes_encrypt_3x)

	.align		6
aes_decrypt_3x:
	add		ip, r2, #32		@ 3rd round key
	do_block	dec_dround_3x, dec_fround_3x
ENDPROC(aes_decrypt_3x)

	.macro		prepare_key, rk, rounds
	add		ip, \rk, \rounds, lsl #4
	vld1.8		{q8-q9}, [\rk]		@ load first 2 round keys
	vld1.8		{q14}, [ip]		@ load last round key
	.endm

	/*
	 * ce_aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		      int blocks)
	 * ce_aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		      int blocks)
	 */
ENTRY(ce_aes_ecb_encrypt)
	push		{r4, lr}
	ldr		r4, [sp, #8]
	prepare_key	r2, r3
.Lecbencloop3x:
	subs		r4, r4, #3
	bmi		.Lecbenc1x
	vld1.8		{q0-q1}, [r1, :64]!
	vld1.8		{q2}, [r1, :64]!
	bl		aes_encrypt_3x
	vst1.8		{q0-q1}, [r0, :64]!
	vst1.8		{q2}, [r0, :64]!
	b		.Lecbencloop3x
.Lecbenc1x:
	adds		r4, r4, #3
	beq		.Lecbencout
.Lecbencloop:
	vld1.8		{q0}, [r1, :64]!
	bl		aes_encrypt
	vst1.8		{q0}, [r0, :64]!
	subs		r4, r4, #1
	bne		.Lecbencloop
.Lecbencout:
	pop		{r4, pc}
ENDPROC(ce_aes_ecb_encrypt)

ENTRY(ce_aes_ecb_decrypt)
	push		{r4, lr}
	ldr		r4, [sp, #8]
	prepare_key	r2, r3
.Lecbdecloop3x:
	subs		r4, r4, #3
	bmi		.Lecbdec1x
	vld1.8		{q0-q1}, [r1, :64]!
	vld1.8		{q2}, [r1, :64]!
	bl		aes_decrypt_3x
	vst1.8		{q0-q1}, [r0, :64]!
	vst1.8		{q2}, [r0, :64]!
	b		.Lecbdecloop3x
.Lecbdec1x:
	adds		r4, r4, #3
	beq		.Lecbdecout
.Lecbdecloop:
	vld1.8		{q0}, [r1, :64]!
	bl		aes_decrypt
	vst1.8		{q0}, [r0, :64]!
	subs		r4, r4, #1
	bne		.Lecbdecloop
.Lecbdecout:
	pop		{r4, pc}
ENDPROC(ce_aes_ecb_decrypt)

	/*
	 * ce_aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		      int blocks, u8 iv[])
	 * ce_aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		      int blocks, u8 iv[])
	 */
ENTRY(ce_aes_cbc_encrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]
	vld1.8		{q0}, [r5]
	prepare_key	r2, r3
.Lcbcencloop:
	vld1.8		{q1}, [r1, :64]!	@ get next pt block
	veor		q0, q0, q1		@ ..and xor with iv
	bl		aes_encrypt
	vst1.8		{q0}, [r0, :64]!
	subs		r4, r4, #1
	bne		.Lcbcencloop
	vst1.8		{q0}, [r5]
	pop		{r4-r6, pc}
ENDPROC(ce_aes_cbc_encrypt)

ENTRY(ce_aes_cbc_decrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]
	vld1.8		{q6}, [r5]		@ keep iv in q6
	prepare_key	r2, r3
.Lcbcdecloop3x:
	subs		r4, r4, #3
	bmi		.Lcbcdec1x
	vld1.8		{q0-q1}, [r1, :64]!
	vld1.8		{q2}, [r1, :64]!
	vmov		q3, q0
	vmov		q4, q1
	vmov		q5, q2
	bl		aes_decrypt_3x
	veor		q0, q0, q6
	veor		q1, q1, q3
	veor		q2, q2, q4
	vmov		q6, q5
	vst1.8		{q0-q1}, [r0, :64]!
	vst1.8		{q2}, [r0, :64]!
	b		.Lcbcdecloop3x
.Lcbcdec1x:
	adds		r4, r4, #3
	beq		.Lcbcdecout
	vmov		q15, q14		@ preserve last round key
.Lcbcdecloop:
	vld1.8		{q0}, [r1, :64]!	@ get next ct block
	veor		q14, q15, q6		@ combine prev ct with last key
	vmov		q6, q0
	bl		aes_decrypt
	vst1.8		{q0}, [r0, :64]!
	subs		r4, r4, #1
	bne		.Lcbcdecloop
.Lcbcdecout:
	vst1.8		{q6}, [r5]		@ write iv back from q6
	pop		{r4-r6, pc}
ENDPROC(ce_aes_cbc_decrypt)

	/*
	 * ce_aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		      int blocks, u8 ctr[])
	 */
ENTRY(ce_aes_ctr_encrypt)
	push		{r4-r6, lr}
	ldrd		r4, r5, [sp, #16]
	vld1.8		{q6}, [r5]		@ load ctr
	prepare_key	r2, r3
	vmov		r6, s27			@ keep swabbed ctr in r6
	rev		r6, r6
	cmn		r6, r4			@ 32 bit overflow?
	bcs		.Lctrloop
.Lctrloop3x:
	subs		r4, r4, #3
	bmi		.Lctr1x
	add		r6, r6, #1
	vmov		q0, q6
	vmov		q1, q6
	rev		ip, r6
	add		r6, r6, #1
	vmov		q2, q6
	vmov		s7, ip
	rev		ip, r6
	add		r6, r6, #1
	vmov		s11, ip
	vld1.8		{q3-q4}, [r1, :64]!
	vld1.8		{q5}, [r1, :64]!
	bl		aes_encrypt_3x
	veor		q0, q0, q3
	veor		q1, q1, q4
	veor		q2, q2, q5
	rev		ip, r6
	vst1.8		{q0-q1}, [r0, :64]!
	vst1.8		{q2}, [r0, :64]!
	vmov		s27, ip
	b		.Lctrloop3x
.Lctr1x:
	adds		r4, r4, #3
	beq		.Lctrout
.Lctrloop:
	vmov		q0, q6
	bl		aes_encrypt
	subs		r4, r4, #1
	bmi		.Lctrhalfblock		@ blocks < 0 means 1/2 block
	vld1.8		{q3}, [r1, :64]!
	veor		q3, q0, q3
	vst1.8		{q3}, [r0, :64]!

	adds		r6, r6, #1		@ increment BE ctr
	rev		ip, r6
	vmov		s27, ip
	bcs		.Lctrcarry
	teq		r4, #0
	bne		.Lctrloop
.Lctrout:
	vst1.8		{q6}, [r5]
	pop		{r4-r6, pc}

.Lctrhalfblock:
	vld1.8		{d1}, [r1, :64]
	veor		d0, d0, d1
	vst1.8		{d0}, [r0, :64]
	pop		{r4-r6, pc}

.Lctrcarry:
	.irp		sreg, s26, s25, s24
	vmov		ip, \sreg		@ load next word of ctr
	rev		ip, ip			@ ... to handle the carry
	adds		ip, ip, #1
	rev		ip, ip
	vmov		\sreg, ip
	bcc		0f
	.endr
0:	teq		r4, #0
	beq		.Lctrout
	b		.Lctrloop
ENDPROC(ce_aes_ctr_encrypt)

	/*
	 * ce_aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
	 *		      int blocks, u8 iv[], u8 const rk2[], int first)
	 * ce_aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
	 *		      int blocks, u8 iv[], u8 const rk2[], int first)
	 */

	.macro		next_tweak, out, in, const, tmp
	vshr.s64	\tmp, \in, #63
	vand		\tmp, \tmp, \const
	vadd.u64	\out, \in, \in
	vext.8		\tmp, \tmp, \tmp, #8
	veor		\out, \out, \tmp
	.endm

	.align		3
.Lxts_mul_x:
	.quad		1, 0x87

ce_aes_xts_init:
	vldr		d14, .Lxts_mul_x
	vldr		d15, .Lxts_mul_x + 8

	ldrd		r4, r5, [sp, #16]	@ load args
	ldr		r6, [sp, #28]
	vld1.8		{q0}, [r5]		@ load iv
	teq		r6, #1			@ start of a block?
	bxne		lr

	@ Encrypt the IV in q0 with the second AES key. This should only
	@ be done at the start of a block.
	ldr		r6, [sp, #24]		@ load AES key 2
	prepare_key	r6, r3
	add		ip, r6, #32		@ 3rd round key of key 2
	b		.Laes_encrypt_tweak	@ tail call
ENDPROC(ce_aes_xts_init)

ENTRY(ce_aes_xts_encrypt)
	push		{r4-r6, lr}

	bl		ce_aes_xts_init		@ run shared prologue
	prepare_key	r2, r3
	vmov		q3, q0

	teq		r6, #0			@ start of a block?
	bne		.Lxtsenc3x

.Lxtsencloop3x:
	next_tweak	q3, q3, q7, q6
.Lxtsenc3x:
	subs		r4, r4, #3
	bmi		.Lxtsenc1x
	vld1.8		{q0-q1}, [r1, :64]!	@ get 3 pt blocks
	vld1.8		{q2}, [r1, :64]!
	next_tweak	q4, q3, q7, q6
	veor		q0, q0, q3
	next_tweak	q5, q4, q7, q6
	veor		q1, q1, q4
	veor		q2, q2, q5
	bl		aes_encrypt_3x
	veor		q0, q0, q3
	veor		q1, q1, q4
	veor		q2, q2, q5
	vst1.8		{q0-q1}, [r0, :64]!	@ write 3 ct blocks
	vst1.8		{q2}, [r0, :64]!
	vmov		q3, q5
	teq		r4, #0
	beq		.Lxtsencout
	b		.Lxtsencloop3x
.Lxtsenc1x:
	adds		r4, r4, #3
	beq		.Lxtsencout
.Lxtsencloop:
	vld1.8		{q0}, [r1, :64]!
	veor		q0, q0, q3
	bl		aes_encrypt
	veor		q0, q0, q3
	vst1.8		{q0}, [r0, :64]!
	subs		r4, r4, #1
	beq		.Lxtsencout
	next_tweak	q3, q3, q7, q6
	b		.Lxtsencloop
.Lxtsencout:
	vst1.8		{q3}, [r5]
	pop		{r4-r6, pc}
ENDPROC(ce_aes_xts_encrypt)


ENTRY(ce_aes_xts_decrypt)
	push		{r4-r6, lr}

	bl		ce_aes_xts_init		@ run shared prologue
	prepare_key	r2, r3
	vmov		q3, q0

	teq		r6, #0			@ start of a block?
	bne		.Lxtsdec3x

.Lxtsdecloop3x:
	next_tweak	q3, q3, q7, q6
.Lxtsdec3x:
	subs		r4, r4, #3
	bmi		.Lxtsdec1x
	vld1.8		{q0-q1}, [r1, :64]!	@ get 3 ct blocks
	vld1.8		{q2}, [r1, :64]!
	next_tweak	q4, q3, q7, q6
	veor		q0, q0, q3
	next_tweak	q5, q4, q7, q6
	veor		q1, q1, q4
	veor		q2, q2, q5
	bl		aes_decrypt_3x
	veor		q0, q0, q3
	veor		q1, q1, q4
	veor		q2, q2, q5
	vst1.8		{q0-q1}, [r0, :64]!	@ write 3 pt blocks
	vst1.8		{q2}, [r0, :64]!
	vmov		q3, q5
	teq		r4, #0
	beq		.Lxtsdecout
	b		.Lxtsdecloop3x
.Lxtsdec1x:
	adds		r4, r4, #3
	beq		.Lxtsdecout
.Lxtsdecloop:
	vld1.8		{q0}, [r1, :64]!
	veor		q0, q0, q3
	add		ip, r2, #32		@ 3rd round key
	bl		aes_decrypt
	veor		q0, q0, q3
	vst1.8		{q0}, [r0, :64]!
	subs		r4, r4, #1
	beq		.Lxtsdecout
	next_tweak	q3, q3, q7, q6
	b		.Lxtsdecloop
.Lxtsdecout:
	vst1.8		{q3}, [r5]
	pop		{r4-r6, pc}
ENDPROC(ce_aes_xts_decrypt)

	/*
	 * u32 ce_aes_sub(u32 input) - use the aese instruction to perform the
	 *			       AES sbox substitution on each byte in
	 *			       'input'
	 */
ENTRY(ce_aes_sub)
	vdup.32		q1, r0
	veor		q0, q0, q0
	aese.8		q0, q1
	vmov		r0, s0
	bx		lr
ENDPROC(ce_aes_sub)

	/*
	 * void ce_aes_invert(u8 *dst, u8 *src) - perform the Inverse MixColumns
	 *					  operation on round key *src
	 */
ENTRY(ce_aes_invert)
	vld1.8		{q0}, [r1]
	aesimc.8	q0, q0
	vst1.8		{q0}, [r0]
	bx		lr
ENDPROC(ce_aes_invert)
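The next_tweak macro above computes the XTS tweak update, i.e. multiplication by x in GF(2^128) with the reduction constant 0x87 held in .Lxts_mul_x: the arithmetic shift smears each 64-bit lane's top bit across the lane, the vand against {1, 0x87} turns those into a carry bit and a reduction constant, vadd doubles both lanes, and vext swaps the lanes so each carry lands in the other half. A plain C model of the same step (the function name is mine, not from the patch; lane layout is little-endian, as in the NEON code):

	#include <stdint.h>

	static void next_tweak_model(uint64_t out[2], const uint64_t in[2])
	{
		uint64_t carry  = in[0] >> 63;              /* bit 63 carries into the high lane */
		uint64_t reduce = (in[1] >> 63) ? 0x87 : 0; /* bit 127 folds back mod the polynomial */

		out[1] = (in[1] << 1) | carry;
		out[0] = (in[0] << 1) ^ reduce;
	}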
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
new file mode 100644
index 000000000000..d2ee59157ec7
--- /dev/null
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -0,0 +1,520 @@
/*
 * aes-ce-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <linux/module.h>

MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-ce-core.S */
asmlinkage u32 ce_aes_sub(u32 input);
asmlinkage void ce_aes_invert(void *dst, void *src);

asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks);
asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks);

asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks, u8 iv[]);

asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				   int rounds, int blocks, u8 ctr[]);

asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
				   int rounds, int blocks, u8 iv[],
				   u8 const rk2[], int first);
asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
				   int rounds, int blocks, u8 iv[],
				   u8 const rk2[], int first);

struct aes_block {
	u8 b[AES_BLOCK_SIZE];
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			    unsigned int key_len)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (key_len != AES_KEYSIZE_128 &&
	    key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key_enc, in_key, key_len);
	ctx->key_length = key_len;

	kernel_neon_begin();
	for (i = 0; i < sizeof(rcon); i++) {
		u32 *rki = ctx->key_enc + (i * kwords);
		u32 *rko = rki + kwords;

		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
		rko[1] = rko[0] ^ rki[1];
		rko[2] = rko[1] ^ rki[2];
		rko[3] = rko[2] ^ rki[3];

		if (key_len == AES_KEYSIZE_192) {
			if (i >= 7)
				break;
			rko[4] = rko[3] ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
		} else if (key_len == AES_KEYSIZE_256) {
			if (i >= 6)
				break;
			rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
			rko[6] = rko[5] ^ rki[6];
			rko[7] = rko[6] ^ rki[7];
		}
	}

	/*
	 * Generate the decryption keys for the Equivalent Inverse Cipher.
	 * This involves reversing the order of the round keys, and applying
	 * the Inverse Mix Columns transformation on all but the first and
	 * the last one.
	 */
	key_enc = (struct aes_block *)ctx->key_enc;
	key_dec = (struct aes_block *)ctx->key_dec;
	j = num_rounds(ctx);

	key_dec[0] = key_enc[j];
	for (i = 1, j--; j > 0; i++, j--)
		ce_aes_invert(key_dec + i, key_enc + j);
	key_dec[i] = key_enc[0];

	kernel_neon_end();
	return 0;
}

static int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = ce_aes_expandkey(ctx, in_key, key_len);
	if (!ret)
		return 0;

	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				       key_len / 2);
	if (!ret)
		return 0;

	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	unsigned int blocks;
	int err;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	unsigned int blocks;
	int err;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	unsigned int blocks;
	int err;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	unsigned int blocks;
	int err;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
				   walk.iv);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();
	return err;
}

static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);

	kernel_neon_begin();
	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		nbytes -= blocks * AES_BLOCK_SIZE;
		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
			break;
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	if (nbytes) {
		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 __aligned(8) tail[AES_BLOCK_SIZE];

		/*
		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
		 * to tell aes_ctr_encrypt() to only read half a block.
		 */
		blocks = (nbytes <= 8) ? -1 : 1;

		ce_aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc,
				   num_rounds(ctx), blocks, walk.iv);
		memcpy(tdst, tail, nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_neon_end();

	return err;
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key1.key_enc, rounds, blocks,
				   walk.iv, (u8 *)ctx->key2.key_enc, first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	struct blkcipher_walk walk;
	unsigned int blocks;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_neon_begin();
	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   (u8 *)ctx->key1.key_dec, rounds, blocks,
				   walk.iv, (u8 *)ctx->key2.key_enc, first);
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static struct crypto_alg aes_algs[] = { {
	.cra_name		= "__ecb-aes-ce",
	.cra_driver_name	= "__driver-ecb-aes-ce",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ce_aes_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	},
}, {
	.cra_name		= "__cbc-aes-ce",
	.cra_driver_name	= "__driver-cbc-aes-ce",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ce_aes_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
	},
}, {
	.cra_name		= "__ctr-aes-ce",
	.cra_driver_name	= "__driver-ctr-aes-ce",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ce_aes_setkey,
		.encrypt	= ctr_encrypt,
		.decrypt	= ctr_encrypt,
	},
}, {
	.cra_name		= "__xts-aes-ce",
	.cra_driver_name	= "__driver-xts-aes-ce",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_set_key,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-ce",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-ce",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-ce",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-ce",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
} };

static int __init aes_init(void)
{
	if (!(elf_hwcap2 & HWCAP2_AES))
		return -ENODEV;
	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
}

static void __exit aes_exit(void)
{
	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
}

module_init(aes_init);
module_exit(aes_exit);
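Once the module is loaded on a CPU that sets HWCAP2_AES, the priority-300 "ecb-aes-ce", "cbc-aes-ce", "ctr-aes-ce" and "xts-aes-ce" drivers win lookups for the generic algorithm names. A hedged userspace sketch exercising cbc(aes) through the AF_ALG socket interface (assumes CONFIG_CRYPTO_USER_API_SKCIPHER is enabled; error handling omitted; key, IV and plaintext are demo values):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "skcipher",
			.salg_name   = "cbc(aes)",
		};
		unsigned char key[16] = "0123456789abcdef";	/* demo AES-128 key */
		unsigned char iv[16]  = { 0 };
		unsigned char pt[16]  = "hello, aes-ce!!";
		unsigned char ct[16];

		int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
		int req = accept(tfm, NULL, 0);

		/* the operation and the IV travel as ancillary data alongside
		 * the plaintext */
		char cbuf[CMSG_SPACE(sizeof(__u32)) +
			  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
		struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
		struct msghdr msg = {
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
			.msg_iov = &iov, .msg_iovlen = 1,
		};
		struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
		c->cmsg_level = SOL_ALG;
		c->cmsg_type  = ALG_SET_OP;
		c->cmsg_len   = CMSG_LEN(sizeof(__u32));
		*(__u32 *)CMSG_DATA(c) = ALG_OP_ENCRYPT;

		c = CMSG_NXTHDR(&msg, c);
		c->cmsg_level = SOL_ALG;
		c->cmsg_type  = ALG_SET_IV;
		c->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
		struct af_alg_iv *aiv = (struct af_alg_iv *)CMSG_DATA(c);
		aiv->ivlen = sizeof(iv);
		memcpy(aiv->iv, iv, sizeof(iv));

		sendmsg(req, &msg, 0);
		read(req, ct, sizeof(ct));	/* ciphertext comes back on the same socket */

		for (int i = 0; i < 16; i++)
			printf("%02x", ct[i]);
		printf("\n");
		return 0;
	}

Whether the CE driver actually served the request can be checked in /proc/crypto, which lists the selected driver name for each registered algorithm.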