author     Jason A. Donenfeld <Jason@zx2c4.com>        2018-08-07 02:22:25 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>    2018-09-03 23:35:03 -0400
commit     578bdaabd015b9b164842c3e8ace9802f38e7ecc (patch)
tree       6a1b6134e2377490812b7aa27620f2330e94576e
parent     9dbe3072c6b1f28000961e34497237d0e3d13318 (diff)
crypto: speck - remove Speck
These are unused, undesired, and have never actually been used by
anybody. The original authors of this code have changed their mind about
its inclusion. While originally proposed for disk encryption on low-end
devices, the idea was discarded [1] in favor of something else before
that could really get going. Therefore, this patch removes Speck.

[1] https://marc.info/?l=linux-crypto-vger&m=153359499015659

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Eric Biggers <ebiggers@google.com>
Cc: stable@vger.kernel.org
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--   Documentation/filesystems/fscrypt.rst     10
-rw-r--r--   arch/arm/crypto/Kconfig                     6
-rw-r--r--   arch/arm/crypto/Makefile                    2
-rw-r--r--   arch/arm/crypto/speck-neon-core.S         434
-rw-r--r--   arch/arm/crypto/speck-neon-glue.c         288
-rw-r--r--   arch/arm64/crypto/Kconfig                   6
-rw-r--r--   arch/arm64/crypto/Makefile                  3
-rw-r--r--   arch/arm64/crypto/speck-neon-core.S       352
-rw-r--r--   arch/arm64/crypto/speck-neon-glue.c       282
-rw-r--r--   arch/m68k/configs/amiga_defconfig           1
-rw-r--r--   arch/m68k/configs/apollo_defconfig          1
-rw-r--r--   arch/m68k/configs/atari_defconfig           1
-rw-r--r--   arch/m68k/configs/bvme6000_defconfig        1
-rw-r--r--   arch/m68k/configs/hp300_defconfig           1
-rw-r--r--   arch/m68k/configs/mac_defconfig             1
-rw-r--r--   arch/m68k/configs/multi_defconfig           1
-rw-r--r--   arch/m68k/configs/mvme147_defconfig         1
-rw-r--r--   arch/m68k/configs/mvme16x_defconfig         1
-rw-r--r--   arch/m68k/configs/q40_defconfig             1
-rw-r--r--   arch/m68k/configs/sun3_defconfig            1
-rw-r--r--   arch/m68k/configs/sun3x_defconfig           1
-rw-r--r--   arch/s390/defconfig                         1
-rw-r--r--   crypto/Kconfig                             14
-rw-r--r--   crypto/Makefile                             1
-rw-r--r--   crypto/speck.c                            307
-rw-r--r--   crypto/testmgr.c                           24
-rw-r--r--   crypto/testmgr.h                          738
-rw-r--r--   fs/crypto/fscrypt_private.h                 4
-rw-r--r--   fs/crypto/keyinfo.c                        10
-rw-r--r--   include/crypto/speck.h                     62
-rw-r--r--   include/uapi/linux/fs.h                     4
31 files changed, 2 insertions, 2558 deletions
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 48b424de85bb..cfbc18f0d9c9 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported:
 
 - AES-256-XTS for contents and AES-256-CTS-CBC for filenames
 - AES-128-CBC for contents and AES-128-CTS-CBC for filenames
-- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
 
 It is strongly recommended to use AES-256-XTS for contents encryption.
 AES-128-CBC was added only for low-powered embedded devices with
 crypto accelerators such as CAAM or CESA that do not support XTS.
 
-Similarly, Speck128/256 support was only added for older or low-end
-CPUs which cannot do AES fast enough -- especially ARM CPUs which have
-NEON instructions but not the Cryptography Extensions -- and for which
-it would not otherwise be feasible to use encryption at all. It is
-not recommended to use Speck on CPUs that have AES instructions.
-Speck support is only available if it has been enabled in the crypto
-API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
-acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
-
 New encryption modes can be added relatively easily, without changes
 to individual filesystems. However, authenticated encryption (AE)
 modes are not currently supported because of the difficulty of dealing
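
For reference, each fscrypt mode pair above is identified by numeric constants in include/uapi/linux/fs.h, which this patch also trims (see the diffstat). A sketch of the affected UAPI block -- the Speck constant names and values are recalled from the pre-removal tree, so treat them as illustrative rather than authoritative:

/* include/uapi/linux/fs.h -- illustrative excerpt */
#define FS_ENCRYPTION_MODE_AES_256_XTS        1
#define FS_ENCRYPTION_MODE_AES_256_CTS        4
/* the pair removed by this patch (names/values as recalled): */
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS   7
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS   8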
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 925d1364727a..b8e69fe282b8 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -121,10 +121,4 @@ config CRYPTO_CHACHA20_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_CHACHA20
 
-config CRYPTO_SPECK_NEON
-	tristate "NEON accelerated Speck cipher algorithms"
-	depends on KERNEL_MODE_NEON
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_SPECK
-
 endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8de542c48ade..bd5bceef0605 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
 obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
 obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
 obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
 
 ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
 crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
 crc32-arm-ce-y := crc32-ce-core.o crc32-ce-glue.o
 chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
 
 ifdef REGENERATE_ARM_CRYPTO
 quiet_cmd_perl = PERL $@
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
deleted file mode 100644
index 57caa742016e..000000000000
--- a/arch/arm/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,434 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Author: Eric Biggers <ebiggers@google.com>
8 */
9
10#include <linux/linkage.h>
11
12 .text
13 .fpu neon
14
15 // arguments
16 ROUND_KEYS .req r0 // const {u64,u32} *round_keys
17 NROUNDS .req r1 // int nrounds
18 DST .req r2 // void *dst
19 SRC .req r3 // const void *src
20 NBYTES .req r4 // unsigned int nbytes
21 TWEAK .req r5 // void *tweak
22
23 // registers which hold the data being encrypted/decrypted
24 X0 .req q0
25 X0_L .req d0
26 X0_H .req d1
27 Y0 .req q1
28 Y0_H .req d3
29 X1 .req q2
30 X1_L .req d4
31 X1_H .req d5
32 Y1 .req q3
33 Y1_H .req d7
34 X2 .req q4
35 X2_L .req d8
36 X2_H .req d9
37 Y2 .req q5
38 Y2_H .req d11
39 X3 .req q6
40 X3_L .req d12
41 X3_H .req d13
42 Y3 .req q7
43 Y3_H .req d15
44
45 // the round key, duplicated in all lanes
46 ROUND_KEY .req q8
47 ROUND_KEY_L .req d16
48 ROUND_KEY_H .req d17
49
50 // index vector for vtbl-based 8-bit rotates
51 ROTATE_TABLE .req d18
52
53 // multiplication table for updating XTS tweaks
54 GF128MUL_TABLE .req d19
55 GF64MUL_TABLE .req d19
56
57 // current XTS tweak value(s)
58 TWEAKV .req q10
59 TWEAKV_L .req d20
60 TWEAKV_H .req d21
61
62 TMP0 .req q12
63 TMP0_L .req d24
64 TMP0_H .req d25
65 TMP1 .req q13
66 TMP2 .req q14
67 TMP3 .req q15
68
69 .align 4
70.Lror64_8_table:
71 .byte 1, 2, 3, 4, 5, 6, 7, 0
72.Lror32_8_table:
73 .byte 1, 2, 3, 0, 5, 6, 7, 4
74.Lrol64_8_table:
75 .byte 7, 0, 1, 2, 3, 4, 5, 6
76.Lrol32_8_table:
77 .byte 3, 0, 1, 2, 7, 4, 5, 6
78.Lgf128mul_table:
79 .byte 0, 0x87
80 .fill 14
81.Lgf64mul_table:
82 .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
83 .fill 12
84
85/*
86 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
87 *
88 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
89 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
90 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
91 *
92 * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
93 * the vtbl approach is faster on some processors and the same speed on others.
94 */
95.macro _speck_round_128bytes n
96
97 // x = ror(x, 8)
98 vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
99 vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
100 vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
101 vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
102 vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
103 vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
104 vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
105 vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
106
107 // x += y
108 vadd.u\n X0, Y0
109 vadd.u\n X1, Y1
110 vadd.u\n X2, Y2
111 vadd.u\n X3, Y3
112
113 // x ^= k
114 veor X0, ROUND_KEY
115 veor X1, ROUND_KEY
116 veor X2, ROUND_KEY
117 veor X3, ROUND_KEY
118
119 // y = rol(y, 3)
120 vshl.u\n TMP0, Y0, #3
121 vshl.u\n TMP1, Y1, #3
122 vshl.u\n TMP2, Y2, #3
123 vshl.u\n TMP3, Y3, #3
124 vsri.u\n TMP0, Y0, #(\n - 3)
125 vsri.u\n TMP1, Y1, #(\n - 3)
126 vsri.u\n TMP2, Y2, #(\n - 3)
127 vsri.u\n TMP3, Y3, #(\n - 3)
128
129 // y ^= x
130 veor Y0, TMP0, X0
131 veor Y1, TMP1, X1
132 veor Y2, TMP2, X2
133 veor Y3, TMP3, X3
134.endm
135
136/*
137 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
138 *
139 * This is the inverse of _speck_round_128bytes().
140 */
141.macro _speck_unround_128bytes n
142
143 // y ^= x
144 veor TMP0, Y0, X0
145 veor TMP1, Y1, X1
146 veor TMP2, Y2, X2
147 veor TMP3, Y3, X3
148
149 // y = ror(y, 3)
150 vshr.u\n Y0, TMP0, #3
151 vshr.u\n Y1, TMP1, #3
152 vshr.u\n Y2, TMP2, #3
153 vshr.u\n Y3, TMP3, #3
154 vsli.u\n Y0, TMP0, #(\n - 3)
155 vsli.u\n Y1, TMP1, #(\n - 3)
156 vsli.u\n Y2, TMP2, #(\n - 3)
157 vsli.u\n Y3, TMP3, #(\n - 3)
158
159 // x ^= k
160 veor X0, ROUND_KEY
161 veor X1, ROUND_KEY
162 veor X2, ROUND_KEY
163 veor X3, ROUND_KEY
164
165 // x -= y
166 vsub.u\n X0, Y0
167 vsub.u\n X1, Y1
168 vsub.u\n X2, Y2
169 vsub.u\n X3, Y3
170
171 // x = rol(x, 8);
172 vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
173 vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
174 vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
175 vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
176 vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
177 vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
178 vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
179 vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
180.endm
181
182.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
183
184 // Load the next source block
185 vld1.8 {\dst_reg}, [SRC]!
186
187 // Save the current tweak in the tweak buffer
188 vst1.8 {TWEAKV}, [\tweak_buf:128]!
189
190 // XOR the next source block with the current tweak
191 veor \dst_reg, TWEAKV
192
193 /*
194 * Calculate the next tweak by multiplying the current one by x,
195 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
196 */
197 vshr.u64 \tmp, TWEAKV, #63
198 vshl.u64 TWEAKV, #1
199 veor TWEAKV_H, \tmp\()_L
200 vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
201 veor TWEAKV_L, \tmp\()_H
202.endm
203
204.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
205
206 // Load the next two source blocks
207 vld1.8 {\dst_reg}, [SRC]!
208
209 // Save the current two tweaks in the tweak buffer
210 vst1.8 {TWEAKV}, [\tweak_buf:128]!
211
212 // XOR the next two source blocks with the current two tweaks
213 veor \dst_reg, TWEAKV
214
215 /*
216 * Calculate the next two tweaks by multiplying the current ones by x^2,
217 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
218 */
219 vshr.u64 \tmp, TWEAKV, #62
220 vshl.u64 TWEAKV, #2
221 vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
222 vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
223 veor TWEAKV, \tmp
224.endm
225
226/*
227 * _speck_xts_crypt() - Speck-XTS encryption/decryption
228 *
229 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
230 * using Speck-XTS, specifically the variant with a block size of '2n' and round
231 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
232 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
233 * nonzero multiple of 128.
234 */
235.macro _speck_xts_crypt n, decrypting
236 push {r4-r7}
237 mov r7, sp
238
239 /*
240 * The first four parameters were passed in registers r0-r3. Load the
241 * additional parameters, which were passed on the stack.
242 */
243 ldr NBYTES, [sp, #16]
244 ldr TWEAK, [sp, #20]
245
246 /*
247 * If decrypting, modify the ROUND_KEYS parameter to point to the last
248 * round key rather than the first, since for decryption the round keys
249 * are used in reverse order.
250 */
251.if \decrypting
252.if \n == 64
253 add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
254 sub ROUND_KEYS, #8
255.else
256 add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
257 sub ROUND_KEYS, #4
258.endif
259.endif
260
261 // Load the index vector for vtbl-based 8-bit rotates
262.if \decrypting
263 ldr r12, =.Lrol\n\()_8_table
264.else
265 ldr r12, =.Lror\n\()_8_table
266.endif
267 vld1.8 {ROTATE_TABLE}, [r12:64]
268
269 // One-time XTS preparation
270
271 /*
272 * Allocate stack space to store 128 bytes worth of tweaks. For
273 * performance, this space is aligned to a 16-byte boundary so that we
274 * can use the load/store instructions that declare 16-byte alignment.
275 * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
276 */
277 sub r12, sp, #128
278 bic r12, #0xf
279 mov sp, r12
280
281.if \n == 64
282 // Load first tweak
283 vld1.8 {TWEAKV}, [TWEAK]
284
285 // Load GF(2^128) multiplication table
286 ldr r12, =.Lgf128mul_table
287 vld1.8 {GF128MUL_TABLE}, [r12:64]
288.else
289 // Load first tweak
290 vld1.8 {TWEAKV_L}, [TWEAK]
291
292 // Load GF(2^64) multiplication table
293 ldr r12, =.Lgf64mul_table
294 vld1.8 {GF64MUL_TABLE}, [r12:64]
295
296 // Calculate second tweak, packing it together with the first
297 vshr.u64 TMP0_L, TWEAKV_L, #63
298 vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
299 vshl.u64 TWEAKV_H, TWEAKV_L, #1
300 veor TWEAKV_H, TMP0_L
301.endif
302
303.Lnext_128bytes_\@:
304
305 /*
306 * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
307 * values, and save the tweaks on the stack for later. Then
308 * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
309 * that the X[0-3] registers contain only the second halves of blocks,
310 * and the Y[0-3] registers contain only the first halves of blocks.
311 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
312 */
313 mov r12, sp
314.if \n == 64
315 _xts128_precrypt_one X0, r12, TMP0
316 _xts128_precrypt_one Y0, r12, TMP0
317 _xts128_precrypt_one X1, r12, TMP0
318 _xts128_precrypt_one Y1, r12, TMP0
319 _xts128_precrypt_one X2, r12, TMP0
320 _xts128_precrypt_one Y2, r12, TMP0
321 _xts128_precrypt_one X3, r12, TMP0
322 _xts128_precrypt_one Y3, r12, TMP0
323 vswp X0_L, Y0_H
324 vswp X1_L, Y1_H
325 vswp X2_L, Y2_H
326 vswp X3_L, Y3_H
327.else
328 _xts64_precrypt_two X0, r12, TMP0
329 _xts64_precrypt_two Y0, r12, TMP0
330 _xts64_precrypt_two X1, r12, TMP0
331 _xts64_precrypt_two Y1, r12, TMP0
332 _xts64_precrypt_two X2, r12, TMP0
333 _xts64_precrypt_two Y2, r12, TMP0
334 _xts64_precrypt_two X3, r12, TMP0
335 _xts64_precrypt_two Y3, r12, TMP0
336 vuzp.32 Y0, X0
337 vuzp.32 Y1, X1
338 vuzp.32 Y2, X2
339 vuzp.32 Y3, X3
340.endif
341
342 // Do the cipher rounds
343
344 mov r12, ROUND_KEYS
345 mov r6, NROUNDS
346
347.Lnext_round_\@:
348.if \decrypting
349.if \n == 64
350 vld1.64 ROUND_KEY_L, [r12]
351 sub r12, #8
352 vmov ROUND_KEY_H, ROUND_KEY_L
353.else
354 vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
355 sub r12, #4
356.endif
357 _speck_unround_128bytes \n
358.else
359.if \n == 64
360 vld1.64 ROUND_KEY_L, [r12]!
361 vmov ROUND_KEY_H, ROUND_KEY_L
362.else
363 vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
364.endif
365 _speck_round_128bytes \n
366.endif
367 subs r6, r6, #1
368 bne .Lnext_round_\@
369
370 // Re-interleave the 'x' and 'y' elements of each block
371.if \n == 64
372 vswp X0_L, Y0_H
373 vswp X1_L, Y1_H
374 vswp X2_L, Y2_H
375 vswp X3_L, Y3_H
376.else
377 vzip.32 Y0, X0
378 vzip.32 Y1, X1
379 vzip.32 Y2, X2
380 vzip.32 Y3, X3
381.endif
382
383 // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
384 mov r12, sp
385 vld1.8 {TMP0, TMP1}, [r12:128]!
386 vld1.8 {TMP2, TMP3}, [r12:128]!
387 veor X0, TMP0
388 veor Y0, TMP1
389 veor X1, TMP2
390 veor Y1, TMP3
391 vld1.8 {TMP0, TMP1}, [r12:128]!
392 vld1.8 {TMP2, TMP3}, [r12:128]!
393 veor X2, TMP0
394 veor Y2, TMP1
395 veor X3, TMP2
396 veor Y3, TMP3
397
398 // Store the ciphertext in the destination buffer
399 vst1.8 {X0, Y0}, [DST]!
400 vst1.8 {X1, Y1}, [DST]!
401 vst1.8 {X2, Y2}, [DST]!
402 vst1.8 {X3, Y3}, [DST]!
403
404 // Continue if there are more 128-byte chunks remaining, else return
405 subs NBYTES, #128
406 bne .Lnext_128bytes_\@
407
408 // Store the next tweak
409.if \n == 64
410 vst1.8 {TWEAKV}, [TWEAK]
411.else
412 vst1.8 {TWEAKV_L}, [TWEAK]
413.endif
414
415 mov sp, r7
416 pop {r4-r7}
417 bx lr
418.endm
419
420ENTRY(speck128_xts_encrypt_neon)
421 _speck_xts_crypt n=64, decrypting=0
422ENDPROC(speck128_xts_encrypt_neon)
423
424ENTRY(speck128_xts_decrypt_neon)
425 _speck_xts_crypt n=64, decrypting=1
426ENDPROC(speck128_xts_decrypt_neon)
427
428ENTRY(speck64_xts_encrypt_neon)
429 _speck_xts_crypt n=32, decrypting=0
430ENDPROC(speck64_xts_encrypt_neon)
431
432ENTRY(speck64_xts_decrypt_neon)
433 _speck_xts_crypt n=32, decrypting=1
434ENDPROC(speck64_xts_decrypt_neon)
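
The NEON macros above are eight parallel lanes of the scalar Speck round; the comment before _speck_round_128bytes maps each vector instruction onto the scalar operation it implements. As a reference point, a scalar sketch of one Speck128 round -- the same logic the generic implementation in crypto/speck.c, further down, uses:

/* One Speck128 round; the vector code runs this on 8 blocks at once. */
static inline void speck128_round(u64 *x, u64 *y, u64 k)
{
	*x = ror64(*x, 8);	/* the vtbl byte-permute via ROTATE_TABLE */
	*x += *y;		/* vadd.u64 */
	*x ^= k;		/* veor with ROUND_KEY */
	*y = rol64(*y, 3);	/* the vshl/vsri pair */
	*y ^= *x;		/* final veor */
}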
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
deleted file mode 100644
index f012c3ea998f..000000000000
--- a/arch/arm/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,288 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
8 * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
9 * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
10 * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
11 * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
12 * OCB and PMAC"), represented as 0x1B.
13 */
14
15#include <asm/hwcap.h>
16#include <asm/neon.h>
17#include <asm/simd.h>
18#include <crypto/algapi.h>
19#include <crypto/gf128mul.h>
20#include <crypto/internal/skcipher.h>
21#include <crypto/speck.h>
22#include <crypto/xts.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25
26/* The assembly functions only handle multiples of 128 bytes */
27#define SPECK_NEON_CHUNK_SIZE 128
28
29/* Speck128 */
30
31struct speck128_xts_tfm_ctx {
32 struct speck128_tfm_ctx main_key;
33 struct speck128_tfm_ctx tweak_key;
34};
35
36asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
37 void *dst, const void *src,
38 unsigned int nbytes, void *tweak);
39
40asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
41 void *dst, const void *src,
42 unsigned int nbytes, void *tweak);
43
44typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
45 u8 *, const u8 *);
46typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
47 const void *, unsigned int, void *);
48
49static __always_inline int
50__speck128_xts_crypt(struct skcipher_request *req,
51 speck128_crypt_one_t crypt_one,
52 speck128_xts_crypt_many_t crypt_many)
53{
54 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
55 const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
56 struct skcipher_walk walk;
57 le128 tweak;
58 int err;
59
60 err = skcipher_walk_virt(&walk, req, true);
61
62 crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
63
64 while (walk.nbytes > 0) {
65 unsigned int nbytes = walk.nbytes;
66 u8 *dst = walk.dst.virt.addr;
67 const u8 *src = walk.src.virt.addr;
68
69 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
70 unsigned int count;
71
72 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
73 kernel_neon_begin();
74 (*crypt_many)(ctx->main_key.round_keys,
75 ctx->main_key.nrounds,
76 dst, src, count, &tweak);
77 kernel_neon_end();
78 dst += count;
79 src += count;
80 nbytes -= count;
81 }
82
83 /* Handle any remainder with generic code */
84 while (nbytes >= sizeof(tweak)) {
85 le128_xor((le128 *)dst, (const le128 *)src, &tweak);
86 (*crypt_one)(&ctx->main_key, dst, dst);
87 le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
88 gf128mul_x_ble(&tweak, &tweak);
89
90 dst += sizeof(tweak);
91 src += sizeof(tweak);
92 nbytes -= sizeof(tweak);
93 }
94 err = skcipher_walk_done(&walk, nbytes);
95 }
96
97 return err;
98}
99
100static int speck128_xts_encrypt(struct skcipher_request *req)
101{
102 return __speck128_xts_crypt(req, crypto_speck128_encrypt,
103 speck128_xts_encrypt_neon);
104}
105
106static int speck128_xts_decrypt(struct skcipher_request *req)
107{
108 return __speck128_xts_crypt(req, crypto_speck128_decrypt,
109 speck128_xts_decrypt_neon);
110}
111
112static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
113 unsigned int keylen)
114{
115 struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
116 int err;
117
118 err = xts_verify_key(tfm, key, keylen);
119 if (err)
120 return err;
121
122 keylen /= 2;
123
124 err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
125 if (err)
126 return err;
127
128 return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
129}
130
131/* Speck64 */
132
133struct speck64_xts_tfm_ctx {
134 struct speck64_tfm_ctx main_key;
135 struct speck64_tfm_ctx tweak_key;
136};
137
138asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
139 void *dst, const void *src,
140 unsigned int nbytes, void *tweak);
141
142asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
143 void *dst, const void *src,
144 unsigned int nbytes, void *tweak);
145
146typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
147 u8 *, const u8 *);
148typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
149 const void *, unsigned int, void *);
150
151static __always_inline int
152__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
153 speck64_xts_crypt_many_t crypt_many)
154{
155 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
156 const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
157 struct skcipher_walk walk;
158 __le64 tweak;
159 int err;
160
161 err = skcipher_walk_virt(&walk, req, true);
162
163 crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
164
165 while (walk.nbytes > 0) {
166 unsigned int nbytes = walk.nbytes;
167 u8 *dst = walk.dst.virt.addr;
168 const u8 *src = walk.src.virt.addr;
169
170 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
171 unsigned int count;
172
173 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
174 kernel_neon_begin();
175 (*crypt_many)(ctx->main_key.round_keys,
176 ctx->main_key.nrounds,
177 dst, src, count, &tweak);
178 kernel_neon_end();
179 dst += count;
180 src += count;
181 nbytes -= count;
182 }
183
184 /* Handle any remainder with generic code */
185 while (nbytes >= sizeof(tweak)) {
186 *(__le64 *)dst = *(__le64 *)src ^ tweak;
187 (*crypt_one)(&ctx->main_key, dst, dst);
188 *(__le64 *)dst ^= tweak;
189 tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
190 ((tweak & cpu_to_le64(1ULL << 63)) ?
191 0x1B : 0));
192 dst += sizeof(tweak);
193 src += sizeof(tweak);
194 nbytes -= sizeof(tweak);
195 }
196 err = skcipher_walk_done(&walk, nbytes);
197 }
198
199 return err;
200}
201
202static int speck64_xts_encrypt(struct skcipher_request *req)
203{
204 return __speck64_xts_crypt(req, crypto_speck64_encrypt,
205 speck64_xts_encrypt_neon);
206}
207
208static int speck64_xts_decrypt(struct skcipher_request *req)
209{
210 return __speck64_xts_crypt(req, crypto_speck64_decrypt,
211 speck64_xts_decrypt_neon);
212}
213
214static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
215 unsigned int keylen)
216{
217 struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
218 int err;
219
220 err = xts_verify_key(tfm, key, keylen);
221 if (err)
222 return err;
223
224 keylen /= 2;
225
226 err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
227 if (err)
228 return err;
229
230 return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
231}
232
233static struct skcipher_alg speck_algs[] = {
234 {
235 .base.cra_name = "xts(speck128)",
236 .base.cra_driver_name = "xts-speck128-neon",
237 .base.cra_priority = 300,
238 .base.cra_blocksize = SPECK128_BLOCK_SIZE,
239 .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
240 .base.cra_alignmask = 7,
241 .base.cra_module = THIS_MODULE,
242 .min_keysize = 2 * SPECK128_128_KEY_SIZE,
243 .max_keysize = 2 * SPECK128_256_KEY_SIZE,
244 .ivsize = SPECK128_BLOCK_SIZE,
245 .walksize = SPECK_NEON_CHUNK_SIZE,
246 .setkey = speck128_xts_setkey,
247 .encrypt = speck128_xts_encrypt,
248 .decrypt = speck128_xts_decrypt,
249 }, {
250 .base.cra_name = "xts(speck64)",
251 .base.cra_driver_name = "xts-speck64-neon",
252 .base.cra_priority = 300,
253 .base.cra_blocksize = SPECK64_BLOCK_SIZE,
254 .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
255 .base.cra_alignmask = 7,
256 .base.cra_module = THIS_MODULE,
257 .min_keysize = 2 * SPECK64_96_KEY_SIZE,
258 .max_keysize = 2 * SPECK64_128_KEY_SIZE,
259 .ivsize = SPECK64_BLOCK_SIZE,
260 .walksize = SPECK_NEON_CHUNK_SIZE,
261 .setkey = speck64_xts_setkey,
262 .encrypt = speck64_xts_encrypt,
263 .decrypt = speck64_xts_decrypt,
264 }
265};
266
267static int __init speck_neon_module_init(void)
268{
269 if (!(elf_hwcap & HWCAP_NEON))
270 return -ENODEV;
271 return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
272}
273
274static void __exit speck_neon_module_exit(void)
275{
276 crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
277}
278
279module_init(speck_neon_module_init);
280module_exit(speck_neon_module_exit);
281
282MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
283MODULE_LICENSE("GPL");
284MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
285MODULE_ALIAS_CRYPTO("xts(speck128)");
286MODULE_ALIAS_CRYPTO("xts-speck128-neon");
287MODULE_ALIAS_CRYPTO("xts(speck64)");
288MODULE_ALIAS_CRYPTO("xts-speck64-neon");
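
The remainder loop in __speck64_xts_crypt() above updates the tweak with the GF(2^64) analogue of the usual XTS doubling: multiply by x modulo x^64 + x^4 + x^3 + x + 1, folding the carry back in as 0x1B, exactly as the header comment describes. Pulled out into a helper for readability (a sketch, not a kernel API):

static inline __le64 speck64_xts_next_tweak(__le64 tweak)
{
	u64 t = le64_to_cpu(tweak);

	/* a carry out of bit 63 reduces modulo the polynomial, i.e. XOR 0x1B */
	return cpu_to_le64((t << 1) ^ ((t & (1ULL << 63)) ? 0x1B : 0));
}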
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index e3fdb0fd6f70..d51944ff9f91 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -119,10 +119,4 @@ config CRYPTO_AES_ARM64_BS
 	select CRYPTO_AES_ARM64
 	select CRYPTO_SIMD
 
-config CRYPTO_SPECK_NEON
-	tristate "NEON accelerated Speck cipher algorithms"
-	depends on KERNEL_MODE_NEON
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_SPECK
-
 endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index bcafd016618e..7bc4bda6d9c6 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -56,9 +56,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
 obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
 chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
 
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
-
 obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
 aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
 
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
deleted file mode 100644
index b14463438b09..000000000000
--- a/arch/arm64/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,352 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Author: Eric Biggers <ebiggers@google.com>
8 */
9
10#include <linux/linkage.h>
11
12 .text
13
14 // arguments
15 ROUND_KEYS .req x0 // const {u64,u32} *round_keys
16 NROUNDS .req w1 // int nrounds
17 NROUNDS_X .req x1
18 DST .req x2 // void *dst
19 SRC .req x3 // const void *src
20 NBYTES .req w4 // unsigned int nbytes
21 TWEAK .req x5 // void *tweak
22
23 // registers which hold the data being encrypted/decrypted
24 // (underscores avoid a naming collision with ARM64 registers x0-x3)
25 X_0 .req v0
26 Y_0 .req v1
27 X_1 .req v2
28 Y_1 .req v3
29 X_2 .req v4
30 Y_2 .req v5
31 X_3 .req v6
32 Y_3 .req v7
33
34 // the round key, duplicated in all lanes
35 ROUND_KEY .req v8
36
37 // index vector for tbl-based 8-bit rotates
38 ROTATE_TABLE .req v9
39 ROTATE_TABLE_Q .req q9
40
41 // temporary registers
42 TMP0 .req v10
43 TMP1 .req v11
44 TMP2 .req v12
45 TMP3 .req v13
46
47 // multiplication table for updating XTS tweaks
48 GFMUL_TABLE .req v14
49 GFMUL_TABLE_Q .req q14
50
51 // next XTS tweak value(s)
52 TWEAKV_NEXT .req v15
53
54 // XTS tweaks for the blocks currently being encrypted/decrypted
55 TWEAKV0 .req v16
56 TWEAKV1 .req v17
57 TWEAKV2 .req v18
58 TWEAKV3 .req v19
59 TWEAKV4 .req v20
60 TWEAKV5 .req v21
61 TWEAKV6 .req v22
62 TWEAKV7 .req v23
63
64 .align 4
65.Lror64_8_table:
66 .octa 0x080f0e0d0c0b0a090007060504030201
67.Lror32_8_table:
68 .octa 0x0c0f0e0d080b0a090407060500030201
69.Lrol64_8_table:
70 .octa 0x0e0d0c0b0a09080f0605040302010007
71.Lrol32_8_table:
72 .octa 0x0e0d0c0f0a09080b0605040702010003
73.Lgf128mul_table:
74 .octa 0x00000000000000870000000000000001
75.Lgf64mul_table:
76 .octa 0x0000000000000000000000002d361b00
77
78/*
79 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
80 *
81 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
82 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
83 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
84 * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
85 */
86.macro _speck_round_128bytes n, lanes
87
88 // x = ror(x, 8)
89 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
90 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
91 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
92 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
93
94 // x += y
95 add X_0.\lanes, X_0.\lanes, Y_0.\lanes
96 add X_1.\lanes, X_1.\lanes, Y_1.\lanes
97 add X_2.\lanes, X_2.\lanes, Y_2.\lanes
98 add X_3.\lanes, X_3.\lanes, Y_3.\lanes
99
100 // x ^= k
101 eor X_0.16b, X_0.16b, ROUND_KEY.16b
102 eor X_1.16b, X_1.16b, ROUND_KEY.16b
103 eor X_2.16b, X_2.16b, ROUND_KEY.16b
104 eor X_3.16b, X_3.16b, ROUND_KEY.16b
105
106 // y = rol(y, 3)
107 shl TMP0.\lanes, Y_0.\lanes, #3
108 shl TMP1.\lanes, Y_1.\lanes, #3
109 shl TMP2.\lanes, Y_2.\lanes, #3
110 shl TMP3.\lanes, Y_3.\lanes, #3
111 sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
112 sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
113 sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
114 sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
115
116 // y ^= x
117 eor Y_0.16b, TMP0.16b, X_0.16b
118 eor Y_1.16b, TMP1.16b, X_1.16b
119 eor Y_2.16b, TMP2.16b, X_2.16b
120 eor Y_3.16b, TMP3.16b, X_3.16b
121.endm
122
123/*
124 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
125 *
126 * This is the inverse of _speck_round_128bytes().
127 */
128.macro _speck_unround_128bytes n, lanes
129
130 // y ^= x
131 eor TMP0.16b, Y_0.16b, X_0.16b
132 eor TMP1.16b, Y_1.16b, X_1.16b
133 eor TMP2.16b, Y_2.16b, X_2.16b
134 eor TMP3.16b, Y_3.16b, X_3.16b
135
136 // y = ror(y, 3)
137 ushr Y_0.\lanes, TMP0.\lanes, #3
138 ushr Y_1.\lanes, TMP1.\lanes, #3
139 ushr Y_2.\lanes, TMP2.\lanes, #3
140 ushr Y_3.\lanes, TMP3.\lanes, #3
141 sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
142 sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
143 sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
144 sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
145
146 // x ^= k
147 eor X_0.16b, X_0.16b, ROUND_KEY.16b
148 eor X_1.16b, X_1.16b, ROUND_KEY.16b
149 eor X_2.16b, X_2.16b, ROUND_KEY.16b
150 eor X_3.16b, X_3.16b, ROUND_KEY.16b
151
152 // x -= y
153 sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
154 sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
155 sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
156 sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
157
158 // x = rol(x, 8)
159 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
160 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
161 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
162 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
163.endm
164
165.macro _next_xts_tweak next, cur, tmp, n
166.if \n == 64
167 /*
168 * Calculate the next tweak by multiplying the current one by x,
169 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
170 */
171 sshr \tmp\().2d, \cur\().2d, #63
172 and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
173 shl \next\().2d, \cur\().2d, #1
174 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
175 eor \next\().16b, \next\().16b, \tmp\().16b
176.else
177 /*
178 * Calculate the next two tweaks by multiplying the current ones by x^2,
179 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
180 */
181 ushr \tmp\().2d, \cur\().2d, #62
182 shl \next\().2d, \cur\().2d, #2
183 tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
184 eor \next\().16b, \next\().16b, \tmp\().16b
185.endif
186.endm
187
188/*
189 * _speck_xts_crypt() - Speck-XTS encryption/decryption
190 *
191 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
192 * using Speck-XTS, specifically the variant with a block size of '2n' and round
193 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
194 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
195 * nonzero multiple of 128.
196 */
197.macro _speck_xts_crypt n, lanes, decrypting
198
199 /*
200 * If decrypting, modify the ROUND_KEYS parameter to point to the last
201 * round key rather than the first, since for decryption the round keys
202 * are used in reverse order.
203 */
204.if \decrypting
205 mov NROUNDS, NROUNDS /* zero the high 32 bits */
206.if \n == 64
207 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
208 sub ROUND_KEYS, ROUND_KEYS, #8
209.else
210 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
211 sub ROUND_KEYS, ROUND_KEYS, #4
212.endif
213.endif
214
215 // Load the index vector for tbl-based 8-bit rotates
216.if \decrypting
217 ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
218.else
219 ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
220.endif
221
222 // One-time XTS preparation
223.if \n == 64
224 // Load first tweak
225 ld1 {TWEAKV0.16b}, [TWEAK]
226
227 // Load GF(2^128) multiplication table
228 ldr GFMUL_TABLE_Q, .Lgf128mul_table
229.else
230 // Load first tweak
231 ld1 {TWEAKV0.8b}, [TWEAK]
232
233 // Load GF(2^64) multiplication table
234 ldr GFMUL_TABLE_Q, .Lgf64mul_table
235
236 // Calculate second tweak, packing it together with the first
237 ushr TMP0.2d, TWEAKV0.2d, #63
238 shl TMP1.2d, TWEAKV0.2d, #1
239 tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
240 eor TMP0.8b, TMP0.8b, TMP1.8b
241 mov TWEAKV0.d[1], TMP0.d[0]
242.endif
243
244.Lnext_128bytes_\@:
245
246 // Calculate XTS tweaks for next 128 bytes
247 _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
248 _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
249 _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
250 _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
251 _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
252 _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
253 _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
254 _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
255
256 // Load the next source blocks into {X,Y}[0-3]
257 ld1 {X_0.16b-Y_1.16b}, [SRC], #64
258 ld1 {X_2.16b-Y_3.16b}, [SRC], #64
259
260 // XOR the source blocks with their XTS tweaks
261 eor TMP0.16b, X_0.16b, TWEAKV0.16b
262 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
263 eor TMP1.16b, X_1.16b, TWEAKV2.16b
264 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
265 eor TMP2.16b, X_2.16b, TWEAKV4.16b
266 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
267 eor TMP3.16b, X_3.16b, TWEAKV6.16b
268 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
269
270 /*
271 * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
272 * that the X[0-3] registers contain only the second halves of blocks,
273 * and the Y[0-3] registers contain only the first halves of blocks.
274 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
275 */
276 uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
277 uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
278 uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
279 uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
280 uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
281 uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
282 uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
283 uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
284
285 // Do the cipher rounds
286 mov x6, ROUND_KEYS
287 mov w7, NROUNDS
288.Lnext_round_\@:
289.if \decrypting
290 ld1r {ROUND_KEY.\lanes}, [x6]
291 sub x6, x6, #( \n / 8 )
292 _speck_unround_128bytes \n, \lanes
293.else
294 ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
295 _speck_round_128bytes \n, \lanes
296.endif
297 subs w7, w7, #1
298 bne .Lnext_round_\@
299
300 // Re-interleave the 'x' and 'y' elements of each block
301 zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
302 zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
303 zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
304 zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
305 zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
306 zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
307 zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
308 zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
309
310 // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
311 eor X_0.16b, TMP0.16b, TWEAKV0.16b
312 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
313 eor X_1.16b, TMP1.16b, TWEAKV2.16b
314 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
315 eor X_2.16b, TMP2.16b, TWEAKV4.16b
316 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
317 eor X_3.16b, TMP3.16b, TWEAKV6.16b
318 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
319 mov TWEAKV0.16b, TWEAKV_NEXT.16b
320
321 // Store the ciphertext in the destination buffer
322 st1 {X_0.16b-Y_1.16b}, [DST], #64
323 st1 {X_2.16b-Y_3.16b}, [DST], #64
324
325 // Continue if there are more 128-byte chunks remaining
326 subs NBYTES, NBYTES, #128
327 bne .Lnext_128bytes_\@
328
329 // Store the next tweak and return
330.if \n == 64
331 st1 {TWEAKV_NEXT.16b}, [TWEAK]
332.else
333 st1 {TWEAKV_NEXT.8b}, [TWEAK]
334.endif
335 ret
336.endm
337
338ENTRY(speck128_xts_encrypt_neon)
339 _speck_xts_crypt n=64, lanes=2d, decrypting=0
340ENDPROC(speck128_xts_encrypt_neon)
341
342ENTRY(speck128_xts_decrypt_neon)
343 _speck_xts_crypt n=64, lanes=2d, decrypting=1
344ENDPROC(speck128_xts_decrypt_neon)
345
346ENTRY(speck64_xts_encrypt_neon)
347 _speck_xts_crypt n=32, lanes=4s, decrypting=0
348ENDPROC(speck64_xts_encrypt_neon)
349
350ENTRY(speck64_xts_decrypt_neon)
351 _speck_xts_crypt n=32, lanes=4s, decrypting=1
352ENDPROC(speck64_xts_decrypt_neon)
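
The rotate tables at the top of this file encode byte permutations: per 64-bit lane, .Lror64_8_table selects bytes [1,2,3,4,5,6,7,0], which on a little-endian value is exactly a rotate right by 8 bits -- the reason a single tbl can stand in for an ushr/sli pair. A standalone sketch of that identity (assumes little-endian byte order, as on arm64):

#include <stdint.h>
#include <string.h>

static uint64_t tbl_ror8(uint64_t v)
{
	static const uint8_t idx[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };
	uint8_t in[8], out[8];
	int i;

	memcpy(in, &v, 8);		/* little-endian byte view of v */
	for (i = 0; i < 8; i++)
		out[i] = in[idx[i]];	/* what tbl does in each lane */
	memcpy(&v, out, 8);
	return v;			/* equals (v >> 8) | (v << 56) */
}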
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
deleted file mode 100644
index 6e233aeb4ff4..000000000000
--- a/arch/arm64/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,282 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 * (64-bit version; based on the 32-bit version)
5 *
6 * Copyright (c) 2018 Google, Inc
7 */
8
9#include <asm/hwcap.h>
10#include <asm/neon.h>
11#include <asm/simd.h>
12#include <crypto/algapi.h>
13#include <crypto/gf128mul.h>
14#include <crypto/internal/skcipher.h>
15#include <crypto/speck.h>
16#include <crypto/xts.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19
20/* The assembly functions only handle multiples of 128 bytes */
21#define SPECK_NEON_CHUNK_SIZE 128
22
23/* Speck128 */
24
25struct speck128_xts_tfm_ctx {
26 struct speck128_tfm_ctx main_key;
27 struct speck128_tfm_ctx tweak_key;
28};
29
30asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
31 void *dst, const void *src,
32 unsigned int nbytes, void *tweak);
33
34asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
35 void *dst, const void *src,
36 unsigned int nbytes, void *tweak);
37
38typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
39 u8 *, const u8 *);
40typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
41 const void *, unsigned int, void *);
42
43static __always_inline int
44__speck128_xts_crypt(struct skcipher_request *req,
45 speck128_crypt_one_t crypt_one,
46 speck128_xts_crypt_many_t crypt_many)
47{
48 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
49 const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
50 struct skcipher_walk walk;
51 le128 tweak;
52 int err;
53
54 err = skcipher_walk_virt(&walk, req, true);
55
56 crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
57
58 while (walk.nbytes > 0) {
59 unsigned int nbytes = walk.nbytes;
60 u8 *dst = walk.dst.virt.addr;
61 const u8 *src = walk.src.virt.addr;
62
63 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
64 unsigned int count;
65
66 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
67 kernel_neon_begin();
68 (*crypt_many)(ctx->main_key.round_keys,
69 ctx->main_key.nrounds,
70 dst, src, count, &tweak);
71 kernel_neon_end();
72 dst += count;
73 src += count;
74 nbytes -= count;
75 }
76
77 /* Handle any remainder with generic code */
78 while (nbytes >= sizeof(tweak)) {
79 le128_xor((le128 *)dst, (const le128 *)src, &tweak);
80 (*crypt_one)(&ctx->main_key, dst, dst);
81 le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
82 gf128mul_x_ble(&tweak, &tweak);
83
84 dst += sizeof(tweak);
85 src += sizeof(tweak);
86 nbytes -= sizeof(tweak);
87 }
88 err = skcipher_walk_done(&walk, nbytes);
89 }
90
91 return err;
92}
93
94static int speck128_xts_encrypt(struct skcipher_request *req)
95{
96 return __speck128_xts_crypt(req, crypto_speck128_encrypt,
97 speck128_xts_encrypt_neon);
98}
99
100static int speck128_xts_decrypt(struct skcipher_request *req)
101{
102 return __speck128_xts_crypt(req, crypto_speck128_decrypt,
103 speck128_xts_decrypt_neon);
104}
105
106static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
107 unsigned int keylen)
108{
109 struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
110 int err;
111
112 err = xts_verify_key(tfm, key, keylen);
113 if (err)
114 return err;
115
116 keylen /= 2;
117
118 err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
119 if (err)
120 return err;
121
122 return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
123}
124
125/* Speck64 */
126
127struct speck64_xts_tfm_ctx {
128 struct speck64_tfm_ctx main_key;
129 struct speck64_tfm_ctx tweak_key;
130};
131
132asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
133 void *dst, const void *src,
134 unsigned int nbytes, void *tweak);
135
136asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
137 void *dst, const void *src,
138 unsigned int nbytes, void *tweak);
139
140typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
141 u8 *, const u8 *);
142typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
143 const void *, unsigned int, void *);
144
145static __always_inline int
146__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
147 speck64_xts_crypt_many_t crypt_many)
148{
149 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
150 const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
151 struct skcipher_walk walk;
152 __le64 tweak;
153 int err;
154
155 err = skcipher_walk_virt(&walk, req, true);
156
157 crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
158
159 while (walk.nbytes > 0) {
160 unsigned int nbytes = walk.nbytes;
161 u8 *dst = walk.dst.virt.addr;
162 const u8 *src = walk.src.virt.addr;
163
164 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
165 unsigned int count;
166
167 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
168 kernel_neon_begin();
169 (*crypt_many)(ctx->main_key.round_keys,
170 ctx->main_key.nrounds,
171 dst, src, count, &tweak);
172 kernel_neon_end();
173 dst += count;
174 src += count;
175 nbytes -= count;
176 }
177
178 /* Handle any remainder with generic code */
179 while (nbytes >= sizeof(tweak)) {
180 *(__le64 *)dst = *(__le64 *)src ^ tweak;
181 (*crypt_one)(&ctx->main_key, dst, dst);
182 *(__le64 *)dst ^= tweak;
183 tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
184 ((tweak & cpu_to_le64(1ULL << 63)) ?
185 0x1B : 0));
186 dst += sizeof(tweak);
187 src += sizeof(tweak);
188 nbytes -= sizeof(tweak);
189 }
190 err = skcipher_walk_done(&walk, nbytes);
191 }
192
193 return err;
194}
195
196static int speck64_xts_encrypt(struct skcipher_request *req)
197{
198 return __speck64_xts_crypt(req, crypto_speck64_encrypt,
199 speck64_xts_encrypt_neon);
200}
201
202static int speck64_xts_decrypt(struct skcipher_request *req)
203{
204 return __speck64_xts_crypt(req, crypto_speck64_decrypt,
205 speck64_xts_decrypt_neon);
206}
207
208static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
209 unsigned int keylen)
210{
211 struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
212 int err;
213
214 err = xts_verify_key(tfm, key, keylen);
215 if (err)
216 return err;
217
218 keylen /= 2;
219
220 err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
221 if (err)
222 return err;
223
224 return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
225}
226
227static struct skcipher_alg speck_algs[] = {
228 {
229 .base.cra_name = "xts(speck128)",
230 .base.cra_driver_name = "xts-speck128-neon",
231 .base.cra_priority = 300,
232 .base.cra_blocksize = SPECK128_BLOCK_SIZE,
233 .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
234 .base.cra_alignmask = 7,
235 .base.cra_module = THIS_MODULE,
236 .min_keysize = 2 * SPECK128_128_KEY_SIZE,
237 .max_keysize = 2 * SPECK128_256_KEY_SIZE,
238 .ivsize = SPECK128_BLOCK_SIZE,
239 .walksize = SPECK_NEON_CHUNK_SIZE,
240 .setkey = speck128_xts_setkey,
241 .encrypt = speck128_xts_encrypt,
242 .decrypt = speck128_xts_decrypt,
243 }, {
244 .base.cra_name = "xts(speck64)",
245 .base.cra_driver_name = "xts-speck64-neon",
246 .base.cra_priority = 300,
247 .base.cra_blocksize = SPECK64_BLOCK_SIZE,
248 .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
249 .base.cra_alignmask = 7,
250 .base.cra_module = THIS_MODULE,
251 .min_keysize = 2 * SPECK64_96_KEY_SIZE,
252 .max_keysize = 2 * SPECK64_128_KEY_SIZE,
253 .ivsize = SPECK64_BLOCK_SIZE,
254 .walksize = SPECK_NEON_CHUNK_SIZE,
255 .setkey = speck64_xts_setkey,
256 .encrypt = speck64_xts_encrypt,
257 .decrypt = speck64_xts_decrypt,
258 }
259};
260
261static int __init speck_neon_module_init(void)
262{
263 if (!(elf_hwcap & HWCAP_ASIMD))
264 return -ENODEV;
265 return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
266}
267
268static void __exit speck_neon_module_exit(void)
269{
270 crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
271}
272
273module_init(speck_neon_module_init);
274module_exit(speck_neon_module_exit);
275
276MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
277MODULE_LICENSE("GPL");
278MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
279MODULE_ALIAS_CRYPTO("xts(speck128)");
280MODULE_ALIAS_CRYPTO("xts-speck128-neon");
281MODULE_ALIAS_CRYPTO("xts(speck64)");
282MODULE_ALIAS_CRYPTO("xts-speck64-neon");
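
In the 128-bit remainder path above, gf128mul_x_ble() advances the tweak one block: shift the 128-bit value left by one bit and XOR in 0x87 when a bit carries out, i.e. multiply by x modulo x^128 + x^7 + x^2 + x + 1 -- the same constant the assembly keeps in .Lgf128mul_table. A freestanding sketch of that update (assuming the kernel's le128 layout with 'b' as the low half; the real helper lives in crypto/gf128mul.h):

static inline void xts_tweak_mul_x(le128 *r, const le128 *t)
{
	u64 lo = le64_to_cpu(t->b);	/* low 64 bits (assumed layout) */
	u64 hi = le64_to_cpu(t->a);	/* high 64 bits */

	r->b = cpu_to_le64((lo << 1) ^ ((hi >> 63) ? 0x87 : 0));
	r->a = cpu_to_le64((hi << 1) | (lo >> 63));
}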
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 1d5483f6e457..93a3c3c0238c 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -657,7 +657,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 52a0af127951..e3d0efd6397d 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -614,7 +614,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index b3103e51268a..75ac0c76e884 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -635,7 +635,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fb7d651a4cab..c6e492700188 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6b37f5537c39..b00d1c477432 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -616,7 +616,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index c717bf879449..85cac3770d89 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -638,7 +638,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 226c994ce794..b3a5d1e99d27 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -720,7 +720,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b383327fd77a..0ca22608453f 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9783d3deb9e9..8e3d10d12d9c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index a35d10ee10cb..ff7e653ec7fa 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -629,7 +629,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 573bf922d448..612cf46f6d0c 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -607,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index efb27a7fcc55..a6a7bb6dc3fd 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -608,7 +608,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_LZO=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f40600eb1762..5134c71a4937 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_DEFLATE=m
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f3e40ac56d93..59e32623a7ce 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1590,20 +1590,6 @@ config CRYPTO_SM4
 
 	  If unsure, say N.
 
-config CRYPTO_SPECK
-	tristate "Speck cipher algorithm"
-	select CRYPTO_ALGAPI
-	help
-	  Speck is a lightweight block cipher that is tuned for optimal
-	  performance in software (rather than hardware).
-
-	  Speck may not be as secure as AES, and should only be used on systems
-	  where AES is not fast enough.
-
-	  See also: <https://eprint.iacr.org/2013/404.pdf>
-
-	  If unsure, say N.
-
 config CRYPTO_TEA
 	tristate "TEA, XTEA and XETA cipher algorithms"
 	select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 6d1d40eeb964..f6a234d08882 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
 obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
 obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SPECK) += speck.o
 obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
diff --git a/crypto/speck.c b/crypto/speck.c
deleted file mode 100644
index 58aa9f7f91f7..000000000000
--- a/crypto/speck.c
+++ /dev/null
@@ -1,307 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Speck: a lightweight block cipher
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Speck has 10 variants, including 5 block sizes. For now we only implement
8 * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
9 * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
10 * and a key size of K bits. The Speck128 variants are believed to be the most
11 * secure variants, and they use the same block size and key sizes as AES. The
12 * Speck64 variants are less secure, but on 32-bit processors are usually
13 * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
14 * secure and/or not as well suited for implementation on either 32-bit or
15 * 64-bit processors, so are omitted.
16 *
17 * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
18 * https://eprint.iacr.org/2013/404.pdf
19 *
20 * In a correspondence, the Speck designers have also clarified that the words
21 * should be interpreted in little-endian format, and the words should be
22 * ordered such that the first word of each block is 'y' rather than 'x', and
23 * the first key word (rather than the last) becomes the first round key.
24 */
25
26#include <asm/unaligned.h>
27#include <crypto/speck.h>
28#include <linux/bitops.h>
29#include <linux/crypto.h>
30#include <linux/init.h>
31#include <linux/module.h>
32
33/* Speck128 */
34
35static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
36{
37 *x = ror64(*x, 8);
38 *x += *y;
39 *x ^= k;
40 *y = rol64(*y, 3);
41 *y ^= *x;
42}
43
44static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
45{
46 *y ^= *x;
47 *y = ror64(*y, 3);
48 *x ^= k;
49 *x -= *y;
50 *x = rol64(*x, 8);
51}
52
53void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
54 u8 *out, const u8 *in)
55{
56 u64 y = get_unaligned_le64(in);
57 u64 x = get_unaligned_le64(in + 8);
58 int i;
59
60 for (i = 0; i < ctx->nrounds; i++)
61 speck128_round(&x, &y, ctx->round_keys[i]);
62
63 put_unaligned_le64(y, out);
64 put_unaligned_le64(x, out + 8);
65}
66EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
67
68static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
69{
70 crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
71}
72
73void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
74 u8 *out, const u8 *in)
75{
76 u64 y = get_unaligned_le64(in);
77 u64 x = get_unaligned_le64(in + 8);
78 int i;
79
80 for (i = ctx->nrounds - 1; i >= 0; i--)
81 speck128_unround(&x, &y, ctx->round_keys[i]);
82
83 put_unaligned_le64(y, out);
84 put_unaligned_le64(x, out + 8);
85}
86EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
87
88static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
89{
90 crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
91}
92
93int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
94 unsigned int keylen)
95{
96 u64 l[3];
97 u64 k;
98 int i;
99
100 switch (keylen) {
101 case SPECK128_128_KEY_SIZE:
102 k = get_unaligned_le64(key);
103 l[0] = get_unaligned_le64(key + 8);
104 ctx->nrounds = SPECK128_128_NROUNDS;
105 for (i = 0; i < ctx->nrounds; i++) {
106 ctx->round_keys[i] = k;
107 speck128_round(&l[0], &k, i);
108 }
109 break;
110 case SPECK128_192_KEY_SIZE:
111 k = get_unaligned_le64(key);
112 l[0] = get_unaligned_le64(key + 8);
113 l[1] = get_unaligned_le64(key + 16);
114 ctx->nrounds = SPECK128_192_NROUNDS;
115 for (i = 0; i < ctx->nrounds; i++) {
116 ctx->round_keys[i] = k;
117 speck128_round(&l[i % 2], &k, i);
118 }
119 break;
120 case SPECK128_256_KEY_SIZE:
121 k = get_unaligned_le64(key);
122 l[0] = get_unaligned_le64(key + 8);
123 l[1] = get_unaligned_le64(key + 16);
124 l[2] = get_unaligned_le64(key + 24);
125 ctx->nrounds = SPECK128_256_NROUNDS;
126 for (i = 0; i < ctx->nrounds; i++) {
127 ctx->round_keys[i] = k;
128 speck128_round(&l[i % 3], &k, i);
129 }
130 break;
131 default:
132 return -EINVAL;
133 }
134
135 return 0;
136}
137EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
138
139static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
140 unsigned int keylen)
141{
142 return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
143}
144
145/* Speck64 */
146
147static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
148{
149 *x = ror32(*x, 8);
150 *x += *y;
151 *x ^= k;
152 *y = rol32(*y, 3);
153 *y ^= *x;
154}
155
156static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
157{
158 *y ^= *x;
159 *y = ror32(*y, 3);
160 *x ^= k;
161 *x -= *y;
162 *x = rol32(*x, 8);
163}
164
165void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
166 u8 *out, const u8 *in)
167{
168 u32 y = get_unaligned_le32(in);
169 u32 x = get_unaligned_le32(in + 4);
170 int i;
171
172 for (i = 0; i < ctx->nrounds; i++)
173 speck64_round(&x, &y, ctx->round_keys[i]);
174
175 put_unaligned_le32(y, out);
176 put_unaligned_le32(x, out + 4);
177}
178EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
179
180static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
181{
182 crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
183}
184
185void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
186 u8 *out, const u8 *in)
187{
188 u32 y = get_unaligned_le32(in);
189 u32 x = get_unaligned_le32(in + 4);
190 int i;
191
192 for (i = ctx->nrounds - 1; i >= 0; i--)
193 speck64_unround(&x, &y, ctx->round_keys[i]);
194
195 put_unaligned_le32(y, out);
196 put_unaligned_le32(x, out + 4);
197}
198EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
199
200static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
201{
202 crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
203}
204
205int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
206 unsigned int keylen)
207{
208 u32 l[3];
209 u32 k;
210 int i;
211
212 switch (keylen) {
213 case SPECK64_96_KEY_SIZE:
214 k = get_unaligned_le32(key);
215 l[0] = get_unaligned_le32(key + 4);
216 l[1] = get_unaligned_le32(key + 8);
217 ctx->nrounds = SPECK64_96_NROUNDS;
218 for (i = 0; i < ctx->nrounds; i++) {
219 ctx->round_keys[i] = k;
220 speck64_round(&l[i % 2], &k, i);
221 }
222 break;
223 case SPECK64_128_KEY_SIZE:
224 k = get_unaligned_le32(key);
225 l[0] = get_unaligned_le32(key + 4);
226 l[1] = get_unaligned_le32(key + 8);
227 l[2] = get_unaligned_le32(key + 12);
228 ctx->nrounds = SPECK64_128_NROUNDS;
229 for (i = 0; i < ctx->nrounds; i++) {
230 ctx->round_keys[i] = k;
231 speck64_round(&l[i % 3], &k, i);
232 }
233 break;
234 default:
235 return -EINVAL;
236 }
237
238 return 0;
239}
240EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
241
242static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
243 unsigned int keylen)
244{
245 return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
246}
247
248/* Algorithm definitions */
249
250static struct crypto_alg speck_algs[] = {
251 {
252 .cra_name = "speck128",
253 .cra_driver_name = "speck128-generic",
254 .cra_priority = 100,
255 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
256 .cra_blocksize = SPECK128_BLOCK_SIZE,
257 .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
258 .cra_module = THIS_MODULE,
259 .cra_u = {
260 .cipher = {
261 .cia_min_keysize = SPECK128_128_KEY_SIZE,
262 .cia_max_keysize = SPECK128_256_KEY_SIZE,
263 .cia_setkey = speck128_setkey,
264 .cia_encrypt = speck128_encrypt,
265 .cia_decrypt = speck128_decrypt
266 }
267 }
268 }, {
269 .cra_name = "speck64",
270 .cra_driver_name = "speck64-generic",
271 .cra_priority = 100,
272 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
273 .cra_blocksize = SPECK64_BLOCK_SIZE,
274 .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
275 .cra_module = THIS_MODULE,
276 .cra_u = {
277 .cipher = {
278 .cia_min_keysize = SPECK64_96_KEY_SIZE,
279 .cia_max_keysize = SPECK64_128_KEY_SIZE,
280 .cia_setkey = speck64_setkey,
281 .cia_encrypt = speck64_encrypt,
282 .cia_decrypt = speck64_decrypt
283 }
284 }
285 }
286};
287
288static int __init speck_module_init(void)
289{
290 return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
291}
292
293static void __exit speck_module_exit(void)
294{
295 crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
296}
297
298module_init(speck_module_init);
299module_exit(speck_module_exit);
300
301MODULE_DESCRIPTION("Speck block cipher (generic)");
302MODULE_LICENSE("GPL");
303MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
304MODULE_ALIAS_CRYPTO("speck128");
305MODULE_ALIAS_CRYPTO("speck128-generic");
306MODULE_ALIAS_CRYPTO("speck64");
307MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a1d42245082a..1c9bf38e59ea 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3038,18 +3038,6 @@ static const struct alg_test_desc alg_test_descs[] = {
3038 .cipher = __VECS(sm4_tv_template) 3038 .cipher = __VECS(sm4_tv_template)
3039 } 3039 }
3040 }, { 3040 }, {
3041 .alg = "ecb(speck128)",
3042 .test = alg_test_skcipher,
3043 .suite = {
3044 .cipher = __VECS(speck128_tv_template)
3045 }
3046 }, {
3047 .alg = "ecb(speck64)",
3048 .test = alg_test_skcipher,
3049 .suite = {
3050 .cipher = __VECS(speck64_tv_template)
3051 }
3052 }, {
3053 .alg = "ecb(tea)", 3041 .alg = "ecb(tea)",
3054 .test = alg_test_skcipher, 3042 .test = alg_test_skcipher,
3055 .suite = { 3043 .suite = {
@@ -3577,18 +3565,6 @@ static const struct alg_test_desc alg_test_descs[] = {
3577 .cipher = __VECS(serpent_xts_tv_template) 3565 .cipher = __VECS(serpent_xts_tv_template)
3578 } 3566 }
3579 }, { 3567 }, {
3580 .alg = "xts(speck128)",
3581 .test = alg_test_skcipher,
3582 .suite = {
3583 .cipher = __VECS(speck128_xts_tv_template)
3584 }
3585 }, {
3586 .alg = "xts(speck64)",
3587 .test = alg_test_skcipher,
3588 .suite = {
3589 .cipher = __VECS(speck64_xts_tv_template)
3590 }
3591 }, {
3592 .alg = "xts(twofish)", 3568 .alg = "xts(twofish)",
3593 .test = alg_test_skcipher, 3569 .test = alg_test_skcipher,
3594 .suite = { 3570 .suite = {
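
These alg_test_descs entries were the only users of the speck*_tv_template arrays, which is why the arrays themselves can be dropped wholesale in the testmgr.h hunk below. For context, __VECS() simply pairs a test-vector array with its element count; its definition near the top of testmgr.c is, as best recalled (verify against the tree):

#define __VECS(tv)      { .vecs = tv, .count = ARRAY_SIZE(tv) }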
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 173111c70746..0b3d7cadbe93 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -10198,744 +10198,6 @@ static const struct cipher_testvec sm4_tv_template[] = {
10198 } 10198 }
10199}; 10199};
10200 10200
10201/*
10202 * Speck test vectors taken from the original paper:
10203 * "The Simon and Speck Families of Lightweight Block Ciphers"
10204 * https://eprint.iacr.org/2013/404.pdf
10205 *
10206 * Note that the paper does not make byte and word order clear. But it was
10207 * confirmed with the authors that the intended orders are little endian byte
10208 * order and (y, x) word order. Equivalently, the printed test vectors, when
10209 * looking at only the bytes (ignoring the whitespace that divides them into
10210 * words), are backwards: the left-most byte is actually the one with the
10211 * highest memory address, while the right-most byte is actually the one with
10212 * the lowest memory address.
10213 */
10214
10215static const struct cipher_testvec speck128_tv_template[] = {
10216 { /* Speck128/128 */
10217 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
10218 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
10219 .klen = 16,
10220 .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74"
10221 "\x20\x65\x71\x75\x69\x76\x61\x6c",
10222 .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
10223 "\x65\x32\x78\x79\x51\x98\x5d\xa6",
10224 .len = 16,
10225 }, { /* Speck128/192 */
10226 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
10227 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10228 "\x10\x11\x12\x13\x14\x15\x16\x17",
10229 .klen = 24,
10230 .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
10231 "\x68\x69\x65\x66\x20\x48\x61\x72",
10232 .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
10233 "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
10234 .len = 16,
10235 }, { /* Speck128/256 */
10236 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
10237 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10238 "\x10\x11\x12\x13\x14\x15\x16\x17"
10239 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
10240 .klen = 32,
10241 .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
10242 "\x49\x6e\x20\x74\x68\x6f\x73\x65",
10243 .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
10244 "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
10245 .len = 16,
10246 },
10247};
10248
10249/*
10250 * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
10251 * ciphertext recomputed with Speck128 as the cipher
10252 */
10253static const struct cipher_testvec speck128_xts_tv_template[] = {
10254 {
10255 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
10256 "\x00\x00\x00\x00\x00\x00\x00\x00"
10257 "\x00\x00\x00\x00\x00\x00\x00\x00"
10258 "\x00\x00\x00\x00\x00\x00\x00\x00",
10259 .klen = 32,
10260 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10261 "\x00\x00\x00\x00\x00\x00\x00\x00",
10262 .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
10263 "\x00\x00\x00\x00\x00\x00\x00\x00"
10264 "\x00\x00\x00\x00\x00\x00\x00\x00"
10265 "\x00\x00\x00\x00\x00\x00\x00\x00",
10266 .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
10267 "\x3b\x99\x4a\x64\x74\x77\xac\xed"
10268 "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
10269 "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
10270 .len = 32,
10271 }, {
10272 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
10273 "\x11\x11\x11\x11\x11\x11\x11\x11"
10274 "\x22\x22\x22\x22\x22\x22\x22\x22"
10275 "\x22\x22\x22\x22\x22\x22\x22\x22",
10276 .klen = 32,
10277 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
10278 "\x00\x00\x00\x00\x00\x00\x00\x00",
10279 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
10280 "\x44\x44\x44\x44\x44\x44\x44\x44"
10281 "\x44\x44\x44\x44\x44\x44\x44\x44"
10282 "\x44\x44\x44\x44\x44\x44\x44\x44",
10283 .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
10284 "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
10285 "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
10286 "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
10287 .len = 32,
10288 }, {
10289 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
10290 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
10291 "\x22\x22\x22\x22\x22\x22\x22\x22"
10292 "\x22\x22\x22\x22\x22\x22\x22\x22",
10293 .klen = 32,
10294 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
10295 "\x00\x00\x00\x00\x00\x00\x00\x00",
10296 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
10297 "\x44\x44\x44\x44\x44\x44\x44\x44"
10298 "\x44\x44\x44\x44\x44\x44\x44\x44"
10299 "\x44\x44\x44\x44\x44\x44\x44\x44",
10300 .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
10301 "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
10302 "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
10303 "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
10304 .len = 32,
10305 }, {
10306 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10307 "\x23\x53\x60\x28\x74\x71\x35\x26"
10308 "\x31\x41\x59\x26\x53\x58\x97\x93"
10309 "\x23\x84\x62\x64\x33\x83\x27\x95",
10310 .klen = 32,
10311 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10312 "\x00\x00\x00\x00\x00\x00\x00\x00",
10313 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10314 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10315 "\x10\x11\x12\x13\x14\x15\x16\x17"
10316 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10317 "\x20\x21\x22\x23\x24\x25\x26\x27"
10318 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10319 "\x30\x31\x32\x33\x34\x35\x36\x37"
10320 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10321 "\x40\x41\x42\x43\x44\x45\x46\x47"
10322 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10323 "\x50\x51\x52\x53\x54\x55\x56\x57"
10324 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10325 "\x60\x61\x62\x63\x64\x65\x66\x67"
10326 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10327 "\x70\x71\x72\x73\x74\x75\x76\x77"
10328 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10329 "\x80\x81\x82\x83\x84\x85\x86\x87"
10330 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10331 "\x90\x91\x92\x93\x94\x95\x96\x97"
10332 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10333 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10334 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10335 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10336 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10337 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10338 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10339 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10340 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10341 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10342 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10343 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10344 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10345 "\x00\x01\x02\x03\x04\x05\x06\x07"
10346 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10347 "\x10\x11\x12\x13\x14\x15\x16\x17"
10348 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10349 "\x20\x21\x22\x23\x24\x25\x26\x27"
10350 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10351 "\x30\x31\x32\x33\x34\x35\x36\x37"
10352 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10353 "\x40\x41\x42\x43\x44\x45\x46\x47"
10354 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10355 "\x50\x51\x52\x53\x54\x55\x56\x57"
10356 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10357 "\x60\x61\x62\x63\x64\x65\x66\x67"
10358 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10359 "\x70\x71\x72\x73\x74\x75\x76\x77"
10360 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10361 "\x80\x81\x82\x83\x84\x85\x86\x87"
10362 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10363 "\x90\x91\x92\x93\x94\x95\x96\x97"
10364 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10365 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10366 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10367 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10368 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10369 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10370 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10371 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10372 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10373 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10374 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10375 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10376 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10377 .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
10378 "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
10379 "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
10380 "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
10381 "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
10382 "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
10383 "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
10384 "\x19\xc5\x58\x84\x63\xb9\x12\x68"
10385 "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
10386 "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
10387 "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
10388 "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
10389 "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
10390 "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
10391 "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
10392 "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
10393 "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
10394 "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
10395 "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
10396 "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
10397 "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
10398 "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
10399 "\xa5\x20\xac\x24\x1c\x73\x59\x73"
10400 "\x58\x61\x3a\x87\x58\xb3\x20\x56"
10401 "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
10402 "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
10403 "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
10404 "\x09\x35\x71\x50\x65\xac\x92\xe3"
10405 "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
10406 "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
10407 "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
10408 "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
10409 "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
10410 "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
10411 "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
10412 "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
10413 "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
10414 "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
10415 "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
10416 "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
10417 "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
10418 "\x87\x30\xac\xd5\xea\x73\x49\x10"
10419 "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
10420 "\x66\x02\x35\x3d\x60\x06\x36\x4f"
10421 "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
10422 "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
10423 "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
10424 "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
10425 "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
10426 "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
10427 "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
10428 "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
10429 "\x51\x66\x02\x69\x04\x97\x36\xd4"
10430 "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
10431 "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
10432 "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
10433 "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
10434 "\x03\xe7\x05\x39\xf5\x05\x26\xee"
10435 "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
10436 "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
10437 "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
10438 "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
10439 "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
10440 "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
10441 .len = 512,
10442 }, {
10443 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10444 "\x23\x53\x60\x28\x74\x71\x35\x26"
10445 "\x62\x49\x77\x57\x24\x70\x93\x69"
10446 "\x99\x59\x57\x49\x66\x96\x76\x27"
10447 "\x31\x41\x59\x26\x53\x58\x97\x93"
10448 "\x23\x84\x62\x64\x33\x83\x27\x95"
10449 "\x02\x88\x41\x97\x16\x93\x99\x37"
10450 "\x51\x05\x82\x09\x74\x94\x45\x92",
10451 .klen = 64,
10452 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
10453 "\x00\x00\x00\x00\x00\x00\x00\x00",
10454 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10455 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10456 "\x10\x11\x12\x13\x14\x15\x16\x17"
10457 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10458 "\x20\x21\x22\x23\x24\x25\x26\x27"
10459 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10460 "\x30\x31\x32\x33\x34\x35\x36\x37"
10461 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10462 "\x40\x41\x42\x43\x44\x45\x46\x47"
10463 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10464 "\x50\x51\x52\x53\x54\x55\x56\x57"
10465 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10466 "\x60\x61\x62\x63\x64\x65\x66\x67"
10467 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10468 "\x70\x71\x72\x73\x74\x75\x76\x77"
10469 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10470 "\x80\x81\x82\x83\x84\x85\x86\x87"
10471 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10472 "\x90\x91\x92\x93\x94\x95\x96\x97"
10473 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10474 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10475 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10476 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10477 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10478 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10479 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10480 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10481 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10482 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10483 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10484 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10485 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10486 "\x00\x01\x02\x03\x04\x05\x06\x07"
10487 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10488 "\x10\x11\x12\x13\x14\x15\x16\x17"
10489 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10490 "\x20\x21\x22\x23\x24\x25\x26\x27"
10491 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10492 "\x30\x31\x32\x33\x34\x35\x36\x37"
10493 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10494 "\x40\x41\x42\x43\x44\x45\x46\x47"
10495 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10496 "\x50\x51\x52\x53\x54\x55\x56\x57"
10497 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10498 "\x60\x61\x62\x63\x64\x65\x66\x67"
10499 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10500 "\x70\x71\x72\x73\x74\x75\x76\x77"
10501 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10502 "\x80\x81\x82\x83\x84\x85\x86\x87"
10503 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10504 "\x90\x91\x92\x93\x94\x95\x96\x97"
10505 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10506 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10507 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10508 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10509 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10510 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10511 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10512 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10513 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10514 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10515 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10516 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10517 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10518 .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
10519 "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
10520 "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
10521 "\x92\x99\xde\xd3\x76\xed\xcd\x63"
10522 "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
10523 "\x79\x36\x31\x19\x62\xae\x10\x7e"
10524 "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
10525 "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
10526 "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
10527 "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
10528 "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
10529 "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
10530 "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
10531 "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
10532 "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
10533 "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
10534 "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
10535 "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
10536 "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
10537 "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
10538 "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
10539 "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
10540 "\xac\xa5\xd0\x38\xef\x19\x56\x53"
10541 "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
10542 "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
10543 "\xed\x22\x34\x1c\x5d\xed\x17\x06"
10544 "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
10545 "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
10546 "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
10547 "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
10548 "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
10549 "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
10550 "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
10551 "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
10552 "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
10553 "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
10554 "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
10555 "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
10556 "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
10557 "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
10558 "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
10559 "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
10560 "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
10561 "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
10562 "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
10563 "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
10564 "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
10565 "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
10566 "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
10567 "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
10568 "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
10569 "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
10570 "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
10571 "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
10572 "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
10573 "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
10574 "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
10575 "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
10576 "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
10577 "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
10578 "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
10579 "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
10580 "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
10581 "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
10582 .len = 512,
10583 .also_non_np = 1,
10584 .np = 3,
10585 .tap = { 512 - 20, 4, 16 },
10586 }
10587};
10588
10589static const struct cipher_testvec speck64_tv_template[] = {
10590 { /* Speck64/96 */
10591 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
10592 "\x10\x11\x12\x13",
10593 .klen = 12,
10594 .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74",
10595 .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
10596 .len = 8,
10597 }, { /* Speck64/128 */
10598 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
10599 "\x10\x11\x12\x13\x18\x19\x1a\x1b",
10600 .klen = 16,
10601 .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
10602 .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
10603 .len = 8,
10604 },
10605};
10606
10607/*
10608 * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the
10609 * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted
10610 */
10611static const struct cipher_testvec speck64_xts_tv_template[] = {
10612 {
10613 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
10614 "\x00\x00\x00\x00\x00\x00\x00\x00"
10615 "\x00\x00\x00\x00\x00\x00\x00\x00",
10616 .klen = 24,
10617 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10618 "\x00\x00\x00\x00\x00\x00\x00\x00",
10619 .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
10620 "\x00\x00\x00\x00\x00\x00\x00\x00"
10621 "\x00\x00\x00\x00\x00\x00\x00\x00"
10622 "\x00\x00\x00\x00\x00\x00\x00\x00",
10623 .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
10624 "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
10625 "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
10626 "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
10627 .len = 32,
10628 }, {
10629 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
10630 "\x11\x11\x11\x11\x11\x11\x11\x11"
10631 "\x22\x22\x22\x22\x22\x22\x22\x22",
10632 .klen = 24,
10633 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
10634 "\x00\x00\x00\x00\x00\x00\x00\x00",
10635 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
10636 "\x44\x44\x44\x44\x44\x44\x44\x44"
10637 "\x44\x44\x44\x44\x44\x44\x44\x44"
10638 "\x44\x44\x44\x44\x44\x44\x44\x44",
10639 .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
10640 "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
10641 "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
10642 "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
10643 .len = 32,
10644 }, {
10645 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
10646 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
10647 "\x22\x22\x22\x22\x22\x22\x22\x22",
10648 .klen = 24,
10649 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
10650 "\x00\x00\x00\x00\x00\x00\x00\x00",
10651 .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
10652 "\x44\x44\x44\x44\x44\x44\x44\x44"
10653 "\x44\x44\x44\x44\x44\x44\x44\x44"
10654 "\x44\x44\x44\x44\x44\x44\x44\x44",
10655 .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
10656 "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
10657 "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
10658 "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
10659 .len = 32,
10660 }, {
10661 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10662 "\x23\x53\x60\x28\x74\x71\x35\x26"
10663 "\x31\x41\x59\x26\x53\x58\x97\x93",
10664 .klen = 24,
10665 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
10666 "\x00\x00\x00\x00\x00\x00\x00\x00",
10667 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10668 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10669 "\x10\x11\x12\x13\x14\x15\x16\x17"
10670 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10671 "\x20\x21\x22\x23\x24\x25\x26\x27"
10672 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10673 "\x30\x31\x32\x33\x34\x35\x36\x37"
10674 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10675 "\x40\x41\x42\x43\x44\x45\x46\x47"
10676 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10677 "\x50\x51\x52\x53\x54\x55\x56\x57"
10678 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10679 "\x60\x61\x62\x63\x64\x65\x66\x67"
10680 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10681 "\x70\x71\x72\x73\x74\x75\x76\x77"
10682 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10683 "\x80\x81\x82\x83\x84\x85\x86\x87"
10684 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10685 "\x90\x91\x92\x93\x94\x95\x96\x97"
10686 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10687 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10688 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10689 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10690 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10691 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10692 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10693 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10694 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10695 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10696 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10697 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10698 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10699 "\x00\x01\x02\x03\x04\x05\x06\x07"
10700 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10701 "\x10\x11\x12\x13\x14\x15\x16\x17"
10702 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10703 "\x20\x21\x22\x23\x24\x25\x26\x27"
10704 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10705 "\x30\x31\x32\x33\x34\x35\x36\x37"
10706 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10707 "\x40\x41\x42\x43\x44\x45\x46\x47"
10708 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10709 "\x50\x51\x52\x53\x54\x55\x56\x57"
10710 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10711 "\x60\x61\x62\x63\x64\x65\x66\x67"
10712 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10713 "\x70\x71\x72\x73\x74\x75\x76\x77"
10714 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10715 "\x80\x81\x82\x83\x84\x85\x86\x87"
10716 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10717 "\x90\x91\x92\x93\x94\x95\x96\x97"
10718 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10719 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10720 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10721 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10722 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10723 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10724 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10725 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10726 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10727 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10728 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10729 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10730 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10731 .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
10732 "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
10733 "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
10734 "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
10735 "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
10736 "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
10737 "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
10738 "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
10739 "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
10740 "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
10741 "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
10742 "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
10743 "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
10744 "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
10745 "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
10746 "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
10747 "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
10748 "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
10749 "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
10750 "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
10751 "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
10752 "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
10753 "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
10754 "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
10755 "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
10756 "\x07\xff\xf3\x72\x74\x48\xb5\x40"
10757 "\x50\xb5\xdd\x90\x43\x31\x18\x15"
10758 "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
10759 "\x29\x93\x90\x8b\xda\x07\xf0\x35"
10760 "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
10761 "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
10762 "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
10763 "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
10764 "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
10765 "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
10766 "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
10767 "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
10768 "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
10769 "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
10770 "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
10771 "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
10772 "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
10773 "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
10774 "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
10775 "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
10776 "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
10777 "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
10778 "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
10779 "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
10780 "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
10781 "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
10782 "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
10783 "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
10784 "\x59\xda\xee\x1a\x22\x18\xda\x0d"
10785 "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
10786 "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
10787 "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
10788 "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
10789 "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
10790 "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
10791 "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
10792 "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
10793 "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
10794 "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
10795 .len = 512,
10796 }, {
10797 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
10798 "\x23\x53\x60\x28\x74\x71\x35\x26"
10799 "\x62\x49\x77\x57\x24\x70\x93\x69"
10800 "\x99\x59\x57\x49\x66\x96\x76\x27",
10801 .klen = 32,
10802 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
10803 "\x00\x00\x00\x00\x00\x00\x00\x00",
10804 .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
10805 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10806 "\x10\x11\x12\x13\x14\x15\x16\x17"
10807 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10808 "\x20\x21\x22\x23\x24\x25\x26\x27"
10809 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10810 "\x30\x31\x32\x33\x34\x35\x36\x37"
10811 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10812 "\x40\x41\x42\x43\x44\x45\x46\x47"
10813 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10814 "\x50\x51\x52\x53\x54\x55\x56\x57"
10815 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10816 "\x60\x61\x62\x63\x64\x65\x66\x67"
10817 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10818 "\x70\x71\x72\x73\x74\x75\x76\x77"
10819 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10820 "\x80\x81\x82\x83\x84\x85\x86\x87"
10821 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10822 "\x90\x91\x92\x93\x94\x95\x96\x97"
10823 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10824 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10825 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10826 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10827 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10828 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10829 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10830 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10831 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10832 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10833 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10834 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10835 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
10836 "\x00\x01\x02\x03\x04\x05\x06\x07"
10837 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
10838 "\x10\x11\x12\x13\x14\x15\x16\x17"
10839 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
10840 "\x20\x21\x22\x23\x24\x25\x26\x27"
10841 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
10842 "\x30\x31\x32\x33\x34\x35\x36\x37"
10843 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
10844 "\x40\x41\x42\x43\x44\x45\x46\x47"
10845 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
10846 "\x50\x51\x52\x53\x54\x55\x56\x57"
10847 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
10848 "\x60\x61\x62\x63\x64\x65\x66\x67"
10849 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
10850 "\x70\x71\x72\x73\x74\x75\x76\x77"
10851 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
10852 "\x80\x81\x82\x83\x84\x85\x86\x87"
10853 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
10854 "\x90\x91\x92\x93\x94\x95\x96\x97"
10855 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
10856 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
10857 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
10858 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
10859 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
10860 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
10861 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
10862 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
10863 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
10864 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
10865 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
10866 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
10867 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
10868 .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
10869 "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
10870 "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
10871 "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
10872 "\x78\x16\xea\x80\xdb\x33\x75\x94"
10873 "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
10874 "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
10875 "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
10876 "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
10877 "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
10878 "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
10879 "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
10880 "\x84\x5e\x46\xed\x20\x89\xb6\x44"
10881 "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
10882 "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
10883 "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
10884 "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
10885 "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
10886 "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
10887 "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
10888 "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
10889 "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
10890 "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
10891 "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
10892 "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
10893 "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
10894 "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
10895 "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
10896 "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
10897 "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
10898 "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
10899 "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
10900 "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
10901 "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
10902 "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
10903 "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
10904 "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
10905 "\x52\x7f\x29\x51\x94\x20\x67\x3c"
10906 "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
10907 "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
10908 "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
10909 "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
10910 "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
10911 "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
10912 "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
10913 "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
10914 "\x65\x53\x0f\x41\x11\xbd\x98\x99"
10915 "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
10916 "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
10917 "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
10918 "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
10919 "\x63\x51\x34\x9d\x96\x12\xae\xce"
10920 "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
10921 "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
10922 "\x54\x27\x74\xbb\x10\x86\x57\x46"
10923 "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
10924 "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
10925 "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
10926 "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
10927 "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
10928 "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
10929 "\x9b\x63\x76\x32\x2f\x19\x72\x10"
10930 "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
10931 "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
10932 .len = 512,
10933 .also_non_np = 1,
10934 .np = 3,
10935 .tap = { 512 - 20, 4, 16 },
10936 }
10937};
10938
10939/* Cast6 test vectors from RFC 2612 */ 10201/* Cast6 test vectors from RFC 2612 */
10940static const struct cipher_testvec cast6_tv_template[] = { 10202static const struct cipher_testvec cast6_tv_template[] = {
10941 { 10203 {
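
The ECB vectors deleted above can be checked without a kernel. A self-contained userspace sketch (not part of the patch) that reproduces the Speck128/128 entry, following the deleted crypto/speck.c's conventions: little-endian words, (y, x) word order, and a key schedule generated by the round function itself:

/*
 * Userspace check of the Speck128/128 vector above.  y comes from
 * bytes 0..7 of each block, x from bytes 8..15, both little-endian,
 * exactly as in the deleted crypto/speck.c.
 */
#include <stdint.h>
#include <stdio.h>

static void round_fn(uint64_t *x, uint64_t *y, uint64_t k)
{
        *x = (*x >> 8) | (*x << 56);    /* ror64(*x, 8) */
        *x += *y;
        *x ^= k;
        *y = (*y << 3) | (*y >> 61);    /* rol64(*y, 3) */
        *y ^= *x;
}

int main(void)
{
        /* .key: bytes 00 01 .. 0f as two little-endian words */
        uint64_t k = 0x0706050403020100ULL;     /* round key 0 */
        uint64_t l = 0x0f0e0d0c0b0a0908ULL;     /* schedule state */
        /* .ptext: " made it equival" as (y, x) */
        uint64_t y = 0x7469206465616d20ULL;
        uint64_t x = 0x6c61766975716520ULL;

        for (uint64_t i = 0; i < 32; i++) {     /* SPECK128_128_NROUNDS */
                round_fn(&x, &y, k);            /* encrypt with round key i */
                round_fn(&l, &k, i);            /* derive round key i + 1 */
        }

        /* .ctext bytes 18 0d 57 5c df fe 60 78 65 32 78 79 51 98 5d a6
         * correspond to y = 0x7860fedf5c570d18, x = 0xa65d985179783265.
         */
        printf("y = %016llx, x = %016llx\n",
               (unsigned long long)y, (unsigned long long)x);
        return 0;
}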
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 39c20ef26db4..79debfc9cef9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -83,10 +83,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
83 filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) 83 filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
84 return true; 84 return true;
85 85
86 if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
87 filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
88 return true;
89
90 return false; 86 return false;
91} 87}
92 88
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index e997ca51192f..7874c9bb2fc5 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -174,16 +174,6 @@ static struct fscrypt_mode {
174 .cipher_str = "cts(cbc(aes))", 174 .cipher_str = "cts(cbc(aes))",
175 .keysize = 16, 175 .keysize = 16,
176 }, 176 },
177 [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
178 .friendly_name = "Speck128/256-XTS",
179 .cipher_str = "xts(speck128)",
180 .keysize = 64,
181 },
182 [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
183 .friendly_name = "Speck128/256-CTS-CBC",
184 .cipher_str = "cts(cbc(speck128))",
185 .keysize = 32,
186 },
187}; 177};
188 178
189static struct fscrypt_mode * 179static struct fscrypt_mode *
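
Note the keysize asymmetry in the removed table: xts(speck128) asked for 64 bytes while cts(cbc(speck128)) asked for 32. XTS consumes a double-length key, which the crypto layer splits into one Speck128/256 key for the data blocks and a second for encrypting the sector tweak. A rough sketch of that data path, parameterized over the block cipher since crypto_speck128_encrypt() is gone; xts_encrypt(), block128_fn, and gf_double() are hypothetical names, and len is assumed to be a multiple of 16 (no ciphertext stealing), which holds for fscrypt's data units:

/*
 * Sketch of the XTS encrypt direction, showing why the removed
 * xts(speck128) mode needed keysize 64: two full cipher keys, one
 * for data blocks and one for the tweak.
 */
#include <stddef.h>
#include <stdint.h>

typedef void (*block128_fn)(const void *key, uint8_t out[16],
                            const uint8_t in[16]);

/* Multiply the tweak by x in GF(2^128): little-endian, polynomial 0x87 */
static void gf_double(uint8_t t[16])
{
        int carry = t[15] >> 7;

        for (int i = 15; i > 0; i--)
                t[i] = (uint8_t)((t[i] << 1) | (t[i - 1] >> 7));
        t[0] = (uint8_t)((t[0] << 1) ^ (carry ? 0x87 : 0x00));
}

void xts_encrypt(block128_fn cipher,
                 const void *data_key, const void *tweak_key,
                 const uint8_t iv[16],
                 uint8_t *out, const uint8_t *in, size_t len)
{
        uint8_t t[16], buf[16];

        cipher(tweak_key, t, iv);               /* T = E_K2(sector IV) */
        for (size_t off = 0; off < len; off += 16) {
                for (int i = 0; i < 16; i++)    /* C = E_K1(P ^ T) ^ T */
                        buf[i] = in[off + i] ^ t[i];
                cipher(data_key, buf, buf);
                for (int i = 0; i < 16; i++)
                        out[off + i] = buf[i] ^ t[i];
                gf_double(t);                   /* tweak for next block */
        }
}

With the removed speck128 plugged in as cipher, data_key would be the first 32 bytes of fscrypt's 64-byte derived key and tweak_key the second 32, matching how the kernel's xts template splits its key.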
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc952d405..000000000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Common values for the Speck algorithm
4 */
5
6#ifndef _CRYPTO_SPECK_H
7#define _CRYPTO_SPECK_H
8
9#include <linux/types.h>
10
11/* Speck128 */
12
13#define SPECK128_BLOCK_SIZE 16
14
15#define SPECK128_128_KEY_SIZE 16
16#define SPECK128_128_NROUNDS 32
17
18#define SPECK128_192_KEY_SIZE 24
19#define SPECK128_192_NROUNDS 33
20
21#define SPECK128_256_KEY_SIZE 32
22#define SPECK128_256_NROUNDS 34
23
24struct speck128_tfm_ctx {
25 u64 round_keys[SPECK128_256_NROUNDS];
26 int nrounds;
27};
28
29void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
30 u8 *out, const u8 *in);
31
32void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
33 u8 *out, const u8 *in);
34
35int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
36 unsigned int keysize);
37
38/* Speck64 */
39
40#define SPECK64_BLOCK_SIZE 8
41
42#define SPECK64_96_KEY_SIZE 12
43#define SPECK64_96_NROUNDS 26
44
45#define SPECK64_128_KEY_SIZE 16
46#define SPECK64_128_NROUNDS 27
47
48struct speck64_tfm_ctx {
49 u32 round_keys[SPECK64_128_NROUNDS];
50 int nrounds;
51};
52
53void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
54 u8 *out, const u8 *in);
55
56void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
57 u8 *out, const u8 *in);
58
59int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
60 unsigned int keysize);
61
62#endif /* _CRYPTO_SPECK_H */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 73e01918f996..a441ea1bfe6d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -279,8 +279,8 @@ struct fsxattr {
279#define FS_ENCRYPTION_MODE_AES_256_CTS 4 279#define FS_ENCRYPTION_MODE_AES_256_CTS 4
280#define FS_ENCRYPTION_MODE_AES_128_CBC 5 280#define FS_ENCRYPTION_MODE_AES_128_CBC 5
281#define FS_ENCRYPTION_MODE_AES_128_CTS 6 281#define FS_ENCRYPTION_MODE_AES_128_CTS 6
282#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 282#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
283#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 283#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
284 284
285struct fscrypt_policy { 285struct fscrypt_policy {
286 __u8 version; 286 __u8 version;
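
The two mode numbers are commented as removed rather than deleted outright because they are userspace ABI: reassigning 7 or 8 to a future mode would silently change what an old binary asks for. A hedged sketch of the userspace call these constants fed into (the v1 fscrypt_policy layout is as recalled from <linux/fs.h>; verify against your headers), which still compiles after this patch but is now rejected by the kernel:

/*
 * Userspace sketch, not from the patch: how the removed modes were
 * requested via FS_IOC_SET_ENCRYPTION_POLICY.  The constants still
 * exist, so this still compiles, but the ioctl now fails with EINVAL
 * because fscrypt_valid_enc_modes() no longer accepts the Speck pair
 * (see the fs/crypto hunks above).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int set_speck_policy(int dir_fd, const unsigned char key_desc[8])
{
        struct fscrypt_policy p;

        memset(&p, 0, sizeof(p));
        p.version = 0;                          /* v1 policy */
        p.contents_encryption_mode = FS_ENCRYPTION_MODE_SPECK128_256_XTS;
        p.filenames_encryption_mode = FS_ENCRYPTION_MODE_SPECK128_256_CTS;
        p.flags = 0;                            /* default filename padding */
        memcpy(p.master_key_descriptor, key_desc, 8);

        return ioctl(dir_fd, FS_IOC_SET_ENCRYPTION_POLICY, &p);
}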