author    Jussi Kivilinna <jussi.kivilinna@mbnet.fi>   2012-06-18 07:07:39 -0400
committer Herbert Xu <herbert@gondor.apana.org.au>     2012-06-27 02:42:02 -0400
commit    a7378d4e552ac139ae1cbbdfebfeaa9b18c948d0 (patch)
tree      dc8960d5ce50ac27f05e02151c7744bb313286b4 /arch/x86/crypto/twofish_avx_glue.c
parent    414cb5e7cc6e258fe36e2c3cc3ef1ff2e246c0e3 (diff)
crypto: twofish-avx - remove duplicated glue code and use shared glue code from glue_helper
Now that shared glue code is available, convert twofish-avx to use it.

Cc: Johannes Goetzfried <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
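For context, the shared helper removes the pattern this driver had duplicated: per-mode loops that try the widest batch first (8 parallel blocks under AVX), then 3-way, then one block at a time. With glue_helper, each mode instead supplies a table of {num_blocks, fn} entries and a generic routine walks it. Below is a minimal sketch of that dispatch idea in plain C; the names glue_entry, ecb_fn_t and ecb_dispatch are illustrative stand-ins, not the kernel API (the real pieces are struct common_glue_ctx and glue_ecb_crypt_128bit() in glue_helper).

#include <stddef.h>

/*
 * Illustrative stand-ins only -- NOT the kernel API. The real code
 * uses struct common_glue_ctx and glue_ecb_crypt_128bit().
 */
typedef void (*ecb_fn_t)(void *ctx, unsigned char *dst,
			 const unsigned char *src);

struct glue_entry {
	unsigned int num_blocks;	/* blocks handled per call */
	ecb_fn_t fn;			/* cipher routine for that width */
};

/*
 * Walk the table from the widest batch (e.g. 8-way AVX) down to single
 * blocks, consuming as much input as each entry can handle. Returns the
 * leftover byte count (a partial block, if any). The real helper also
 * drives the blkcipher walk and brackets wide batches with
 * kernel_fpu_begin()/kernel_fpu_end().
 */
static unsigned int ecb_dispatch(const struct glue_entry *funcs,
				 size_t num_funcs, unsigned int bsize,
				 void *ctx, unsigned char *dst,
				 const unsigned char *src,
				 unsigned int nbytes)
{
	size_t i;

	for (i = 0; i < num_funcs; i++) {
		unsigned int chunk = bsize * funcs[i].num_blocks;

		while (nbytes >= chunk) {
			funcs[i].fn(ctx, dst, src);
			src += chunk;
			dst += chunk;
			nbytes -= chunk;
		}
	}

	return nbytes;
}

The payoff is visible in the diffstat below: each batch width becomes one table entry rather than another copy of the walk loop, and the FPU-enable policy (only use AVX for chunks of at least fpu_blocks_limit blocks) lives in one place.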
Diffstat (limited to 'arch/x86/crypto/twofish_avx_glue.c')
-rw-r--r--  arch/x86/crypto/twofish_avx_glue.c | 556
1 file changed, 99 insertions(+), 457 deletions(-)
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index cabe058eba14..782b67ddaf6a 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -4,9 +4,6 @@
  * Copyright (C) 2012 Johannes Goetzfried
  * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
  *
- * Glue code based on serpent_sse2_glue.c by:
- *  Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -39,38 +36,21 @@
 #include <asm/i387.h>
 #include <asm/xcr.h>
 #include <asm/xsave.h>
+#include <asm/crypto/twofish.h>
 #include <asm/crypto/ablk_helper.h>
+#include <asm/crypto/glue_helper.h>
 #include <crypto/scatterwalk.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 
-
 #define TWOFISH_PARALLEL_BLOCKS 8
 
-/* regular block cipher functions from twofish_x86_64 module */
-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
-				const u8 *src);
-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
-				const u8 *src);
-
-/* 3-way parallel cipher functions from twofish_x86_64-3way module */
-asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-				       const u8 *src, bool xor);
-asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-				     const u8 *src);
-
 static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
 					const u8 *src)
 {
 	__twofish_enc_blk_3way(ctx, dst, src, false);
 }
 
-static inline void twofish_enc_blk_3way_xor(struct twofish_ctx *ctx, u8 *dst,
-					    const u8 *src)
-{
-	__twofish_enc_blk_3way(ctx, dst, src, true);
-}
-
 /* 8-way parallel cipher functions */
 asmlinkage void __twofish_enc_blk_8way(struct twofish_ctx *ctx, u8 *dst,
 				       const u8 *src, bool xor);
@@ -95,423 +75,142 @@ static inline void twofish_dec_blk_xway(struct twofish_ctx *ctx, u8 *dst,
 	twofish_dec_blk_8way(ctx, dst, src);
 }
 
-
-static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static void twofish_dec_blk_cbc_xway(void *ctx, u128 *dst, const u128 *src)
 {
-	if (fpu_enabled)
-		return true;
+	u128 ivs[TWOFISH_PARALLEL_BLOCKS - 1];
+	unsigned int j;
 
-	/* AVX is only used when chunk to be processed is large enough, so
-	 * do not enable FPU until it is necessary.
-	 */
-	if (nbytes < TF_BLOCK_SIZE * TWOFISH_PARALLEL_BLOCKS)
-		return false;
+	for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++)
+		ivs[j] = src[j];
 
-	kernel_fpu_begin();
-	return true;
-}
+	twofish_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
 
-static inline void twofish_fpu_end(bool fpu_enabled)
-{
-	if (fpu_enabled)
-		kernel_fpu_end();
+	for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++)
+		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
 }
 
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
-		     bool enc)
+static void twofish_enc_blk_ctr_xway(void *ctx, u128 *dst, const u128 *src,
+				     u128 *iv)
 {
-	bool fpu_enabled = false;
-	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = TF_BLOCK_SIZE;
-	unsigned int nbytes;
-	int err;
+	be128 ctrblks[TWOFISH_PARALLEL_BLOCKS];
+	unsigned int i;
 
-	err = blkcipher_walk_virt(desc, walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	for (i = 0; i < TWOFISH_PARALLEL_BLOCKS; i++) {
+		if (dst != src)
+			dst[i] = src[i];
 
-	while ((nbytes = walk->nbytes)) {
-		u8 *wsrc = walk->src.virt.addr;
-		u8 *wdst = walk->dst.virt.addr;
-
-		fpu_enabled = twofish_fpu_begin(fpu_enabled, nbytes);
-
-		/* Process multi-block batch */
-		if (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS) {
-			do {
-				if (enc)
-					twofish_enc_blk_xway(ctx, wdst, wsrc);
-				else
-					twofish_dec_blk_xway(ctx, wdst, wsrc);
-
-				wsrc += bsize * TWOFISH_PARALLEL_BLOCKS;
-				wdst += bsize * TWOFISH_PARALLEL_BLOCKS;
-				nbytes -= bsize * TWOFISH_PARALLEL_BLOCKS;
-			} while (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS);
-
-			if (nbytes < bsize)
-				goto done;
-		}
-
-		/* Process three block batch */
-		if (nbytes >= bsize * 3) {
-			do {
-				if (enc)
-					twofish_enc_blk_3way(ctx, wdst, wsrc);
-				else
-					twofish_dec_blk_3way(ctx, wdst, wsrc);
-
-				wsrc += bsize * 3;
-				wdst += bsize * 3;
-				nbytes -= bsize * 3;
-			} while (nbytes >= bsize * 3);
-
-			if (nbytes < bsize)
-				goto done;
-		}
-
-		/* Handle leftovers */
-		do {
-			if (enc)
-				twofish_enc_blk(ctx, wdst, wsrc);
-			else
-				twofish_dec_blk(ctx, wdst, wsrc);
-
-			wsrc += bsize;
-			wdst += bsize;
-			nbytes -= bsize;
-		} while (nbytes >= bsize);
-
-done:
-		err = blkcipher_walk_done(desc, walk, nbytes);
+		u128_to_be128(&ctrblks[i], iv);
+		u128_inc(iv);
 	}
 
-	twofish_fpu_end(fpu_enabled);
-	return err;
+	twofish_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
 }
 
+static const struct common_glue_ctx twofish_enc = {
+	.num_funcs = 3,
+	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_xway) }
+	}, {
+		.num_blocks = 3,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
+	} }
+};
+
+static const struct common_glue_ctx twofish_ctr = {
+	.num_funcs = 3,
+	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
+		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_xway) }
+	}, {
+		.num_blocks = 3,
+		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
+	} }
+};
+
+static const struct common_glue_ctx twofish_dec = {
+	.num_funcs = 3,
+	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_xway) }
+	}, {
+		.num_blocks = 3,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
+	} }
+};
+
+static const struct common_glue_ctx twofish_dec_cbc = {
+	.num_funcs = 3,
+	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
+		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_xway) }
+	}, {
+		.num_blocks = 3,
+		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
+	} }
+};
+
 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_crypt(desc, &walk, true);
+	return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
 }
 
 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_crypt(desc, &walk, false);
-}
-
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
-				  struct blkcipher_walk *walk)
-{
-	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = TF_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u128 *src = (u128 *)walk->src.virt.addr;
-	u128 *dst = (u128 *)walk->dst.virt.addr;
-	u128 *iv = (u128 *)walk->iv;
-
-	do {
-		u128_xor(dst, src, iv);
-		twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
-		iv = dst;
-
-		src += 1;
-		dst += 1;
-		nbytes -= bsize;
-	} while (nbytes >= bsize);
-
-	u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
-	return nbytes;
+	return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
 }
 
 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
-	struct blkcipher_walk walk;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		nbytes = __cbc_encrypt(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	return err;
-}
-
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
-				  struct blkcipher_walk *walk)
-{
-	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = TF_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u128 *src = (u128 *)walk->src.virt.addr;
-	u128 *dst = (u128 *)walk->dst.virt.addr;
-	u128 ivs[TWOFISH_PARALLEL_BLOCKS - 1];
-	u128 last_iv;
-	int i;
-
-	/* Start of the last block. */
-	src += nbytes / bsize - 1;
-	dst += nbytes / bsize - 1;
-
-	last_iv = *src;
-
-	/* Process multi-block batch */
-	if (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS) {
-		do {
-			nbytes -= bsize * (TWOFISH_PARALLEL_BLOCKS - 1);
-			src -= TWOFISH_PARALLEL_BLOCKS - 1;
-			dst -= TWOFISH_PARALLEL_BLOCKS - 1;
-
-			for (i = 0; i < TWOFISH_PARALLEL_BLOCKS - 1; i++)
-				ivs[i] = src[i];
-
-			twofish_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
-			for (i = 0; i < TWOFISH_PARALLEL_BLOCKS - 1; i++)
-				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);
-
-			nbytes -= bsize;
-			if (nbytes < bsize)
-				goto done;
-
-			u128_xor(dst, dst, src - 1);
-			src -= 1;
-			dst -= 1;
-		} while (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS);
-
-		if (nbytes < bsize)
-			goto done;
-	}
-
-	/* Process three block batch */
-	if (nbytes >= bsize * 3) {
-		do {
-			nbytes -= bsize * (3 - 1);
-			src -= 3 - 1;
-			dst -= 3 - 1;
-
-			ivs[0] = src[0];
-			ivs[1] = src[1];
-
-			twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
-
-			u128_xor(dst + 1, dst + 1, ivs + 0);
-			u128_xor(dst + 2, dst + 2, ivs + 1);
-
-			nbytes -= bsize;
-			if (nbytes < bsize)
-				goto done;
-
-			u128_xor(dst, dst, src - 1);
-			src -= 1;
-			dst -= 1;
-		} while (nbytes >= bsize * 3);
-
-		if (nbytes < bsize)
-			goto done;
-	}
-
-	/* Handle leftovers */
-	for (;;) {
-		twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);
-
-		nbytes -= bsize;
-		if (nbytes < bsize)
-			break;
-
-		u128_xor(dst, dst, src - 1);
-		src -= 1;
-		dst -= 1;
-	}
-
-done:
-	u128_xor(dst, dst, (u128 *)walk->iv);
-	*(u128 *)walk->iv = last_iv;
-
-	return nbytes;
+	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
+				       dst, src, nbytes);
 }
 
 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
-	bool fpu_enabled = false;
-	struct blkcipher_walk walk;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	while ((nbytes = walk.nbytes)) {
-		fpu_enabled = twofish_fpu_begin(fpu_enabled, nbytes);
-		nbytes = __cbc_decrypt(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	twofish_fpu_end(fpu_enabled);
-	return err;
-}
-
-static inline void u128_to_be128(be128 *dst, const u128 *src)
-{
-	dst->a = cpu_to_be64(src->a);
-	dst->b = cpu_to_be64(src->b);
+	return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
+				       nbytes);
 }
 
-static inline void be128_to_u128(u128 *dst, const be128 *src)
-{
-	dst->a = be64_to_cpu(src->a);
-	dst->b = be64_to_cpu(src->b);
-}
-
-static inline void u128_inc(u128 *i)
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		     struct scatterlist *src, unsigned int nbytes)
 {
-	i->b++;
-	if (!i->b)
-		i->a++;
+	return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
 }
 
-static void ctr_crypt_final(struct blkcipher_desc *desc,
-			    struct blkcipher_walk *walk)
+static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
 {
-	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	u8 *ctrblk = walk->iv;
-	u8 keystream[TF_BLOCK_SIZE];
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	unsigned int nbytes = walk->nbytes;
-
-	twofish_enc_blk(ctx, keystream, ctrblk);
-	crypto_xor(keystream, src, nbytes);
-	memcpy(dst, keystream, nbytes);
-
-	crypto_inc(ctrblk, TF_BLOCK_SIZE);
+	return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL,
+			      fpu_enabled, nbytes);
 }
 
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
-				struct blkcipher_walk *walk)
-{
-	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = TF_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u128 *src = (u128 *)walk->src.virt.addr;
-	u128 *dst = (u128 *)walk->dst.virt.addr;
-	u128 ctrblk;
-	be128 ctrblocks[TWOFISH_PARALLEL_BLOCKS];
-	int i;
-
-	be128_to_u128(&ctrblk, (be128 *)walk->iv);
-
-	/* Process multi-block batch */
-	if (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS) {
-		do {
-			/* create ctrblks for parallel encrypt */
-			for (i = 0; i < TWOFISH_PARALLEL_BLOCKS; i++) {
-				if (dst != src)
-					dst[i] = src[i];
-
-				u128_to_be128(&ctrblocks[i], &ctrblk);
-				u128_inc(&ctrblk);
-			}
-
-			twofish_enc_blk_xway_xor(ctx, (u8 *)dst,
-						 (u8 *)ctrblocks);
-
-			src += TWOFISH_PARALLEL_BLOCKS;
-			dst += TWOFISH_PARALLEL_BLOCKS;
-			nbytes -= bsize * TWOFISH_PARALLEL_BLOCKS;
-		} while (nbytes >= bsize * TWOFISH_PARALLEL_BLOCKS);
-
-		if (nbytes < bsize)
-			goto done;
-	}
-
-	/* Process three block batch */
-	if (nbytes >= bsize * 3) {
-		do {
-			if (dst != src) {
-				dst[0] = src[0];
-				dst[1] = src[1];
-				dst[2] = src[2];
-			}
-
-			/* create ctrblks for parallel encrypt */
-			u128_to_be128(&ctrblocks[0], &ctrblk);
-			u128_inc(&ctrblk);
-			u128_to_be128(&ctrblocks[1], &ctrblk);
-			u128_inc(&ctrblk);
-			u128_to_be128(&ctrblocks[2], &ctrblk);
-			u128_inc(&ctrblk);
-
-			twofish_enc_blk_3way_xor(ctx, (u8 *)dst,
-						 (u8 *)ctrblocks);
-
-			src += 3;
-			dst += 3;
-			nbytes -= bsize * 3;
-		} while (nbytes >= bsize * 3);
-
-		if (nbytes < bsize)
-			goto done;
-	}
-
-	/* Handle leftovers */
-	do {
-		if (dst != src)
-			*dst = *src;
-
-		u128_to_be128(&ctrblocks[0], &ctrblk);
-		u128_inc(&ctrblk);
-
-		twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
-		u128_xor(dst, dst, (u128 *)ctrblocks);
-
-		src += 1;
-		dst += 1;
-		nbytes -= bsize;
-	} while (nbytes >= bsize);
-
-done:
-	u128_to_be128((be128 *)walk->iv, &ctrblk);
-	return nbytes;
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		     struct scatterlist *src, unsigned int nbytes)
+static inline void twofish_fpu_end(bool fpu_enabled)
 {
-	bool fpu_enabled = false;
-	struct blkcipher_walk walk;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
-		fpu_enabled = twofish_fpu_begin(fpu_enabled, nbytes);
-		nbytes = __ctr_crypt(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	twofish_fpu_end(fpu_enabled);
-
-	if (walk.nbytes) {
-		ctr_crypt_final(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, 0);
-	}
-
-	return err;
+	glue_fpu_end(fpu_enabled);
 }
 
 struct crypt_priv {
@@ -563,26 +262,6 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 	twofish_dec_blk(ctx->ctx, srcdst, srcdst);
 }
 
-struct twofish_lrw_ctx {
-	struct lrw_table_ctx lrw_table;
-	struct twofish_ctx twofish_ctx;
-};
-
-static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
-			      unsigned int keylen)
-{
-	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-	int err;
-
-	err = __twofish_setkey(&ctx->twofish_ctx, key,
-			       keylen - TF_BLOCK_SIZE, &tfm->crt_flags);
-	if (err)
-		return err;
-
-	return lrw_init_table(&ctx->lrw_table, key + keylen -
-			      TF_BLOCK_SIZE);
-}
-
 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
@@ -635,43 +314,6 @@ static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	return ret;
 }
 
-static void lrw_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	lrw_free_table(&ctx->lrw_table);
-}
-
-struct twofish_xts_ctx {
-	struct twofish_ctx tweak_ctx;
-	struct twofish_ctx crypt_ctx;
-};
-
-static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
-			      unsigned int keylen)
-{
-	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
-	int err;
-
-	/* key consists of keys of equal size concatenated, therefore
-	 * the length must be even
-	 */
-	if (keylen % 2) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		return -EINVAL;
-	}
-
-	/* first half of xts-key is for crypt */
-	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
-	if (err)
-		return err;
-
-	/* second half of xts-key is for tweak */
-	return __twofish_setkey(&ctx->tweak_ctx,
-				key + keylen / 2, keylen / 2, flags);
-}
-
 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
@@ -798,7 +440,7 @@ static struct crypto_alg twofish_algs[10] = { {
 	.cra_type = &crypto_blkcipher_type,
 	.cra_module = THIS_MODULE,
 	.cra_list = LIST_HEAD_INIT(twofish_algs[3].cra_list),
-	.cra_exit = lrw_exit_tfm,
+	.cra_exit = lrw_twofish_exit_tfm,
 	.cra_u = {
 		.blkcipher = {
 			.min_keysize = TF_MIN_KEY_SIZE +