author     Jussi Kivilinna <jussi.kivilinna@mbnet.fi>   2012-06-18 07:07:24 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>     2012-06-27 02:42:01 -0400
commit     1d0debbd4671a8d302a11837a126d5f87db16bdc (patch)
tree       8bd1032627c7b2bbec8b4bdf86bd9dc39599130c
parent     596d875052dac6bf084f0c3a3e946fb4709b727b (diff)
crypto: serpent-avx: remove duplicated glue code and use shared glue code from glue_helper
Now that shared glue code is available, convert serpent-avx to use it.

Cc: Johannes Goetzfried <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  arch/x86/crypto/serpent_avx_glue.c  | 398
-rw-r--r--  crypto/Kconfig                      |   1
2 files changed, 95 insertions(+), 304 deletions(-)
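The conversion follows one pattern throughout: each mode gets a small dispatch table (struct common_glue_ctx) listing the parallel AVX routine and the one-block fallback, and the blkcipher entry points shrink to a single call into the shared glue code, which performs the walk, FPU batching and tail handling that the removed helpers open-coded. A condensed, illustrative sketch of the ECB encrypt path as set up by this patch (kernel-internal APIs from <asm/crypto/glue_helper.h> and <asm/serpent-avx.h> assumed):

static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	/* enable the FPU/AVX path only for chunks of at least this many blocks */
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		/* preferred: SERPENT_PARALLEL_BLOCKS blocks at a time via the AVX code */
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
	}, {
		/* fallback: one block at a time via the generic C implementation */
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
	} }
};

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	/* glue_ecb_crypt_128bit() does the blkcipher walk and the
	 * kernel_fpu_begin()/kernel_fpu_end() batching that the removed
	 * ecb_crypt() helper used to open-code. */
	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
}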
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 31eb567cc893..c61b91aa42a8 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -36,357 +36,147 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/i387.h>
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/serpent-avx.h>
 #include <asm/crypto/ablk_helper.h>
-#include <crypto/scatterwalk.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
+#include <asm/crypto/glue_helper.h>
 
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
-{
-	if (fpu_enabled)
-		return true;
-
-	/* AVX is only used when chunk to be processed is large enough, so
-	 * do not enable FPU until it is necessary.
-	 */
-	if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
-		return false;
-
-	kernel_fpu_begin();
-	return true;
-}
-
-static inline void serpent_fpu_end(bool fpu_enabled)
-{
-	if (fpu_enabled)
-		kernel_fpu_end();
-}
-
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
-		     bool enc)
-{
-	bool fpu_enabled = false;
-	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = SERPENT_BLOCK_SIZE;
-	unsigned int nbytes;
-	int err;
+static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
+{
+	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
+	unsigned int j;
 
-	err = blkcipher_walk_virt(desc, walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
+		ivs[j] = src[j];
 
-	while ((nbytes = walk->nbytes)) {
-		u8 *wsrc = walk->src.virt.addr;
-		u8 *wdst = walk->dst.virt.addr;
-
-		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
-
-		/* Process multi-block batch */
-		if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
-			do {
-				if (enc)
-					serpent_enc_blk_xway(ctx, wdst, wsrc);
-				else
-					serpent_dec_blk_xway(ctx, wdst, wsrc);
-
-				wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
-				wdst += bsize * SERPENT_PARALLEL_BLOCKS;
-				nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
-			} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);
-
-			if (nbytes < bsize)
-				goto done;
-		}
-
-		/* Handle leftovers */
-		do {
-			if (enc)
-				__serpent_encrypt(ctx, wdst, wsrc);
-			else
-				__serpent_decrypt(ctx, wdst, wsrc);
-
-			wsrc += bsize;
-			wdst += bsize;
-			nbytes -= bsize;
-		} while (nbytes >= bsize);
-
-done:
-		err = blkcipher_walk_done(desc, walk, nbytes);
-	}
+	serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
 
-	serpent_fpu_end(fpu_enabled);
-	return err;
+	for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
+		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
 }
 
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
 {
-	struct blkcipher_walk walk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_crypt(desc, &walk, true);
-}
+	be128 ctrblk;
 
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
-{
-	struct blkcipher_walk walk;
+	u128_to_be128(&ctrblk, iv);
+	u128_inc(iv);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_crypt(desc, &walk, false);
+	__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
+	u128_xor(dst, src, (u128 *)&ctrblk);
 }
 
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
-				  struct blkcipher_walk *walk)
+static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
+				   u128 *iv)
 {
-	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = SERPENT_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u128 *src = (u128 *)walk->src.virt.addr;
-	u128 *dst = (u128 *)walk->dst.virt.addr;
-	u128 *iv = (u128 *)walk->iv;
-
-	do {
-		u128_xor(dst, src, iv);
-		__serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
-		iv = dst;
-
-		src += 1;
-		dst += 1;
-		nbytes -= bsize;
-	} while (nbytes >= bsize);
-
-	u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
-	return nbytes;
-}
+	be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
+	unsigned int i;
 
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
-{
-	struct blkcipher_walk walk;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
+		if (dst != src)
+			dst[i] = src[i];
 
-	while ((nbytes = walk.nbytes)) {
-		nbytes = __cbc_encrypt(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		u128_to_be128(&ctrblks[i], iv);
+		u128_inc(iv);
 	}
 
-	return err;
+	serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
 }
 
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
-				  struct blkcipher_walk *walk)
-{
-	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = SERPENT_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u128 *src = (u128 *)walk->src.virt.addr;
-	u128 *dst = (u128 *)walk->dst.virt.addr;
-	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
-	u128 last_iv;
-	int i;
-
-	/* Start of the last block. */
-	src += nbytes / bsize - 1;
-	dst += nbytes / bsize - 1;
-
-	last_iv = *src;
-
-	/* Process multi-block batch */
-	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
-		do {
-			nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
-			src -= SERPENT_PARALLEL_BLOCKS - 1;
-			dst -= SERPENT_PARALLEL_BLOCKS - 1;
-
-			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
-				ivs[i] = src[i];
-
-			serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
-			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
-				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);
-
-			nbytes -= bsize;
-			if (nbytes < bsize)
-				goto done;
-
-			u128_xor(dst, dst, src - 1);
-			src -= 1;
-			dst -= 1;
-		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);
-
-		if (nbytes < bsize)
-			goto done;
-	}
-
-	/* Handle leftovers */
-	for (;;) {
-		__serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);
-
-		nbytes -= bsize;
-		if (nbytes < bsize)
-			break;
+static const struct common_glue_ctx serpent_enc = {
+	.num_funcs = 2,
+	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = SERPENT_PARALLEL_BLOCKS,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+	} }
+};
 
-		u128_xor(dst, dst, src - 1);
-		src -= 1;
-		dst -= 1;
-	}
+static const struct common_glue_ctx serpent_ctr = {
+	.num_funcs = 2,
+	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = SERPENT_PARALLEL_BLOCKS,
+		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
+	} }
+};
 
-done:
-	u128_xor(dst, dst, (u128 *)walk->iv);
-	*(u128 *)walk->iv = last_iv;
+static const struct common_glue_ctx serpent_dec = {
+	.num_funcs = 2,
+	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = SERPENT_PARALLEL_BLOCKS,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+	} }
+};
 
-	return nbytes;
-}
+static const struct common_glue_ctx serpent_dec_cbc = {
+	.num_funcs = 2,
+	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
+
+	.funcs = { {
+		.num_blocks = SERPENT_PARALLEL_BLOCKS,
+		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
+	}, {
+		.num_blocks = 1,
+		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+	} }
+};
 
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
-	bool fpu_enabled = false;
-	struct blkcipher_walk walk;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	while ((nbytes = walk.nbytes)) {
-		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
-		nbytes = __cbc_decrypt(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	serpent_fpu_end(fpu_enabled);
-	return err;
+	return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
 }
 
-static inline void u128_to_be128(be128 *dst, const u128 *src)
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
 {
-	dst->a = cpu_to_be64(src->a);
-	dst->b = cpu_to_be64(src->b);
+	return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
 }
 
-static inline void be128_to_u128(u128 *dst, const be128 *src)
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
 {
-	dst->a = be64_to_cpu(src->a);
-	dst->b = be64_to_cpu(src->b);
+	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
+				       dst, src, nbytes);
 }
 
-static inline void u128_inc(u128 *i)
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
 {
-	i->b++;
-	if (!i->b)
-		i->a++;
+	return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
+				       nbytes);
 }
 
-static void ctr_crypt_final(struct blkcipher_desc *desc,
-			    struct blkcipher_walk *walk)
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		     struct scatterlist *src, unsigned int nbytes)
 {
-	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	u8 *ctrblk = walk->iv;
-	u8 keystream[SERPENT_BLOCK_SIZE];
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	unsigned int nbytes = walk->nbytes;
-
-	__serpent_encrypt(ctx, keystream, ctrblk);
-	crypto_xor(keystream, src, nbytes);
-	memcpy(dst, keystream, nbytes);
-
-	crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
+	return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
 }
 
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
-				struct blkcipher_walk *walk)
+static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
 {
-	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	const unsigned int bsize = SERPENT_BLOCK_SIZE;
-	unsigned int nbytes = walk->nbytes;
-	u128 *src = (u128 *)walk->src.virt.addr;
-	u128 *dst = (u128 *)walk->dst.virt.addr;
-	u128 ctrblk;
-	be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
-	int i;
-
-	be128_to_u128(&ctrblk, (be128 *)walk->iv);
-
-	/* Process multi-block batch */
-	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
-		do {
-			/* create ctrblks for parallel encrypt */
-			for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
-				if (dst != src)
-					dst[i] = src[i];
-
-				u128_to_be128(&ctrblocks[i], &ctrblk);
-				u128_inc(&ctrblk);
-			}
-
-			serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
-						 (u8 *)ctrblocks);
-
-			src += SERPENT_PARALLEL_BLOCKS;
-			dst += SERPENT_PARALLEL_BLOCKS;
-			nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
-		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);
-
-		if (nbytes < bsize)
-			goto done;
-	}
-
-	/* Handle leftovers */
-	do {
-		if (dst != src)
-			*dst = *src;
-
-		u128_to_be128(&ctrblocks[0], &ctrblk);
-		u128_inc(&ctrblk);
-
-		__serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
-		u128_xor(dst, dst, (u128 *)ctrblocks);
-
-		src += 1;
-		dst += 1;
-		nbytes -= bsize;
-	} while (nbytes >= bsize);
-
-done:
-	u128_to_be128((be128 *)walk->iv, &ctrblk);
-	return nbytes;
-}
+	return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
+			      NULL, fpu_enabled, nbytes);
+}
 
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		     struct scatterlist *src, unsigned int nbytes)
+static inline void serpent_fpu_end(bool fpu_enabled)
 {
-	bool fpu_enabled = false;
-	struct blkcipher_walk walk;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
-		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
-		nbytes = __ctr_crypt(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	serpent_fpu_end(fpu_enabled);
-
-	if (walk.nbytes) {
-		ctr_crypt_final(desc, &walk);
-		err = blkcipher_walk_done(desc, &walk, 0);
-	}
-
-	return err;
+	glue_fpu_end(fpu_enabled);
 }
 
 struct crypt_priv {
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 92b46970c856..a86c2fb92ea9 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -842,6 +842,7 @@ config CRYPTO_SERPENT_AVX_X86_64
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
 	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_SERPENT
 	select CRYPTO_LRW
 	select CRYPTO_XTS