author     Jussi Kivilinna <jussi.kivilinna@mbnet.fi>   2012-06-18 07:07:19 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>     2012-06-27 02:42:01 -0400
commit     596d875052dac6bf084f0c3a3e946fb4709b727b (patch)
tree       bd5bf090efd70b60d1c26c1ca7b6a559cbcf72b2 /arch/x86/crypto
parent     e81792fbc2a6fa4969f724b959829667fb2d4f01 (diff)
crypto: serpent-sse2 - split generic glue code to new helper module
Now that the serpent-sse2 glue code has been made generic, it can be split
out into a separate module.
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
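
The helper operates on a table of cipher routines (struct common_glue_ctx, whose
definition is moved by this patch; see the removed serpent code below), ordered
from the widest multi-block routine down to a mandatory one-block entry. The
following is a minimal sketch of the consumer side for a hypothetical cipher;
the names mycipher_enc_8way, __mycipher_encrypt and MYCIPHER_PARALLEL_BLOCKS
are illustrative only and are not part of this patch:

    /*
     * Sketch only: how a cipher module might describe its ECB routines to
     * the new helper.  The structures and GLUE_FUNC_CAST() are the ones this
     * patch moves out of serpent_sse2_glue.c; the cipher functions are
     * hypothetical.
     */
    static const struct common_glue_ctx mycipher_enc = {
            .num_funcs = 2,
            .fpu_blocks_limit = MYCIPHER_PARALLEL_BLOCKS,

            .funcs = { {
                    /* first entry: largest batch (8-way SSE2 routine) */
                    .num_blocks = MYCIPHER_PARALLEL_BLOCKS,
                    .fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_enc_8way) }
            }, {
                    /* last entry must process exactly one block */
                    .num_blocks = 1,
                    .fn_u = { .ecb = GLUE_FUNC_CAST(__mycipher_encrypt) }
            } }
    };

    static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                           struct scatterlist *src, unsigned int nbytes)
    {
            /* scatterlist walking, FPU handling and batching live in the helper */
            return glue_ecb_crypt_128bit(&mycipher_enc, desc, dst, src, nbytes);
    }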
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--  arch/x86/crypto/Makefile            |   1
-rw-r--r--  arch/x86/crypto/glue_helper.c       | 307
-rw-r--r--  arch/x86/crypto/serpent_sse2_glue.c | 352
3 files changed, 309 insertions(+), 351 deletions(-)
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index ad746916f912..e908e5de82d3 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
+obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
 
 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
 obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
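
The Makefile hooks the new object file to CONFIG_CRYPTO_GLUE_HELPER_X86. The
Kconfig entry defining that symbol lives in crypto/Kconfig and so falls outside
this diffstat (limited to 'arch/x86/crypto'); a plausible minimal definition,
shown here only as a sketch, is:

    # Sketch of the crypto/Kconfig entry assumed by the Makefile change above;
    # not part of the diff shown on this page.
    config CRYPTO_GLUE_HELPER_X86
            tristate
            depends on X86
            select CRYPTO_ALGAPI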
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
new file mode 100644
index 000000000000..4854f0f31e4f
--- /dev/null
+++ b/arch/x86/crypto/glue_helper.c
@@ -0,0 +1,307 @@
+/*
+ * Shared glue code for 128bit block ciphers
+ *
+ * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
+ *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * CTR part based on code (crypto/ctr.c) by:
+ *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ */
+
+#include <linux/module.h>
+#include <crypto/b128ops.h>
+#include <crypto/lrw.h>
+#include <crypto/xts.h>
+#include <asm/crypto/glue_helper.h>
+#include <crypto/scatterwalk.h>
+
+static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+                                   struct blkcipher_desc *desc,
+                                   struct blkcipher_walk *walk)
+{
+        void *ctx = crypto_blkcipher_ctx(desc->tfm);
+        const unsigned int bsize = 128 / 8;
+        unsigned int nbytes, i, func_bytes;
+        bool fpu_enabled = false;
+        int err;
+
+        err = blkcipher_walk_virt(desc, walk);
+
+        while ((nbytes = walk->nbytes)) {
+                u8 *wsrc = walk->src.virt.addr;
+                u8 *wdst = walk->dst.virt.addr;
+
+                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+                                             desc, fpu_enabled, nbytes);
+
+                for (i = 0; i < gctx->num_funcs; i++) {
+                        func_bytes = bsize * gctx->funcs[i].num_blocks;
+
+                        /* Process multi-block batch */
+                        if (nbytes >= func_bytes) {
+                                do {
+                                        gctx->funcs[i].fn_u.ecb(ctx, wdst,
+                                                                wsrc);
+
+                                        wsrc += func_bytes;
+                                        wdst += func_bytes;
+                                        nbytes -= func_bytes;
+                                } while (nbytes >= func_bytes);
+
+                                if (nbytes < bsize)
+                                        goto done;
+                        }
+                }
+
+done:
+                err = blkcipher_walk_done(desc, walk, nbytes);
+        }
+
+        glue_fpu_end(fpu_enabled);
+        return err;
+}
+
+int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+                          struct blkcipher_desc *desc, struct scatterlist *dst,
+                          struct scatterlist *src, unsigned int nbytes)
+{
+        struct blkcipher_walk walk;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        return __glue_ecb_crypt_128bit(gctx, desc, &walk);
+}
+EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
+
+static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
+                                              struct blkcipher_desc *desc,
+                                              struct blkcipher_walk *walk)
+{
+        void *ctx = crypto_blkcipher_ctx(desc->tfm);
+        const unsigned int bsize = 128 / 8;
+        unsigned int nbytes = walk->nbytes;
+        u128 *src = (u128 *)walk->src.virt.addr;
+        u128 *dst = (u128 *)walk->dst.virt.addr;
+        u128 *iv = (u128 *)walk->iv;
+
+        do {
+                u128_xor(dst, src, iv);
+                fn(ctx, (u8 *)dst, (u8 *)dst);
+                iv = dst;
+
+                src += 1;
+                dst += 1;
+                nbytes -= bsize;
+        } while (nbytes >= bsize);
+
+        u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
+        return nbytes;
+}
+
+int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
+                            struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt(desc, &walk);
+
+        while ((nbytes = walk.nbytes)) {
+                nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
+
+        return err;
+}
+EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
+
+static unsigned int
+__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+                          struct blkcipher_desc *desc,
+                          struct blkcipher_walk *walk)
+{
+        void *ctx = crypto_blkcipher_ctx(desc->tfm);
+        const unsigned int bsize = 128 / 8;
+        unsigned int nbytes = walk->nbytes;
+        u128 *src = (u128 *)walk->src.virt.addr;
+        u128 *dst = (u128 *)walk->dst.virt.addr;
+        u128 last_iv;
+        unsigned int num_blocks, func_bytes;
+        unsigned int i;
+
+        /* Start of the last block. */
+        src += nbytes / bsize - 1;
+        dst += nbytes / bsize - 1;
+
+        last_iv = *src;
+
+        for (i = 0; i < gctx->num_funcs; i++) {
+                num_blocks = gctx->funcs[i].num_blocks;
+                func_bytes = bsize * num_blocks;
+
+                /* Process multi-block batch */
+                if (nbytes >= func_bytes) {
+                        do {
+                                nbytes -= func_bytes - bsize;
+                                src -= num_blocks - 1;
+                                dst -= num_blocks - 1;
+
+                                gctx->funcs[i].fn_u.cbc(ctx, dst, src);
+
+                                nbytes -= bsize;
+                                if (nbytes < bsize)
+                                        goto done;
+
+                                u128_xor(dst, dst, src - 1);
+                                src -= 1;
+                                dst -= 1;
+                        } while (nbytes >= func_bytes);
+
+                        if (nbytes < bsize)
+                                goto done;
+                }
+        }
+
+done:
+        u128_xor(dst, dst, (u128 *)walk->iv);
+        *(u128 *)walk->iv = last_iv;
+
+        return nbytes;
+}
+
+int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+                            struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+        const unsigned int bsize = 128 / 8;
+        bool fpu_enabled = false;
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt(desc, &walk);
+
+        while ((nbytes = walk.nbytes)) {
+                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+                                             desc, fpu_enabled, nbytes);
+                nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
+
+        glue_fpu_end(fpu_enabled);
+        return err;
+}
+EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+
+static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
+                                        struct blkcipher_desc *desc,
+                                        struct blkcipher_walk *walk)
+{
+        void *ctx = crypto_blkcipher_ctx(desc->tfm);
+        u8 *src = (u8 *)walk->src.virt.addr;
+        u8 *dst = (u8 *)walk->dst.virt.addr;
+        unsigned int nbytes = walk->nbytes;
+        u128 ctrblk;
+        u128 tmp;
+
+        be128_to_u128(&ctrblk, (be128 *)walk->iv);
+
+        memcpy(&tmp, src, nbytes);
+        fn_ctr(ctx, &tmp, &tmp, &ctrblk);
+        memcpy(dst, &tmp, nbytes);
+
+        u128_to_be128((be128 *)walk->iv, &ctrblk);
+}
+EXPORT_SYMBOL_GPL(glue_ctr_crypt_final_128bit);
+
+static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+                                            struct blkcipher_desc *desc,
+                                            struct blkcipher_walk *walk)
+{
+        const unsigned int bsize = 128 / 8;
+        void *ctx = crypto_blkcipher_ctx(desc->tfm);
+        unsigned int nbytes = walk->nbytes;
+        u128 *src = (u128 *)walk->src.virt.addr;
+        u128 *dst = (u128 *)walk->dst.virt.addr;
+        u128 ctrblk;
+        unsigned int num_blocks, func_bytes;
+        unsigned int i;
+
+        be128_to_u128(&ctrblk, (be128 *)walk->iv);
+
+        /* Process multi-block batch */
+        for (i = 0; i < gctx->num_funcs; i++) {
+                num_blocks = gctx->funcs[i].num_blocks;
+                func_bytes = bsize * num_blocks;
+
+                if (nbytes >= func_bytes) {
+                        do {
+                                gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
+
+                                src += num_blocks;
+                                dst += num_blocks;
+                                nbytes -= func_bytes;
+                        } while (nbytes >= func_bytes);
+
+                        if (nbytes < bsize)
+                                goto done;
+                }
+        }
+
+done:
+        u128_to_be128((be128 *)walk->iv, &ctrblk);
+        return nbytes;
+}
+
+int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+                          struct blkcipher_desc *desc, struct scatterlist *dst,
+                          struct scatterlist *src, unsigned int nbytes)
+{
+        const unsigned int bsize = 128 / 8;
+        bool fpu_enabled = false;
+        struct blkcipher_walk walk;
+        int err;
+
+        blkcipher_walk_init(&walk, dst, src, nbytes);
+        err = blkcipher_walk_virt_block(desc, &walk, bsize);
+
+        while ((nbytes = walk.nbytes) >= bsize) {
+                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+                                             desc, fpu_enabled, nbytes);
+                nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
+                err = blkcipher_walk_done(desc, &walk, nbytes);
+        }
+
+        glue_fpu_end(fpu_enabled);
+
+        if (walk.nbytes) {
+                glue_ctr_crypt_final_128bit(
+                        gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
+                err = blkcipher_walk_done(desc, &walk, 0);
+        }
+
+        return err;
+}
+EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
+
+MODULE_LICENSE("GPL");
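
The companion header <asm/crypto/glue_helper.h> lives under
arch/x86/include/asm/crypto/ and is therefore not shown in this diffstat
(limited to 'arch/x86/crypto'). Judging from the code above and the
definitions removed from serpent_sse2_glue.c below, it can be assumed to carry
the shared typedefs and GLUE_*_CAST macros, the common_glue_func_entry and
common_glue_ctx structures, the static inline glue_fpu_begin()/glue_fpu_end()
helpers, and prototypes along these lines (a sketch, not the verbatim header):

    /* Assumed prototypes in arch/x86/include/asm/crypto/glue_helper.h
     * (sketch); they mirror the symbols exported by glue_helper.c above. */
    int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
                              struct blkcipher_desc *desc,
                              struct scatterlist *dst,
                              struct scatterlist *src, unsigned int nbytes);

    int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
                                struct blkcipher_desc *desc,
                                struct scatterlist *dst,
                                struct scatterlist *src, unsigned int nbytes);

    int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
                                struct blkcipher_desc *desc,
                                struct scatterlist *dst,
                                struct scatterlist *src, unsigned int nbytes);

    int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
                              struct blkcipher_desc *desc,
                              struct scatterlist *dst,
                              struct scatterlist *src, unsigned int nbytes);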
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 8c86239010ae..49a32eedf0c8 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -41,359 +41,9 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/i387.h>
 #include <asm/serpent-sse2.h>
 #include <asm/crypto/ablk_helper.h>
-#include <crypto/scatterwalk.h>
+#include <asm/crypto/glue_helper.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-
-typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
-typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
-                                       u128 *iv);
-
-#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
-#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
-#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
-
-struct common_glue_func_entry {
-        unsigned int num_blocks; /* number of blocks that @fn will process */
-        union {
-                common_glue_func_t ecb;
-                common_glue_cbc_func_t cbc;
-                common_glue_ctr_func_t ctr;
-        } fn_u;
-};
-
-struct common_glue_ctx {
-        unsigned int num_funcs;
-        int fpu_blocks_limit; /* -1 means fpu not needed at all */
-
-        /*
-         * First funcs entry must have largest num_blocks and last funcs entry
-         * must have num_blocks == 1!
-         */
-        struct common_glue_func_entry funcs[];
-};
-
-static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
-                                  struct blkcipher_desc *desc,
-                                  bool fpu_enabled, unsigned int nbytes)
-{
-        if (likely(fpu_blocks_limit < 0))
-                return false;
-
-        if (fpu_enabled)
-                return true;
-
-        /*
-         * Vector-registers are only used when chunk to be processed is large
-         * enough, so do not enable FPU until it is necessary.
-         */
-        if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
-                return false;
-
-        if (desc) {
-                /* prevent sleeping if FPU is in use */
-                desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-        }
-
-        kernel_fpu_begin();
-        return true;
-}
-
-static inline void glue_fpu_end(bool fpu_enabled)
-{
-        if (fpu_enabled)
-                kernel_fpu_end();
-}
-
-static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
-                                   struct blkcipher_desc *desc,
-                                   struct blkcipher_walk *walk)
-{
-        void *ctx = crypto_blkcipher_ctx(desc->tfm);
-        const unsigned int bsize = 128 / 8;
-        unsigned int nbytes, i, func_bytes;
-        bool fpu_enabled = false;
-        int err;
-
-        err = blkcipher_walk_virt(desc, walk);
-
-        while ((nbytes = walk->nbytes)) {
-                u8 *wsrc = walk->src.virt.addr;
-                u8 *wdst = walk->dst.virt.addr;
-
-                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-                                             desc, fpu_enabled, nbytes);
-
-                for (i = 0; i < gctx->num_funcs; i++) {
-                        func_bytes = bsize * gctx->funcs[i].num_blocks;
-
-                        /* Process multi-block batch */
-                        if (nbytes >= func_bytes) {
-                                do {
-                                        gctx->funcs[i].fn_u.ecb(ctx, wdst,
-                                                                wsrc);
-
-                                        wsrc += func_bytes;
-                                        wdst += func_bytes;
-                                        nbytes -= func_bytes;
-                                } while (nbytes >= func_bytes);
-
-                                if (nbytes < bsize)
-                                        goto done;
-                        }
-                }
-
-done:
-                err = blkcipher_walk_done(desc, walk, nbytes);
-        }
-
-        glue_fpu_end(fpu_enabled);
-        return err;
-}
-
-int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
-                          struct blkcipher_desc *desc, struct scatterlist *dst,
-                          struct scatterlist *src, unsigned int nbytes)
-{
-        struct blkcipher_walk walk;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        return __glue_ecb_crypt_128bit(gctx, desc, &walk);
-}
-
-static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
-                                              struct blkcipher_desc *desc,
-                                              struct blkcipher_walk *walk)
-{
-        void *ctx = crypto_blkcipher_ctx(desc->tfm);
-        const unsigned int bsize = 128 / 8;
-        unsigned int nbytes = walk->nbytes;
-        u128 *src = (u128 *)walk->src.virt.addr;
-        u128 *dst = (u128 *)walk->dst.virt.addr;
-        u128 *iv = (u128 *)walk->iv;
-
-        do {
-                u128_xor(dst, src, iv);
-                fn(ctx, (u8 *)dst, (u8 *)dst);
-                iv = dst;
-
-                src += 1;
-                dst += 1;
-                nbytes -= bsize;
-        } while (nbytes >= bsize);
-
-        u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
-        return nbytes;
-}
-
-int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
-                            struct blkcipher_desc *desc,
-                            struct scatterlist *dst,
-                            struct scatterlist *src, unsigned int nbytes)
-{
-        struct blkcipher_walk walk;
-        int err;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        err = blkcipher_walk_virt(desc, &walk);
-
-        while ((nbytes = walk.nbytes)) {
-                nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
-                err = blkcipher_walk_done(desc, &walk, nbytes);
-        }
-
-        return err;
-}
-
-static unsigned int
-__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
-                          struct blkcipher_desc *desc,
-                          struct blkcipher_walk *walk)
-{
-        void *ctx = crypto_blkcipher_ctx(desc->tfm);
-        const unsigned int bsize = 128 / 8;
-        unsigned int nbytes = walk->nbytes;
-        u128 *src = (u128 *)walk->src.virt.addr;
-        u128 *dst = (u128 *)walk->dst.virt.addr;
-        u128 last_iv;
-        unsigned int num_blocks, func_bytes;
-        unsigned int i;
-
-        /* Start of the last block. */
-        src += nbytes / bsize - 1;
-        dst += nbytes / bsize - 1;
-
-        last_iv = *src;
-
-        for (i = 0; i < gctx->num_funcs; i++) {
-                num_blocks = gctx->funcs[i].num_blocks;
-                func_bytes = bsize * num_blocks;
-
-                /* Process multi-block batch */
-                if (nbytes >= func_bytes) {
-                        do {
-                                nbytes -= func_bytes - bsize;
-                                src -= num_blocks - 1;
-                                dst -= num_blocks - 1;
-
-                                gctx->funcs[i].fn_u.cbc(ctx, dst, src);
-
-                                nbytes -= bsize;
-                                if (nbytes < bsize)
-                                        goto done;
-
-                                u128_xor(dst, dst, src - 1);
-                                src -= 1;
-                                dst -= 1;
-                        } while (nbytes >= func_bytes);
-
-                        if (nbytes < bsize)
-                                goto done;
-                }
-        }
-
-done:
-        u128_xor(dst, dst, (u128 *)walk->iv);
-        *(u128 *)walk->iv = last_iv;
-
-        return nbytes;
-}
-
-int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
-                            struct blkcipher_desc *desc,
-                            struct scatterlist *dst,
-                            struct scatterlist *src, unsigned int nbytes)
-{
-        const unsigned int bsize = 128 / 8;
-        bool fpu_enabled = false;
-        struct blkcipher_walk walk;
-        int err;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        err = blkcipher_walk_virt(desc, &walk);
-
-        while ((nbytes = walk.nbytes)) {
-                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-                                             desc, fpu_enabled, nbytes);
-                nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
-                err = blkcipher_walk_done(desc, &walk, nbytes);
-        }
-
-        glue_fpu_end(fpu_enabled);
-        return err;
-}
-
-static inline void u128_to_be128(be128 *dst, const u128 *src)
-{
-        dst->a = cpu_to_be64(src->a);
-        dst->b = cpu_to_be64(src->b);
-}
-
-static inline void be128_to_u128(u128 *dst, const be128 *src)
-{
-        dst->a = be64_to_cpu(src->a);
-        dst->b = be64_to_cpu(src->b);
-}
-
-static inline void u128_inc(u128 *i)
-{
-        i->b++;
-        if (!i->b)
-                i->a++;
-}
-
-static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
-                                        struct blkcipher_desc *desc,
-                                        struct blkcipher_walk *walk)
-{
-        void *ctx = crypto_blkcipher_ctx(desc->tfm);
-        u8 *src = (u8 *)walk->src.virt.addr;
-        u8 *dst = (u8 *)walk->dst.virt.addr;
-        unsigned int nbytes = walk->nbytes;
-        u128 ctrblk;
-        u128 tmp;
-
-        be128_to_u128(&ctrblk, (be128 *)walk->iv);
-
-        memcpy(&tmp, src, nbytes);
-        fn_ctr(ctx, &tmp, &tmp, &ctrblk);
-        memcpy(dst, &tmp, nbytes);
-
-        u128_to_be128((be128 *)walk->iv, &ctrblk);
-}
-
-static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
-                                            struct blkcipher_desc *desc,
-                                            struct blkcipher_walk *walk)
-{
-        const unsigned int bsize = 128 / 8;
-        void *ctx = crypto_blkcipher_ctx(desc->tfm);
-        unsigned int nbytes = walk->nbytes;
-        u128 *src = (u128 *)walk->src.virt.addr;
-        u128 *dst = (u128 *)walk->dst.virt.addr;
-        u128 ctrblk;
-        unsigned int num_blocks, func_bytes;
-        unsigned int i;
-
-        be128_to_u128(&ctrblk, (be128 *)walk->iv);
-
-        /* Process multi-block batch */
-        for (i = 0; i < gctx->num_funcs; i++) {
-                num_blocks = gctx->funcs[i].num_blocks;
-                func_bytes = bsize * num_blocks;
-
-                if (nbytes >= func_bytes) {
-                        do {
-                                gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
-
-                                src += num_blocks;
-                                dst += num_blocks;
-                                nbytes -= func_bytes;
-                        } while (nbytes >= func_bytes);
-
-                        if (nbytes < bsize)
-                                goto done;
-                }
-        }
-
-done:
-        u128_to_be128((be128 *)walk->iv, &ctrblk);
-        return nbytes;
-}
-
-int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
-                          struct blkcipher_desc *desc, struct scatterlist *dst,
-                          struct scatterlist *src, unsigned int nbytes)
-{
-        const unsigned int bsize = 128 / 8;
-        bool fpu_enabled = false;
-        struct blkcipher_walk walk;
-        int err;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        err = blkcipher_walk_virt_block(desc, &walk, bsize);
-
-        while ((nbytes = walk.nbytes) >= bsize) {
-                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-                                             desc, fpu_enabled, nbytes);
-                nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
-                err = blkcipher_walk_done(desc, &walk, nbytes);
-        }
-
-        glue_fpu_end(fpu_enabled);
-
-        if (walk.nbytes) {
-                glue_ctr_crypt_final_128bit(
-                        gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-                err = blkcipher_walk_done(desc, &walk, 0);
-        }
-
-        return err;
-}
 
 static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
 {