author		Herbert Xu <herbert@gondor.apana.org.au>	2015-05-27 02:37:33 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-05-27 23:23:18 -0400
commit		d97de47ca1d160acdf29f0b4eadf2ae831bd5254 (patch)
tree		dffb5eabccf22abd355e6aff26fffc8d7457fd2d /crypto/echainiv.c
parent		838c9d561aaae4bc3f4b44046ea08b048ecaffe7 (diff)
crypto: echainiv - Use common IV generation code
This patch makes use of the new common IV generation code.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
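
For reference, the common code lives in <crypto/internal/geniv.h> and
supplies the shared transform state that echainiv now embeds. A minimal
sketch of the resulting context layout (the field order inside
aead_geniv_ctx is an assumption here; the diff below only shows that it
carries the child transform and the lock):

	struct aead_geniv_ctx {
		spinlock_t lock;		/* serializes first-encrypt salt setup */
		struct crypto_aead *child;	/* underlying AEAD implementation */
	};

	struct echainiv_ctx {
		/* aead_geniv_ctx must be the first element so the common
		 * geniv helpers can treat the context as an aead_geniv_ctx */
		struct aead_geniv_ctx geniv;
		struct crypto_blkcipher *null;
		u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
	};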
Diffstat (limited to 'crypto/echainiv.c')
-rw-r--r--	crypto/echainiv.c	| 230
1 file changed, 18 insertions(+), 212 deletions(-)
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 02d054300a9a..0f79fc668ad4 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -18,7 +18,7 @@
  *
  */
 
-#include <crypto/internal/aead.h>
+#include <crypto/internal/geniv.h>
 #include <crypto/null.h>
 #include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
@@ -33,39 +33,15 @@
 
 #define MAX_IV_SIZE 16
 
-struct echainiv_request_ctx {
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
-	struct scatterlist ivbuf[2];
-	struct scatterlist *ivsg;
-	struct aead_givcrypt_request subreq;
-};
-
 struct echainiv_ctx {
-	struct crypto_aead *child;
-	spinlock_t lock;
+	/* aead_geniv_ctx must be first the element */
+	struct aead_geniv_ctx geniv;
 	struct crypto_blkcipher *null;
 	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 };
 
 static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
 
-static int echainiv_setkey(struct crypto_aead *tfm,
-			   const u8 *key, unsigned int keylen)
-{
-	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);
-
-	return crypto_aead_setkey(ctx->child, key, keylen);
-}
-
-static int echainiv_setauthsize(struct crypto_aead *tfm,
-				unsigned int authsize)
-{
-	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);
-
-	return crypto_aead_setauthsize(ctx->child, authsize);
-}
-
 /* We don't care if we get preempted and read/write IVs from the next CPU. */
 static void echainiv_read_iv(u8 *dst, unsigned size)
 {
@@ -90,36 +66,6 @@ static void echainiv_write_iv(const u8 *src, unsigned size)
 	}
 }
 
-static void echainiv_encrypt_compat_complete2(struct aead_request *req,
-					      int err)
-{
-	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_givcrypt_request *subreq = &rctx->subreq;
-	struct crypto_aead *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = crypto_aead_reqtfm(req);
-	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
-				 crypto_aead_ivsize(geniv), 1);
-
-out:
-	kzfree(subreq->giv);
-}
-
-static void echainiv_encrypt_compat_complete(
-	struct crypto_async_request *base, int err)
-{
-	struct aead_request *req = base->data;
-
-	echainiv_encrypt_compat_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
 static void echainiv_encrypt_complete2(struct aead_request *req, int err)
 {
 	struct aead_request *subreq = aead_request_ctx(req);
@@ -154,59 +100,6 @@ static void echainiv_encrypt_complete(struct crypto_async_request *base,
 	aead_request_complete(req, err);
 }
 
-static int echainiv_encrypt_compat(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_givcrypt_request *subreq = &rctx->subreq;
-	unsigned int ivsize = crypto_aead_ivsize(geniv);
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	__be64 seq;
-	int err;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
-	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);
-
-	if (!info) {
-		info = kmalloc(ivsize, req->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = echainiv_encrypt_compat_complete;
-		data = req;
-	}
-
-	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));
-
-	aead_givcrypt_set_tfm(subreq, ctx->child);
-	aead_givcrypt_set_callback(subreq, req->base.flags,
-				   req->base.complete, req->base.data);
-	aead_givcrypt_set_crypt(subreq,
-				scatterwalk_ffwd(rctx->src, req->src,
-						 req->assoclen + ivsize),
-				scatterwalk_ffwd(rctx->dst, rctx->ivsg,
-						 ivsize),
-				req->cryptlen - ivsize, req->iv);
-	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
-	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));
-
-	err = crypto_aead_givencrypt(subreq);
-	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
-		echainiv_encrypt_compat_complete2(req, err);
-	return err;
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -221,7 +114,7 @@ static int echainiv_encrypt(struct aead_request *req)
 	if (req->cryptlen < ivsize)
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->child);
+	aead_request_set_tfm(subreq, ctx->geniv.child);
 
 	compl = echainiv_encrypt_complete;
 	data = req;
@@ -264,38 +157,6 @@ static int echainiv_encrypt(struct aead_request *req)
 	return err;
 }
 
-static int echainiv_decrypt_compat(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq.areq;
-	crypto_completion_t compl;
-	void *data;
-	unsigned int ivsize = crypto_aead_ivsize(geniv);
-
-	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->child);
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq,
-			       scatterwalk_ffwd(rctx->src, req->src,
-						req->assoclen + ivsize),
-			       scatterwalk_ffwd(rctx->dst, req->dst,
-						req->assoclen + ivsize),
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_assoc(subreq, req->src, req->assoclen);
-
-	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-
-	return crypto_aead_decrypt(subreq);
-}
-
 static int echainiv_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -308,7 +169,7 @@ static int echainiv_decrypt(struct aead_request *req)
 	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->child);
+	aead_request_set_tfm(subreq, ctx->geniv.child);
 
 	compl = req->base.complete;
 	data = req->base.data;
@@ -326,36 +187,13 @@ static int echainiv_decrypt(struct aead_request *req)
 	return crypto_aead_decrypt(subreq);
 }
 
-static int echainiv_encrypt_compat_first(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (geniv->encrypt != echainiv_encrypt_compat_first)
-		goto unlock;
-
-	geniv->encrypt = echainiv_encrypt_compat;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	if (err)
-		return err;
-
-	return echainiv_encrypt_compat(req);
-}
-
 static int echainiv_encrypt_first(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
 	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
 	int err = 0;
 
-	spin_lock_bh(&ctx->lock);
+	spin_lock_bh(&ctx->geniv.lock);
 	if (geniv->encrypt != echainiv_encrypt_first)
 		goto unlock;
 
@@ -364,7 +202,7 @@ static int echainiv_encrypt_first(struct aead_request *req)
 				   crypto_aead_ivsize(geniv));
 
 unlock:
-	spin_unlock_bh(&ctx->lock);
+	spin_unlock_bh(&ctx->geniv.lock);
 
 	if (err)
 		return err;
@@ -372,31 +210,13 @@ unlock:
 	return echainiv_encrypt(req);
 }
 
-static int echainiv_compat_init(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct echainiv_request_ctx));
-
-	err = aead_geniv_init(tfm);
-
-	ctx->child = geniv->child;
-	geniv->child = geniv;
-
-	return err;
-}
-
 static int echainiv_init(struct crypto_tfm *tfm)
 {
 	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
 	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
 	int err;
 
-	spin_lock_init(&ctx->lock);
+	spin_lock_init(&ctx->geniv.lock);
 
 	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
 
@@ -409,7 +229,7 @@ static int echainiv_init(struct crypto_tfm *tfm)
 	if (err)
 		goto drop_null;
 
-	ctx->child = geniv->child;
+	ctx->geniv.child = geniv->child;
 	geniv->child = geniv;
 
 out:
@@ -420,18 +240,11 @@ drop_null:
 	goto out;
 }
 
-static void echainiv_compat_exit(struct crypto_tfm *tfm)
-{
-	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_aead(ctx->child);
-}
-
 static void echainiv_exit(struct crypto_tfm *tfm)
 {
 	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypto_free_aead(ctx->child);
+	crypto_free_aead(ctx->geniv.child);
 	crypto_put_default_null_skcipher();
 }
 
@@ -448,17 +261,17 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	if (IS_ERR(inst))
 		return PTR_ERR(inst);
 
+	spawn = aead_instance_ctx(inst);
+	alg = crypto_spawn_aead_alg(spawn);
+
+	if (alg->base.cra_aead.encrypt)
+		goto done;
+
 	err = -EINVAL;
-	if (inst->alg.ivsize < sizeof(u64) ||
-	    inst->alg.ivsize & (sizeof(u32) - 1) ||
+	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
 	    inst->alg.ivsize > MAX_IV_SIZE)
 		goto free_inst;
 
-	spawn = aead_instance_ctx(inst);
-	alg = crypto_spawn_aead_alg(spawn);
-
-	inst->alg.setkey = echainiv_setkey;
-	inst->alg.setauthsize = echainiv_setauthsize;
 	inst->alg.encrypt = echainiv_encrypt_first;
 	inst->alg.decrypt = echainiv_decrypt;
 
@@ -469,14 +282,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
 	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
 
-	if (alg->base.cra_aead.encrypt) {
-		inst->alg.encrypt = echainiv_encrypt_compat_first;
-		inst->alg.decrypt = echainiv_decrypt_compat;
-
-		inst->alg.base.cra_init = echainiv_compat_init;
-		inst->alg.base.cra_exit = echainiv_compat_exit;
-	}
-
+done:
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto free_inst;
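
The "must be first the element" comment in the new struct is what makes
this work: the common geniv helpers receive the same context pointer and
access it as a struct aead_geniv_ctx *, and C guarantees that a pointer
to a struct, suitably converted, points to its first member. A
self-contained sketch of that first-member aliasing idiom (all names
below are illustrative stand-ins, not kernel symbols):

	#include <stdio.h>

	/* Stand-in for the shared state the common helpers know about. */
	struct geniv_common {
		int child;
	};

	/* Stand-in for a driver context that embeds the shared state. */
	struct driver_ctx {
		struct geniv_common geniv;	/* must be the first member */
		int extra_state;
	};

	/* Common helper: sees only the shared part of the context. */
	static void common_helper(struct geniv_common *c)
	{
		c->child = 42;
	}

	int main(void)
	{
		struct driver_ctx ctx = { { 0 }, 7 };

		/* Because geniv is first, &ctx.geniv and &ctx have the same
		 * address, so the common code can operate on the embedded
		 * member while driver code keeps using the full context. */
		common_helper(&ctx.geniv);
		printf("child=%d extra=%d\n", ctx.geniv.child, ctx.extra_state);
		return 0;
	}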