author	Herbert Xu <herbert@gondor.apana.org.au>	2016-07-12 01:17:50 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-07-18 05:35:46 -0400
commit	3a01d0ee2b991c8c267620e63a4ab47cd8c30cc4 (patch)
tree	5c8d3aa7fb5fbaa3ae9d80883e9057c0395b7def
parent	6cf80a296575723aed6ce6c695581540202bfc6b (diff)
crypto: skcipher - Remove top-level givcipher interface

This patch removes the old crypto_grab_skcipher helper and replaces
it with crypto_grab_skcipher2.

As this is the final entry point into givcipher, this patch also
removes all traces of the top-level givcipher interface, including
all implicit IV generators such as chainiv.

The bottom-level givcipher interface remains until the drivers using
it are converted.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
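For context, a caller-side sketch (not part of this commit) of how a template
now binds to an underlying skcipher through the renamed helper. Names
prefixed "example_" are hypothetical; the crypto_* calls are the ones this
patch touches.

        /* Hypothetical template init: grab the inner skcipher via the
         * type-2 entry point that this patch makes the only one. */
        static int example_init_spawn(struct skcipher_instance *inst,
                                      struct rtattr **tb)
        {
                struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
                const char *name = crypto_attr_alg_name(tb[1]);

                if (IS_ERR(name))
                        return PTR_ERR(name);

                crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
                return crypto_grab_skcipher2(spawn, name, 0, 0);
        }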
-rw-r--r--	crypto/Makefile	2
-rw-r--r--	crypto/ablkcipher.c	222
-rw-r--r--	crypto/blkcipher.c	185
-rw-r--r--	crypto/chainiv.c	317
-rw-r--r--	crypto/eseqiv.c	242
-rw-r--r--	crypto/seqiv.c	162
-rw-r--r--	crypto/skcipher.c	4
-rw-r--r--	include/crypto/internal/skcipher.h	63
-rw-r--r--	include/crypto/skcipher.h	76
-rw-r--r--	include/linux/crypto.h	19
10 files changed, 18 insertions, 1274 deletions
diff --git a/crypto/Makefile b/crypto/Makefile
index df1bcfb090d2..99cc64ac70ef 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -20,8 +20,6 @@ crypto_blkcipher-y := ablkcipher.o
 crypto_blkcipher-y += blkcipher.o
 crypto_blkcipher-y += skcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
 
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 6b80516778c6..d676fc59521a 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -16,8 +16,6 @@
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/cryptouser.h>
@@ -348,16 +346,6 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
 	return alg->cra_ctxsize;
 }
 
-int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	return crypto_ablkcipher_encrypt(&req->creq);
-}
-
-int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
-{
-	return crypto_ablkcipher_decrypt(&req->creq);
-}
-
 static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
 				      u32 mask)
 {
@@ -370,10 +358,6 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
 	crt->setkey = setkey;
 	crt->encrypt = alg->encrypt;
 	crt->decrypt = alg->decrypt;
-	if (!alg->ivsize) {
-		crt->givencrypt = skcipher_null_givencrypt;
-		crt->givdecrypt = skcipher_null_givdecrypt;
-	}
 	crt->base = __crypto_ablkcipher_cast(tfm);
 	crt->ivsize = alg->ivsize;
 
@@ -435,11 +419,6 @@ const struct crypto_type crypto_ablkcipher_type = {
 };
 EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
 
-static int no_givdecrypt(struct skcipher_givcrypt_request *req)
-{
-	return -ENOSYS;
-}
-
 static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
 				     u32 mask)
 {
@@ -453,8 +432,6 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
 		alg->setkey : setkey;
 	crt->encrypt = alg->encrypt;
 	crt->decrypt = alg->decrypt;
-	crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
-	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
 	crt->base = __crypto_ablkcipher_cast(tfm);
 	crt->ivsize = alg->ivsize;
 
@@ -515,202 +492,3 @@ const struct crypto_type crypto_givcipher_type = {
 	.report = crypto_givcipher_report,
 };
 EXPORT_SYMBOL_GPL(crypto_givcipher_type);
-
-const char *crypto_default_geniv(const struct crypto_alg *alg)
-{
-	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-	     alg->cra_ablkcipher.ivsize) !=
-	    alg->cra_blocksize)
-		return "chainiv";
-
-	return "eseqiv";
-}
-
-static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
-{
-	struct rtattr *tb[3];
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_type data;
-	} ptype;
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_alg data;
-	} palg;
-	struct crypto_template *tmpl;
-	struct crypto_instance *inst;
-	struct crypto_alg *larval;
-	const char *geniv;
-	int err;
-
-	larval = crypto_larval_lookup(alg->cra_driver_name,
-				      (type & ~CRYPTO_ALG_TYPE_MASK) |
-				      CRYPTO_ALG_TYPE_GIVCIPHER,
-				      mask | CRYPTO_ALG_TYPE_MASK);
-	err = PTR_ERR(larval);
-	if (IS_ERR(larval))
-		goto out;
-
-	err = -EAGAIN;
-	if (!crypto_is_larval(larval))
-		goto drop_larval;
-
-	ptype.attr.rta_len = sizeof(ptype);
-	ptype.attr.rta_type = CRYPTOA_TYPE;
-	ptype.data.type = type | CRYPTO_ALG_GENIV;
-	/* GENIV tells the template that we're making a default geniv. */
-	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-	tb[0] = &ptype.attr;
-
-	palg.attr.rta_len = sizeof(palg);
-	palg.attr.rta_type = CRYPTOA_ALG;
-	/* Must use the exact name to locate ourselves. */
-	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-	tb[1] = &palg.attr;
-
-	tb[2] = NULL;
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		geniv = alg->cra_blkcipher.geniv;
-	else
-		geniv = alg->cra_ablkcipher.geniv;
-
-	if (!geniv)
-		geniv = crypto_default_geniv(alg);
-
-	tmpl = crypto_lookup_template(geniv);
-	err = -ENOENT;
-	if (!tmpl)
-		goto kill_larval;
-
-	if (tmpl->create) {
-		err = tmpl->create(tmpl, tb);
-		if (err)
-			goto put_tmpl;
-		goto ok;
-	}
-
-	inst = tmpl->alloc(tb);
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto put_tmpl;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		tmpl->free(inst);
-		goto put_tmpl;
-	}
-
-ok:
-	/* Redo the lookup to use the instance we just registered. */
-	err = -EAGAIN;
-
-put_tmpl:
-	crypto_tmpl_put(tmpl);
-kill_larval:
-	crypto_larval_kill(larval);
-drop_larval:
-	crypto_mod_put(larval);
-out:
-	crypto_mod_put(alg);
-	return err;
-}
-
-struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
-{
-	struct crypto_alg *alg;
-
-	alg = crypto_alg_mod_lookup(name, type, mask);
-	if (IS_ERR(alg))
-		return alg;
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_GIVCIPHER)
-		return alg;
-
-	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-	      alg->cra_ablkcipher.ivsize))
-		return alg;
-
-	crypto_mod_put(alg);
-	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
-				    mask & ~CRYPTO_ALG_TESTED);
-	if (IS_ERR(alg))
-		return alg;
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_GIVCIPHER) {
-		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
-			crypto_mod_put(alg);
-			alg = ERR_PTR(-ENOENT);
-		}
-		return alg;
-	}
-
-	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-		 alg->cra_ablkcipher.ivsize));
-
-	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
-}
-EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
-
-int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
-			 u32 type, u32 mask)
-{
-	struct crypto_alg *alg;
-	int err;
-
-	type = crypto_skcipher_type(type);
-	mask = crypto_skcipher_mask(mask);
-
-	alg = crypto_lookup_skcipher(name, type, mask);
-	if (IS_ERR(alg))
-		return PTR_ERR(alg);
-
-	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-	crypto_mod_put(alg);
-	return err;
-}
-EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
-
-struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
-						  u32 type, u32 mask)
-{
-	struct crypto_tfm *tfm;
-	int err;
-
-	type = crypto_skcipher_type(type);
-	mask = crypto_skcipher_mask(mask);
-
-	for (;;) {
-		struct crypto_alg *alg;
-
-		alg = crypto_lookup_skcipher(alg_name, type, mask);
-		if (IS_ERR(alg)) {
-			err = PTR_ERR(alg);
-			goto err;
-		}
-
-		tfm = __crypto_alloc_tfm(alg, type, mask);
-		if (!IS_ERR(tfm))
-			return __crypto_ablkcipher_cast(tfm);
-
-		crypto_mod_put(alg);
-		err = PTR_ERR(tfm);
-
-err:
-		if (err != -EAGAIN)
-			break;
-		if (fatal_signal_pending(current)) {
-			err = -EINTR;
-			break;
-		}
-	}
-
-	return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
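For reference, the deleted crypto_default_geniv() above encoded a simple
selection rule. A standalone restatement, as a sketch rather than kernel
code (the example_* name is hypothetical):

        /* Ciphers whose IV size differs from the block size (e.g. a
         * counter-style IV) defaulted to "chainiv"; ciphers with
         * IV size == block size (typical CBC) got "eseqiv". */
        static const char *example_default_geniv(unsigned int ivsize,
                                                 unsigned int blocksize)
        {
                return ivsize != blocksize ? "chainiv" : "eseqiv";
        }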
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 8cc1622b2ee0..369999530108 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -21,7 +21,6 @@
 #include <linux/hardirq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -466,10 +465,6 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
 	crt->setkey = async_setkey;
 	crt->encrypt = async_encrypt;
 	crt->decrypt = async_decrypt;
-	if (!alg->ivsize) {
-		crt->givencrypt = skcipher_null_givencrypt;
-		crt->givdecrypt = skcipher_null_givdecrypt;
-	}
 	crt->base = __crypto_ablkcipher_cast(tfm);
 	crt->ivsize = alg->ivsize;
 
@@ -560,185 +555,5 @@ const struct crypto_type crypto_blkcipher_type = {
 };
 EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
 
-static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
-				 const char *name, u32 type, u32 mask)
-{
-	struct crypto_alg *alg;
-	int err;
-
-	type = crypto_skcipher_type(type);
-	mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
-
-	alg = crypto_alg_mod_lookup(name, type, mask);
-	if (IS_ERR(alg))
-		return PTR_ERR(alg);
-
-	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-	crypto_mod_put(alg);
-	return err;
-}
-
-struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
-					     struct rtattr **tb, u32 type,
-					     u32 mask)
-{
-	struct {
-		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
-			      unsigned int keylen);
-		int (*encrypt)(struct ablkcipher_request *req);
-		int (*decrypt)(struct ablkcipher_request *req);
-
-		unsigned int min_keysize;
-		unsigned int max_keysize;
-		unsigned int ivsize;
-
-		const char *geniv;
-	} balg;
-	const char *name;
-	struct crypto_skcipher_spawn *spawn;
-	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
-	int err;
-
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return ERR_CAST(algt);
-
-	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
-	    algt->mask)
-		return ERR_PTR(-EINVAL);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return ERR_CAST(name);
-
-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
-		return ERR_PTR(-ENOMEM);
-
-	spawn = crypto_instance_ctx(inst);
-
-	/* Ignore async algorithms if necessary. */
-	mask |= crypto_requires_sync(algt->type, algt->mask);
-
-	crypto_set_skcipher_spawn(spawn, inst);
-	err = crypto_grab_nivcipher(spawn, name, type, mask);
-	if (err)
-		goto err_free_inst;
-
-	alg = crypto_skcipher_spawn_alg(spawn);
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER) {
-		balg.ivsize = alg->cra_blkcipher.ivsize;
-		balg.min_keysize = alg->cra_blkcipher.min_keysize;
-		balg.max_keysize = alg->cra_blkcipher.max_keysize;
-
-		balg.setkey = async_setkey;
-		balg.encrypt = async_encrypt;
-		balg.decrypt = async_decrypt;
-
-		balg.geniv = alg->cra_blkcipher.geniv;
-	} else {
-		balg.ivsize = alg->cra_ablkcipher.ivsize;
-		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
-		balg.max_keysize = alg->cra_ablkcipher.max_keysize;
-
-		balg.setkey = alg->cra_ablkcipher.setkey;
-		balg.encrypt = alg->cra_ablkcipher.encrypt;
-		balg.decrypt = alg->cra_ablkcipher.decrypt;
-
-		balg.geniv = alg->cra_ablkcipher.geniv;
-	}
-
-	err = -EINVAL;
-	if (!balg.ivsize)
-		goto err_drop_alg;
-
-	/*
-	 * This is only true if we're constructing an algorithm with its
-	 * default IV generator.  For the default generator we elide the
-	 * template name and double-check the IV generator.
-	 */
-	if (algt->mask & CRYPTO_ALG_GENIV) {
-		if (!balg.geniv)
-			balg.geniv = crypto_default_geniv(alg);
-		err = -EAGAIN;
-		if (strcmp(tmpl->name, balg.geniv))
-			goto err_drop_alg;
-
-		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
-		       CRYPTO_MAX_ALG_NAME);
-	} else {
-		err = -ENAMETOOLONG;
-		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "%s(%s)", tmpl->name, alg->cra_name) >=
-		    CRYPTO_MAX_ALG_NAME)
-			goto err_drop_alg;
-		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
-		    CRYPTO_MAX_ALG_NAME)
-			goto err_drop_alg;
-	}
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
-	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_givcipher_type;
-
-	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
-	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
-	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
-	inst->alg.cra_ablkcipher.geniv = balg.geniv;
-
-	inst->alg.cra_ablkcipher.setkey = balg.setkey;
-	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
-	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
-
-out:
-	return inst;
-
-err_drop_alg:
-	crypto_drop_skcipher(spawn);
-err_free_inst:
-	kfree(inst);
-	inst = ERR_PTR(err);
-	goto out;
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
-
-void skcipher_geniv_free(struct crypto_instance *inst)
-{
-	crypto_drop_skcipher(crypto_instance_ctx(inst));
-	kfree(inst);
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_free);
-
-int skcipher_geniv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_ablkcipher *cipher;
-
-	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	tfm->crt_ablkcipher.base = cipher;
-	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_init);
-
-void skcipher_geniv_exit(struct crypto_tfm *tfm)
-{
-	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic block chaining cipher type");
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
deleted file mode 100644
index b4340018c8d4..000000000000
--- a/crypto/chainiv.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * chainiv: Chain IV Generator
- *
- * Generate IVs simply be using the last block of the previous encryption.
- * This is mainly useful for CBC with a synchronous algorithm.
- *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-
-enum {
-	CHAINIV_STATE_INUSE = 0,
-};
-
-struct chainiv_ctx {
-	spinlock_t lock;
-	char iv[];
-};
-
-struct async_chainiv_ctx {
-	unsigned long state;
-
-	spinlock_t lock;
-	int err;
-
-	struct crypto_queue queue;
-	struct work_struct postponed;
-
-	char iv[];
-};
-
-static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	unsigned int ivsize;
-	int err;
-
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
-						~CRYPTO_TFM_REQ_MAY_SLEEP,
-					req->creq.base.complete,
-					req->creq.base.data);
-	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-				     req->creq.nbytes, req->creq.info);
-
-	spin_lock_bh(&ctx->lock);
-
-	ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	memcpy(req->giv, ctx->iv, ivsize);
-	memcpy(subreq->info, ctx->iv, ivsize);
-
-	err = crypto_ablkcipher_encrypt(subreq);
-	if (err)
-		goto unlock;
-
-	memcpy(ctx->iv, subreq->info, ivsize);
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	return err;
-}
-
-static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	int err = 0;
-
-	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
-
-	if (iv) {
-		err = crypto_rng_get_bytes(crypto_default_rng, iv,
-					   crypto_ablkcipher_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: skcipher_geniv_init(tfm);
-}
-
-static int chainiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-	char *iv;
-
-	spin_lock_init(&ctx->lock);
-
-	iv = NULL;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
-		iv = ctx->iv;
-	}
-
-	return chainiv_init_common(tfm, iv);
-}
-
-static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
-{
-	int queued;
-	int err = ctx->err;
-
-	if (!ctx->queue.qlen) {
-		smp_mb__before_atomic();
-		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-
-		if (!ctx->queue.qlen ||
-		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-			goto out;
-	}
-
-	queued = queue_work(kcrypto_wq, &ctx->postponed);
-	BUG_ON(!queued);
-
-out:
-	return err;
-}
-
-static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err;
-
-	spin_lock_bh(&ctx->lock);
-	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
-	spin_unlock_bh(&ctx->lock);
-
-	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-		return err;
-
-	ctx->err = err;
-	return async_chainiv_schedule_work(ctx);
-}
-
-static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	memcpy(req->giv, ctx->iv, ivsize);
-	memcpy(subreq->info, ctx->iv, ivsize);
-
-	ctx->err = crypto_ablkcipher_encrypt(subreq);
-	if (ctx->err)
-		goto out;
-
-	memcpy(ctx->iv, subreq->info, ivsize);
-
-out:
-	return async_chainiv_schedule_work(ctx);
-}
-
-static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
-					req->creq.base.complete,
-					req->creq.base.data);
-	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-				     req->creq.nbytes, req->creq.info);
-
-	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-		goto postpone;
-
-	if (ctx->queue.qlen) {
-		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-		goto postpone;
-	}
-
-	return async_chainiv_givencrypt_tail(req);
-
-postpone:
-	return async_chainiv_postpone_request(req);
-}
-
-static void async_chainiv_do_postponed(struct work_struct *work)
-{
-	struct async_chainiv_ctx *ctx = container_of(work,
-						     struct async_chainiv_ctx,
-						     postponed);
-	struct skcipher_givcrypt_request *req;
-	struct ablkcipher_request *subreq;
-	int err;
-
-	/* Only handle one request at a time to avoid hogging keventd. */
-	spin_lock_bh(&ctx->lock);
-	req = skcipher_dequeue_givcrypt(&ctx->queue);
-	spin_unlock_bh(&ctx->lock);
-
-	if (!req) {
-		async_chainiv_schedule_work(ctx);
-		return;
-	}
-
-	subreq = skcipher_givcrypt_reqctx(req);
-	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = async_chainiv_givencrypt_tail(req);
-
-	local_bh_disable();
-	skcipher_givcrypt_complete(req, err);
-	local_bh_enable();
-}
-
-static int async_chainiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-	char *iv;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_init_queue(&ctx->queue, 100);
-	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
-
-	iv = NULL;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt =
-			async_chainiv_givencrypt;
-		iv = ctx->iv;
-	}
-
-	return chainiv_init_common(tfm, iv);
-}
-
-static void async_chainiv_exit(struct crypto_tfm *tfm)
-{
-	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
-
-	skcipher_geniv_exit(tfm);
-}
-
-static struct crypto_template chainiv_tmpl;
-
-static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
-{
-	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
-
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return ERR_CAST(algt);
-
-	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
-	if (IS_ERR(inst))
-		goto out;
-
-	inst->alg.cra_init = chainiv_init;
-	inst->alg.cra_exit = skcipher_geniv_exit;
-
-	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
-
-	if (!crypto_requires_sync(algt->type, algt->mask)) {
-		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
-
-		inst->alg.cra_init = async_chainiv_init;
-		inst->alg.cra_exit = async_chainiv_exit;
-
-		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
-	}
-
-	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-
-out:
-	return inst;
-}
-
-static struct crypto_template chainiv_tmpl = {
-	.name = "chainiv",
-	.alloc = chainiv_alloc,
-	.free = skcipher_geniv_free,
-	.module = THIS_MODULE,
-};
-
-static int __init chainiv_module_init(void)
-{
-	return crypto_register_template(&chainiv_tmpl);
-}
-
-static void chainiv_module_exit(void)
-{
-	crypto_unregister_template(&chainiv_tmpl);
-}
-
-module_init(chainiv_module_init);
-module_exit(chainiv_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Chain IV Generator");
-MODULE_ALIAS_CRYPTO("chainiv");
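For reference, chainiv's scheme in brief: hand out the stored IV, run the
encryption, then keep the cipher's final IV state for the next request. A
minimal sketch of that chaining step under those assumptions (example_*
names are hypothetical; a 16-byte block is assumed, and the real code holds
the lock across the encryption itself):

        struct example_chainiv_state {
                spinlock_t lock;
                u8 iv[16];
        };

        static void example_chain_iv(struct example_chainiv_state *st,
                                     u8 *giv, const u8 *next_iv,
                                     unsigned int ivsize)
        {
                spin_lock_bh(&st->lock);
                memcpy(giv, st->iv, ivsize);     /* IV for this request */
                memcpy(st->iv, next_iv, ivsize); /* chain to the next one */
                spin_unlock_bh(&st->lock);
        }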
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
deleted file mode 100644
index 16dda72fc4f8..000000000000
--- a/crypto/eseqiv.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * eseqiv: Encrypted Sequence Number IV Generator
- *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
- * the plain text.  This algorithm requires that the block size be equal
- * to the IV size.  It is mainly useful for CBC.
- *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
-#include <crypto/scatterwalk.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-struct eseqiv_request_ctx {
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
-	char tail[];
-};
-
-struct eseqiv_ctx {
-	spinlock_t lock;
-	unsigned int reqoff;
-	char salt[];
-};
-
-static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-
-	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
-			 crypto_ablkcipher_alignmask(geniv) + 1),
-	       crypto_ablkcipher_ivsize(geniv));
-}
-
-static void eseqiv_complete(struct crypto_async_request *base, int err)
-{
-	struct skcipher_givcrypt_request *req = base->data;
-
-	if (err)
-		goto out;
-
-	eseqiv_complete2(req);
-
-out:
-	skcipher_givcrypt_complete(req, err);
-}
-
-static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-	struct ablkcipher_request *subreq;
-	crypto_completion_t compl;
-	void *data;
-	struct scatterlist *osrc, *odst;
-	struct scatterlist *dst;
-	struct page *srcp;
-	struct page *dstp;
-	u8 *giv;
-	u8 *vsrc;
-	u8 *vdst;
-	__be64 seq;
-	unsigned int ivsize;
-	unsigned int len;
-	int err;
-
-	subreq = (void *)(reqctx->tail + ctx->reqoff);
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-
-	giv = req->giv;
-	compl = req->creq.base.complete;
-	data = req->creq.base.data;
-
-	osrc = req->creq.src;
-	odst = req->creq.dst;
-	srcp = sg_page(osrc);
-	dstp = sg_page(odst);
-	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
-	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
-
-	ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
-		giv = PTR_ALIGN((u8 *)reqctx->tail,
-				crypto_ablkcipher_alignmask(geniv) + 1);
-		compl = eseqiv_complete;
-		data = req;
-	}
-
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
-					data);
-
-	sg_init_table(reqctx->src, 2);
-	sg_set_buf(reqctx->src, giv, ivsize);
-	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
-
-	dst = reqctx->src;
-	if (osrc != odst) {
-		sg_init_table(reqctx->dst, 2);
-		sg_set_buf(reqctx->dst, giv, ivsize);
-		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
-
-		dst = reqctx->dst;
-	}
-
-	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
-				     req->creq.nbytes + ivsize,
-				     req->creq.info);
-
-	memcpy(req->creq.info, ctx->salt, ivsize);
-
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-
-	err = crypto_ablkcipher_encrypt(subreq);
-	if (err)
-		goto out;
-
-	if (giv != req->giv)
-		eseqiv_complete2(req);
-
-out:
-	return err;
-}
-
-static int eseqiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	unsigned long alignmask;
-	unsigned int reqsize;
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	alignmask = crypto_tfm_ctx_alignment() - 1;
-	reqsize = sizeof(struct eseqiv_request_ctx);
-
-	if (alignmask & reqsize) {
-		alignmask &= reqsize;
-		alignmask--;
-	}
-
-	alignmask = ~alignmask;
-	alignmask &= crypto_ablkcipher_alignmask(geniv);
-
-	reqsize += alignmask;
-	reqsize += crypto_ablkcipher_ivsize(geniv);
-	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
-
-	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
-
-	tfm->crt_ablkcipher.reqsize = reqsize +
-				      sizeof(struct ablkcipher_request);
-
-	err = 0;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
-		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-					   crypto_ablkcipher_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: skcipher_geniv_init(tfm);
-}
-
-static struct crypto_template eseqiv_tmpl;
-
-static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
-{
-	struct crypto_instance *inst;
-	int err;
-
-	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
-	if (IS_ERR(inst))
-		goto out;
-
-	err = -EINVAL;
-	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
-		goto free_inst;
-
-	inst->alg.cra_init = eseqiv_init;
-	inst->alg.cra_exit = skcipher_geniv_exit;
-
-	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
-	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-
-out:
-	return inst;
-
-free_inst:
-	skcipher_geniv_free(inst);
-	inst = ERR_PTR(err);
-	goto out;
-}
-
-static struct crypto_template eseqiv_tmpl = {
-	.name = "eseqiv",
-	.alloc = eseqiv_alloc,
-	.free = skcipher_geniv_free,
-	.module = THIS_MODULE,
-};
-
-static int __init eseqiv_module_init(void)
-{
-	return crypto_register_template(&eseqiv_tmpl);
-}
-
-static void __exit eseqiv_module_exit(void)
-{
-	crypto_unregister_template(&eseqiv_tmpl);
-}
-
-module_init(eseqiv_module_init);
-module_exit(eseqiv_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
-MODULE_ALIAS_CRYPTO("eseqiv");
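For reference, the core of eseqiv was serializing the 64-bit sequence number
into the tail of an IV-sized buffer, salting it, and encrypting the result.
The serialization step alone, restated as a sketch (the example_* name is
hypothetical; it mirrors the memset/memcpy logic in the deleted
eseqiv_givencrypt above):

        static void example_seq_to_iv(u8 *giv, u64 seq64, unsigned int ivsize)
        {
                unsigned int len = ivsize;
                __be64 seq;

                /* zero-pad IVs wider than 64 bits, then place the
                 * big-endian sequence number in the last 8 bytes */
                if (ivsize > sizeof(u64)) {
                        memset(giv, 0, ivsize - sizeof(u64));
                        len = sizeof(u64);
                }
                seq = cpu_to_be64(seq64);
                memcpy(giv + ivsize - len, &seq, len);
        }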
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index a859b3ae239d..c7049231861f 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -14,50 +14,17 @@
  */
 
 #include <crypto/internal/geniv.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 
-struct seqiv_ctx {
-	spinlock_t lock;
-	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static void seqiv_free(struct crypto_instance *inst);
 
-static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
-{
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	struct crypto_ablkcipher *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = skcipher_givcrypt_reqtfm(req);
-	memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
-
-out:
-	kfree(subreq->info);
-}
-
-static void seqiv_complete(struct crypto_async_request *base, int err)
-{
-	struct skcipher_givcrypt_request *req = base->data;
-
-	seqiv_complete2(req, err);
-	skcipher_givcrypt_complete(req, err);
-}
-
 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 {
 	struct aead_request *subreq = aead_request_ctx(req);
@@ -85,65 +52,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
 	aead_request_complete(req, err);
 }
 
-static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
-			unsigned int ivsize)
-{
-	unsigned int len = ivsize;
-
-	if (ivsize > sizeof(u64)) {
-		memset(info, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(seq);
-	memcpy(info + ivsize - len, &seq, len);
-	crypto_xor(info, ctx->salt, ivsize);
-}
-
-static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	unsigned int ivsize;
-	int err;
-
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-
-	compl = req->creq.base.complete;
-	data = req->creq.base.data;
-	info = req->creq.info;
-
-	ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_ablkcipher_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, req->creq.base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = seqiv_complete;
-		data = req;
-	}
-
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
-					data);
-	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-				     req->creq.nbytes, info);
-
-	seqiv_geniv(ctx, info, req->seq, ivsize);
-	memcpy(req->giv, info, ivsize);
-
-	err = crypto_ablkcipher_encrypt(subreq);
-	if (unlikely(info != req->creq.info))
-		seqiv_complete2(req, err);
-	return err;
-}
-
 static int seqiv_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -233,62 +141,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
 	return crypto_aead_decrypt(subreq);
 }
 
-static int seqiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
-
-	err = 0;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
-		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-					   crypto_ablkcipher_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: skcipher_geniv_init(tfm);
-}
-
-static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
-				   struct rtattr **tb)
-{
-	struct crypto_instance *inst;
-	int err;
-
-	inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
-
-	if (IS_ERR(inst))
-		return PTR_ERR(inst);
-
-	err = -EINVAL;
-	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
-		goto free_inst;
-
-	inst->alg.cra_init = seqiv_init;
-	inst->alg.cra_exit = skcipher_geniv_exit;
-
-	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
-
-	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err)
-		goto free_inst;
-
-out:
-	return err;
-
-free_inst:
-	skcipher_geniv_free(inst);
-	goto out;
-}
-
 static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct aead_instance *inst;
@@ -334,26 +186,20 @@ free_inst:
 static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
-	int err;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
-		err = seqiv_ablkcipher_create(tmpl, tb);
-	else
-		err = seqiv_aead_create(tmpl, tb);
+		return -EINVAL;
 
-	return err;
+	return seqiv_aead_create(tmpl, tb);
 }
 
 static void seqiv_free(struct crypto_instance *inst)
 {
-	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
-		skcipher_geniv_free(inst);
-	else
-		aead_geniv_free(aead_instance(inst));
+	aead_geniv_free(aead_instance(inst));
 }
 
 static struct crypto_template seqiv_tmpl = {
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index d248008e7f7b..f7d0018dcaee 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -325,13 +325,13 @@ static const struct crypto_type crypto_skcipher_type2 = {
 	.tfmsize = offsetof(struct crypto_skcipher, base),
 };
 
-int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
-			  const char *name, u32 type, u32 mask)
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+			 const char *name, u32 type, u32 mask)
 {
 	spawn->base.frontend = &crypto_skcipher_type2;
 	return crypto_grab_spawn(&spawn->base, name, type, mask);
 }
-EXPORT_SYMBOL_GPL(crypto_grab_skcipher2);
+EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
 
 struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
 					      u32 type, u32 mask)
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index ce6619c339fe..a21a95e1a375 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -67,8 +67,12 @@ static inline void crypto_set_skcipher_spawn(
 
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
 			 u32 type, u32 mask);
-int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
-			  const char *name, u32 type, u32 mask);
+
+static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
+					const char *name, u32 type, u32 mask)
+{
+	return crypto_grab_skcipher(spawn, name, type, mask);
+}
 
 struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
 
@@ -77,30 +81,28 @@ static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
 	crypto_drop_spawn(&spawn->base);
 }
 
-static inline struct crypto_alg *crypto_skcipher_spawn_alg(
+static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
 	struct crypto_skcipher_spawn *spawn)
 {
-	return spawn->base.alg;
+	return container_of(spawn->base.alg, struct skcipher_alg, base);
 }
 
 static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
 	struct crypto_skcipher_spawn *spawn)
 {
-	return container_of(spawn->base.alg, struct skcipher_alg, base);
+	return crypto_skcipher_spawn_alg(spawn);
 }
 
-static inline struct crypto_ablkcipher *crypto_spawn_skcipher(
+static inline struct crypto_skcipher *crypto_spawn_skcipher(
 	struct crypto_skcipher_spawn *spawn)
 {
-	return __crypto_ablkcipher_cast(
-		crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0),
-				 crypto_skcipher_mask(0)));
+	return crypto_spawn_tfm2(&spawn->base);
 }
 
 static inline struct crypto_skcipher *crypto_spawn_skcipher2(
 	struct crypto_skcipher_spawn *spawn)
 {
-	return crypto_spawn_tfm2(&spawn->base);
+	return crypto_spawn_skcipher(spawn);
 }
 
 static inline void crypto_skcipher_set_reqsize(
@@ -116,53 +118,12 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
 int skcipher_register_instance(struct crypto_template *tmpl,
 			       struct skcipher_instance *inst);
 
-int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
-int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);
-const char *crypto_default_geniv(const struct crypto_alg *alg);
-
-struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
-					     struct rtattr **tb, u32 type,
-					     u32 mask);
-void skcipher_geniv_free(struct crypto_instance *inst);
-int skcipher_geniv_init(struct crypto_tfm *tfm);
-void skcipher_geniv_exit(struct crypto_tfm *tfm);
-
-static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
-	struct crypto_ablkcipher *geniv)
-{
-	return crypto_ablkcipher_crt(geniv)->base;
-}
-
-static inline int skcipher_enqueue_givcrypt(
-	struct crypto_queue *queue, struct skcipher_givcrypt_request *request)
-{
-	return ablkcipher_enqueue_request(queue, &request->creq);
-}
-
-static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
-	struct crypto_queue *queue)
-{
-	return skcipher_givcrypt_cast(crypto_dequeue_request(queue));
-}
-
-static inline void *skcipher_givcrypt_reqctx(
-	struct skcipher_givcrypt_request *req)
-{
-	return ablkcipher_request_ctx(&req->creq);
-}
-
 static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
 					       int err)
 {
 	req->base.complete(&req->base, err);
 }
 
-static inline void skcipher_givcrypt_complete(
-	struct skcipher_givcrypt_request *req, int err)
-{
-	ablkcipher_request_complete(&req->creq, err);
-}
-
 static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
 {
 	return req->base.flags;
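With the compatibility wrappers added above, old-style and new-style spawn
call sites resolve to the same type-2 implementation, so users can be
migrated one at a time. A sketch (the example_* name is hypothetical):

        static int example_init_tfm(struct crypto_skcipher_spawn *spawn)
        {
                /* identical to crypto_spawn_skcipher(spawn) after this patch */
                struct crypto_skcipher *cipher = crypto_spawn_skcipher2(spawn);

                return PTR_ERR_OR_ZERO(cipher);
        }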
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index a381f57ea695..59c8f6c593e6 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -139,82 +139,6 @@ struct skcipher_alg {
 		crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
 	struct skcipher_request *name = (void *)__##name##_desc
 
-static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
-	struct skcipher_givcrypt_request *req)
-{
-	return crypto_ablkcipher_reqtfm(&req->creq);
-}
-
-static inline int crypto_skcipher_givencrypt(
-	struct skcipher_givcrypt_request *req)
-{
-	struct ablkcipher_tfm *crt =
-		crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
-	return crt->givencrypt(req);
-};
-
-static inline int crypto_skcipher_givdecrypt(
-	struct skcipher_givcrypt_request *req)
-{
-	struct ablkcipher_tfm *crt =
-		crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
-	return crt->givdecrypt(req);
-};
-
-static inline void skcipher_givcrypt_set_tfm(
-	struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm)
-{
-	req->creq.base.tfm = crypto_ablkcipher_tfm(tfm);
-}
-
-static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast(
-	struct crypto_async_request *req)
-{
-	return container_of(ablkcipher_request_cast(req),
-			    struct skcipher_givcrypt_request, creq);
-}
-
-static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc(
-	struct crypto_ablkcipher *tfm, gfp_t gfp)
-{
-	struct skcipher_givcrypt_request *req;
-
-	req = kmalloc(sizeof(struct skcipher_givcrypt_request) +
-		      crypto_ablkcipher_reqsize(tfm), gfp);
-
-	if (likely(req))
-		skcipher_givcrypt_set_tfm(req, tfm);
-
-	return req;
-}
-
-static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
-{
-	kfree(req);
-}
-
-static inline void skcipher_givcrypt_set_callback(
-	struct skcipher_givcrypt_request *req, u32 flags,
-	crypto_completion_t compl, void *data)
-{
-	ablkcipher_request_set_callback(&req->creq, flags, compl, data);
-}
-
-static inline void skcipher_givcrypt_set_crypt(
-	struct skcipher_givcrypt_request *req,
-	struct scatterlist *src, struct scatterlist *dst,
-	unsigned int nbytes, void *iv)
-{
-	ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv);
-}
-
-static inline void skcipher_givcrypt_set_giv(
-	struct skcipher_givcrypt_request *req, u8 *giv, u64 seq)
-{
-	req->giv = giv;
-	req->seq = seq;
-}
-
 /**
  * DOC: Symmetric Key Cipher API
  *
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 37a652d1639d..7cee5551625b 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -488,8 +488,6 @@ struct ablkcipher_tfm {
 			      unsigned int keylen);
 	int (*encrypt)(struct ablkcipher_request *req);
 	int (*decrypt)(struct ablkcipher_request *req);
-	int (*givencrypt)(struct skcipher_givcrypt_request *req);
-	int (*givdecrypt)(struct skcipher_givcrypt_request *req);
 
 	struct crypto_ablkcipher *base;
 
@@ -714,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask)
  * state information is unused by the kernel crypto API.
  */
 
-/**
- * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- *	      ablkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an ablkcipher. The returned struct
- * crypto_ablkcipher is the cipher handle that is required for any subsequent
- * API invocation for that ablkcipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
- */
-struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
-						  u32 type, u32 mask);
-
 static inline struct crypto_tfm *crypto_ablkcipher_tfm(
 	struct crypto_ablkcipher *tfm)
 {