Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig | 77
-rw-r--r--  crypto/Makefile | 12
-rw-r--r--  crypto/ablk_helper.c | 6
-rw-r--r--  crypto/ablkcipher.c | 223
-rw-r--r--  crypto/aead.c | 16
-rw-r--r--  crypto/ahash.c | 6
-rw-r--r--  crypto/algapi.c | 24
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c | 7
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c | 2
-rw-r--r--  crypto/asymmetric_keys/restrict.c | 2
-rw-r--r--  crypto/authenc.c | 116
-rw-r--r--  crypto/authencesn.c | 106
-rw-r--r--  crypto/blkcipher.c | 185
-rw-r--r--  crypto/ccm.c | 72
-rw-r--r--  crypto/chacha20_generic.c | 61
-rw-r--r--  crypto/chacha20poly1305.c | 89
-rw-r--r--  crypto/chainiv.c | 317
-rw-r--r--  crypto/cryptd.c | 133
-rw-r--r--  crypto/crypto_null.c | 11
-rw-r--r--  crypto/crypto_user.c | 57
-rw-r--r--  crypto/ctr.c | 183
-rw-r--r--  crypto/cts.c | 495
-rw-r--r--  crypto/dh.c | 189
-rw-r--r--  crypto/dh_helper.c | 95
-rw-r--r--  crypto/drbg.c | 269
-rw-r--r--  crypto/ecc.c | 1018
-rw-r--r--  crypto/ecc.h | 83
-rw-r--r--  crypto/ecc_curve_defs.h | 57
-rw-r--r--  crypto/ecdh.c | 151
-rw-r--r--  crypto/ecdh_helper.c | 86
-rw-r--r--  crypto/echainiv.c | 16
-rw-r--r--  crypto/eseqiv.c | 242
-rw-r--r--  crypto/gcm.c | 115
-rw-r--r--  crypto/jitterentropy-kcapi.c | 22
-rw-r--r--  crypto/kpp.c | 123
-rw-r--r--  crypto/mcryptd.c | 132
-rw-r--r--  crypto/rsa-pkcs1pad.c | 327
-rw-r--r--  crypto/rsa.c | 113
-rw-r--r--  crypto/rsa_helper.c | 172
-rw-r--r--  crypto/rsaprivkey.asn1 | 10
-rw-r--r--  crypto/scatterwalk.c | 81
-rw-r--r--  crypto/seqiv.c | 176
-rw-r--r--  crypto/sha3_generic.c | 300
-rw-r--r--  crypto/skcipher.c | 196
-rw-r--r--  crypto/tcrypt.c | 442
-rw-r--r--  crypto/testmgr.c | 288
-rw-r--r--  crypto/testmgr.h | 1036
47 files changed, 5353 insertions, 2586 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1d33beb6a1ae..84d71482bf08 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER
93 select CRYPTO_AKCIPHER2 93 select CRYPTO_AKCIPHER2
94 select CRYPTO_ALGAPI 94 select CRYPTO_ALGAPI
95 95
96config CRYPTO_KPP2
97 tristate
98 select CRYPTO_ALGAPI2
99
100config CRYPTO_KPP
101 tristate
102 select CRYPTO_ALGAPI
103 select CRYPTO_KPP2
104
96config CRYPTO_RSA 105config CRYPTO_RSA
97 tristate "RSA algorithm" 106 tristate "RSA algorithm"
98 select CRYPTO_AKCIPHER 107 select CRYPTO_AKCIPHER
@@ -102,6 +111,19 @@ config CRYPTO_RSA
102 help 111 help
103 Generic implementation of the RSA public key algorithm. 112 Generic implementation of the RSA public key algorithm.
104 113
114config CRYPTO_DH
115 tristate "Diffie-Hellman algorithm"
116 select CRYPTO_KPP
117 select MPILIB
118 help
119 Generic implementation of the Diffie-Hellman algorithm.
120
121config CRYPTO_ECDH
122 tristate "ECDH algorithm"
123	select CRYPTO_KPP
124 help
125	  Generic implementation of the ECDH algorithm.
126
105config CRYPTO_MANAGER 127config CRYPTO_MANAGER
106 tristate "Cryptographic algorithm manager" 128 tristate "Cryptographic algorithm manager"
107 select CRYPTO_MANAGER2 129 select CRYPTO_MANAGER2
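
For context, KPP ("Key-agreement Protocol Primitives") selected by CRYPTO_DH and CRYPTO_ECDH is a new top-level crypto API type. A minimal sketch of how an in-kernel caller might drive it for DH, assuming the kpp.h/dh.h helpers introduced by this series (crypto_alloc_kpp(), crypto_dh_encode_key(), crypto_kpp_generate_public_key()); error handling is abbreviated:

#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: generate a DH public key with the new KPP interface. */
static int dh_public_key_sketch(const struct dh *params,
				u8 *public_key, unsigned int key_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist dst;
	char *packed = NULL;
	unsigned int packed_len;
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* dh_helper.c flattens { key, p, g } into the set_secret() buffer. */
	packed_len = crypto_dh_key_len(params);
	err = -ENOMEM;
	packed = kmalloc(packed_len, GFP_KERNEL);
	if (!packed)
		goto out;

	err = crypto_dh_encode_key(packed, packed_len, params);
	if (err)
		goto out;

	err = crypto_kpp_set_secret(tfm, packed, packed_len);
	if (err)
		goto out;

	err = -ENOMEM;
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out;

	sg_init_one(&dst, public_key, key_len);
	kpp_request_set_output(req, &dst, key_len);
	kpp_request_set_callback(req, 0, NULL, NULL);

	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
out:
	kfree(packed);
	crypto_free_kpp(tfm);
	return err;
}

The same flow applies to "ecdh", with the ecdh_helper packing routine preparing the set_secret() buffer instead.
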
@@ -115,6 +137,7 @@ config CRYPTO_MANAGER2
115 select CRYPTO_HASH2 137 select CRYPTO_HASH2
116 select CRYPTO_BLKCIPHER2 138 select CRYPTO_BLKCIPHER2
117 select CRYPTO_AKCIPHER2 139 select CRYPTO_AKCIPHER2
140 select CRYPTO_KPP2
118 141
119config CRYPTO_USER 142config CRYPTO_USER
120 tristate "Userspace cryptographic algorithm configuration" 143 tristate "Userspace cryptographic algorithm configuration"
@@ -414,6 +437,17 @@ config CRYPTO_CRC32C_INTEL
414 gain performance compared with software implementation. 437 gain performance compared with software implementation.
415 Module will be crc32c-intel. 438 Module will be crc32c-intel.
416 439
440config CRYPTO_CRC32C_VPMSUM
441 tristate "CRC32c CRC algorithm (powerpc64)"
442 depends on PPC64 && ALTIVEC
443 select CRYPTO_HASH
444 select CRC32
445 help
446 CRC32c algorithm implemented using vector polynomial multiply-sum
447 (vpmsum) instructions, introduced in POWER8. Enable on POWER8
448 and newer processors for improved performance.
449
450
417config CRYPTO_CRC32C_SPARC64 451config CRYPTO_CRC32C_SPARC64
418 tristate "CRC32c CRC algorithm (SPARC64)" 452 tristate "CRC32c CRC algorithm (SPARC64)"
419 depends on SPARC64 453 depends on SPARC64
@@ -681,6 +715,38 @@ config CRYPTO_SHA1_MB
681 lanes remain unfilled, a flush operation will be initiated to 715 lanes remain unfilled, a flush operation will be initiated to
682 process the crypto jobs, adding a slight latency. 716 process the crypto jobs, adding a slight latency.
683 717
718config CRYPTO_SHA256_MB
719 tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
720 depends on X86 && 64BIT
721 select CRYPTO_SHA256
722 select CRYPTO_HASH
723 select CRYPTO_MCRYPTD
724 help
725	  SHA-256 secure hash standard (DFIPS 180-2) implemented
726	  using the multi-buffer technique. This algorithm computes on
727	  multiple data lanes concurrently with SIMD instructions for
728	  better throughput. It should not be enabled by default, but
729	  used when there is a significant amount of work to keep the
730	  data lanes filled to get a performance benefit. If the data
731 lanes remain unfilled, a flush operation will be initiated to
732 process the crypto jobs, adding a slight latency.
733
734config CRYPTO_SHA512_MB
735 tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
736 depends on X86 && 64BIT
737 select CRYPTO_SHA512
738 select CRYPTO_HASH
739 select CRYPTO_MCRYPTD
740 help
741	  SHA-512 secure hash standard (DFIPS 180-2) implemented
742	  using the multi-buffer technique. This algorithm computes on
743	  multiple data lanes concurrently with SIMD instructions for
744	  better throughput. It should not be enabled by default, but
745	  used when there is a significant amount of work to keep the
746	  data lanes filled to get a performance benefit. If the data
747 lanes remain unfilled, a flush operation will be initiated to
748 process the crypto jobs, adding a slight latency.
749
684config CRYPTO_SHA256 750config CRYPTO_SHA256
685 tristate "SHA224 and SHA256 digest algorithm" 751 tristate "SHA224 and SHA256 digest algorithm"
686 select CRYPTO_HASH 752 select CRYPTO_HASH
@@ -750,6 +816,16 @@ config CRYPTO_SHA512_SPARC64
750 SHA-512 secure hash standard (DFIPS 180-2) implemented 816 SHA-512 secure hash standard (DFIPS 180-2) implemented
751 using sparc64 crypto instructions, when available. 817 using sparc64 crypto instructions, when available.
752 818
819config CRYPTO_SHA3
820 tristate "SHA3 digest algorithm"
821 select CRYPTO_HASH
822 help
823	  SHA-3 secure hash standard (FIPS 202). It is based on
824	  the cryptographic sponge function family called Keccak.
825
826 References:
827 http://keccak.noekeon.org/
828
753config CRYPTO_TGR192 829config CRYPTO_TGR192
754 tristate "Tiger digest algorithms" 830 tristate "Tiger digest algorithms"
755 select CRYPTO_HASH 831 select CRYPTO_HASH
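
sha3_generic.c registers ordinary synchronous hash transforms ("sha3-224" through "sha3-512"), so callers need nothing SHA-3 specific. A one-shot digest through the standard shash API, as a sketch:

#include <crypto/hash.h>
#include <linux/err.h>

/* Sketch: one-shot SHA3-256 digest via the shash interface. */
static int sha3_256_digest_sketch(const u8 *data, unsigned int len,
				  u8 out[32])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* no MAY_SLEEP etc. */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}
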
@@ -1567,6 +1643,7 @@ config CRYPTO_DRBG_HASH
1567config CRYPTO_DRBG_CTR 1643config CRYPTO_DRBG_CTR
1568 bool "Enable CTR DRBG" 1644 bool "Enable CTR DRBG"
1569 select CRYPTO_AES 1645 select CRYPTO_AES
1646 depends on CRYPTO_CTR
1570 help 1647 help
1571 Enable the CTR DRBG variant as defined in NIST SP800-90A. 1648 Enable the CTR DRBG variant as defined in NIST SP800-90A.
1572 1649
diff --git a/crypto/Makefile b/crypto/Makefile
index 4f4ef7eaae3f..99cc64ac70ef 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -20,8 +20,6 @@ crypto_blkcipher-y := ablkcipher.o
20crypto_blkcipher-y += blkcipher.o 20crypto_blkcipher-y += blkcipher.o
21crypto_blkcipher-y += skcipher.o 21crypto_blkcipher-y += skcipher.o
22obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o 22obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
23obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
24obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
25obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o 23obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
26obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o 24obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
27 25
@@ -30,6 +28,15 @@ crypto_hash-y += shash.o
30obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o 28obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
31 29
32obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o 30obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
31obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
32
33dh_generic-y := dh.o
34dh_generic-y += dh_helper.o
35obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
36ecdh_generic-y := ecc.o
37ecdh_generic-y += ecdh.o
38ecdh_generic-y += ecdh_helper.o
39obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
33 40
34$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h 41$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
35$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h 42$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
@@ -61,6 +68,7 @@ obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
61obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o 68obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
62obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o 69obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
63obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o 70obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
71obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
64obj-$(CONFIG_CRYPTO_WP512) += wp512.o 72obj-$(CONFIG_CRYPTO_WP512) += wp512.o
65obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o 73obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
66obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o 74obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
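
The new composite modules bundle the arithmetic with the KPP glue: dh_generic is dh.o plus dh_helper.o, and ecdh_generic carries the ecc.o curve math. The *_helper.c files exist to flatten the parameter structs into the single buffer crypto_kpp_set_secret() takes; a sketch for ECDH, assuming the struct ecdh layout added in include/crypto/ecdh.h (curve_id/key/key_size) and the crypto_ecdh_key_len()/crypto_ecdh_encode_key() helpers:

#include <crypto/ecdh.h>
#include <linux/slab.h>

/* Sketch: pack ECDH parameters for crypto_kpp_set_secret(). */
static void *ecdh_pack_params_sketch(char *private_key,
				     unsigned short key_size,
				     unsigned int *packed_len)
{
	struct ecdh p = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key	  = private_key,
		.key_size = key_size,
	};
	char *buf;

	*packed_len = crypto_ecdh_key_len(&p);
	buf = kmalloc(*packed_len, GFP_KERNEL);
	if (buf && crypto_ecdh_encode_key(buf, *packed_len, &p)) {
		kfree(buf);
		buf = NULL;
	}

	return buf;
}
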
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index e1fcf53bb931..1441f07d0a19 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -71,7 +71,8 @@ int ablk_encrypt(struct ablkcipher_request *req)
71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
73 73
74 if (!may_use_simd()) { 74 if (!may_use_simd() ||
75 (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
75 struct ablkcipher_request *cryptd_req = 76 struct ablkcipher_request *cryptd_req =
76 ablkcipher_request_ctx(req); 77 ablkcipher_request_ctx(req);
77 78
@@ -90,7 +91,8 @@ int ablk_decrypt(struct ablkcipher_request *req)
90 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 91 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
91 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 92 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
92 93
93 if (!may_use_simd()) { 94 if (!may_use_simd() ||
95 (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
94 struct ablkcipher_request *cryptd_req = 96 struct ablkcipher_request *cryptd_req =
95 ablkcipher_request_ctx(req); 97 ablkcipher_request_ctx(req);
96 98
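
The new condition defers to cryptd not only when SIMD is unusable, but also when the call arrives in atomic context while cryptd still has requests queued for this tfm; handling the new request directly would let it overtake the queued ones. The predicate, pulled out as a sketch:

#include <asm/simd.h>
#include <linux/hardirq.h>
#include <crypto/ablk_helper.h>
#include <crypto/cryptd.h>

/* Sketch: when must an ablk request be routed through cryptd? */
static bool ablk_must_defer(struct async_helper_ctx *ctx)
{
	/*
	 * Either SIMD cannot be used here at all, or we are atomic and
	 * cryptd already holds queued requests for this tfm, so going
	 * direct would reorder them.
	 */
	return !may_use_simd() ||
	       (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm));
}
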
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e5b5721809e2..d676fc59521a 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,11 +14,8 @@
14 */ 14 */
15 15
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <linux/cpumask.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/rtnetlink.h>
21#include <linux/sched.h>
22#include <linux/slab.h> 19#include <linux/slab.h>
23#include <linux/seq_file.h> 20#include <linux/seq_file.h>
24#include <linux/cryptouser.h> 21#include <linux/cryptouser.h>
@@ -349,16 +346,6 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
349 return alg->cra_ctxsize; 346 return alg->cra_ctxsize;
350} 347}
351 348
352int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
353{
354 return crypto_ablkcipher_encrypt(&req->creq);
355}
356
357int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
358{
359 return crypto_ablkcipher_decrypt(&req->creq);
360}
361
362static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, 349static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
363 u32 mask) 350 u32 mask)
364{ 351{
@@ -371,10 +358,6 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
371 crt->setkey = setkey; 358 crt->setkey = setkey;
372 crt->encrypt = alg->encrypt; 359 crt->encrypt = alg->encrypt;
373 crt->decrypt = alg->decrypt; 360 crt->decrypt = alg->decrypt;
374 if (!alg->ivsize) {
375 crt->givencrypt = skcipher_null_givencrypt;
376 crt->givdecrypt = skcipher_null_givdecrypt;
377 }
378 crt->base = __crypto_ablkcipher_cast(tfm); 361 crt->base = __crypto_ablkcipher_cast(tfm);
379 crt->ivsize = alg->ivsize; 362 crt->ivsize = alg->ivsize;
380 363
@@ -436,11 +419,6 @@ const struct crypto_type crypto_ablkcipher_type = {
436}; 419};
437EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); 420EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
438 421
439static int no_givdecrypt(struct skcipher_givcrypt_request *req)
440{
441 return -ENOSYS;
442}
443
444static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, 422static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
445 u32 mask) 423 u32 mask)
446{ 424{
@@ -454,8 +432,6 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
454 alg->setkey : setkey; 432 alg->setkey : setkey;
455 crt->encrypt = alg->encrypt; 433 crt->encrypt = alg->encrypt;
456 crt->decrypt = alg->decrypt; 434 crt->decrypt = alg->decrypt;
457 crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
458 crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
459 crt->base = __crypto_ablkcipher_cast(tfm); 435 crt->base = __crypto_ablkcipher_cast(tfm);
460 crt->ivsize = alg->ivsize; 436 crt->ivsize = alg->ivsize;
461 437
@@ -516,202 +492,3 @@ const struct crypto_type crypto_givcipher_type = {
516 .report = crypto_givcipher_report, 492 .report = crypto_givcipher_report,
517}; 493};
518EXPORT_SYMBOL_GPL(crypto_givcipher_type); 494EXPORT_SYMBOL_GPL(crypto_givcipher_type);
519
520const char *crypto_default_geniv(const struct crypto_alg *alg)
521{
522 if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
523 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
524 alg->cra_ablkcipher.ivsize) !=
525 alg->cra_blocksize)
526 return "chainiv";
527
528 return "eseqiv";
529}
530
531static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
532{
533 struct rtattr *tb[3];
534 struct {
535 struct rtattr attr;
536 struct crypto_attr_type data;
537 } ptype;
538 struct {
539 struct rtattr attr;
540 struct crypto_attr_alg data;
541 } palg;
542 struct crypto_template *tmpl;
543 struct crypto_instance *inst;
544 struct crypto_alg *larval;
545 const char *geniv;
546 int err;
547
548 larval = crypto_larval_lookup(alg->cra_driver_name,
549 (type & ~CRYPTO_ALG_TYPE_MASK) |
550 CRYPTO_ALG_TYPE_GIVCIPHER,
551 mask | CRYPTO_ALG_TYPE_MASK);
552 err = PTR_ERR(larval);
553 if (IS_ERR(larval))
554 goto out;
555
556 err = -EAGAIN;
557 if (!crypto_is_larval(larval))
558 goto drop_larval;
559
560 ptype.attr.rta_len = sizeof(ptype);
561 ptype.attr.rta_type = CRYPTOA_TYPE;
562 ptype.data.type = type | CRYPTO_ALG_GENIV;
563 /* GENIV tells the template that we're making a default geniv. */
564 ptype.data.mask = mask | CRYPTO_ALG_GENIV;
565 tb[0] = &ptype.attr;
566
567 palg.attr.rta_len = sizeof(palg);
568 palg.attr.rta_type = CRYPTOA_ALG;
569 /* Must use the exact name to locate ourselves. */
570 memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
571 tb[1] = &palg.attr;
572
573 tb[2] = NULL;
574
575 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
576 CRYPTO_ALG_TYPE_BLKCIPHER)
577 geniv = alg->cra_blkcipher.geniv;
578 else
579 geniv = alg->cra_ablkcipher.geniv;
580
581 if (!geniv)
582 geniv = crypto_default_geniv(alg);
583
584 tmpl = crypto_lookup_template(geniv);
585 err = -ENOENT;
586 if (!tmpl)
587 goto kill_larval;
588
589 if (tmpl->create) {
590 err = tmpl->create(tmpl, tb);
591 if (err)
592 goto put_tmpl;
593 goto ok;
594 }
595
596 inst = tmpl->alloc(tb);
597 err = PTR_ERR(inst);
598 if (IS_ERR(inst))
599 goto put_tmpl;
600
601 err = crypto_register_instance(tmpl, inst);
602 if (err) {
603 tmpl->free(inst);
604 goto put_tmpl;
605 }
606
607ok:
608 /* Redo the lookup to use the instance we just registered. */
609 err = -EAGAIN;
610
611put_tmpl:
612 crypto_tmpl_put(tmpl);
613kill_larval:
614 crypto_larval_kill(larval);
615drop_larval:
616 crypto_mod_put(larval);
617out:
618 crypto_mod_put(alg);
619 return err;
620}
621
622struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
623{
624 struct crypto_alg *alg;
625
626 alg = crypto_alg_mod_lookup(name, type, mask);
627 if (IS_ERR(alg))
628 return alg;
629
630 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
631 CRYPTO_ALG_TYPE_GIVCIPHER)
632 return alg;
633
634 if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
635 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
636 alg->cra_ablkcipher.ivsize))
637 return alg;
638
639 crypto_mod_put(alg);
640 alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
641 mask & ~CRYPTO_ALG_TESTED);
642 if (IS_ERR(alg))
643 return alg;
644
645 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
646 CRYPTO_ALG_TYPE_GIVCIPHER) {
647 if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
648 crypto_mod_put(alg);
649 alg = ERR_PTR(-ENOENT);
650 }
651 return alg;
652 }
653
654 BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
655 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
656 alg->cra_ablkcipher.ivsize));
657
658 return ERR_PTR(crypto_givcipher_default(alg, type, mask));
659}
660EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
661
662int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
663 u32 type, u32 mask)
664{
665 struct crypto_alg *alg;
666 int err;
667
668 type = crypto_skcipher_type(type);
669 mask = crypto_skcipher_mask(mask);
670
671 alg = crypto_lookup_skcipher(name, type, mask);
672 if (IS_ERR(alg))
673 return PTR_ERR(alg);
674
675 err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
676 crypto_mod_put(alg);
677 return err;
678}
679EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
680
681struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
682 u32 type, u32 mask)
683{
684 struct crypto_tfm *tfm;
685 int err;
686
687 type = crypto_skcipher_type(type);
688 mask = crypto_skcipher_mask(mask);
689
690 for (;;) {
691 struct crypto_alg *alg;
692
693 alg = crypto_lookup_skcipher(alg_name, type, mask);
694 if (IS_ERR(alg)) {
695 err = PTR_ERR(alg);
696 goto err;
697 }
698
699 tfm = __crypto_alloc_tfm(alg, type, mask);
700 if (!IS_ERR(tfm))
701 return __crypto_ablkcipher_cast(tfm);
702
703 crypto_mod_put(alg);
704 err = PTR_ERR(tfm);
705
706err:
707 if (err != -EAGAIN)
708 break;
709 if (fatal_signal_pending(current)) {
710 err = -EINTR;
711 break;
712 }
713 }
714
715 return ERR_PTR(err);
716}
717EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
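
With crypto_default_geniv() and the on-demand givcipher construction removed, there is no implicit "cipher plus IV generator" lookup any more; users that want generated IVs name the generator template explicitly. A sketch of the resulting style (the echainiv/seqiv templates survive, per the Makefile hunk above):

#include <crypto/aead.h>

/* Sketch: IV generation is now spelled out in the algorithm name. */
static struct crypto_aead *esp_style_aead_sketch(void)
{
	return crypto_alloc_aead("echainiv(authenc(hmac(sha256),cbc(aes)))",
				 0, 0);
}
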
diff --git a/crypto/aead.c b/crypto/aead.c
index 9b18a1e40d6a..3f5c5ff004ab 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -294,9 +294,9 @@ int aead_init_geniv(struct crypto_aead *aead)
294 if (err) 294 if (err)
295 goto out; 295 goto out;
296 296
297 ctx->null = crypto_get_default_null_skcipher(); 297 ctx->sknull = crypto_get_default_null_skcipher2();
298 err = PTR_ERR(ctx->null); 298 err = PTR_ERR(ctx->sknull);
299 if (IS_ERR(ctx->null)) 299 if (IS_ERR(ctx->sknull))
300 goto out; 300 goto out;
301 301
302 child = crypto_spawn_aead(aead_instance_ctx(inst)); 302 child = crypto_spawn_aead(aead_instance_ctx(inst));
@@ -314,7 +314,7 @@ out:
314 return err; 314 return err;
315 315
316drop_null: 316drop_null:
317 crypto_put_default_null_skcipher(); 317 crypto_put_default_null_skcipher2();
318 goto out; 318 goto out;
319} 319}
320EXPORT_SYMBOL_GPL(aead_init_geniv); 320EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -324,7 +324,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
324 struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); 324 struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
325 325
326 crypto_free_aead(ctx->child); 326 crypto_free_aead(ctx->child);
327 crypto_put_default_null_skcipher(); 327 crypto_put_default_null_skcipher2();
328} 328}
329EXPORT_SYMBOL_GPL(aead_exit_geniv); 329EXPORT_SYMBOL_GPL(aead_exit_geniv);
330 330
@@ -346,9 +346,13 @@ static int aead_prepare_alg(struct aead_alg *alg)
346{ 346{
347 struct crypto_alg *base = &alg->base; 347 struct crypto_alg *base = &alg->base;
348 348
349 if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) 349 if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
350 PAGE_SIZE / 8)
350 return -EINVAL; 351 return -EINVAL;
351 352
353 if (!alg->chunksize)
354 alg->chunksize = base->cra_blocksize;
355
352 base->cra_type = &crypto_aead_type; 356 base->cra_type = &crypto_aead_type;
353 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 357 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
354 base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; 358 base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
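
The new ->chunksize field gives AEADs the granularity notion skcipher gained: the sub-block size at which a stream-cipher-based algorithm walks its input. aead_prepare_alg() defaults it to cra_blocksize, and max3() now bounds it together with ivsize and maxauthsize. A hypothetical declaration fragment (values illustrative only):

#include <crypto/aead.h>

/*
 * Hypothetical fragment: a ChaCha20-based AEAD has cra_blocksize 1 but
 * consumes keystream in 64-byte chunks, which it would advertise via
 * the new chunksize field instead of relying on the default.
 */
static struct aead_alg example_aead_alg = {
	.ivsize		= 12,
	.maxauthsize	= 16,
	.chunksize	= 64,	/* CHACHA20_BLOCK_SIZE */
	/* .setkey/.encrypt/.decrypt and .base initialisation elided */
};
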
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3887a98abcc3..2ce8bcb9049c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -461,10 +461,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
461 461
462static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) 462static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
463{ 463{
464 if (alg->cra_type == &crypto_ahash_type) 464 if (alg->cra_type != &crypto_ahash_type)
465 return alg->cra_ctxsize; 465 return sizeof(struct crypto_shash *);
466 466
467 return sizeof(struct crypto_shash *); 467 return crypto_alg_extsize(alg);
468} 468}
469 469
470#ifdef CONFIG_NET 470#ifdef CONFIG_NET
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 731255a6104f..df939b54b09f 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -811,6 +811,21 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
811} 811}
812EXPORT_SYMBOL_GPL(crypto_attr_u32); 812EXPORT_SYMBOL_GPL(crypto_attr_u32);
813 813
814int crypto_inst_setname(struct crypto_instance *inst, const char *name,
815 struct crypto_alg *alg)
816{
817 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
818 alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
819 return -ENAMETOOLONG;
820
821 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
822 name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
823 return -ENAMETOOLONG;
824
825 return 0;
826}
827EXPORT_SYMBOL_GPL(crypto_inst_setname);
828
814void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, 829void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
815 unsigned int head) 830 unsigned int head)
816{ 831{
@@ -825,13 +840,8 @@ void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
825 840
826 inst = (void *)(p + head); 841 inst = (void *)(p + head);
827 842
828 err = -ENAMETOOLONG; 843 err = crypto_inst_setname(inst, name, alg);
829 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, 844 if (err)
830 alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
831 goto err_free_inst;
832
833 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
834 name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
835 goto err_free_inst; 845 goto err_free_inst;
836 846
837 return p; 847 return p;
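
crypto_inst_setname() factors out the name formatting that every template open-coded: both cra_name and cra_driver_name become "tmpl(inner)", with -ENAMETOOLONG on overflow. A template's ->create() would use it roughly like this sketch:

#include <crypto/algapi.h>

/* Sketch: naming a template instance after its inner algorithm. */
static int example_setname(struct crypto_instance *inst,
			   struct crypto_template *tmpl,
			   struct crypto_alg *inner)
{
	/* Fills cra_name = "tmpl(inner)" and the driver-name analogue. */
	return crypto_inst_setname(inst, tmpl->name, inner);
}
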
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 6a76d5c70ef6..9492e1c22d38 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -124,5 +124,10 @@ int mscode_note_digest(void *context, size_t hdrlen,
124 struct pefile_context *ctx = context; 124 struct pefile_context *ctx = context;
125 125
126 ctx->digest = kmemdup(value, vlen, GFP_KERNEL); 126 ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
127 return ctx->digest ? 0 : -ENOMEM; 127 if (!ctx->digest)
128 return -ENOMEM;
129
130 ctx->digest_len = vlen;
131
132 return 0;
128} 133}
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 44b746e9df1b..2ffd69769466 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -227,7 +227,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
227 if (asymmetric_key_id_same(p->id, auth)) 227 if (asymmetric_key_id_same(p->id, auth))
228 goto found_issuer_check_skid; 228 goto found_issuer_check_skid;
229 } 229 }
230 } else { 230 } else if (sig->auth_ids[1]) {
231 auth = sig->auth_ids[1]; 231 auth = sig->auth_ids[1];
232 pr_debug("- want %*phN\n", auth->len, auth->data); 232 pr_debug("- want %*phN\n", auth->len, auth->data);
233 for (p = pkcs7->certs; p; p = p->next) { 233 for (p = pkcs7->certs; p; p = p->next) {
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index ac4bddf669de..19d1afb9890f 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -87,7 +87,7 @@ int restrict_link_by_signature(struct key *trust_keyring,
87 87
88 sig = payload->data[asym_auth]; 88 sig = payload->data[asym_auth];
89 if (!sig->auth_ids[0] && !sig->auth_ids[1]) 89 if (!sig->auth_ids[0] && !sig->auth_ids[1])
90 return 0; 90 return -ENOKEY;
91 91
92 if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid)) 92 if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
93 return -EPERM; 93 return -EPERM;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 55a354d57251..a7e1ac786c5d 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -32,8 +32,8 @@ struct authenc_instance_ctx {
32 32
33struct crypto_authenc_ctx { 33struct crypto_authenc_ctx {
34 struct crypto_ahash *auth; 34 struct crypto_ahash *auth;
35 struct crypto_ablkcipher *enc; 35 struct crypto_skcipher *enc;
36 struct crypto_blkcipher *null; 36 struct crypto_skcipher *null;
37}; 37};
38 38
39struct authenc_request_ctx { 39struct authenc_request_ctx {
@@ -83,7 +83,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
83{ 83{
84 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 84 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
85 struct crypto_ahash *auth = ctx->auth; 85 struct crypto_ahash *auth = ctx->auth;
86 struct crypto_ablkcipher *enc = ctx->enc; 86 struct crypto_skcipher *enc = ctx->enc;
87 struct crypto_authenc_keys keys; 87 struct crypto_authenc_keys keys;
88 int err = -EINVAL; 88 int err = -EINVAL;
89 89
@@ -100,11 +100,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
100 if (err) 100 if (err)
101 goto out; 101 goto out;
102 102
103 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 103 crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
104 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & 104 crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
105 CRYPTO_TFM_REQ_MASK); 105 CRYPTO_TFM_REQ_MASK);
106 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); 106 err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
107 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & 107 crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) &
108 CRYPTO_TFM_RES_MASK); 108 CRYPTO_TFM_RES_MASK);
109 109
110out: 110out:
@@ -184,12 +184,15 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
184{ 184{
185 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 185 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
186 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 186 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
187 struct blkcipher_desc desc = { 187 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
188 .tfm = ctx->null,
189 };
190 188
191 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, 189 skcipher_request_set_tfm(skreq, ctx->null);
192 req->assoclen); 190 skcipher_request_set_callback(skreq, aead_request_flags(req),
191 NULL, NULL);
192 skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
193 NULL);
194
195 return crypto_skcipher_encrypt(skreq);
193} 196}
194 197
195static int crypto_authenc_encrypt(struct aead_request *req) 198static int crypto_authenc_encrypt(struct aead_request *req)
@@ -199,14 +202,13 @@ static int crypto_authenc_encrypt(struct aead_request *req)
199 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 202 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
200 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); 203 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
201 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); 204 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
202 struct crypto_ablkcipher *enc = ctx->enc; 205 struct crypto_skcipher *enc = ctx->enc;
203 unsigned int cryptlen = req->cryptlen; 206 unsigned int cryptlen = req->cryptlen;
204 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + 207 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
205 ictx->reqoff); 208 ictx->reqoff);
206 struct scatterlist *src, *dst; 209 struct scatterlist *src, *dst;
207 int err; 210 int err;
208 211
209 sg_init_table(areq_ctx->src, 2);
210 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); 212 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
211 dst = src; 213 dst = src;
212 214
@@ -215,16 +217,15 @@ static int crypto_authenc_encrypt(struct aead_request *req)
215 if (err) 217 if (err)
216 return err; 218 return err;
217 219
218 sg_init_table(areq_ctx->dst, 2);
219 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); 220 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
220 } 221 }
221 222
222 ablkcipher_request_set_tfm(abreq, enc); 223 skcipher_request_set_tfm(skreq, enc);
223 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 224 skcipher_request_set_callback(skreq, aead_request_flags(req),
224 crypto_authenc_encrypt_done, req); 225 crypto_authenc_encrypt_done, req);
225 ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); 226 skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
226 227
227 err = crypto_ablkcipher_encrypt(abreq); 228 err = crypto_skcipher_encrypt(skreq);
228 if (err) 229 if (err)
229 return err; 230 return err;
230 231
@@ -240,8 +241,8 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
240 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); 241 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
241 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); 242 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
242 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); 243 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
243 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + 244 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
244 ictx->reqoff); 245 ictx->reqoff);
245 unsigned int authsize = crypto_aead_authsize(authenc); 246 unsigned int authsize = crypto_aead_authsize(authenc);
246 u8 *ihash = ahreq->result + authsize; 247 u8 *ihash = ahreq->result + authsize;
247 struct scatterlist *src, *dst; 248 struct scatterlist *src, *dst;
@@ -251,22 +252,19 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
251 if (crypto_memneq(ihash, ahreq->result, authsize)) 252 if (crypto_memneq(ihash, ahreq->result, authsize))
252 return -EBADMSG; 253 return -EBADMSG;
253 254
254 sg_init_table(areq_ctx->src, 2);
255 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); 255 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
256 dst = src; 256 dst = src;
257 257
258 if (req->src != req->dst) { 258 if (req->src != req->dst)
259 sg_init_table(areq_ctx->dst, 2);
260 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); 259 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
261 }
262 260
263 ablkcipher_request_set_tfm(abreq, ctx->enc); 261 skcipher_request_set_tfm(skreq, ctx->enc);
264 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 262 skcipher_request_set_callback(skreq, aead_request_flags(req),
265 req->base.complete, req->base.data); 263 req->base.complete, req->base.data);
266 ablkcipher_request_set_crypt(abreq, src, dst, 264 skcipher_request_set_crypt(skreq, src, dst,
267 req->cryptlen - authsize, req->iv); 265 req->cryptlen - authsize, req->iv);
268 266
269 return crypto_ablkcipher_decrypt(abreq); 267 return crypto_skcipher_decrypt(skreq);
270} 268}
271 269
272static void authenc_verify_ahash_done(struct crypto_async_request *areq, 270static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -318,20 +316,20 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
318 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); 316 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
319 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 317 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
320 struct crypto_ahash *auth; 318 struct crypto_ahash *auth;
321 struct crypto_ablkcipher *enc; 319 struct crypto_skcipher *enc;
322 struct crypto_blkcipher *null; 320 struct crypto_skcipher *null;
323 int err; 321 int err;
324 322
325 auth = crypto_spawn_ahash(&ictx->auth); 323 auth = crypto_spawn_ahash(&ictx->auth);
326 if (IS_ERR(auth)) 324 if (IS_ERR(auth))
327 return PTR_ERR(auth); 325 return PTR_ERR(auth);
328 326
329 enc = crypto_spawn_skcipher(&ictx->enc); 327 enc = crypto_spawn_skcipher2(&ictx->enc);
330 err = PTR_ERR(enc); 328 err = PTR_ERR(enc);
331 if (IS_ERR(enc)) 329 if (IS_ERR(enc))
332 goto err_free_ahash; 330 goto err_free_ahash;
333 331
334 null = crypto_get_default_null_skcipher(); 332 null = crypto_get_default_null_skcipher2();
335 err = PTR_ERR(null); 333 err = PTR_ERR(null);
336 if (IS_ERR(null)) 334 if (IS_ERR(null))
337 goto err_free_skcipher; 335 goto err_free_skcipher;
@@ -347,13 +345,13 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
347 max_t(unsigned int, 345 max_t(unsigned int,
348 crypto_ahash_reqsize(auth) + 346 crypto_ahash_reqsize(auth) +
349 sizeof(struct ahash_request), 347 sizeof(struct ahash_request),
350 sizeof(struct ablkcipher_request) + 348 sizeof(struct skcipher_request) +
351 crypto_ablkcipher_reqsize(enc))); 349 crypto_skcipher_reqsize(enc)));
352 350
353 return 0; 351 return 0;
354 352
355err_free_skcipher: 353err_free_skcipher:
356 crypto_free_ablkcipher(enc); 354 crypto_free_skcipher(enc);
357err_free_ahash: 355err_free_ahash:
358 crypto_free_ahash(auth); 356 crypto_free_ahash(auth);
359 return err; 357 return err;
@@ -364,8 +362,8 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
364 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 362 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
365 363
366 crypto_free_ahash(ctx->auth); 364 crypto_free_ahash(ctx->auth);
367 crypto_free_ablkcipher(ctx->enc); 365 crypto_free_skcipher(ctx->enc);
368 crypto_put_default_null_skcipher(); 366 crypto_put_default_null_skcipher2();
369} 367}
370 368
371static void crypto_authenc_free(struct aead_instance *inst) 369static void crypto_authenc_free(struct aead_instance *inst)
@@ -384,7 +382,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
384 struct aead_instance *inst; 382 struct aead_instance *inst;
385 struct hash_alg_common *auth; 383 struct hash_alg_common *auth;
386 struct crypto_alg *auth_base; 384 struct crypto_alg *auth_base;
387 struct crypto_alg *enc; 385 struct skcipher_alg *enc;
388 struct authenc_instance_ctx *ctx; 386 struct authenc_instance_ctx *ctx;
389 const char *enc_name; 387 const char *enc_name;
390 int err; 388 int err;
@@ -397,7 +395,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
397 return -EINVAL; 395 return -EINVAL;
398 396
399 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, 397 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
400 CRYPTO_ALG_TYPE_AHASH_MASK); 398 CRYPTO_ALG_TYPE_AHASH_MASK |
399 crypto_requires_sync(algt->type, algt->mask));
401 if (IS_ERR(auth)) 400 if (IS_ERR(auth))
402 return PTR_ERR(auth); 401 return PTR_ERR(auth);
403 402
@@ -421,37 +420,40 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
421 goto err_free_inst; 420 goto err_free_inst;
422 421
423 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); 422 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
424 err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, 423 err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
425 crypto_requires_sync(algt->type, 424 crypto_requires_sync(algt->type,
426 algt->mask)); 425 algt->mask));
427 if (err) 426 if (err)
428 goto err_drop_auth; 427 goto err_drop_auth;
429 428
430 enc = crypto_skcipher_spawn_alg(&ctx->enc); 429 enc = crypto_spawn_skcipher_alg(&ctx->enc);
431 430
432 ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, 431 ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
433 auth_base->cra_alignmask + 1); 432 auth_base->cra_alignmask + 1);
434 433
435 err = -ENAMETOOLONG; 434 err = -ENAMETOOLONG;
436 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 435 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
437 "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= 436 "authenc(%s,%s)", auth_base->cra_name,
437 enc->base.cra_name) >=
438 CRYPTO_MAX_ALG_NAME) 438 CRYPTO_MAX_ALG_NAME)
439 goto err_drop_enc; 439 goto err_drop_enc;
440 440
441 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 441 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
442 "authenc(%s,%s)", auth_base->cra_driver_name, 442 "authenc(%s,%s)", auth_base->cra_driver_name,
443 enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 443 enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
444 goto err_drop_enc; 444 goto err_drop_enc;
445 445
446 inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; 446 inst->alg.base.cra_flags = (auth_base->cra_flags |
447 inst->alg.base.cra_priority = enc->cra_priority * 10 + 447 enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
448 inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
448 auth_base->cra_priority; 449 auth_base->cra_priority;
449 inst->alg.base.cra_blocksize = enc->cra_blocksize; 450 inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
450 inst->alg.base.cra_alignmask = auth_base->cra_alignmask | 451 inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
451 enc->cra_alignmask; 452 enc->base.cra_alignmask;
452 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); 453 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
453 454
454 inst->alg.ivsize = enc->cra_ablkcipher.ivsize; 455 inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
456 inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
455 inst->alg.maxauthsize = auth->digestsize; 457 inst->alg.maxauthsize = auth->digestsize;
456 458
457 inst->alg.init = crypto_authenc_init_tfm; 459 inst->alg.init = crypto_authenc_init_tfm;
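
The recurring conversion pattern in authenc (and authencesn below) replaces the blkcipher_desc copy through the null cipher with an on-stack synchronous skcipher request. The idiom in isolation, as a sketch; skcipher_request_zero() wipes the stack request afterwards, which the in-tree null-cipher copies omit since no key material is involved:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Sketch: copy @len bytes from @src to @dst through a sync skcipher. */
static int null_copy_sketch(struct crypto_skcipher *null_tfm,
			    struct scatterlist *src,
			    struct scatterlist *dst, unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(req, null_tfm);
	int err;

	skcipher_request_set_tfm(req, null_tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, NULL);

	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);

	return err;
}
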
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0c0468869e25..121010ac9962 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -35,8 +35,8 @@ struct authenc_esn_instance_ctx {
35struct crypto_authenc_esn_ctx { 35struct crypto_authenc_esn_ctx {
36 unsigned int reqoff; 36 unsigned int reqoff;
37 struct crypto_ahash *auth; 37 struct crypto_ahash *auth;
38 struct crypto_ablkcipher *enc; 38 struct crypto_skcipher *enc;
39 struct crypto_blkcipher *null; 39 struct crypto_skcipher *null;
40}; 40};
41 41
42struct authenc_esn_request_ctx { 42struct authenc_esn_request_ctx {
@@ -65,7 +65,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
65{ 65{
66 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 66 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
67 struct crypto_ahash *auth = ctx->auth; 67 struct crypto_ahash *auth = ctx->auth;
68 struct crypto_ablkcipher *enc = ctx->enc; 68 struct crypto_skcipher *enc = ctx->enc;
69 struct crypto_authenc_keys keys; 69 struct crypto_authenc_keys keys;
70 int err = -EINVAL; 70 int err = -EINVAL;
71 71
@@ -82,11 +82,11 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
82 if (err) 82 if (err)
83 goto out; 83 goto out;
84 84
85 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 85 crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
86 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & 86 crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
87 CRYPTO_TFM_REQ_MASK); 87 CRYPTO_TFM_REQ_MASK);
88 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); 88 err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
89 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & 89 crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) &
90 CRYPTO_TFM_RES_MASK); 90 CRYPTO_TFM_RES_MASK);
91 91
92out: 92out:
@@ -182,11 +182,14 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
182{ 182{
183 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); 183 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
184 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 184 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
185 struct blkcipher_desc desc = { 185 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
186 .tfm = ctx->null,
187 };
188 186
189 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len); 187 skcipher_request_set_tfm(skreq, ctx->null);
188 skcipher_request_set_callback(skreq, aead_request_flags(req),
189 NULL, NULL);
190 skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
191
192 return crypto_skcipher_encrypt(skreq);
190} 193}
191 194
192static int crypto_authenc_esn_encrypt(struct aead_request *req) 195static int crypto_authenc_esn_encrypt(struct aead_request *req)
@@ -194,9 +197,9 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
194 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); 197 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
195 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); 198 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
196 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 199 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
197 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail 200 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
198 + ctx->reqoff); 201 ctx->reqoff);
199 struct crypto_ablkcipher *enc = ctx->enc; 202 struct crypto_skcipher *enc = ctx->enc;
200 unsigned int assoclen = req->assoclen; 203 unsigned int assoclen = req->assoclen;
201 unsigned int cryptlen = req->cryptlen; 204 unsigned int cryptlen = req->cryptlen;
202 struct scatterlist *src, *dst; 205 struct scatterlist *src, *dst;
@@ -215,12 +218,12 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
215 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); 218 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
216 } 219 }
217 220
218 ablkcipher_request_set_tfm(abreq, enc); 221 skcipher_request_set_tfm(skreq, enc);
219 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 222 skcipher_request_set_callback(skreq, aead_request_flags(req),
220 crypto_authenc_esn_encrypt_done, req); 223 crypto_authenc_esn_encrypt_done, req);
221 ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); 224 skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
222 225
223 err = crypto_ablkcipher_encrypt(abreq); 226 err = crypto_skcipher_encrypt(skreq);
224 if (err) 227 if (err)
225 return err; 228 return err;
226 229
@@ -234,8 +237,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
234 unsigned int authsize = crypto_aead_authsize(authenc_esn); 237 unsigned int authsize = crypto_aead_authsize(authenc_esn);
235 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); 238 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
236 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 239 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
237 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail 240 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
238 + ctx->reqoff); 241 ctx->reqoff);
239 struct crypto_ahash *auth = ctx->auth; 242 struct crypto_ahash *auth = ctx->auth;
240 u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, 243 u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
241 crypto_ahash_alignmask(auth) + 1); 244 crypto_ahash_alignmask(auth) + 1);
@@ -256,12 +259,12 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
256 sg_init_table(areq_ctx->dst, 2); 259 sg_init_table(areq_ctx->dst, 2);
257 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); 260 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
258 261
259 ablkcipher_request_set_tfm(abreq, ctx->enc); 262 skcipher_request_set_tfm(skreq, ctx->enc);
260 ablkcipher_request_set_callback(abreq, flags, 263 skcipher_request_set_callback(skreq, flags,
261 req->base.complete, req->base.data); 264 req->base.complete, req->base.data);
262 ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv); 265 skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
263 266
264 return crypto_ablkcipher_decrypt(abreq); 267 return crypto_skcipher_decrypt(skreq);
265} 268}
266 269
267static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, 270static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
@@ -331,20 +334,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
331 struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst); 334 struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
332 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); 335 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
333 struct crypto_ahash *auth; 336 struct crypto_ahash *auth;
334 struct crypto_ablkcipher *enc; 337 struct crypto_skcipher *enc;
335 struct crypto_blkcipher *null; 338 struct crypto_skcipher *null;
336 int err; 339 int err;
337 340
338 auth = crypto_spawn_ahash(&ictx->auth); 341 auth = crypto_spawn_ahash(&ictx->auth);
339 if (IS_ERR(auth)) 342 if (IS_ERR(auth))
340 return PTR_ERR(auth); 343 return PTR_ERR(auth);
341 344
342 enc = crypto_spawn_skcipher(&ictx->enc); 345 enc = crypto_spawn_skcipher2(&ictx->enc);
343 err = PTR_ERR(enc); 346 err = PTR_ERR(enc);
344 if (IS_ERR(enc)) 347 if (IS_ERR(enc))
345 goto err_free_ahash; 348 goto err_free_ahash;
346 349
347 null = crypto_get_default_null_skcipher(); 350 null = crypto_get_default_null_skcipher2();
348 err = PTR_ERR(null); 351 err = PTR_ERR(null);
349 if (IS_ERR(null)) 352 if (IS_ERR(null))
350 goto err_free_skcipher; 353 goto err_free_skcipher;
@@ -361,15 +364,15 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
361 sizeof(struct authenc_esn_request_ctx) + 364 sizeof(struct authenc_esn_request_ctx) +
362 ctx->reqoff + 365 ctx->reqoff +
363 max_t(unsigned int, 366 max_t(unsigned int,
364 crypto_ahash_reqsize(auth) + 367 crypto_ahash_reqsize(auth) +
365 sizeof(struct ahash_request), 368 sizeof(struct ahash_request),
366 sizeof(struct skcipher_givcrypt_request) + 369 sizeof(struct skcipher_request) +
367 crypto_ablkcipher_reqsize(enc))); 370 crypto_skcipher_reqsize(enc)));
368 371
369 return 0; 372 return 0;
370 373
371err_free_skcipher: 374err_free_skcipher:
372 crypto_free_ablkcipher(enc); 375 crypto_free_skcipher(enc);
373err_free_ahash: 376err_free_ahash:
374 crypto_free_ahash(auth); 377 crypto_free_ahash(auth);
375 return err; 378 return err;
@@ -380,8 +383,8 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
380 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); 383 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
381 384
382 crypto_free_ahash(ctx->auth); 385 crypto_free_ahash(ctx->auth);
383 crypto_free_ablkcipher(ctx->enc); 386 crypto_free_skcipher(ctx->enc);
384 crypto_put_default_null_skcipher(); 387 crypto_put_default_null_skcipher2();
385} 388}
386 389
387static void crypto_authenc_esn_free(struct aead_instance *inst) 390static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -400,7 +403,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
400 struct aead_instance *inst; 403 struct aead_instance *inst;
401 struct hash_alg_common *auth; 404 struct hash_alg_common *auth;
402 struct crypto_alg *auth_base; 405 struct crypto_alg *auth_base;
403 struct crypto_alg *enc; 406 struct skcipher_alg *enc;
404 struct authenc_esn_instance_ctx *ctx; 407 struct authenc_esn_instance_ctx *ctx;
405 const char *enc_name; 408 const char *enc_name;
406 int err; 409 int err;
@@ -413,7 +416,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
413 return -EINVAL; 416 return -EINVAL;
414 417
415 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, 418 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
416 CRYPTO_ALG_TYPE_AHASH_MASK); 419 CRYPTO_ALG_TYPE_AHASH_MASK |
420 crypto_requires_sync(algt->type, algt->mask));
417 if (IS_ERR(auth)) 421 if (IS_ERR(auth))
418 return PTR_ERR(auth); 422 return PTR_ERR(auth);
419 423
@@ -437,34 +441,36 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
437 goto err_free_inst; 441 goto err_free_inst;
438 442
439 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); 443 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
440 err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, 444 err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
441 crypto_requires_sync(algt->type, 445 crypto_requires_sync(algt->type,
442 algt->mask)); 446 algt->mask));
443 if (err) 447 if (err)
444 goto err_drop_auth; 448 goto err_drop_auth;
445 449
446 enc = crypto_skcipher_spawn_alg(&ctx->enc); 450 enc = crypto_spawn_skcipher_alg(&ctx->enc);
447 451
448 err = -ENAMETOOLONG; 452 err = -ENAMETOOLONG;
449 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 453 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
450 "authencesn(%s,%s)", auth_base->cra_name, 454 "authencesn(%s,%s)", auth_base->cra_name,
451 enc->cra_name) >= CRYPTO_MAX_ALG_NAME) 455 enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
452 goto err_drop_enc; 456 goto err_drop_enc;
453 457
454 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 458 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
455 "authencesn(%s,%s)", auth_base->cra_driver_name, 459 "authencesn(%s,%s)", auth_base->cra_driver_name,
456 enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 460 enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
457 goto err_drop_enc; 461 goto err_drop_enc;
458 462
459 inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; 463 inst->alg.base.cra_flags = (auth_base->cra_flags |
460 inst->alg.base.cra_priority = enc->cra_priority * 10 + 464 enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
465 inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
461 auth_base->cra_priority; 466 auth_base->cra_priority;
462 inst->alg.base.cra_blocksize = enc->cra_blocksize; 467 inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
463 inst->alg.base.cra_alignmask = auth_base->cra_alignmask | 468 inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
464 enc->cra_alignmask; 469 enc->base.cra_alignmask;
465 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); 470 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
466 471
467 inst->alg.ivsize = enc->cra_ablkcipher.ivsize; 472 inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
473 inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
468 inst->alg.maxauthsize = auth->digestsize; 474 inst->alg.maxauthsize = auth->digestsize;
469 475
470 inst->alg.init = crypto_authenc_esn_init_tfm; 476 inst->alg.init = crypto_authenc_esn_init_tfm;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 8cc1622b2ee0..369999530108 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -21,7 +21,6 @@
21#include <linux/hardirq.h> 21#include <linux/hardirq.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/scatterlist.h>
25#include <linux/seq_file.h> 24#include <linux/seq_file.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/string.h> 26#include <linux/string.h>
@@ -466,10 +465,6 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
466 crt->setkey = async_setkey; 465 crt->setkey = async_setkey;
467 crt->encrypt = async_encrypt; 466 crt->encrypt = async_encrypt;
468 crt->decrypt = async_decrypt; 467 crt->decrypt = async_decrypt;
469 if (!alg->ivsize) {
470 crt->givencrypt = skcipher_null_givencrypt;
471 crt->givdecrypt = skcipher_null_givdecrypt;
472 }
473 crt->base = __crypto_ablkcipher_cast(tfm); 468 crt->base = __crypto_ablkcipher_cast(tfm);
474 crt->ivsize = alg->ivsize; 469 crt->ivsize = alg->ivsize;
475 470
@@ -560,185 +555,5 @@ const struct crypto_type crypto_blkcipher_type = {
560}; 555};
561EXPORT_SYMBOL_GPL(crypto_blkcipher_type); 556EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
562 557
563static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
564 const char *name, u32 type, u32 mask)
565{
566 struct crypto_alg *alg;
567 int err;
568
569 type = crypto_skcipher_type(type);
570 mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
571
572 alg = crypto_alg_mod_lookup(name, type, mask);
573 if (IS_ERR(alg))
574 return PTR_ERR(alg);
575
576 err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
577 crypto_mod_put(alg);
578 return err;
579}
580
581struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
582 struct rtattr **tb, u32 type,
583 u32 mask)
584{
585 struct {
586 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
587 unsigned int keylen);
588 int (*encrypt)(struct ablkcipher_request *req);
589 int (*decrypt)(struct ablkcipher_request *req);
590
591 unsigned int min_keysize;
592 unsigned int max_keysize;
593 unsigned int ivsize;
594
595 const char *geniv;
596 } balg;
597 const char *name;
598 struct crypto_skcipher_spawn *spawn;
599 struct crypto_attr_type *algt;
600 struct crypto_instance *inst;
601 struct crypto_alg *alg;
602 int err;
603
604 algt = crypto_get_attr_type(tb);
605 if (IS_ERR(algt))
606 return ERR_CAST(algt);
607
608 if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
609 algt->mask)
610 return ERR_PTR(-EINVAL);
611
612 name = crypto_attr_alg_name(tb[1]);
613 if (IS_ERR(name))
614 return ERR_CAST(name);
615
616 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
617 if (!inst)
618 return ERR_PTR(-ENOMEM);
619
620 spawn = crypto_instance_ctx(inst);
621
622 /* Ignore async algorithms if necessary. */
623 mask |= crypto_requires_sync(algt->type, algt->mask);
624
625 crypto_set_skcipher_spawn(spawn, inst);
626 err = crypto_grab_nivcipher(spawn, name, type, mask);
627 if (err)
628 goto err_free_inst;
629
630 alg = crypto_skcipher_spawn_alg(spawn);
631
632 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
633 CRYPTO_ALG_TYPE_BLKCIPHER) {
634 balg.ivsize = alg->cra_blkcipher.ivsize;
635 balg.min_keysize = alg->cra_blkcipher.min_keysize;
636 balg.max_keysize = alg->cra_blkcipher.max_keysize;
637
638 balg.setkey = async_setkey;
639 balg.encrypt = async_encrypt;
640 balg.decrypt = async_decrypt;
641
642 balg.geniv = alg->cra_blkcipher.geniv;
643 } else {
644 balg.ivsize = alg->cra_ablkcipher.ivsize;
645 balg.min_keysize = alg->cra_ablkcipher.min_keysize;
646 balg.max_keysize = alg->cra_ablkcipher.max_keysize;
647
648 balg.setkey = alg->cra_ablkcipher.setkey;
649 balg.encrypt = alg->cra_ablkcipher.encrypt;
650 balg.decrypt = alg->cra_ablkcipher.decrypt;
651
652 balg.geniv = alg->cra_ablkcipher.geniv;
653 }
654
655 err = -EINVAL;
656 if (!balg.ivsize)
657 goto err_drop_alg;
658
659 /*
660 * This is only true if we're constructing an algorithm with its
661 * default IV generator. For the default generator we elide the
662 * template name and double-check the IV generator.
663 */
664 if (algt->mask & CRYPTO_ALG_GENIV) {
665 if (!balg.geniv)
666 balg.geniv = crypto_default_geniv(alg);
667 err = -EAGAIN;
668 if (strcmp(tmpl->name, balg.geniv))
669 goto err_drop_alg;
670
671 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
672 memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
673 CRYPTO_MAX_ALG_NAME);
674 } else {
675 err = -ENAMETOOLONG;
676 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
677 "%s(%s)", tmpl->name, alg->cra_name) >=
678 CRYPTO_MAX_ALG_NAME)
679 goto err_drop_alg;
680 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
681 "%s(%s)", tmpl->name, alg->cra_driver_name) >=
682 CRYPTO_MAX_ALG_NAME)
683 goto err_drop_alg;
684 }
685
686 inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
687 inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
688 inst->alg.cra_priority = alg->cra_priority;
689 inst->alg.cra_blocksize = alg->cra_blocksize;
690 inst->alg.cra_alignmask = alg->cra_alignmask;
691 inst->alg.cra_type = &crypto_givcipher_type;
692
693 inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
694 inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
695 inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
696 inst->alg.cra_ablkcipher.geniv = balg.geniv;
697
698 inst->alg.cra_ablkcipher.setkey = balg.setkey;
699 inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
700 inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
701
702out:
703 return inst;
704
705err_drop_alg:
706 crypto_drop_skcipher(spawn);
707err_free_inst:
708 kfree(inst);
709 inst = ERR_PTR(err);
710 goto out;
711}
712EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
713
714void skcipher_geniv_free(struct crypto_instance *inst)
715{
716 crypto_drop_skcipher(crypto_instance_ctx(inst));
717 kfree(inst);
718}
719EXPORT_SYMBOL_GPL(skcipher_geniv_free);
720
721int skcipher_geniv_init(struct crypto_tfm *tfm)
722{
723 struct crypto_instance *inst = (void *)tfm->__crt_alg;
724 struct crypto_ablkcipher *cipher;
725
726 cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
727 if (IS_ERR(cipher))
728 return PTR_ERR(cipher);
729
730 tfm->crt_ablkcipher.base = cipher;
731 tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
732
733 return 0;
734}
735EXPORT_SYMBOL_GPL(skcipher_geniv_init);
736
737void skcipher_geniv_exit(struct crypto_tfm *tfm)
738{
739 crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
740}
741EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
742
743MODULE_LICENSE("GPL"); 558MODULE_LICENSE("GPL");
744MODULE_DESCRIPTION("Generic block chaining cipher type"); 559MODULE_DESCRIPTION("Generic block chaining cipher type");
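
The blkcipher.c hunk above removes the skcipher_geniv_* helpers that were built on the legacy ablkcipher interface; the rest of this series converts users to the skcipher API instead. A minimal sketch of that calling pattern follows, assuming a synchronous implementation is acceptable (the CRYPTO_ALG_ASYNC mask requests one, the same trick the crypto_null.c hunk below uses). The algorithm name, function name, and buffer handling are illustrative only; all crypto API calls are the real ones visible in the hunks below.

    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int example_skcipher_encrypt(struct scatterlist *src,
                                        struct scatterlist *dst,
                                        unsigned int len, u8 *iv,
                                        const u8 *key, unsigned int keylen)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            int err;

            /* mask out async implementations so encrypt completes inline */
            tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          NULL, NULL);
            skcipher_request_set_crypt(req, src, dst, len, iv);
            err = crypto_skcipher_encrypt(req);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }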
diff --git a/crypto/ccm.c b/crypto/ccm.c
index cc31ea4335bf..006d8575ef5c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -28,7 +28,7 @@ struct ccm_instance_ctx {
28 28
29struct crypto_ccm_ctx { 29struct crypto_ccm_ctx {
30 struct crypto_cipher *cipher; 30 struct crypto_cipher *cipher;
31 struct crypto_ablkcipher *ctr; 31 struct crypto_skcipher *ctr;
32}; 32};
33 33
34struct crypto_rfc4309_ctx { 34struct crypto_rfc4309_ctx {
@@ -50,7 +50,7 @@ struct crypto_ccm_req_priv_ctx {
50 u32 flags; 50 u32 flags;
51 struct scatterlist src[3]; 51 struct scatterlist src[3];
52 struct scatterlist dst[3]; 52 struct scatterlist dst[3];
53 struct ablkcipher_request abreq; 53 struct skcipher_request skreq;
54}; 54};
55 55
56static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( 56static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
@@ -83,15 +83,15 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
83 unsigned int keylen) 83 unsigned int keylen)
84{ 84{
85 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 85 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
86 struct crypto_ablkcipher *ctr = ctx->ctr; 86 struct crypto_skcipher *ctr = ctx->ctr;
87 struct crypto_cipher *tfm = ctx->cipher; 87 struct crypto_cipher *tfm = ctx->cipher;
88 int err = 0; 88 int err = 0;
89 89
90 crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); 90 crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
91 crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & 91 crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
92 CRYPTO_TFM_REQ_MASK); 92 CRYPTO_TFM_REQ_MASK);
93 err = crypto_ablkcipher_setkey(ctr, key, keylen); 93 err = crypto_skcipher_setkey(ctr, key, keylen);
94 crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & 94 crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
95 CRYPTO_TFM_RES_MASK); 95 CRYPTO_TFM_RES_MASK);
96 if (err) 96 if (err)
97 goto out; 97 goto out;
@@ -347,7 +347,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
347 struct crypto_aead *aead = crypto_aead_reqtfm(req); 347 struct crypto_aead *aead = crypto_aead_reqtfm(req);
348 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 348 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
349 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); 349 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
350 struct ablkcipher_request *abreq = &pctx->abreq; 350 struct skcipher_request *skreq = &pctx->skreq;
351 struct scatterlist *dst; 351 struct scatterlist *dst;
352 unsigned int cryptlen = req->cryptlen; 352 unsigned int cryptlen = req->cryptlen;
353 u8 *odata = pctx->odata; 353 u8 *odata = pctx->odata;
@@ -366,11 +366,11 @@ static int crypto_ccm_encrypt(struct aead_request *req)
366 if (req->src != req->dst) 366 if (req->src != req->dst)
367 dst = pctx->dst; 367 dst = pctx->dst;
368 368
369 ablkcipher_request_set_tfm(abreq, ctx->ctr); 369 skcipher_request_set_tfm(skreq, ctx->ctr);
370 ablkcipher_request_set_callback(abreq, pctx->flags, 370 skcipher_request_set_callback(skreq, pctx->flags,
371 crypto_ccm_encrypt_done, req); 371 crypto_ccm_encrypt_done, req);
372 ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); 372 skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
373 err = crypto_ablkcipher_encrypt(abreq); 373 err = crypto_skcipher_encrypt(skreq);
374 if (err) 374 if (err)
375 return err; 375 return err;
376 376
@@ -407,7 +407,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
407 struct crypto_aead *aead = crypto_aead_reqtfm(req); 407 struct crypto_aead *aead = crypto_aead_reqtfm(req);
408 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 408 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
409 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); 409 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
410 struct ablkcipher_request *abreq = &pctx->abreq; 410 struct skcipher_request *skreq = &pctx->skreq;
411 struct scatterlist *dst; 411 struct scatterlist *dst;
412 unsigned int authsize = crypto_aead_authsize(aead); 412 unsigned int authsize = crypto_aead_authsize(aead);
413 unsigned int cryptlen = req->cryptlen; 413 unsigned int cryptlen = req->cryptlen;
@@ -429,11 +429,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
429 if (req->src != req->dst) 429 if (req->src != req->dst)
430 dst = pctx->dst; 430 dst = pctx->dst;
431 431
432 ablkcipher_request_set_tfm(abreq, ctx->ctr); 432 skcipher_request_set_tfm(skreq, ctx->ctr);
433 ablkcipher_request_set_callback(abreq, pctx->flags, 433 skcipher_request_set_callback(skreq, pctx->flags,
434 crypto_ccm_decrypt_done, req); 434 crypto_ccm_decrypt_done, req);
435 ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); 435 skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
436 err = crypto_ablkcipher_decrypt(abreq); 436 err = crypto_skcipher_decrypt(skreq);
437 if (err) 437 if (err)
438 return err; 438 return err;
439 439
@@ -454,7 +454,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
454 struct ccm_instance_ctx *ictx = aead_instance_ctx(inst); 454 struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
455 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); 455 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
456 struct crypto_cipher *cipher; 456 struct crypto_cipher *cipher;
457 struct crypto_ablkcipher *ctr; 457 struct crypto_skcipher *ctr;
458 unsigned long align; 458 unsigned long align;
459 int err; 459 int err;
460 460
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
462 if (IS_ERR(cipher)) 462 if (IS_ERR(cipher))
463 return PTR_ERR(cipher); 463 return PTR_ERR(cipher);
464 464
465 ctr = crypto_spawn_skcipher(&ictx->ctr); 465 ctr = crypto_spawn_skcipher2(&ictx->ctr);
466 err = PTR_ERR(ctr); 466 err = PTR_ERR(ctr);
467 if (IS_ERR(ctr)) 467 if (IS_ERR(ctr))
468 goto err_free_cipher; 468 goto err_free_cipher;
@@ -475,7 +475,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
475 crypto_aead_set_reqsize( 475 crypto_aead_set_reqsize(
476 tfm, 476 tfm,
477 align + sizeof(struct crypto_ccm_req_priv_ctx) + 477 align + sizeof(struct crypto_ccm_req_priv_ctx) +
478 crypto_ablkcipher_reqsize(ctr)); 478 crypto_skcipher_reqsize(ctr));
479 479
480 return 0; 480 return 0;
481 481
@@ -489,7 +489,7 @@ static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
489 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); 489 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
490 490
491 crypto_free_cipher(ctx->cipher); 491 crypto_free_cipher(ctx->cipher);
492 crypto_free_ablkcipher(ctx->ctr); 492 crypto_free_skcipher(ctx->ctr);
493} 493}
494 494
495static void crypto_ccm_free(struct aead_instance *inst) 495static void crypto_ccm_free(struct aead_instance *inst)
@@ -509,7 +509,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
509{ 509{
510 struct crypto_attr_type *algt; 510 struct crypto_attr_type *algt;
511 struct aead_instance *inst; 511 struct aead_instance *inst;
512 struct crypto_alg *ctr; 512 struct skcipher_alg *ctr;
513 struct crypto_alg *cipher; 513 struct crypto_alg *cipher;
514 struct ccm_instance_ctx *ictx; 514 struct ccm_instance_ctx *ictx;
515 int err; 515 int err;
@@ -544,39 +544,40 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
544 goto err_free_inst; 544 goto err_free_inst;
545 545
546 crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); 546 crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
547 err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, 547 err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
548 crypto_requires_sync(algt->type, 548 crypto_requires_sync(algt->type,
549 algt->mask)); 549 algt->mask));
550 if (err) 550 if (err)
551 goto err_drop_cipher; 551 goto err_drop_cipher;
552 552
553 ctr = crypto_skcipher_spawn_alg(&ictx->ctr); 553 ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
554 554
555 /* Not a stream cipher? */ 555 /* Not a stream cipher? */
556 err = -EINVAL; 556 err = -EINVAL;
557 if (ctr->cra_blocksize != 1) 557 if (ctr->base.cra_blocksize != 1)
558 goto err_drop_ctr; 558 goto err_drop_ctr;
559 559
560 /* We want the real thing! */ 560 /* We want the real thing! */
561 if (ctr->cra_ablkcipher.ivsize != 16) 561 if (crypto_skcipher_alg_ivsize(ctr) != 16)
562 goto err_drop_ctr; 562 goto err_drop_ctr;
563 563
564 err = -ENAMETOOLONG; 564 err = -ENAMETOOLONG;
565 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 565 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
566 "ccm_base(%s,%s)", ctr->cra_driver_name, 566 "ccm_base(%s,%s)", ctr->base.cra_driver_name,
567 cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 567 cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
568 goto err_drop_ctr; 568 goto err_drop_ctr;
569 569
570 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); 570 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
571 571
572 inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC; 572 inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
573 inst->alg.base.cra_priority = (cipher->cra_priority + 573 inst->alg.base.cra_priority = (cipher->cra_priority +
574 ctr->cra_priority) / 2; 574 ctr->base.cra_priority) / 2;
575 inst->alg.base.cra_blocksize = 1; 575 inst->alg.base.cra_blocksize = 1;
576 inst->alg.base.cra_alignmask = cipher->cra_alignmask | 576 inst->alg.base.cra_alignmask = cipher->cra_alignmask |
577 ctr->cra_alignmask | 577 ctr->base.cra_alignmask |
578 (__alignof__(u32) - 1); 578 (__alignof__(u32) - 1);
579 inst->alg.ivsize = 16; 579 inst->alg.ivsize = 16;
580 inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
580 inst->alg.maxauthsize = 16; 581 inst->alg.maxauthsize = 16;
581 inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); 582 inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
582 inst->alg.init = crypto_ccm_init_tfm; 583 inst->alg.init = crypto_ccm_init_tfm;
@@ -863,6 +864,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
863 inst->alg.base.cra_alignmask = alg->base.cra_alignmask; 864 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
864 865
865 inst->alg.ivsize = 8; 866 inst->alg.ivsize = 8;
867 inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
866 inst->alg.maxauthsize = 16; 868 inst->alg.maxauthsize = 16;
867 869
868 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); 870 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
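
Two recurring idioms in the ccm.c conversion are worth noting: the wrapper now inherits its chunksize from the underlying transform (new lines 580 and 867), and setkey propagates tfm flags between parent and child. A hedged restatement of the flag-propagation idiom from crypto_ccm_setkey(), with illustrative names: request flags flow parent to child before setkey, result flags flow child back to parent after it.

    #include <crypto/aead.h>
    #include <crypto/skcipher.h>

    static int example_propagate_setkey(struct crypto_aead *parent,
                                        struct crypto_skcipher *child,
                                        const u8 *key, unsigned int keylen)
    {
            int err;

            /* forward request flags (e.g. weak-key policy) to the child */
            crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
            crypto_skcipher_set_flags(child, crypto_aead_get_flags(parent) &
                                             CRYPTO_TFM_REQ_MASK);
            err = crypto_skcipher_setkey(child, key, keylen);
            /* surface the child's result flags on the parent */
            crypto_aead_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
            return err;
    }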
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index da9c89968223..1cab83146e33 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -15,72 +15,11 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <crypto/chacha20.h> 16#include <crypto/chacha20.h>
17 17
18static inline u32 rotl32(u32 v, u8 n)
19{
20 return (v << n) | (v >> (sizeof(v) * 8 - n));
21}
22
23static inline u32 le32_to_cpuvp(const void *p) 18static inline u32 le32_to_cpuvp(const void *p)
24{ 19{
25 return le32_to_cpup(p); 20 return le32_to_cpup(p);
26} 21}
27 22
28static void chacha20_block(u32 *state, void *stream)
29{
30 u32 x[16], *out = stream;
31 int i;
32
33 for (i = 0; i < ARRAY_SIZE(x); i++)
34 x[i] = state[i];
35
36 for (i = 0; i < 20; i += 2) {
37 x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16);
38 x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16);
39 x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16);
40 x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16);
41
42 x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12);
43 x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12);
44 x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
45 x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);
46
47 x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8);
48 x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8);
49 x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8);
50 x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8);
51
52 x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7);
53 x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7);
54 x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
55 x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);
56
57 x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16);
58 x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16);
59 x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16);
60 x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16);
61
62 x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
63 x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
64 x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12);
65 x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12);
66
67 x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8);
68 x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8);
69 x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8);
70 x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8);
71
72 x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
73 x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
74 x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7);
75 x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7);
76 }
77
78 for (i = 0; i < ARRAY_SIZE(x); i++)
79 out[i] = cpu_to_le32(x[i] + state[i]);
80
81 state[12]++;
82}
83
84static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, 23static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
85 unsigned int bytes) 24 unsigned int bytes)
86{ 25{
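
The chacha20_generic.c hunk deletes the open-coded rotl32() and chacha20_block(); in mainline these helpers were consolidated into a shared copy (lib/chacha20.c, also used by the random driver), so only the <crypto/chacha20.h> include remains here. The deleted double round is the standard ChaCha quarter round (RFC 7539) applied first to the four columns and then to the four diagonals of the 4x4 state. A compact equivalent for reference, using the kernel's rol32() from <linux/bitops.h>:

    #define CHACHA_QR(a, b, c, d) do {                      \
            (a) += (b); (d) = rol32((d) ^ (a), 16);         \
            (c) += (d); (b) = rol32((b) ^ (c), 12);         \
            (a) += (b); (d) = rol32((d) ^ (a),  8);         \
            (c) += (d); (b) = rol32((b) ^ (c),  7);         \
    } while (0)

With x[0..15] as the working state, one double round is CHACHA_QR over the columns (x[0],x[4],x[8],x[12]) through (x[3],x[7],x[11],x[15]), then over the diagonals (x[0],x[5],x[10],x[15]) through (x[3],x[4],x[9],x[14]) - exactly what the unrolled loop above computed.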
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 7b6b935cef23..e899ef51dc8e 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -31,7 +31,7 @@ struct chachapoly_instance_ctx {
31}; 31};
32 32
33struct chachapoly_ctx { 33struct chachapoly_ctx {
34 struct crypto_ablkcipher *chacha; 34 struct crypto_skcipher *chacha;
35 struct crypto_ahash *poly; 35 struct crypto_ahash *poly;
36 /* key bytes we use for the ChaCha20 IV */ 36 /* key bytes we use for the ChaCha20 IV */
37 unsigned int saltlen; 37 unsigned int saltlen;
@@ -53,7 +53,7 @@ struct poly_req {
53struct chacha_req { 53struct chacha_req {
54 u8 iv[CHACHA20_IV_SIZE]; 54 u8 iv[CHACHA20_IV_SIZE];
55 struct scatterlist src[1]; 55 struct scatterlist src[1];
56 struct ablkcipher_request req; /* must be last member */ 56 struct skcipher_request req; /* must be last member */
57}; 57};
58 58
59struct chachapoly_req_ctx { 59struct chachapoly_req_ctx {
@@ -144,12 +144,12 @@ static int chacha_decrypt(struct aead_request *req)
144 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 144 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
145 } 145 }
146 146
147 ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), 147 skcipher_request_set_callback(&creq->req, aead_request_flags(req),
148 chacha_decrypt_done, req); 148 chacha_decrypt_done, req);
149 ablkcipher_request_set_tfm(&creq->req, ctx->chacha); 149 skcipher_request_set_tfm(&creq->req, ctx->chacha);
150 ablkcipher_request_set_crypt(&creq->req, src, dst, 150 skcipher_request_set_crypt(&creq->req, src, dst,
151 rctx->cryptlen, creq->iv); 151 rctx->cryptlen, creq->iv);
152 err = crypto_ablkcipher_decrypt(&creq->req); 152 err = crypto_skcipher_decrypt(&creq->req);
153 if (err) 153 if (err)
154 return err; 154 return err;
155 155
@@ -393,13 +393,13 @@ static int poly_genkey(struct aead_request *req)
393 393
394 chacha_iv(creq->iv, req, 0); 394 chacha_iv(creq->iv, req, 0);
395 395
396 ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), 396 skcipher_request_set_callback(&creq->req, aead_request_flags(req),
397 poly_genkey_done, req); 397 poly_genkey_done, req);
398 ablkcipher_request_set_tfm(&creq->req, ctx->chacha); 398 skcipher_request_set_tfm(&creq->req, ctx->chacha);
399 ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src, 399 skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
400 POLY1305_KEY_SIZE, creq->iv); 400 POLY1305_KEY_SIZE, creq->iv);
401 401
402 err = crypto_ablkcipher_decrypt(&creq->req); 402 err = crypto_skcipher_decrypt(&creq->req);
403 if (err) 403 if (err)
404 return err; 404 return err;
405 405
@@ -433,12 +433,12 @@ static int chacha_encrypt(struct aead_request *req)
433 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 433 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
434 } 434 }
435 435
436 ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), 436 skcipher_request_set_callback(&creq->req, aead_request_flags(req),
437 chacha_encrypt_done, req); 437 chacha_encrypt_done, req);
438 ablkcipher_request_set_tfm(&creq->req, ctx->chacha); 438 skcipher_request_set_tfm(&creq->req, ctx->chacha);
439 ablkcipher_request_set_crypt(&creq->req, src, dst, 439 skcipher_request_set_crypt(&creq->req, src, dst,
440 req->cryptlen, creq->iv); 440 req->cryptlen, creq->iv);
441 err = crypto_ablkcipher_encrypt(&creq->req); 441 err = crypto_skcipher_encrypt(&creq->req);
442 if (err) 442 if (err)
443 return err; 443 return err;
444 444
@@ -500,13 +500,13 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
500 keylen -= ctx->saltlen; 500 keylen -= ctx->saltlen;
501 memcpy(ctx->salt, key + keylen, ctx->saltlen); 501 memcpy(ctx->salt, key + keylen, ctx->saltlen);
502 502
503 crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); 503 crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
504 crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & 504 crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
505 CRYPTO_TFM_REQ_MASK); 505 CRYPTO_TFM_REQ_MASK);
506 506
507 err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen); 507 err = crypto_skcipher_setkey(ctx->chacha, key, keylen);
508 crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) & 508 crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) &
509 CRYPTO_TFM_RES_MASK); 509 CRYPTO_TFM_RES_MASK);
510 return err; 510 return err;
511} 511}
512 512
@@ -524,7 +524,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
524 struct aead_instance *inst = aead_alg_instance(tfm); 524 struct aead_instance *inst = aead_alg_instance(tfm);
525 struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); 525 struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
526 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); 526 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
527 struct crypto_ablkcipher *chacha; 527 struct crypto_skcipher *chacha;
528 struct crypto_ahash *poly; 528 struct crypto_ahash *poly;
529 unsigned long align; 529 unsigned long align;
530 530
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
532 if (IS_ERR(poly)) 532 if (IS_ERR(poly))
533 return PTR_ERR(poly); 533 return PTR_ERR(poly);
534 534
535 chacha = crypto_spawn_skcipher(&ictx->chacha); 535 chacha = crypto_spawn_skcipher2(&ictx->chacha);
536 if (IS_ERR(chacha)) { 536 if (IS_ERR(chacha)) {
537 crypto_free_ahash(poly); 537 crypto_free_ahash(poly);
538 return PTR_ERR(chacha); 538 return PTR_ERR(chacha);
@@ -548,8 +548,8 @@ static int chachapoly_init(struct crypto_aead *tfm)
548 tfm, 548 tfm,
549 align + offsetof(struct chachapoly_req_ctx, u) + 549 align + offsetof(struct chachapoly_req_ctx, u) +
550 max(offsetof(struct chacha_req, req) + 550 max(offsetof(struct chacha_req, req) +
551 sizeof(struct ablkcipher_request) + 551 sizeof(struct skcipher_request) +
552 crypto_ablkcipher_reqsize(chacha), 552 crypto_skcipher_reqsize(chacha),
553 offsetof(struct poly_req, req) + 553 offsetof(struct poly_req, req) +
554 sizeof(struct ahash_request) + 554 sizeof(struct ahash_request) +
555 crypto_ahash_reqsize(poly))); 555 crypto_ahash_reqsize(poly)));
@@ -562,7 +562,7 @@ static void chachapoly_exit(struct crypto_aead *tfm)
562 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); 562 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
563 563
564 crypto_free_ahash(ctx->poly); 564 crypto_free_ahash(ctx->poly);
565 crypto_free_ablkcipher(ctx->chacha); 565 crypto_free_skcipher(ctx->chacha);
566} 566}
567 567
568static void chachapoly_free(struct aead_instance *inst) 568static void chachapoly_free(struct aead_instance *inst)
@@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
579{ 579{
580 struct crypto_attr_type *algt; 580 struct crypto_attr_type *algt;
581 struct aead_instance *inst; 581 struct aead_instance *inst;
582 struct crypto_alg *chacha; 582 struct skcipher_alg *chacha;
583 struct crypto_alg *poly; 583 struct crypto_alg *poly;
584 struct hash_alg_common *poly_hash; 584 struct hash_alg_common *poly_hash;
585 struct chachapoly_instance_ctx *ctx; 585 struct chachapoly_instance_ctx *ctx;
@@ -605,7 +605,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
605 605
606 poly = crypto_find_alg(poly_name, &crypto_ahash_type, 606 poly = crypto_find_alg(poly_name, &crypto_ahash_type,
607 CRYPTO_ALG_TYPE_HASH, 607 CRYPTO_ALG_TYPE_HASH,
608 CRYPTO_ALG_TYPE_AHASH_MASK); 608 CRYPTO_ALG_TYPE_AHASH_MASK |
609 crypto_requires_sync(algt->type,
610 algt->mask));
609 if (IS_ERR(poly)) 611 if (IS_ERR(poly))
610 return PTR_ERR(poly); 612 return PTR_ERR(poly);
611 613
@@ -623,20 +625,20 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
623 goto err_free_inst; 625 goto err_free_inst;
624 626
625 crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); 627 crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
626 err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, 628 err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
627 crypto_requires_sync(algt->type, 629 crypto_requires_sync(algt->type,
628 algt->mask)); 630 algt->mask));
629 if (err) 631 if (err)
630 goto err_drop_poly; 632 goto err_drop_poly;
631 633
632 chacha = crypto_skcipher_spawn_alg(&ctx->chacha); 634 chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
633 635
634 err = -EINVAL; 636 err = -EINVAL;
635 /* Need 16-byte IV size, including Initial Block Counter value */ 637 /* Need 16-byte IV size, including Initial Block Counter value */
636 if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE) 638 if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE)
637 goto out_drop_chacha; 639 goto out_drop_chacha;
638 /* Not a stream cipher? */ 640 /* Not a stream cipher? */
639 if (chacha->cra_blocksize != 1) 641 if (chacha->base.cra_blocksize != 1)
640 goto out_drop_chacha; 642 goto out_drop_chacha;
641 643
642 err = -ENAMETOOLONG; 644 err = -ENAMETOOLONG;
@@ -645,20 +647,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
645 poly_name) >= CRYPTO_MAX_ALG_NAME) 647 poly_name) >= CRYPTO_MAX_ALG_NAME)
646 goto out_drop_chacha; 648 goto out_drop_chacha;
647 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 649 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
648 "%s(%s,%s)", name, chacha->cra_driver_name, 650 "%s(%s,%s)", name, chacha->base.cra_driver_name,
649 poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 651 poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
650 goto out_drop_chacha; 652 goto out_drop_chacha;
651 653
652 inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) & 654 inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) &
653 CRYPTO_ALG_ASYNC; 655 CRYPTO_ALG_ASYNC;
654 inst->alg.base.cra_priority = (chacha->cra_priority + 656 inst->alg.base.cra_priority = (chacha->base.cra_priority +
655 poly->cra_priority) / 2; 657 poly->cra_priority) / 2;
656 inst->alg.base.cra_blocksize = 1; 658 inst->alg.base.cra_blocksize = 1;
657 inst->alg.base.cra_alignmask = chacha->cra_alignmask | 659 inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
658 poly->cra_alignmask; 660 poly->cra_alignmask;
659 inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + 661 inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
660 ctx->saltlen; 662 ctx->saltlen;
661 inst->alg.ivsize = ivsize; 663 inst->alg.ivsize = ivsize;
664 inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
662 inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; 665 inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
663 inst->alg.init = chachapoly_init; 666 inst->alg.init = chachapoly_init;
664 inst->alg.exit = chachapoly_exit; 667 inst->alg.exit = chachapoly_exit;
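
The reqsize computation in chachapoly_init() above reflects a layout worth spelling out: because the ChaCha20 and Poly1305 sub-requests are driven sequentially, the per-request context overlays them in a union and reserves only the larger variable-length tail. A hedged sketch of the same pattern with illustrative names:

    #include <crypto/hash.h>
    #include <crypto/skcipher.h>
    #include <linux/kernel.h>

    struct example_req_ctx {
            u8 iv[16];
            union {
                    struct skcipher_request skreq;  /* variable tail follows */
                    struct ahash_request hreq;      /* variable tail follows */
            } u;
    };

    static unsigned int example_reqsize(struct crypto_skcipher *skc,
                                        struct crypto_ahash *hash)
    {
            /* reserve only the larger of the two sub-request tails */
            return offsetof(struct example_req_ctx, u) +
                   max(sizeof(struct skcipher_request) +
                       crypto_skcipher_reqsize(skc),
                       sizeof(struct ahash_request) +
                       crypto_ahash_reqsize(hash));
    }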
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
deleted file mode 100644
index b4340018c8d4..000000000000
--- a/crypto/chainiv.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * chainiv: Chain IV Generator
3 *
4 * Generate IVs simply by using the last block of the previous encryption.
5 * This is mainly useful for CBC with a synchronous algorithm.
6 *
7 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15
16#include <crypto/internal/skcipher.h>
17#include <crypto/rng.h>
18#include <crypto/crypto_wq.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/spinlock.h>
24#include <linux/string.h>
25#include <linux/workqueue.h>
26
27enum {
28 CHAINIV_STATE_INUSE = 0,
29};
30
31struct chainiv_ctx {
32 spinlock_t lock;
33 char iv[];
34};
35
36struct async_chainiv_ctx {
37 unsigned long state;
38
39 spinlock_t lock;
40 int err;
41
42 struct crypto_queue queue;
43 struct work_struct postponed;
44
45 char iv[];
46};
47
48static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
49{
50 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
51 struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
52 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
53 unsigned int ivsize;
54 int err;
55
56 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
57 ablkcipher_request_set_callback(subreq, req->creq.base.flags &
58 ~CRYPTO_TFM_REQ_MAY_SLEEP,
59 req->creq.base.complete,
60 req->creq.base.data);
61 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
62 req->creq.nbytes, req->creq.info);
63
64 spin_lock_bh(&ctx->lock);
65
66 ivsize = crypto_ablkcipher_ivsize(geniv);
67
68 memcpy(req->giv, ctx->iv, ivsize);
69 memcpy(subreq->info, ctx->iv, ivsize);
70
71 err = crypto_ablkcipher_encrypt(subreq);
72 if (err)
73 goto unlock;
74
75 memcpy(ctx->iv, subreq->info, ivsize);
76
77unlock:
78 spin_unlock_bh(&ctx->lock);
79
80 return err;
81}
82
83static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
84{
85 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
86 int err = 0;
87
88 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
89
90 if (iv) {
91 err = crypto_rng_get_bytes(crypto_default_rng, iv,
92 crypto_ablkcipher_ivsize(geniv));
93 crypto_put_default_rng();
94 }
95
96 return err ?: skcipher_geniv_init(tfm);
97}
98
99static int chainiv_init(struct crypto_tfm *tfm)
100{
101 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
102 struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
103 char *iv;
104
105 spin_lock_init(&ctx->lock);
106
107 iv = NULL;
108 if (!crypto_get_default_rng()) {
109 crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
110 iv = ctx->iv;
111 }
112
113 return chainiv_init_common(tfm, iv);
114}
115
116static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
117{
118 int queued;
119 int err = ctx->err;
120
121 if (!ctx->queue.qlen) {
122 smp_mb__before_atomic();
123 clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
124
125 if (!ctx->queue.qlen ||
126 test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
127 goto out;
128 }
129
130 queued = queue_work(kcrypto_wq, &ctx->postponed);
131 BUG_ON(!queued);
132
133out:
134 return err;
135}
136
137static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
138{
139 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
140 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
141 int err;
142
143 spin_lock_bh(&ctx->lock);
144 err = skcipher_enqueue_givcrypt(&ctx->queue, req);
145 spin_unlock_bh(&ctx->lock);
146
147 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
148 return err;
149
150 ctx->err = err;
151 return async_chainiv_schedule_work(ctx);
152}
153
154static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
155{
156 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
157 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
158 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
159 unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
160
161 memcpy(req->giv, ctx->iv, ivsize);
162 memcpy(subreq->info, ctx->iv, ivsize);
163
164 ctx->err = crypto_ablkcipher_encrypt(subreq);
165 if (ctx->err)
166 goto out;
167
168 memcpy(ctx->iv, subreq->info, ivsize);
169
170out:
171 return async_chainiv_schedule_work(ctx);
172}
173
174static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
175{
176 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
177 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
178 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
179
180 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
181 ablkcipher_request_set_callback(subreq, req->creq.base.flags,
182 req->creq.base.complete,
183 req->creq.base.data);
184 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
185 req->creq.nbytes, req->creq.info);
186
187 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
188 goto postpone;
189
190 if (ctx->queue.qlen) {
191 clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
192 goto postpone;
193 }
194
195 return async_chainiv_givencrypt_tail(req);
196
197postpone:
198 return async_chainiv_postpone_request(req);
199}
200
201static void async_chainiv_do_postponed(struct work_struct *work)
202{
203 struct async_chainiv_ctx *ctx = container_of(work,
204 struct async_chainiv_ctx,
205 postponed);
206 struct skcipher_givcrypt_request *req;
207 struct ablkcipher_request *subreq;
208 int err;
209
210 /* Only handle one request at a time to avoid hogging keventd. */
211 spin_lock_bh(&ctx->lock);
212 req = skcipher_dequeue_givcrypt(&ctx->queue);
213 spin_unlock_bh(&ctx->lock);
214
215 if (!req) {
216 async_chainiv_schedule_work(ctx);
217 return;
218 }
219
220 subreq = skcipher_givcrypt_reqctx(req);
221 subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
222
223 err = async_chainiv_givencrypt_tail(req);
224
225 local_bh_disable();
226 skcipher_givcrypt_complete(req, err);
227 local_bh_enable();
228}
229
230static int async_chainiv_init(struct crypto_tfm *tfm)
231{
232 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
233 struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
234 char *iv;
235
236 spin_lock_init(&ctx->lock);
237
238 crypto_init_queue(&ctx->queue, 100);
239 INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
240
241 iv = NULL;
242 if (!crypto_get_default_rng()) {
243 crypto_ablkcipher_crt(geniv)->givencrypt =
244 async_chainiv_givencrypt;
245 iv = ctx->iv;
246 }
247
248 return chainiv_init_common(tfm, iv);
249}
250
251static void async_chainiv_exit(struct crypto_tfm *tfm)
252{
253 struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
254
255 BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
256
257 skcipher_geniv_exit(tfm);
258}
259
260static struct crypto_template chainiv_tmpl;
261
262static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
263{
264 struct crypto_attr_type *algt;
265 struct crypto_instance *inst;
266
267 algt = crypto_get_attr_type(tb);
268 if (IS_ERR(algt))
269 return ERR_CAST(algt);
270
271 inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
272 if (IS_ERR(inst))
273 goto out;
274
275 inst->alg.cra_init = chainiv_init;
276 inst->alg.cra_exit = skcipher_geniv_exit;
277
278 inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
279
280 if (!crypto_requires_sync(algt->type, algt->mask)) {
281 inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
282
283 inst->alg.cra_init = async_chainiv_init;
284 inst->alg.cra_exit = async_chainiv_exit;
285
286 inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
287 }
288
289 inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
290
291out:
292 return inst;
293}
294
295static struct crypto_template chainiv_tmpl = {
296 .name = "chainiv",
297 .alloc = chainiv_alloc,
298 .free = skcipher_geniv_free,
299 .module = THIS_MODULE,
300};
301
302static int __init chainiv_module_init(void)
303{
304 return crypto_register_template(&chainiv_tmpl);
305}
306
307static void chainiv_module_exit(void)
308{
309 crypto_unregister_template(&chainiv_tmpl);
310}
311
312module_init(chainiv_module_init);
313module_exit(chainiv_module_exit);
314
315MODULE_LICENSE("GPL");
316MODULE_DESCRIPTION("Chain IV Generator");
317MODULE_ALIAS_CRYPTO("chainiv");
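
chainiv derived each IV from the last ciphertext block of the previous request, which forced serialization (note the spinlock in the sync path and the single-request workqueue in the async path). With the givcipher infrastructure going away, callers supply an explicit per-request IV through the skcipher API, and first-class IV generation survives in the seqiv and echainiv templates. A minimal sketch of the explicit-IV replacement, names illustrative:

    #include <crypto/skcipher.h>
    #include <linux/random.h>

    static void example_fill_cbc_iv(struct crypto_skcipher *tfm, u8 *iv)
    {
            /* any unpredictable value works as a CBC IV; the request then
             * carries it explicitly via skcipher_request_set_crypt() */
            get_random_bytes(iv, crypto_skcipher_ivsize(tfm));
    }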
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 7921251cdb13..77207b41940c 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -22,6 +22,7 @@
22#include <crypto/internal/aead.h> 22#include <crypto/internal/aead.h>
23#include <crypto/cryptd.h> 23#include <crypto/cryptd.h>
24#include <crypto/crypto_wq.h> 24#include <crypto/crypto_wq.h>
25#include <linux/atomic.h>
25#include <linux/err.h> 26#include <linux/err.h>
26#include <linux/init.h> 27#include <linux/init.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -31,7 +32,7 @@
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33 34
34#define CRYPTD_MAX_CPU_QLEN 100 35#define CRYPTD_MAX_CPU_QLEN 1000
35 36
36struct cryptd_cpu_queue { 37struct cryptd_cpu_queue {
37 struct crypto_queue queue; 38 struct crypto_queue queue;
@@ -58,6 +59,7 @@ struct aead_instance_ctx {
58}; 59};
59 60
60struct cryptd_blkcipher_ctx { 61struct cryptd_blkcipher_ctx {
62 atomic_t refcnt;
61 struct crypto_blkcipher *child; 63 struct crypto_blkcipher *child;
62}; 64};
63 65
@@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx {
66}; 68};
67 69
68struct cryptd_hash_ctx { 70struct cryptd_hash_ctx {
71 atomic_t refcnt;
69 struct crypto_shash *child; 72 struct crypto_shash *child;
70}; 73};
71 74
@@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx {
75}; 78};
76 79
77struct cryptd_aead_ctx { 80struct cryptd_aead_ctx {
81 atomic_t refcnt;
78 struct crypto_aead *child; 82 struct crypto_aead *child;
79}; 83};
80 84
@@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
118{ 122{
119 int cpu, err; 123 int cpu, err;
120 struct cryptd_cpu_queue *cpu_queue; 124 struct cryptd_cpu_queue *cpu_queue;
125 struct crypto_tfm *tfm;
126 atomic_t *refcnt;
127 bool may_backlog;
121 128
122 cpu = get_cpu(); 129 cpu = get_cpu();
123 cpu_queue = this_cpu_ptr(queue->cpu_queue); 130 cpu_queue = this_cpu_ptr(queue->cpu_queue);
124 err = crypto_enqueue_request(&cpu_queue->queue, request); 131 err = crypto_enqueue_request(&cpu_queue->queue, request);
132
133 refcnt = crypto_tfm_ctx(request->tfm);
134 may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
135
136 if (err == -EBUSY && !may_backlog)
137 goto out_put_cpu;
138
125 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 139 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
140
141 if (!atomic_read(refcnt))
142 goto out_put_cpu;
143
144 tfm = request->tfm;
145 atomic_inc(refcnt);
146
147out_put_cpu:
126 put_cpu(); 148 put_cpu();
127 149
128 return err; 150 return err;
@@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
206 unsigned int len)) 228 unsigned int len))
207{ 229{
208 struct cryptd_blkcipher_request_ctx *rctx; 230 struct cryptd_blkcipher_request_ctx *rctx;
231 struct cryptd_blkcipher_ctx *ctx;
232 struct crypto_ablkcipher *tfm;
209 struct blkcipher_desc desc; 233 struct blkcipher_desc desc;
234 int refcnt;
210 235
211 rctx = ablkcipher_request_ctx(req); 236 rctx = ablkcipher_request_ctx(req);
212 237
@@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
222 req->base.complete = rctx->complete; 247 req->base.complete = rctx->complete;
223 248
224out: 249out:
250 tfm = crypto_ablkcipher_reqtfm(req);
251 ctx = crypto_ablkcipher_ctx(tfm);
252 refcnt = atomic_read(&ctx->refcnt);
253
225 local_bh_disable(); 254 local_bh_disable();
226 rctx->complete(&req->base, err); 255 rctx->complete(&req->base, err);
227 local_bh_enable(); 256 local_bh_enable();
257
258 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
259 crypto_free_ablkcipher(tfm);
228} 260}
229 261
230static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) 262static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
@@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
456 return cryptd_enqueue_request(queue, &req->base); 488 return cryptd_enqueue_request(queue, &req->base);
457} 489}
458 490
491static void cryptd_hash_complete(struct ahash_request *req, int err)
492{
493 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
494 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
495 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
496 int refcnt = atomic_read(&ctx->refcnt);
497
498 local_bh_disable();
499 rctx->complete(&req->base, err);
500 local_bh_enable();
501
502 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
503 crypto_free_ahash(tfm);
504}
505
459static void cryptd_hash_init(struct crypto_async_request *req_async, int err) 506static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
460{ 507{
461 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 508 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
@@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
475 req->base.complete = rctx->complete; 522 req->base.complete = rctx->complete;
476 523
477out: 524out:
478 local_bh_disable(); 525 cryptd_hash_complete(req, err);
479 rctx->complete(&req->base, err);
480 local_bh_enable();
481} 526}
482 527
483static int cryptd_hash_init_enqueue(struct ahash_request *req) 528static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -500,9 +545,7 @@ static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
500 req->base.complete = rctx->complete; 545 req->base.complete = rctx->complete;
501 546
502out: 547out:
503 local_bh_disable(); 548 cryptd_hash_complete(req, err);
504 rctx->complete(&req->base, err);
505 local_bh_enable();
506} 549}
507 550
508static int cryptd_hash_update_enqueue(struct ahash_request *req) 551static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
523 req->base.complete = rctx->complete; 566 req->base.complete = rctx->complete;
524 567
525out: 568out:
526 local_bh_disable(); 569 cryptd_hash_complete(req, err);
527 rctx->complete(&req->base, err);
528 local_bh_enable();
529} 570}
530 571
531static int cryptd_hash_final_enqueue(struct ahash_request *req) 572static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
546 req->base.complete = rctx->complete; 587 req->base.complete = rctx->complete;
547 588
548out: 589out:
549 local_bh_disable(); 590 cryptd_hash_complete(req, err);
550 rctx->complete(&req->base, err);
551 local_bh_enable();
552} 591}
553 592
554static int cryptd_hash_finup_enqueue(struct ahash_request *req) 593static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
575 req->base.complete = rctx->complete; 614 req->base.complete = rctx->complete;
576 615
577out: 616out:
578 local_bh_disable(); 617 cryptd_hash_complete(req, err);
579 rctx->complete(&req->base, err);
580 local_bh_enable();
581} 618}
582 619
583static int cryptd_hash_digest_enqueue(struct ahash_request *req) 620static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -688,19 +725,31 @@ static void cryptd_aead_crypt(struct aead_request *req,
688 int (*crypt)(struct aead_request *req)) 725 int (*crypt)(struct aead_request *req))
689{ 726{
690 struct cryptd_aead_request_ctx *rctx; 727 struct cryptd_aead_request_ctx *rctx;
728 struct cryptd_aead_ctx *ctx;
691 crypto_completion_t compl; 729 crypto_completion_t compl;
730 struct crypto_aead *tfm;
731 int refcnt;
692 732
693 rctx = aead_request_ctx(req); 733 rctx = aead_request_ctx(req);
694 compl = rctx->complete; 734 compl = rctx->complete;
695 735
736 tfm = crypto_aead_reqtfm(req);
737
696 if (unlikely(err == -EINPROGRESS)) 738 if (unlikely(err == -EINPROGRESS))
697 goto out; 739 goto out;
698 aead_request_set_tfm(req, child); 740 aead_request_set_tfm(req, child);
699 err = crypt( req ); 741 err = crypt( req );
742
700out: 743out:
744 ctx = crypto_aead_ctx(tfm);
745 refcnt = atomic_read(&ctx->refcnt);
746
701 local_bh_disable(); 747 local_bh_disable();
702 compl(&req->base, err); 748 compl(&req->base, err);
703 local_bh_enable(); 749 local_bh_enable();
750
751 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
752 crypto_free_aead(tfm);
704} 753}
705 754
706static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) 755static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
@@ -883,6 +932,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
883 u32 type, u32 mask) 932 u32 type, u32 mask)
884{ 933{
885 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 934 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
935 struct cryptd_blkcipher_ctx *ctx;
886 struct crypto_tfm *tfm; 936 struct crypto_tfm *tfm;
887 937
888 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 938 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -899,6 +949,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
899 return ERR_PTR(-EINVAL); 949 return ERR_PTR(-EINVAL);
900 } 950 }
901 951
952 ctx = crypto_tfm_ctx(tfm);
953 atomic_set(&ctx->refcnt, 1);
954
902 return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); 955 return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
903} 956}
904EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); 957EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
@@ -910,9 +963,20 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
910} 963}
911EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); 964EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
912 965
966bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
967{
968 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
969
970 return atomic_read(&ctx->refcnt) - 1;
971}
972EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
973
913void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) 974void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
914{ 975{
915 crypto_free_ablkcipher(&tfm->base); 976 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
977
978 if (atomic_dec_and_test(&ctx->refcnt))
979 crypto_free_ablkcipher(&tfm->base);
916} 980}
917EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); 981EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
918 982
@@ -920,6 +984,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
920 u32 type, u32 mask) 984 u32 type, u32 mask)
921{ 985{
922 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 986 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
987 struct cryptd_hash_ctx *ctx;
923 struct crypto_ahash *tfm; 988 struct crypto_ahash *tfm;
924 989
925 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 990 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -933,6 +998,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
933 return ERR_PTR(-EINVAL); 998 return ERR_PTR(-EINVAL);
934 } 999 }
935 1000
1001 ctx = crypto_ahash_ctx(tfm);
1002 atomic_set(&ctx->refcnt, 1);
1003
936 return __cryptd_ahash_cast(tfm); 1004 return __cryptd_ahash_cast(tfm);
937} 1005}
938EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); 1006EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
@@ -952,9 +1020,20 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
952} 1020}
953EXPORT_SYMBOL_GPL(cryptd_shash_desc); 1021EXPORT_SYMBOL_GPL(cryptd_shash_desc);
954 1022
1023bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1024{
1025 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1026
1027 return atomic_read(&ctx->refcnt) - 1;
1028}
1029EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1030
955void cryptd_free_ahash(struct cryptd_ahash *tfm) 1031void cryptd_free_ahash(struct cryptd_ahash *tfm)
956{ 1032{
957 crypto_free_ahash(&tfm->base); 1033 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1034
1035 if (atomic_dec_and_test(&ctx->refcnt))
1036 crypto_free_ahash(&tfm->base);
958} 1037}
959EXPORT_SYMBOL_GPL(cryptd_free_ahash); 1038EXPORT_SYMBOL_GPL(cryptd_free_ahash);
960 1039
@@ -962,6 +1041,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
962 u32 type, u32 mask) 1041 u32 type, u32 mask)
963{ 1042{
964 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 1043 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1044 struct cryptd_aead_ctx *ctx;
965 struct crypto_aead *tfm; 1045 struct crypto_aead *tfm;
966 1046
967 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 1047 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -974,6 +1054,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
974 crypto_free_aead(tfm); 1054 crypto_free_aead(tfm);
975 return ERR_PTR(-EINVAL); 1055 return ERR_PTR(-EINVAL);
976 } 1056 }
1057
1058 ctx = crypto_aead_ctx(tfm);
1059 atomic_set(&ctx->refcnt, 1);
1060
977 return __cryptd_aead_cast(tfm); 1061 return __cryptd_aead_cast(tfm);
978} 1062}
979EXPORT_SYMBOL_GPL(cryptd_alloc_aead); 1063EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
@@ -986,9 +1070,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
986} 1070}
987EXPORT_SYMBOL_GPL(cryptd_aead_child); 1071EXPORT_SYMBOL_GPL(cryptd_aead_child);
988 1072
1073bool cryptd_aead_queued(struct cryptd_aead *tfm)
1074{
1075 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1076
1077 return atomic_read(&ctx->refcnt) - 1;
1078}
1079EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1080
989void cryptd_free_aead(struct cryptd_aead *tfm) 1081void cryptd_free_aead(struct cryptd_aead *tfm)
990{ 1082{
991 crypto_free_aead(&tfm->base); 1083 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1084
1085 if (atomic_dec_and_test(&ctx->refcnt))
1086 crypto_free_aead(&tfm->base);
992} 1087}
993EXPORT_SYMBOL_GPL(cryptd_free_aead); 1088EXPORT_SYMBOL_GPL(cryptd_free_aead);
994 1089
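
The cryptd changes make tfm lifetime reference-counted: allocation sets the count to 1, each request queued while the count is live takes a reference, and the completion path drops it. A user may therefore call cryptd_free_*() while requests are still in flight; the final completion performs the actual free. The new cryptd_*_queued() helpers report whether anything is pending. A hedged usage sketch with an illustrative function name:

    #include <crypto/cryptd.h>
    #include <linux/printk.h>

    static void example_teardown(struct cryptd_ahash *tfm)
    {
            /* freeing with work still queued is now safe: the final
             * completion drops the last reference and frees the tfm */
            if (cryptd_ahash_queued(tfm))
                    pr_debug("cryptd: requests in flight, free is deferred\n");

            cryptd_free_ahash(tfm);
    }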
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 941c9a434d50..20ff2c746e0b 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
26#include <linux/string.h> 26#include <linux/string.h>
27 27
28static DEFINE_MUTEX(crypto_default_null_skcipher_lock); 28static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
29static struct crypto_blkcipher *crypto_default_null_skcipher; 29static struct crypto_skcipher *crypto_default_null_skcipher;
30static int crypto_default_null_skcipher_refcnt; 30static int crypto_default_null_skcipher_refcnt;
31 31
32static int null_compress(struct crypto_tfm *tfm, const u8 *src, 32static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -153,15 +153,16 @@ MODULE_ALIAS_CRYPTO("compress_null");
153MODULE_ALIAS_CRYPTO("digest_null"); 153MODULE_ALIAS_CRYPTO("digest_null");
154MODULE_ALIAS_CRYPTO("cipher_null"); 154MODULE_ALIAS_CRYPTO("cipher_null");
155 155
156struct crypto_blkcipher *crypto_get_default_null_skcipher(void) 156struct crypto_skcipher *crypto_get_default_null_skcipher(void)
157{ 157{
158 struct crypto_blkcipher *tfm; 158 struct crypto_skcipher *tfm;
159 159
160 mutex_lock(&crypto_default_null_skcipher_lock); 160 mutex_lock(&crypto_default_null_skcipher_lock);
161 tfm = crypto_default_null_skcipher; 161 tfm = crypto_default_null_skcipher;
162 162
163 if (!tfm) { 163 if (!tfm) {
164 tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0); 164 tfm = crypto_alloc_skcipher("ecb(cipher_null)",
165 0, CRYPTO_ALG_ASYNC);
165 if (IS_ERR(tfm)) 166 if (IS_ERR(tfm))
166 goto unlock; 167 goto unlock;
167 168
@@ -181,7 +182,7 @@ void crypto_put_default_null_skcipher(void)
181{ 182{
182 mutex_lock(&crypto_default_null_skcipher_lock); 183 mutex_lock(&crypto_default_null_skcipher_lock);
183 if (!--crypto_default_null_skcipher_refcnt) { 184 if (!--crypto_default_null_skcipher_refcnt) {
184 crypto_free_blkcipher(crypto_default_null_skcipher); 185 crypto_free_skcipher(crypto_default_null_skcipher);
185 crypto_default_null_skcipher = NULL; 186 crypto_default_null_skcipher = NULL;
186 } 187 }
187 mutex_unlock(&crypto_default_null_skcipher_lock); 188 mutex_unlock(&crypto_default_null_skcipher_lock);
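
crypto_null.c now hands out the shared null transform as a synchronous skcipher (the CRYPTO_ALG_ASYNC mask excludes async implementations), matching how AEAD wrappers use it to copy associated data. A hedged sketch of that usage with illustrative names, assuming the on-stack request helper from <crypto/skcipher.h>:

    #include <crypto/null.h>
    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int example_null_copy(struct scatterlist *src,
                                 struct scatterlist *dst, unsigned int len)
    {
            struct crypto_skcipher *null_tfm;
            int err;

            null_tfm = crypto_get_default_null_skcipher();
            if (IS_ERR(null_tfm))
                    return PTR_ERR(null_tfm);

            {
                    SKCIPHER_REQUEST_ON_STACK(req, null_tfm);

                    skcipher_request_set_tfm(req, null_tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    /* ecb(cipher_null) has no IV; "encrypt" just copies */
                    skcipher_request_set_crypt(req, src, dst, len, NULL);
                    err = crypto_skcipher_encrypt(req);
                    skcipher_request_zero(req);
            }

            crypto_put_default_null_skcipher();
            return err;
    }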
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 7097a3395b25..1c5705481c69 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -28,6 +28,7 @@
28#include <crypto/internal/skcipher.h> 28#include <crypto/internal/skcipher.h>
29#include <crypto/internal/rng.h> 29#include <crypto/internal/rng.h>
30#include <crypto/akcipher.h> 30#include <crypto/akcipher.h>
31#include <crypto/kpp.h>
31 32
32#include "internal.h" 33#include "internal.h"
33 34
@@ -126,6 +127,21 @@ nla_put_failure:
126 return -EMSGSIZE; 127 return -EMSGSIZE;
127} 128}
128 129
130static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
131{
132 struct crypto_report_kpp rkpp;
133
134 strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
135
136 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
137 sizeof(struct crypto_report_kpp), &rkpp))
138 goto nla_put_failure;
139 return 0;
140
141nla_put_failure:
142 return -EMSGSIZE;
143}
144
129static int crypto_report_one(struct crypto_alg *alg, 145static int crypto_report_one(struct crypto_alg *alg,
130 struct crypto_user_alg *ualg, struct sk_buff *skb) 146 struct crypto_user_alg *ualg, struct sk_buff *skb)
131{ 147{
@@ -176,6 +192,10 @@ static int crypto_report_one(struct crypto_alg *alg,
176 goto nla_put_failure; 192 goto nla_put_failure;
177 193
178 break; 194 break;
195 case CRYPTO_ALG_TYPE_KPP:
196 if (crypto_report_kpp(skb, alg))
197 goto nla_put_failure;
198 break;
179 } 199 }
180 200
181out: 201out:
@@ -358,32 +378,6 @@ drop_alg:
358 return err; 378 return err;
359} 379}
360 380
361static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
362 u32 mask)
363{
364 int err;
365 struct crypto_alg *alg;
366
367 type = crypto_skcipher_type(type);
368 mask = crypto_skcipher_mask(mask);
369
370 for (;;) {
371 alg = crypto_lookup_skcipher(name, type, mask);
372 if (!IS_ERR(alg))
373 return alg;
374
375 err = PTR_ERR(alg);
376 if (err != -EAGAIN)
377 break;
378 if (fatal_signal_pending(current)) {
379 err = -EINTR;
380 break;
381 }
382 }
383
384 return ERR_PTR(err);
385}
386
387static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, 381static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
388 struct nlattr **attrs) 382 struct nlattr **attrs)
389{ 383{
@@ -416,16 +410,7 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
416 else 410 else
417 name = p->cru_name; 411 name = p->cru_name;
418 412
419 switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) { 413 alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
420 case CRYPTO_ALG_TYPE_GIVCIPHER:
421 case CRYPTO_ALG_TYPE_BLKCIPHER:
422 case CRYPTO_ALG_TYPE_ABLKCIPHER:
423 alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
424 break;
425 default:
426 alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
427 }
428
429 if (IS_ERR(alg)) 414 if (IS_ERR(alg))
430 return PTR_ERR(alg); 415 return PTR_ERR(alg);
431 416
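
crypto_user.c gains netlink reporting for the new kpp algorithm type and drops the special-cased skcipher lookup, since crypto_alg_mod_lookup() now handles those types directly. For reference, the uapi structure that crypto_report_kpp() fills in is (in mainline, include/uapi/linux/cryptouser.h):

    struct crypto_report_kpp {
            char type[CRYPTO_MAX_NAME];
    };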
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2386f7313952..ff4d21eddb83 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -26,13 +26,13 @@ struct crypto_ctr_ctx {
26}; 26};
27 27
28struct crypto_rfc3686_ctx { 28struct crypto_rfc3686_ctx {
29 struct crypto_ablkcipher *child; 29 struct crypto_skcipher *child;
30 u8 nonce[CTR_RFC3686_NONCE_SIZE]; 30 u8 nonce[CTR_RFC3686_NONCE_SIZE];
31}; 31};
32 32
33struct crypto_rfc3686_req_ctx { 33struct crypto_rfc3686_req_ctx {
34 u8 iv[CTR_RFC3686_BLOCK_SIZE]; 34 u8 iv[CTR_RFC3686_BLOCK_SIZE];
35 struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR; 35 struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
36}; 36};
37 37
38static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, 38static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
@@ -249,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = {
249 .module = THIS_MODULE, 249 .module = THIS_MODULE,
250}; 250};
251 251
252static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, 252static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
253 const u8 *key, unsigned int keylen) 253 const u8 *key, unsigned int keylen)
254{ 254{
255 struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent); 255 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
256 struct crypto_ablkcipher *child = ctx->child; 256 struct crypto_skcipher *child = ctx->child;
257 int err; 257 int err;
258 258
259 /* the nonce is stored in bytes at end of key */ 259 /* the nonce is stored in bytes at end of key */
@@ -265,173 +265,178 @@ static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
265 265
266 keylen -= CTR_RFC3686_NONCE_SIZE; 266 keylen -= CTR_RFC3686_NONCE_SIZE;
267 267
268 crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 268 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
269 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & 269 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
270 CRYPTO_TFM_REQ_MASK); 270 CRYPTO_TFM_REQ_MASK);
271 err = crypto_ablkcipher_setkey(child, key, keylen); 271 err = crypto_skcipher_setkey(child, key, keylen);
272 crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) & 272 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
273 CRYPTO_TFM_RES_MASK); 273 CRYPTO_TFM_RES_MASK);
274 274
275 return err; 275 return err;
276} 276}
277 277
278static int crypto_rfc3686_crypt(struct ablkcipher_request *req) 278static int crypto_rfc3686_crypt(struct skcipher_request *req)
279{ 279{
280 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 280 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
281 struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm); 281 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
282 struct crypto_ablkcipher *child = ctx->child; 282 struct crypto_skcipher *child = ctx->child;
283 unsigned long align = crypto_ablkcipher_alignmask(tfm); 283 unsigned long align = crypto_skcipher_alignmask(tfm);
284 struct crypto_rfc3686_req_ctx *rctx = 284 struct crypto_rfc3686_req_ctx *rctx =
285 (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1); 285 (void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
286 struct ablkcipher_request *subreq = &rctx->subreq; 286 struct skcipher_request *subreq = &rctx->subreq;
287 u8 *iv = rctx->iv; 287 u8 *iv = rctx->iv;
288 288
289 /* set up counter block */ 289 /* set up counter block */
290 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); 290 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
291 memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE); 291 memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
292 292
293 /* initialize counter portion of counter block */ 293 /* initialize counter portion of counter block */
294 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = 294 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
295 cpu_to_be32(1); 295 cpu_to_be32(1);
296 296
297 ablkcipher_request_set_tfm(subreq, child); 297 skcipher_request_set_tfm(subreq, child);
298 ablkcipher_request_set_callback(subreq, req->base.flags, 298 skcipher_request_set_callback(subreq, req->base.flags,
299 req->base.complete, req->base.data); 299 req->base.complete, req->base.data);
300 ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes, 300 skcipher_request_set_crypt(subreq, req->src, req->dst,
301 iv); 301 req->cryptlen, iv);
302 302
303 return crypto_ablkcipher_encrypt(subreq); 303 return crypto_skcipher_encrypt(subreq);
304} 304}
305 305
306static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) 306static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
307{ 307{
308 struct crypto_instance *inst = (void *)tfm->__crt_alg; 308 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
309 struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); 309 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
310 struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); 310 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
311 struct crypto_ablkcipher *cipher; 311 struct crypto_skcipher *cipher;
312 unsigned long align; 312 unsigned long align;
313 unsigned int reqsize;
313 314
314 cipher = crypto_spawn_skcipher(spawn); 315 cipher = crypto_spawn_skcipher2(spawn);
315 if (IS_ERR(cipher)) 316 if (IS_ERR(cipher))
316 return PTR_ERR(cipher); 317 return PTR_ERR(cipher);
317 318
318 ctx->child = cipher; 319 ctx->child = cipher;
319 320
320 align = crypto_tfm_alg_alignmask(tfm); 321 align = crypto_skcipher_alignmask(tfm);
321 align &= ~(crypto_tfm_ctx_alignment() - 1); 322 align &= ~(crypto_tfm_ctx_alignment() - 1);
322 tfm->crt_ablkcipher.reqsize = align + 323 reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
323 sizeof(struct crypto_rfc3686_req_ctx) + 324 crypto_skcipher_reqsize(cipher);
324 crypto_ablkcipher_reqsize(cipher); 325 crypto_skcipher_set_reqsize(tfm, reqsize);
325 326
326 return 0; 327 return 0;
327} 328}
328 329
329static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm) 330static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
330{ 331{
331 struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); 332 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
333
334 crypto_free_skcipher(ctx->child);
335}
332 336
333 crypto_free_ablkcipher(ctx->child); 337static void crypto_rfc3686_free(struct skcipher_instance *inst)
338{
339 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
340
341 crypto_drop_skcipher(spawn);
342 kfree(inst);
334} 343}
335 344
336static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) 345static int crypto_rfc3686_create(struct crypto_template *tmpl,
346 struct rtattr **tb)
337{ 347{
338 struct crypto_attr_type *algt; 348 struct crypto_attr_type *algt;
339 struct crypto_instance *inst; 349 struct skcipher_instance *inst;
340 struct crypto_alg *alg; 350 struct skcipher_alg *alg;
341 struct crypto_skcipher_spawn *spawn; 351 struct crypto_skcipher_spawn *spawn;
342 const char *cipher_name; 352 const char *cipher_name;
343 int err; 353 int err;
344 354
345 algt = crypto_get_attr_type(tb); 355 algt = crypto_get_attr_type(tb);
346 if (IS_ERR(algt)) 356 if (IS_ERR(algt))
347 return ERR_CAST(algt); 357 return PTR_ERR(algt);
348 358
349 if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask) 359 if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
350 return ERR_PTR(-EINVAL); 360 return -EINVAL;
351 361
352 cipher_name = crypto_attr_alg_name(tb[1]); 362 cipher_name = crypto_attr_alg_name(tb[1]);
353 if (IS_ERR(cipher_name)) 363 if (IS_ERR(cipher_name))
354 return ERR_CAST(cipher_name); 364 return PTR_ERR(cipher_name);
355 365
356 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 366 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
357 if (!inst) 367 if (!inst)
358 return ERR_PTR(-ENOMEM); 368 return -ENOMEM;
359 369
360 spawn = crypto_instance_ctx(inst); 370 spawn = skcipher_instance_ctx(inst);
361 371
362 crypto_set_skcipher_spawn(spawn, inst); 372 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
363 err = crypto_grab_skcipher(spawn, cipher_name, 0, 373 err = crypto_grab_skcipher2(spawn, cipher_name, 0,
364 crypto_requires_sync(algt->type, 374 crypto_requires_sync(algt->type,
365 algt->mask)); 375 algt->mask));
366 if (err) 376 if (err)
367 goto err_free_inst; 377 goto err_free_inst;
368 378
369 alg = crypto_skcipher_spawn_alg(spawn); 379 alg = crypto_spawn_skcipher_alg(spawn);
370 380
371 /* We only support 16-byte blocks. */ 381 /* We only support 16-byte blocks. */
372 err = -EINVAL; 382 err = -EINVAL;
373 if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) 383 if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
374 goto err_drop_spawn; 384 goto err_drop_spawn;
375 385
376 /* Not a stream cipher? */ 386 /* Not a stream cipher? */
377 if (alg->cra_blocksize != 1) 387 if (alg->base.cra_blocksize != 1)
378 goto err_drop_spawn; 388 goto err_drop_spawn;
379 389
380 err = -ENAMETOOLONG; 390 err = -ENAMETOOLONG;
381 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", 391 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
382 alg->cra_name) >= CRYPTO_MAX_ALG_NAME) 392 "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
383 goto err_drop_spawn; 393 goto err_drop_spawn;
384 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, 394 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
385 "rfc3686(%s)", alg->cra_driver_name) >= 395 "rfc3686(%s)", alg->base.cra_driver_name) >=
386 CRYPTO_MAX_ALG_NAME) 396 CRYPTO_MAX_ALG_NAME)
387 goto err_drop_spawn; 397 goto err_drop_spawn;
388 398
389 inst->alg.cra_priority = alg->cra_priority; 399 inst->alg.base.cra_priority = alg->base.cra_priority;
390 inst->alg.cra_blocksize = 1; 400 inst->alg.base.cra_blocksize = 1;
391 inst->alg.cra_alignmask = alg->cra_alignmask; 401 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
392 402
393 inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 403 inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
394 (alg->cra_flags & CRYPTO_ALG_ASYNC);
395 inst->alg.cra_type = &crypto_ablkcipher_type;
396 404
397 inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE; 405 inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
398 inst->alg.cra_ablkcipher.min_keysize = 406 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
399 alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE; 407 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
400 inst->alg.cra_ablkcipher.max_keysize = 408 CTR_RFC3686_NONCE_SIZE;
401 alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE; 409 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
410 CTR_RFC3686_NONCE_SIZE;
402 411
403 inst->alg.cra_ablkcipher.geniv = "seqiv"; 412 inst->alg.setkey = crypto_rfc3686_setkey;
413 inst->alg.encrypt = crypto_rfc3686_crypt;
414 inst->alg.decrypt = crypto_rfc3686_crypt;
404 415
405 inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey; 416 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
406 inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt;
407 inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt;
408 417
409 inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); 418 inst->alg.init = crypto_rfc3686_init_tfm;
419 inst->alg.exit = crypto_rfc3686_exit_tfm;
410 420
411 inst->alg.cra_init = crypto_rfc3686_init_tfm; 421 inst->free = crypto_rfc3686_free;
412 inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
413 422
414 return inst; 423 err = skcipher_register_instance(tmpl, inst);
424 if (err)
425 goto err_drop_spawn;
426
427out:
428 return err;
415 429
416err_drop_spawn: 430err_drop_spawn:
417 crypto_drop_skcipher(spawn); 431 crypto_drop_skcipher(spawn);
418err_free_inst: 432err_free_inst:
419 kfree(inst); 433 kfree(inst);
420 return ERR_PTR(err); 434 goto out;
421}
422
423static void crypto_rfc3686_free(struct crypto_instance *inst)
424{
425 struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
426
427 crypto_drop_skcipher(spawn);
428 kfree(inst);
429} 435}
430 436
431static struct crypto_template crypto_rfc3686_tmpl = { 437static struct crypto_template crypto_rfc3686_tmpl = {
432 .name = "rfc3686", 438 .name = "rfc3686",
433 .alloc = crypto_rfc3686_alloc, 439 .create = crypto_rfc3686_create,
434 .free = crypto_rfc3686_free,
435 .module = THIS_MODULE, 440 .module = THIS_MODULE,
436}; 441};
437 442
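[Editor's note] The rfc3686 template's request path above reduces to building a 16-byte counter block and handing everything else to the child CTR transform: 4 bytes of keyed nonce, 8 bytes of per-request IV, and a 32-bit big-endian block counter that RFC 3686 requires to start at 1. A standalone C sketch of that layout, mirroring crypto_rfc3686_crypt() (constants track the CTR_RFC3686_* macros; the helper name is illustrative, not kernel API):

	#include <stdint.h>
	#include <string.h>

	#define RFC3686_NONCE_SIZE 4   /* CTR_RFC3686_NONCE_SIZE */
	#define RFC3686_IV_SIZE    8   /* CTR_RFC3686_IV_SIZE */
	#define RFC3686_BLOCK_SIZE 16  /* CTR_RFC3686_BLOCK_SIZE */

	/* Build the initial counter block as crypto_rfc3686_crypt() does:
	 * nonce || IV || be32(1). */
	static void rfc3686_counter_block(uint8_t out[RFC3686_BLOCK_SIZE],
					  const uint8_t nonce[RFC3686_NONCE_SIZE],
					  const uint8_t iv[RFC3686_IV_SIZE])
	{
		memcpy(out, nonce, RFC3686_NONCE_SIZE);          /* keyed nonce */
		memcpy(out + RFC3686_NONCE_SIZE, iv, RFC3686_IV_SIZE);
		out[12] = 0;
		out[13] = 0;
		out[14] = 0;
		out[15] = 1;    /* counter starts at 1 per RFC 3686 */
	}
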
diff --git a/crypto/cts.c b/crypto/cts.c
index e467ec0acf9f..51976187b2bf 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -40,7 +40,7 @@
40 * rfc3962 includes errata information in its Appendix A. 40 * rfc3962 includes errata information in its Appendix A.
41 */ 41 */
42 42
43#include <crypto/algapi.h> 43#include <crypto/internal/skcipher.h>
44#include <linux/err.h> 44#include <linux/err.h>
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/kernel.h> 46#include <linux/kernel.h>
@@ -51,289 +51,364 @@
51#include <linux/slab.h> 51#include <linux/slab.h>
52 52
53struct crypto_cts_ctx { 53struct crypto_cts_ctx {
54 struct crypto_blkcipher *child; 54 struct crypto_skcipher *child;
55}; 55};
56 56
57static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key, 57struct crypto_cts_reqctx {
58 unsigned int keylen) 58 struct scatterlist sg[2];
59 unsigned offset;
60 struct skcipher_request subreq;
61};
62
63static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
59{ 64{
60 struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent); 65 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
61 struct crypto_blkcipher *child = ctx->child; 66 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
62 int err; 67 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
68 struct crypto_skcipher *child = ctx->child;
63 69
64 crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 70 return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child),
65 crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & 71 crypto_skcipher_alignmask(tfm) + 1);
66 CRYPTO_TFM_REQ_MASK);
67 err = crypto_blkcipher_setkey(child, key, keylen);
68 crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
69 CRYPTO_TFM_RES_MASK);
70 return err;
71} 72}
72 73
73static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, 74static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
74 struct blkcipher_desc *desc, 75 unsigned int keylen)
75 struct scatterlist *dst,
76 struct scatterlist *src,
77 unsigned int offset,
78 unsigned int nbytes)
79{ 76{
80 int bsize = crypto_blkcipher_blocksize(desc->tfm); 77 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
81 u8 tmp[bsize], tmp2[bsize]; 78 struct crypto_skcipher *child = ctx->child;
82 struct blkcipher_desc lcldesc;
83 struct scatterlist sgsrc[1], sgdst[1];
84 int lastn = nbytes - bsize;
85 u8 iv[bsize];
86 u8 s[bsize * 2], d[bsize * 2];
87 int err; 79 int err;
88 80
89 if (lastn < 0) 81 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
90 return -EINVAL; 82 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
83 CRYPTO_TFM_REQ_MASK);
84 err = crypto_skcipher_setkey(child, key, keylen);
85 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
86 CRYPTO_TFM_RES_MASK);
87 return err;
88}
91 89
92 sg_init_table(sgsrc, 1); 90static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
93 sg_init_table(sgdst, 1); 91{
92 struct skcipher_request *req = areq->data;
94 93
95 memset(s, 0, sizeof(s)); 94 if (err == -EINPROGRESS)
96 scatterwalk_map_and_copy(s, src, offset, nbytes, 0); 95 return;
97 96
98 memcpy(iv, desc->info, bsize); 97 skcipher_request_complete(req, err);
98}
99 99
100 lcldesc.tfm = ctx->child; 100static int cts_cbc_encrypt(struct skcipher_request *req)
101 lcldesc.info = iv; 101{
102 lcldesc.flags = desc->flags; 102 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
103 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
104 struct skcipher_request *subreq = &rctx->subreq;
105 int bsize = crypto_skcipher_blocksize(tfm);
106 u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
107 struct scatterlist *sg;
108 unsigned int offset;
109 int lastn;
110
111 offset = rctx->offset;
112 lastn = req->cryptlen - offset;
113
114 sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
115 scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
116
117 memset(d, 0, bsize);
118 scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
119
120 scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
121 memzero_explicit(d, sizeof(d));
122
123 skcipher_request_set_callback(subreq, req->base.flags &
124 CRYPTO_TFM_REQ_MAY_BACKLOG,
125 cts_cbc_crypt_done, req);
126 skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
127 return crypto_skcipher_encrypt(subreq);
128}
103 129
104 sg_set_buf(&sgsrc[0], s, bsize); 130static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
105 sg_set_buf(&sgdst[0], tmp, bsize); 131{
106 err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 132 struct skcipher_request *req = areq->data;
107 133
108 memcpy(d + bsize, tmp, lastn); 134 if (err)
135 goto out;
109 136
110 lcldesc.info = tmp; 137 err = cts_cbc_encrypt(req);
138 if (err == -EINPROGRESS ||
139 (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
140 return;
111 141
112 sg_set_buf(&sgsrc[0], s + bsize, bsize); 142out:
113 sg_set_buf(&sgdst[0], tmp2, bsize); 143 skcipher_request_complete(req, err);
114 err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 144}
115 145
116 memcpy(d, tmp2, bsize); 146static int crypto_cts_encrypt(struct skcipher_request *req)
147{
148 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
149 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
150 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
151 struct skcipher_request *subreq = &rctx->subreq;
152 int bsize = crypto_skcipher_blocksize(tfm);
153 unsigned int nbytes = req->cryptlen;
154 int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
155 unsigned int offset;
156
157 skcipher_request_set_tfm(subreq, ctx->child);
158
159 if (cbc_blocks <= 0) {
160 skcipher_request_set_callback(subreq, req->base.flags,
161 req->base.complete,
162 req->base.data);
163 skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
164 req->iv);
165 return crypto_skcipher_encrypt(subreq);
166 }
117 167
118 scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); 168 offset = cbc_blocks * bsize;
169 rctx->offset = offset;
119 170
120 memcpy(desc->info, tmp2, bsize); 171 skcipher_request_set_callback(subreq, req->base.flags,
172 crypto_cts_encrypt_done, req);
173 skcipher_request_set_crypt(subreq, req->src, req->dst,
174 offset, req->iv);
121 175
122 return err; 176 return crypto_skcipher_encrypt(subreq) ?:
177 cts_cbc_encrypt(req);
123} 178}
124 179
125static int crypto_cts_encrypt(struct blkcipher_desc *desc, 180static int cts_cbc_decrypt(struct skcipher_request *req)
126 struct scatterlist *dst, struct scatterlist *src,
127 unsigned int nbytes)
128{ 181{
129 struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 182 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
130 int bsize = crypto_blkcipher_blocksize(desc->tfm); 183 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
131 int tot_blocks = (nbytes + bsize - 1) / bsize; 184 struct skcipher_request *subreq = &rctx->subreq;
132 int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; 185 int bsize = crypto_skcipher_blocksize(tfm);
133 struct blkcipher_desc lcldesc; 186 u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
134 int err; 187 struct scatterlist *sg;
188 unsigned int offset;
189 u8 *space;
190 int lastn;
191
192 offset = rctx->offset;
193 lastn = req->cryptlen - offset;
194
195 sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
196
197 /* 1. Decrypt Cn-1 (s) to create Dn */
198 scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
199 space = crypto_cts_reqctx_space(req);
200 crypto_xor(d + bsize, space, bsize);
201 /* 2. Pad Cn with zeros at the end to create C of length BB */
202 memset(d, 0, bsize);
203 scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
204 /* 3. Exclusive-or Dn with C to create Xn */
205 /* 4. Select the first Ln bytes of Xn to create Pn */
206 crypto_xor(d + bsize, d, lastn);
207
208 /* 5. Append the tail (BB - Ln) bytes of Xn to Cn to create En */
209 memcpy(d + lastn, d + bsize + lastn, bsize - lastn);
210 /* 6. Decrypt En to create Pn-1 */
135 211
136 lcldesc.tfm = ctx->child; 212 scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
137 lcldesc.info = desc->info; 213 memzero_explicit(d, sizeof(d));
138 lcldesc.flags = desc->flags;
139
140 if (tot_blocks == 1) {
141 err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize);
142 } else if (nbytes <= bsize * 2) {
143 err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes);
144 } else {
145 /* do normal function for tot_blocks - 2 */
146 err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src,
147 cbc_blocks * bsize);
148 if (err == 0) {
149 /* do cts for final two blocks */
150 err = cts_cbc_encrypt(ctx, desc, dst, src,
151 cbc_blocks * bsize,
152 nbytes - (cbc_blocks * bsize));
153 }
154 }
155 214
156 return err; 215 skcipher_request_set_callback(subreq, req->base.flags &
216 CRYPTO_TFM_REQ_MAY_BACKLOG,
217 cts_cbc_crypt_done, req);
218
219 skcipher_request_set_crypt(subreq, sg, sg, bsize, space);
220 return crypto_skcipher_decrypt(subreq);
157} 221}
158 222
159static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx, 223static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
160 struct blkcipher_desc *desc,
161 struct scatterlist *dst,
162 struct scatterlist *src,
163 unsigned int offset,
164 unsigned int nbytes)
165{ 224{
166 int bsize = crypto_blkcipher_blocksize(desc->tfm); 225 struct skcipher_request *req = areq->data;
167 u8 tmp[bsize];
168 struct blkcipher_desc lcldesc;
169 struct scatterlist sgsrc[1], sgdst[1];
170 int lastn = nbytes - bsize;
171 u8 iv[bsize];
172 u8 s[bsize * 2], d[bsize * 2];
173 int err;
174
175 if (lastn < 0)
176 return -EINVAL;
177 226
178 sg_init_table(sgsrc, 1); 227 if (err)
179 sg_init_table(sgdst, 1); 228 goto out;
180 229
181 scatterwalk_map_and_copy(s, src, offset, nbytes, 0); 230 err = cts_cbc_decrypt(req);
231 if (err == -EINPROGRESS ||
232 (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
233 return;
182 234
183 lcldesc.tfm = ctx->child; 235out:
184 lcldesc.info = iv; 236 skcipher_request_complete(req, err);
185 lcldesc.flags = desc->flags; 237}
186 238
187 /* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/ 239static int crypto_cts_decrypt(struct skcipher_request *req)
188 memset(iv, 0, sizeof(iv)); 240{
189 sg_set_buf(&sgsrc[0], s, bsize); 241 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
190 sg_set_buf(&sgdst[0], tmp, bsize); 242 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
191 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 243 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
192 if (err) 244 struct skcipher_request *subreq = &rctx->subreq;
193 return err; 245 int bsize = crypto_skcipher_blocksize(tfm);
194 /* 2. Pad Cn with zeros at the end to create C of length BB */ 246 unsigned int nbytes = req->cryptlen;
195 memset(iv, 0, sizeof(iv)); 247 int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
196 memcpy(iv, s + bsize, lastn); 248 unsigned int offset;
197 /* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */ 249 u8 *space;
198 crypto_xor(tmp, iv, bsize); 250
199 /* 4. Select the first Ln bytes of Xn (tmp) to create Pn */ 251 skcipher_request_set_tfm(subreq, ctx->child);
200 memcpy(d + bsize, tmp, lastn); 252
201 253 if (cbc_blocks <= 0) {
202 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ 254 skcipher_request_set_callback(subreq, req->base.flags,
203 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); 255 req->base.complete,
204 /* 6. Decrypt En to create Pn-1 */ 256 req->base.data);
205 memzero_explicit(iv, sizeof(iv)); 257 skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
258 req->iv);
259 return crypto_skcipher_decrypt(subreq);
260 }
206 261
207 sg_set_buf(&sgsrc[0], s + bsize, bsize); 262 skcipher_request_set_callback(subreq, req->base.flags,
208 sg_set_buf(&sgdst[0], d, bsize); 263 crypto_cts_decrypt_done, req);
209 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
210 264
211 /* XOR with previous block */ 265 space = crypto_cts_reqctx_space(req);
212 crypto_xor(d, desc->info, bsize);
213 266
214 scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); 267 offset = cbc_blocks * bsize;
268 rctx->offset = offset;
215 269
216 memcpy(desc->info, s, bsize); 270 if (cbc_blocks <= 1)
217 return err; 271 memcpy(space, req->iv, bsize);
218} 272 else
273 scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
274 bsize, 0);
219 275
220static int crypto_cts_decrypt(struct blkcipher_desc *desc, 276 skcipher_request_set_crypt(subreq, req->src, req->dst,
221 struct scatterlist *dst, struct scatterlist *src, 277 offset, req->iv);
222 unsigned int nbytes)
223{
224 struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
225 int bsize = crypto_blkcipher_blocksize(desc->tfm);
226 int tot_blocks = (nbytes + bsize - 1) / bsize;
227 int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0;
228 struct blkcipher_desc lcldesc;
229 int err;
230 278
231 lcldesc.tfm = ctx->child; 279 return crypto_skcipher_decrypt(subreq) ?:
232 lcldesc.info = desc->info; 280 cts_cbc_decrypt(req);
233 lcldesc.flags = desc->flags;
234
235 if (tot_blocks == 1) {
236 err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize);
237 } else if (nbytes <= bsize * 2) {
238 err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes);
239 } else {
240 /* do normal function for tot_blocks - 2 */
241 err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src,
242 cbc_blocks * bsize);
243 if (err == 0) {
244 /* do cts for final two blocks */
245 err = cts_cbc_decrypt(ctx, desc, dst, src,
246 cbc_blocks * bsize,
247 nbytes - (cbc_blocks * bsize));
248 }
249 }
250 return err;
251} 281}
252 282
253static int crypto_cts_init_tfm(struct crypto_tfm *tfm) 283static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
254{ 284{
255 struct crypto_instance *inst = (void *)tfm->__crt_alg; 285 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
256 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 286 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
257 struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); 287 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
258 struct crypto_blkcipher *cipher; 288 struct crypto_skcipher *cipher;
259 289 unsigned reqsize;
260 cipher = crypto_spawn_blkcipher(spawn); 290 unsigned bsize;
291 unsigned align;
292
293 cipher = crypto_spawn_skcipher2(spawn);
261 if (IS_ERR(cipher)) 294 if (IS_ERR(cipher))
262 return PTR_ERR(cipher); 295 return PTR_ERR(cipher);
263 296
264 ctx->child = cipher; 297 ctx->child = cipher;
298
299 align = crypto_skcipher_alignmask(tfm);
300 bsize = crypto_skcipher_blocksize(cipher);
301 reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) +
302 crypto_skcipher_reqsize(cipher),
303 crypto_tfm_ctx_alignment()) +
304 (align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize;
305
306 crypto_skcipher_set_reqsize(tfm, reqsize);
307
265 return 0; 308 return 0;
266} 309}
267 310
268static void crypto_cts_exit_tfm(struct crypto_tfm *tfm) 311static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm)
269{ 312{
270 struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); 313 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
271 crypto_free_blkcipher(ctx->child); 314
315 crypto_free_skcipher(ctx->child);
272} 316}
273 317
274static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) 318static void crypto_cts_free(struct skcipher_instance *inst)
275{ 319{
276 struct crypto_instance *inst; 320 crypto_drop_skcipher(skcipher_instance_ctx(inst));
277 struct crypto_alg *alg; 321 kfree(inst);
322}
323
324static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
325{
326 struct crypto_skcipher_spawn *spawn;
327 struct skcipher_instance *inst;
328 struct crypto_attr_type *algt;
329 struct skcipher_alg *alg;
330 const char *cipher_name;
278 int err; 331 int err;
279 332
280 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); 333 algt = crypto_get_attr_type(tb);
334 if (IS_ERR(algt))
335 return PTR_ERR(algt);
336
337 if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
338 return -EINVAL;
339
340 cipher_name = crypto_attr_alg_name(tb[1]);
341 if (IS_ERR(cipher_name))
342 return PTR_ERR(cipher_name);
343
344 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
345 if (!inst)
346 return -ENOMEM;
347
348 spawn = skcipher_instance_ctx(inst);
349
350 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
351 err = crypto_grab_skcipher2(spawn, cipher_name, 0,
352 crypto_requires_sync(algt->type,
353 algt->mask));
281 if (err) 354 if (err)
282 return ERR_PTR(err); 355 goto err_free_inst;
283 356
284 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, 357 alg = crypto_spawn_skcipher_alg(spawn);
285 CRYPTO_ALG_TYPE_MASK);
286 if (IS_ERR(alg))
287 return ERR_CAST(alg);
288 358
289 inst = ERR_PTR(-EINVAL); 359 err = -EINVAL;
290 if (!is_power_of_2(alg->cra_blocksize)) 360 if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
291 goto out_put_alg; 361 goto err_drop_spawn;
292 362
293 if (strncmp(alg->cra_name, "cbc(", 4)) 363 if (strncmp(alg->base.cra_name, "cbc(", 4))
294 goto out_put_alg; 364 goto err_drop_spawn;
295 365
296 inst = crypto_alloc_instance("cts", alg); 366 err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
297 if (IS_ERR(inst)) 367 &alg->base);
298 goto out_put_alg; 368 if (err)
369 goto err_drop_spawn;
299 370
300 inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; 371 inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
301 inst->alg.cra_priority = alg->cra_priority; 372 inst->alg.base.cra_priority = alg->base.cra_priority;
302 inst->alg.cra_blocksize = alg->cra_blocksize; 373 inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
303 inst->alg.cra_alignmask = alg->cra_alignmask; 374 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
304 inst->alg.cra_type = &crypto_blkcipher_type;
305 375
306 /* We access the data as u32s when xoring. */ 376 /* We access the data as u32s when xoring. */
307 inst->alg.cra_alignmask |= __alignof__(u32) - 1; 377 inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
308 378
309 inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; 379 inst->alg.ivsize = alg->base.cra_blocksize;
310 inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; 380 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
311 inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; 381 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
382 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
312 383
313 inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); 384 inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
314 385
315 inst->alg.cra_init = crypto_cts_init_tfm; 386 inst->alg.init = crypto_cts_init_tfm;
316 inst->alg.cra_exit = crypto_cts_exit_tfm; 387 inst->alg.exit = crypto_cts_exit_tfm;
317 388
318 inst->alg.cra_blkcipher.setkey = crypto_cts_setkey; 389 inst->alg.setkey = crypto_cts_setkey;
319 inst->alg.cra_blkcipher.encrypt = crypto_cts_encrypt; 390 inst->alg.encrypt = crypto_cts_encrypt;
320 inst->alg.cra_blkcipher.decrypt = crypto_cts_decrypt; 391 inst->alg.decrypt = crypto_cts_decrypt;
321 392
322out_put_alg: 393 inst->free = crypto_cts_free;
323 crypto_mod_put(alg);
324 return inst;
325}
326 394
327static void crypto_cts_free(struct crypto_instance *inst) 395 err = skcipher_register_instance(tmpl, inst);
328{ 396 if (err)
329 crypto_drop_spawn(crypto_instance_ctx(inst)); 397 goto err_drop_spawn;
398
399out:
400 return err;
401
402err_drop_spawn:
403 crypto_drop_skcipher(spawn);
404err_free_inst:
330 kfree(inst); 405 kfree(inst);
406 goto out;
331} 407}
332 408
333static struct crypto_template crypto_cts_tmpl = { 409static struct crypto_template crypto_cts_tmpl = {
334 .name = "cts", 410 .name = "cts",
335 .alloc = crypto_cts_alloc, 411 .create = crypto_cts_create,
336 .free = crypto_cts_free,
337 .module = THIS_MODULE, 412 .module = THIS_MODULE,
338}; 413};
339 414
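[Editor's note] The rewritten cts template keeps the same CS3 ("ciphertext stealing") construction: plain CBC over all complete leading blocks, then cts_cbc_encrypt()/cts_cbc_decrypt() rework the final two blocks. For encryption this is equivalent to CBC-encrypting the zero-padded plaintext, swapping the last two ciphertext blocks, and truncating to the plaintext length. A standalone sketch of that rearrangement for messages of more than one block (helper name illustrative; assumes the caller already holds the zero-padded CBC ciphertext):

	#include <stddef.h>
	#include <string.h>

	/* cbc holds n * bsize bytes: the CBC ciphertext of the zero-padded
	 * message. ptlen is the true plaintext length, with
	 * (n - 1) * bsize < ptlen <= n * bsize and n >= 2. */
	static void cbc_to_cts3(unsigned char *out, const unsigned char *cbc,
				size_t ptlen, size_t bsize)
	{
		size_t n = (ptlen + bsize - 1) / bsize; /* total blocks */
		size_t lastn = ptlen - (n - 1) * bsize; /* bytes in final block */

		/* blocks C_1 .. C_{n-2} pass through unchanged */
		memcpy(out, cbc, (n - 2) * bsize);
		/* the full final block C_n comes next ... */
		memcpy(out + (n - 2) * bsize, cbc + (n - 1) * bsize, bsize);
		/* ... followed by the first lastn bytes of C_{n-1} */
		memcpy(out + (n - 1) * bsize, cbc + (n - 2) * bsize, lastn);
	}

Single-block requests bypass the stealing path entirely and go straight to the child transform, as in crypto_cts_encrypt() above.
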
diff --git a/crypto/dh.c b/crypto/dh.c
new file mode 100644
index 000000000000..9d19360e7189
--- /dev/null
+++ b/crypto/dh.c
@@ -0,0 +1,189 @@
1/* Diffie-Hellman Key Agreement Method [RFC2631]
2 *
3 * Copyright (c) 2016, Intel Corporation
4 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <crypto/internal/kpp.h>
14#include <crypto/kpp.h>
15#include <crypto/dh.h>
16#include <linux/mpi.h>
17
18struct dh_ctx {
19 MPI p;
20 MPI g;
21 MPI xa;
22};
23
24static inline void dh_clear_params(struct dh_ctx *ctx)
25{
26 mpi_free(ctx->p);
27 mpi_free(ctx->g);
28 ctx->p = NULL;
29 ctx->g = NULL;
30}
31
32static void dh_free_ctx(struct dh_ctx *ctx)
33{
34 dh_clear_params(ctx);
35 mpi_free(ctx->xa);
36 ctx->xa = NULL;
37}
38
39/*
40 * If base is g we compute the public key
41 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
 42 * else if base is the counterpart public key we compute the shared secret
43 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
44 */
45static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val)
46{
47 /* val = base^xa mod p */
48 return mpi_powm(val, base, ctx->xa, ctx->p);
49}
50
51static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm)
52{
53 return kpp_tfm_ctx(tfm);
54}
55
56static int dh_check_params_length(unsigned int p_len)
57{
58 return (p_len < 1536) ? -EINVAL : 0;
59}
60
61static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
62{
63 if (unlikely(!params->p || !params->g))
64 return -EINVAL;
65
66 if (dh_check_params_length(params->p_size << 3))
67 return -EINVAL;
68
69 ctx->p = mpi_read_raw_data(params->p, params->p_size);
70 if (!ctx->p)
71 return -EINVAL;
72
73 ctx->g = mpi_read_raw_data(params->g, params->g_size);
74 if (!ctx->g) {
75 mpi_free(ctx->p);
76 return -EINVAL;
77 }
78
79 return 0;
80}
81
82static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
83{
84 struct dh_ctx *ctx = dh_get_ctx(tfm);
85 struct dh params;
86
87 if (crypto_dh_decode_key(buf, len, &params) < 0)
88 return -EINVAL;
89
90 if (dh_set_params(ctx, &params) < 0)
91 return -EINVAL;
92
93 ctx->xa = mpi_read_raw_data(params.key, params.key_size);
94 if (!ctx->xa) {
95 dh_clear_params(ctx);
96 return -EINVAL;
97 }
98
99 return 0;
100}
101
102static int dh_compute_value(struct kpp_request *req)
103{
104 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
105 struct dh_ctx *ctx = dh_get_ctx(tfm);
106 MPI base, val = mpi_alloc(0);
107 int ret = 0;
108 int sign;
109
110 if (!val)
111 return -ENOMEM;
112
113 if (unlikely(!ctx->xa)) {
114 ret = -EINVAL;
115 goto err_free_val;
116 }
117
118 if (req->src) {
119 base = mpi_read_raw_from_sgl(req->src, req->src_len);
120 if (!base) {
 121 ret = -EINVAL;
122 goto err_free_val;
123 }
124 } else {
125 base = ctx->g;
126 }
127
128 ret = _compute_val(ctx, base, val);
129 if (ret)
130 goto err_free_base;
131
132 ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
133 if (ret)
134 goto err_free_base;
135
136 if (sign < 0)
137 ret = -EBADMSG;
138err_free_base:
139 if (req->src)
140 mpi_free(base);
141err_free_val:
142 mpi_free(val);
143 return ret;
144}
145
146static int dh_max_size(struct crypto_kpp *tfm)
147{
148 struct dh_ctx *ctx = dh_get_ctx(tfm);
149
150 return mpi_get_size(ctx->p);
151}
152
153static void dh_exit_tfm(struct crypto_kpp *tfm)
154{
155 struct dh_ctx *ctx = dh_get_ctx(tfm);
156
157 dh_free_ctx(ctx);
158}
159
160static struct kpp_alg dh = {
161 .set_secret = dh_set_secret,
162 .generate_public_key = dh_compute_value,
163 .compute_shared_secret = dh_compute_value,
164 .max_size = dh_max_size,
165 .exit = dh_exit_tfm,
166 .base = {
167 .cra_name = "dh",
168 .cra_driver_name = "dh-generic",
169 .cra_priority = 100,
170 .cra_module = THIS_MODULE,
171 .cra_ctxsize = sizeof(struct dh_ctx),
172 },
173};
174
175static int dh_init(void)
176{
177 return crypto_register_kpp(&dh);
178}
179
180static void dh_exit(void)
181{
182 crypto_unregister_kpp(&dh);
183}
184
185module_init(dh_init);
186module_exit(dh_exit);
187MODULE_ALIAS_CRYPTO("dh");
188MODULE_LICENSE("GPL");
189MODULE_DESCRIPTION("DH generic algorithm");
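[Editor's note] dh_compute_value() doubles as both kpp operations: with no req->src the base is the generator g and the result is the public key; with req->src set the base is the peer's public value and the result is the shared secret. A hedged sketch of how a kernel caller might drive the public-key half through the kpp API added by this series (error paths trimmed; assumes a synchronous transform and the helpers from dh_helper.c below):

	#include <crypto/dh.h>
	#include <crypto/kpp.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int dh_generate_public(struct dh *params, struct scatterlist *dst,
				      unsigned int dst_len)
	{
		struct crypto_kpp *tfm;
		struct kpp_request *req;
		char *secret;
		int len, err;

		tfm = crypto_alloc_kpp("dh", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* pack p, g and the private key into one blob for set_secret */
		len = crypto_dh_key_len(params);
		secret = kmalloc(len, GFP_KERNEL);
		err = secret ? crypto_dh_encode_key(secret, len, params) : -ENOMEM;
		if (!err)
			err = crypto_kpp_set_secret(tfm, secret, len);

		req = kpp_request_alloc(tfm, GFP_KERNEL);
		if (!err && !req)
			err = -ENOMEM;
		if (!err) {
			kpp_request_set_input(req, NULL, 0); /* no src: base is g */
			kpp_request_set_output(req, dst, dst_len);
			err = crypto_kpp_generate_public_key(req);
		}

		kpp_request_free(req);
		kzfree(secret);
		crypto_free_kpp(tfm);
		return err;
	}
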
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
new file mode 100644
index 000000000000..02db76b20d00
--- /dev/null
+++ b/crypto/dh_helper.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright (c) 2016, Intel Corporation
3 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public Licence
7 * as published by the Free Software Foundation; either version
8 * 2 of the Licence, or (at your option) any later version.
9 */
10#include <linux/kernel.h>
11#include <linux/export.h>
12#include <linux/err.h>
13#include <linux/string.h>
14#include <crypto/dh.h>
15#include <crypto/kpp.h>
16
17#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
18
19static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
20{
21 memcpy(dst, src, size);
22 return dst + size;
23}
24
25static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
26{
27 memcpy(dst, src, size);
28 return src + size;
29}
30
31static inline int dh_data_size(const struct dh *p)
32{
33 return p->key_size + p->p_size + p->g_size;
34}
35
36int crypto_dh_key_len(const struct dh *p)
37{
38 return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
39}
40EXPORT_SYMBOL_GPL(crypto_dh_key_len);
41
42int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
43{
44 u8 *ptr = buf;
45 struct kpp_secret secret = {
46 .type = CRYPTO_KPP_SECRET_TYPE_DH,
47 .len = len
48 };
49
50 if (unlikely(!buf))
51 return -EINVAL;
52
53 if (len != crypto_dh_key_len(params))
54 return -EINVAL;
55
56 ptr = dh_pack_data(ptr, &secret, sizeof(secret));
57 ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
58 ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
59 ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
60 ptr = dh_pack_data(ptr, params->key, params->key_size);
61 ptr = dh_pack_data(ptr, params->p, params->p_size);
62 dh_pack_data(ptr, params->g, params->g_size);
63
64 return 0;
65}
66EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
67
68int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
69{
70 const u8 *ptr = buf;
71 struct kpp_secret secret;
72
73 if (unlikely(!buf || len < DH_KPP_SECRET_MIN_SIZE))
74 return -EINVAL;
75
76 ptr = dh_unpack_data(&secret, ptr, sizeof(secret));
77 if (secret.type != CRYPTO_KPP_SECRET_TYPE_DH)
78 return -EINVAL;
79
80 ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
81 ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
82 ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
83 if (secret.len != crypto_dh_key_len(params))
84 return -EINVAL;
85
86 /* Don't allocate memory. Set pointers to data within
87 * the given buffer
88 */
89 params->key = (void *)ptr;
90 params->p = (void *)(ptr + params->key_size);
91 params->g = (void *)(ptr + params->key_size + params->p_size);
92
93 return 0;
94}
95EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
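[Editor's note] The encode/decode pair above defines a simple packed format: the kpp_secret header, the three sizes, then the raw key, p and g bytes back to back. Decoding makes no copies; struct dh ends up pointing into the caller's buffer, so the buffer must outlive the params. A minimal round-trip sketch using only the helpers above:

	#include <crypto/dh.h>
	#include <linux/slab.h>

	static int dh_roundtrip(const struct dh *in)
	{
		struct dh out;
		int len = crypto_dh_key_len(in);
		char *buf = kmalloc(len, GFP_KERNEL);
		int err;

		if (!buf)
			return -ENOMEM;

		err = crypto_dh_encode_key(buf, len, in);
		if (!err)
			/* out.key, out.p and out.g now point into buf */
			err = crypto_dh_decode_key(buf, len, &out);

		kzfree(buf);    /* invalidates out.key, out.p and out.g */
		return err;
	}
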
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 0a3538f6cf22..f752da3a7c75 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -252,10 +252,16 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
252MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128"); 252MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
253MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128"); 253MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
254 254
255static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, 255static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
256 unsigned char *outval, const struct drbg_string *in); 256 const unsigned char *key);
257static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
258 const struct drbg_string *in);
257static int drbg_init_sym_kernel(struct drbg_state *drbg); 259static int drbg_init_sym_kernel(struct drbg_state *drbg);
258static int drbg_fini_sym_kernel(struct drbg_state *drbg); 260static int drbg_fini_sym_kernel(struct drbg_state *drbg);
261static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
262 u8 *inbuf, u32 inbuflen,
263 u8 *outbuf, u32 outlen);
264#define DRBG_CTR_NULL_LEN 128
259 265
260/* BCC function for CTR DRBG as defined in 10.4.3 */ 266/* BCC function for CTR DRBG as defined in 10.4.3 */
261static int drbg_ctr_bcc(struct drbg_state *drbg, 267static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -270,6 +276,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
270 drbg_string_fill(&data, out, drbg_blocklen(drbg)); 276 drbg_string_fill(&data, out, drbg_blocklen(drbg));
271 277
272 /* 10.4.3 step 2 / 4 */ 278 /* 10.4.3 step 2 / 4 */
279 drbg_kcapi_symsetkey(drbg, key);
273 list_for_each_entry(curr, in, list) { 280 list_for_each_entry(curr, in, list) {
274 const unsigned char *pos = curr->buf; 281 const unsigned char *pos = curr->buf;
275 size_t len = curr->len; 282 size_t len = curr->len;
@@ -278,7 +285,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
278 /* 10.4.3 step 4.2 */ 285 /* 10.4.3 step 4.2 */
279 if (drbg_blocklen(drbg) == cnt) { 286 if (drbg_blocklen(drbg) == cnt) {
280 cnt = 0; 287 cnt = 0;
281 ret = drbg_kcapi_sym(drbg, key, out, &data); 288 ret = drbg_kcapi_sym(drbg, out, &data);
282 if (ret) 289 if (ret)
283 return ret; 290 return ret;
284 } 291 }
@@ -290,7 +297,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
290 } 297 }
291 /* 10.4.3 step 4.2 for last block */ 298 /* 10.4.3 step 4.2 for last block */
292 if (cnt) 299 if (cnt)
293 ret = drbg_kcapi_sym(drbg, key, out, &data); 300 ret = drbg_kcapi_sym(drbg, out, &data);
294 301
295 return ret; 302 return ret;
296} 303}
@@ -425,6 +432,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
425 /* 10.4.2 step 12: overwriting of outval is implemented in next step */ 432 /* 10.4.2 step 12: overwriting of outval is implemented in next step */
426 433
427 /* 10.4.2 step 13 */ 434 /* 10.4.2 step 13 */
435 drbg_kcapi_symsetkey(drbg, temp);
428 while (generated_len < bytes_to_return) { 436 while (generated_len < bytes_to_return) {
429 short blocklen = 0; 437 short blocklen = 0;
430 /* 438 /*
@@ -432,7 +440,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
432 * implicit as the key is only drbg_blocklen in size based on 440 * implicit as the key is only drbg_blocklen in size based on
433 * the implementation of the cipher function callback 441 * the implementation of the cipher function callback
434 */ 442 */
435 ret = drbg_kcapi_sym(drbg, temp, X, &cipherin); 443 ret = drbg_kcapi_sym(drbg, X, &cipherin);
436 if (ret) 444 if (ret)
437 goto out; 445 goto out;
438 blocklen = (drbg_blocklen(drbg) < 446 blocklen = (drbg_blocklen(drbg) <
@@ -476,49 +484,47 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
476 unsigned char *temp = drbg->scratchpad; 484 unsigned char *temp = drbg->scratchpad;
477 unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) + 485 unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
478 drbg_blocklen(drbg); 486 drbg_blocklen(drbg);
479 unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
480 unsigned int len = 0;
481 struct drbg_string cipherin;
482 487
483 if (3 > reseed) 488 if (3 > reseed)
484 memset(df_data, 0, drbg_statelen(drbg)); 489 memset(df_data, 0, drbg_statelen(drbg));
485 490
486 /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */ 491 if (!reseed) {
487 if (seed) { 492 /*
488 ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); 493 * The DRBG uses the CTR mode of the underlying AES cipher. The
494 * CTR mode increments the counter value after the AES operation
495 * but SP800-90A requires that the counter is incremented before
 496 * the AES operation. Hence, we increment it by one at the
 497 * time we set it.
498 */
499 crypto_inc(drbg->V, drbg_blocklen(drbg));
500
501 ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
502 drbg_keylen(drbg));
489 if (ret) 503 if (ret)
490 goto out; 504 goto out;
491 } 505 }
492 506
493 drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg)); 507 /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
494 /* 508 if (seed) {
495 * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation 509 ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
496 * zeroizes all memory during initialization
497 */
498 while (len < (drbg_statelen(drbg))) {
499 /* 10.2.1.2 step 2.1 */
500 crypto_inc(drbg->V, drbg_blocklen(drbg));
501 /*
502 * 10.2.1.2 step 2.2 */
503 ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
504 if (ret) 510 if (ret)
505 goto out; 511 goto out;
506 /* 10.2.1.2 step 2.3 and 3 */
507 len += drbg_blocklen(drbg);
508 } 512 }
509 513
510 /* 10.2.1.2 step 4 */ 514 ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg),
511 temp_p = temp; 515 temp, drbg_statelen(drbg));
512 df_data_p = df_data; 516 if (ret)
513 for (len = 0; len < drbg_statelen(drbg); len++) { 517 return ret;
514 *temp_p ^= *df_data_p;
515 df_data_p++; temp_p++;
516 }
517 518
518 /* 10.2.1.2 step 5 */ 519 /* 10.2.1.2 step 5 */
519 memcpy(drbg->C, temp, drbg_keylen(drbg)); 520 ret = crypto_skcipher_setkey(drbg->ctr_handle, temp,
521 drbg_keylen(drbg));
522 if (ret)
523 goto out;
520 /* 10.2.1.2 step 6 */ 524 /* 10.2.1.2 step 6 */
521 memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg)); 525 memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
526 /* See above: increment counter by one to compensate timing of CTR op */
527 crypto_inc(drbg->V, drbg_blocklen(drbg));
522 ret = 0; 528 ret = 0;
523 529
524out: 530out:
@@ -537,9 +543,8 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
537 unsigned char *buf, unsigned int buflen, 543 unsigned char *buf, unsigned int buflen,
538 struct list_head *addtl) 544 struct list_head *addtl)
539{ 545{
540 int len = 0; 546 int ret;
541 int ret = 0; 547 int len = min_t(int, buflen, INT_MAX);
542 struct drbg_string data;
543 548
544 /* 10.2.1.5.2 step 2 */ 549 /* 10.2.1.5.2 step 2 */
545 if (addtl && !list_empty(addtl)) { 550 if (addtl && !list_empty(addtl)) {
@@ -549,33 +554,16 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
549 } 554 }
550 555
551 /* 10.2.1.5.2 step 4.1 */ 556 /* 10.2.1.5.2 step 4.1 */
552 crypto_inc(drbg->V, drbg_blocklen(drbg)); 557 ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
553 drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg)); 558 buf, len);
554 while (len < buflen) { 559 if (ret)
555 int outlen = 0; 560 return ret;
556 /* 10.2.1.5.2 step 4.2 */
557 ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data);
558 if (ret) {
559 len = ret;
560 goto out;
561 }
562 outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
563 drbg_blocklen(drbg) : (buflen - len);
564 /* 10.2.1.5.2 step 4.3 */
565 memcpy(buf + len, drbg->scratchpad, outlen);
566 len += outlen;
567 /* 10.2.1.5.2 step 6 */
568 if (len < buflen)
569 crypto_inc(drbg->V, drbg_blocklen(drbg));
570 }
571 561
572 /* 10.2.1.5.2 step 6 */ 562 /* 10.2.1.5.2 step 6 */
573 ret = drbg_ctr_update(drbg, NULL, 3); 563 ret = drbg_ctr_update(drbg, NULL, 3);
574 if (ret) 564 if (ret)
575 len = ret; 565 len = ret;
576 566
577out:
578 memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
579 return len; 567 return len;
580} 568}
581 569
@@ -1145,11 +1133,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
1145 if (!drbg) 1133 if (!drbg)
1146 return; 1134 return;
 1147 kzfree(drbg->V); 1135 kzfree(drbg->Vbuf);
 1148 drbg->V = NULL; 1136 drbg->Vbuf = NULL;
 1149 kzfree(drbg->C); 1137 kzfree(drbg->Cbuf);
 1150 drbg->C = NULL; 1138 drbg->Cbuf = NULL;
1151 kzfree(drbg->scratchpad); 1139 kzfree(drbg->scratchpadbuf);
1152 drbg->scratchpad = NULL; 1140 drbg->scratchpadbuf = NULL;
1153 drbg->reseed_ctr = 0; 1141 drbg->reseed_ctr = 0;
1154 drbg->d_ops = NULL; 1142 drbg->d_ops = NULL;
1155 drbg->core = NULL; 1143 drbg->core = NULL;
@@ -1185,12 +1173,18 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
1185 goto err; 1173 goto err;
1186 } 1174 }
1187 1175
1188 drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); 1176 ret = drbg->d_ops->crypto_init(drbg);
1189 if (!drbg->V) 1177 if (ret < 0)
1190 goto err;
1191 drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
1192 if (!drbg->C)
1193 goto err; 1178 goto err;
1179
1180 drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
1181 if (!drbg->Vbuf)
1182 goto fini;
1183 drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
1184 drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
1185 if (!drbg->Cbuf)
1186 goto fini;
1187 drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
1194 /* scratchpad is only generated for CTR and Hash */ 1188 /* scratchpad is only generated for CTR and Hash */
1195 if (drbg->core->flags & DRBG_HMAC) 1189 if (drbg->core->flags & DRBG_HMAC)
1196 sb_size = 0; 1190 sb_size = 0;
@@ -1204,13 +1198,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
1204 sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg); 1198 sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
1205 1199
1206 if (0 < sb_size) { 1200 if (0 < sb_size) {
1207 drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL); 1201 drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
1208 if (!drbg->scratchpad) 1202 if (!drbg->scratchpadbuf)
1209 goto err; 1203 goto fini;
1204 drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
1210 } 1205 }
1211 1206
1212 return 0; 1207 return 0;
1213 1208
1209fini:
1210 drbg->d_ops->crypto_fini(drbg);
1214err: 1211err:
1215 drbg_dealloc_state(drbg); 1212 drbg_dealloc_state(drbg);
1216 return ret; 1213 return ret;
@@ -1478,10 +1475,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
1478 if (ret) 1475 if (ret)
1479 goto unlock; 1476 goto unlock;
1480 1477
1481 ret = -EFAULT;
1482 if (drbg->d_ops->crypto_init(drbg))
1483 goto err;
1484
1485 ret = drbg_prepare_hrng(drbg); 1478 ret = drbg_prepare_hrng(drbg);
1486 if (ret) 1479 if (ret)
1487 goto free_everything; 1480 goto free_everything;
@@ -1505,8 +1498,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
1505 mutex_unlock(&drbg->drbg_mutex); 1498 mutex_unlock(&drbg->drbg_mutex);
1506 return ret; 1499 return ret;
1507 1500
1508err:
1509 drbg_dealloc_state(drbg);
1510unlock: 1501unlock:
1511 mutex_unlock(&drbg->drbg_mutex); 1502 mutex_unlock(&drbg->drbg_mutex);
1512 return ret; 1503 return ret;
@@ -1591,7 +1582,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
1591 sdesc->shash.tfm = tfm; 1582 sdesc->shash.tfm = tfm;
1592 sdesc->shash.flags = 0; 1583 sdesc->shash.flags = 0;
1593 drbg->priv_data = sdesc; 1584 drbg->priv_data = sdesc;
1594 return 0; 1585
1586 return crypto_shash_alignmask(tfm);
1595} 1587}
1596 1588
1597static int drbg_fini_hash_kernel(struct drbg_state *drbg) 1589static int drbg_fini_hash_kernel(struct drbg_state *drbg)
@@ -1627,10 +1619,45 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
1627#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ 1619#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
1628 1620
1629#ifdef CONFIG_CRYPTO_DRBG_CTR 1621#ifdef CONFIG_CRYPTO_DRBG_CTR
1622static int drbg_fini_sym_kernel(struct drbg_state *drbg)
1623{
1624 struct crypto_cipher *tfm =
1625 (struct crypto_cipher *)drbg->priv_data;
1626 if (tfm)
1627 crypto_free_cipher(tfm);
1628 drbg->priv_data = NULL;
1629
1630 if (drbg->ctr_handle)
1631 crypto_free_skcipher(drbg->ctr_handle);
1632 drbg->ctr_handle = NULL;
1633
1634 if (drbg->ctr_req)
1635 skcipher_request_free(drbg->ctr_req);
1636 drbg->ctr_req = NULL;
1637
1638 kfree(drbg->ctr_null_value_buf);
1639 drbg->ctr_null_value = NULL;
1640
1641 return 0;
1642}
1643
1644static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
1645{
1646 struct drbg_state *drbg = req->data;
1647
1648 if (error == -EINPROGRESS)
1649 return;
1650 drbg->ctr_async_err = error;
1651 complete(&drbg->ctr_completion);
1652}
1653
1630static int drbg_init_sym_kernel(struct drbg_state *drbg) 1654static int drbg_init_sym_kernel(struct drbg_state *drbg)
1631{ 1655{
1632 int ret = 0;
1633 struct crypto_cipher *tfm; 1656 struct crypto_cipher *tfm;
1657 struct crypto_skcipher *sk_tfm;
1658 struct skcipher_request *req;
1659 unsigned int alignmask;
1660 char ctr_name[CRYPTO_MAX_ALG_NAME];
1634 1661
1635 tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); 1662 tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
1636 if (IS_ERR(tfm)) { 1663 if (IS_ERR(tfm)) {
@@ -1640,31 +1667,103 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
1640 } 1667 }
1641 BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); 1668 BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
1642 drbg->priv_data = tfm; 1669 drbg->priv_data = tfm;
1643 return ret; 1670
1671 if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
1672 drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
1673 drbg_fini_sym_kernel(drbg);
1674 return -EINVAL;
1675 }
1676 sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
1677 if (IS_ERR(sk_tfm)) {
1678 pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
1679 ctr_name);
1680 drbg_fini_sym_kernel(drbg);
1681 return PTR_ERR(sk_tfm);
1682 }
1683 drbg->ctr_handle = sk_tfm;
1684
1685 req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
1686 if (!req) {
1687 pr_info("DRBG: could not allocate request queue\n");
1688 drbg_fini_sym_kernel(drbg);
1689 return -ENOMEM;
1690 }
1691 drbg->ctr_req = req;
1692 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1693 drbg_skcipher_cb, drbg);
1694
1695 alignmask = crypto_skcipher_alignmask(sk_tfm);
1696 drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
1697 GFP_KERNEL);
1698 if (!drbg->ctr_null_value_buf) {
1699 drbg_fini_sym_kernel(drbg);
1700 return -ENOMEM;
1701 }
1702 drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
1703 alignmask + 1);
1704
1705 return alignmask;
1644} 1706}
1645 1707
1646static int drbg_fini_sym_kernel(struct drbg_state *drbg) 1708static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
1709 const unsigned char *key)
1647{ 1710{
1648 struct crypto_cipher *tfm = 1711 struct crypto_cipher *tfm =
1649 (struct crypto_cipher *)drbg->priv_data; 1712 (struct crypto_cipher *)drbg->priv_data;
1650 if (tfm) 1713
1651 crypto_free_cipher(tfm); 1714 crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
1652 drbg->priv_data = NULL;
1653 return 0;
1654} 1715}
1655 1716
1656static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, 1717static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
1657 unsigned char *outval, const struct drbg_string *in) 1718 const struct drbg_string *in)
1658{ 1719{
1659 struct crypto_cipher *tfm = 1720 struct crypto_cipher *tfm =
1660 (struct crypto_cipher *)drbg->priv_data; 1721 (struct crypto_cipher *)drbg->priv_data;
1661 1722
1662 crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
1663 /* there is only component in *in */ 1723 /* there is only component in *in */
1664 BUG_ON(in->len < drbg_blocklen(drbg)); 1724 BUG_ON(in->len < drbg_blocklen(drbg));
1665 crypto_cipher_encrypt_one(tfm, outval, in->buf); 1725 crypto_cipher_encrypt_one(tfm, outval, in->buf);
1666 return 0; 1726 return 0;
1667} 1727}
1728
1729static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1730 u8 *inbuf, u32 inlen,
1731 u8 *outbuf, u32 outlen)
1732{
1733 struct scatterlist sg_in;
1734
1735 sg_init_one(&sg_in, inbuf, inlen);
1736
1737 while (outlen) {
1738 u32 cryptlen = min_t(u32, inlen, outlen);
1739 struct scatterlist sg_out;
1740 int ret;
1741
1742 sg_init_one(&sg_out, outbuf, cryptlen);
1743 skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
1744 cryptlen, drbg->V);
1745 ret = crypto_skcipher_encrypt(drbg->ctr_req);
1746 switch (ret) {
1747 case 0:
1748 break;
1749 case -EINPROGRESS:
1750 case -EBUSY:
1751 ret = wait_for_completion_interruptible(
1752 &drbg->ctr_completion);
1753 if (!ret && !drbg->ctr_async_err) {
1754 reinit_completion(&drbg->ctr_completion);
1755 break;
1756 }
1757 default:
1758 return ret;
1759 }
1760 init_completion(&drbg->ctr_completion);
1761
1762 outlen -= cryptlen;
1763 }
1764
1765 return 0;
1766}
1668#endif /* CONFIG_CRYPTO_DRBG_CTR */ 1767#endif /* CONFIG_CRYPTO_DRBG_CTR */
1669 1768
1670/*************************************************************** 1769/***************************************************************
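[Editor's note] The net effect of the CTR DRBG rework: instead of looping over single-block drbg_kcapi_sym() calls, output generation CTR-encrypts a preallocated 128-byte null buffer (DRBG_CTR_NULL_LEN) in chunks, with drbg->V acting as the counter block that the skcipher advances in place. A toy sketch of that keystream loop (ctr_encrypt() is a hypothetical stand-in for the drbg->ctr_req round trip in drbg_kcapi_sym_ctr() above):

	/* Keystream generation as done by drbg_kcapi_sym_ctr() above:
	 * CTR-encrypting zeros yields the raw keystream. */
	static int drbg_ctr_keystream(unsigned char *out, unsigned int outlen,
				      unsigned char *V /* counter block */)
	{
		static const unsigned char null_value[128]; /* DRBG_CTR_NULL_LEN */

		while (outlen) {
			unsigned int n = outlen < sizeof(null_value) ?
					 outlen : sizeof(null_value);
			/* hypothetical helper: CTR-encrypt n bytes, updating V */
			int ret = ctr_encrypt(out, null_value, n, V);

			if (ret)
				return ret;
			out += n;
			outlen -= n;
		}

		return 0;
	}
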
diff --git a/crypto/ecc.c b/crypto/ecc.c
new file mode 100644
index 000000000000..414c78a9c214
--- /dev/null
+++ b/crypto/ecc.c
@@ -0,0 +1,1018 @@
1/*
2 * Copyright (c) 2013, Kenneth MacKay
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
18 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <linux/random.h>
28#include <linux/slab.h>
29#include <linux/swab.h>
30#include <linux/fips.h>
31#include <crypto/ecdh.h>
32
33#include "ecc.h"
34#include "ecc_curve_defs.h"
35
36typedef struct {
37 u64 m_low;
38 u64 m_high;
39} uint128_t;
40
41static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
42{
43 switch (curve_id) {
44 /* In FIPS mode only allow P256 and higher */
45 case ECC_CURVE_NIST_P192:
46 return fips_enabled ? NULL : &nist_p192;
47 case ECC_CURVE_NIST_P256:
48 return &nist_p256;
49 default:
50 return NULL;
51 }
52}
53
54static u64 *ecc_alloc_digits_space(unsigned int ndigits)
55{
56 size_t len = ndigits * sizeof(u64);
57
58 if (!len)
59 return NULL;
60
61 return kmalloc(len, GFP_KERNEL);
62}
63
64static void ecc_free_digits_space(u64 *space)
65{
66 kzfree(space);
67}
68
69static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
70{
71 struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);
72
73 if (!p)
74 return NULL;
75
76 p->x = ecc_alloc_digits_space(ndigits);
77 if (!p->x)
78 goto err_alloc_x;
79
80 p->y = ecc_alloc_digits_space(ndigits);
81 if (!p->y)
82 goto err_alloc_y;
83
84 p->ndigits = ndigits;
85
86 return p;
87
88err_alloc_y:
89 ecc_free_digits_space(p->x);
90err_alloc_x:
91 kfree(p);
92 return NULL;
93}
94
95static void ecc_free_point(struct ecc_point *p)
96{
97 if (!p)
98 return;
99
100 kzfree(p->x);
101 kzfree(p->y);
102 kzfree(p);
103}
104
105static void vli_clear(u64 *vli, unsigned int ndigits)
106{
107 int i;
108
109 for (i = 0; i < ndigits; i++)
110 vli[i] = 0;
111}
112
113/* Returns true if vli == 0, false otherwise. */
114static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
115{
116 int i;
117
118 for (i = 0; i < ndigits; i++) {
119 if (vli[i])
120 return false;
121 }
122
123 return true;
124}
125
126/* Returns nonzero if bit 'bit' of vli is set. */
127static u64 vli_test_bit(const u64 *vli, unsigned int bit)
128{
129 return (vli[bit / 64] & ((u64)1 << (bit % 64)));
130}
131
132/* Counts the number of 64-bit "digits" in vli. */
133static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
134{
135 int i;
136
137 /* Search from the end until we find a non-zero digit.
138 * We do it in reverse because we expect that most digits will
139 * be nonzero.
140 */
141 for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);
142
143 return (i + 1);
144}
145
146/* Counts the number of bits required for vli. */
147static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
148{
149 unsigned int i, num_digits;
150 u64 digit;
151
152 num_digits = vli_num_digits(vli, ndigits);
153 if (num_digits == 0)
154 return 0;
155
156 digit = vli[num_digits - 1];
157 for (i = 0; digit; i++)
158 digit >>= 1;
159
160 return ((num_digits - 1) * 64 + i);
161}
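
As a worked example of the two helpers above: for a 3-digit vli holding {0, 5, 0} (the value 5 * 2^64), vli_num_digits() returns 2, the top digit 5 needs 3 bits, so vli_num_bits() returns (2 - 1) * 64 + 3 = 67.
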
162
163/* Sets dest = src. */
164static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
165{
166 int i;
167
168 for (i = 0; i < ndigits; i++)
169 dest[i] = src[i];
170}
171
172/* Returns sign of left - right. */
173static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
174{
175 int i;
176
177 for (i = ndigits - 1; i >= 0; i--) {
178 if (left[i] > right[i])
179 return 1;
180 else if (left[i] < right[i])
181 return -1;
182 }
183
184 return 0;
185}
186
187/* Computes result = in << shift, returning carry. Can modify in place
188 * (if result == in). 0 < shift < 64.
189 */
190static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
191 unsigned int ndigits)
192{
193 u64 carry = 0;
194 int i;
195
196 for (i = 0; i < ndigits; i++) {
197 u64 temp = in[i];
198
199 result[i] = (temp << shift) | carry;
200 carry = temp >> (64 - shift);
201 }
202
203 return carry;
204}
205
206/* Computes vli = vli >> 1. */
207static void vli_rshift1(u64 *vli, unsigned int ndigits)
208{
209 u64 *end = vli;
210 u64 carry = 0;
211
212 vli += ndigits;
213
214 while (vli-- > end) {
215 u64 temp = *vli;
216 *vli = (temp >> 1) | carry;
217 carry = temp << 63;
218 }
219}
220
221/* Computes result = left + right, returning carry. Can modify in place. */
222static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
223 unsigned int ndigits)
224{
225 u64 carry = 0;
226 int i;
227
228 for (i = 0; i < ndigits; i++) {
229 u64 sum;
230
231 sum = left[i] + right[i] + carry;
232 if (sum != left[i])
233 carry = (sum < left[i]);
234
235 result[i] = sum;
236 }
237
238 return carry;
239}
240
241/* Computes result = left - right, returning borrow. Can modify in place. */
242static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
243 unsigned int ndigits)
244{
245 u64 borrow = 0;
246 int i;
247
248 for (i = 0; i < ndigits; i++) {
249 u64 diff;
250
251 diff = left[i] - right[i] - borrow;
252 if (diff != left[i])
253 borrow = (diff > left[i]);
254
255 result[i] = diff;
256 }
257
258 return borrow;
259}
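
The carry handling in vli_add() is subtler than it looks: when sum == left[i], the addend right[i] + carry must have been 0 mod 2^64, which only happens for right[i] == 0 with carry == 0 (carry out is 0) or right[i] == ~0ull with carry == 1 (carry out is 1). In both cases the incoming carry already equals the outgoing one, so leaving it untouched is correct; vli_sub() relies on the mirror-image argument for the borrow.
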
260
261static uint128_t mul_64_64(u64 left, u64 right)
262{
263 u64 a0 = left & 0xffffffffull;
264 u64 a1 = left >> 32;
265 u64 b0 = right & 0xffffffffull;
266 u64 b1 = right >> 32;
267 u64 m0 = a0 * b0;
268 u64 m1 = a0 * b1;
269 u64 m2 = a1 * b0;
270 u64 m3 = a1 * b1;
271 uint128_t result;
272
273 m2 += (m0 >> 32);
274 m2 += m1;
275
276 /* Overflow */
277 if (m2 < m1)
278 m3 += 0x100000000ull;
279
280 result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
281 result.m_high = m3 + (m2 >> 32);
282
283 return result;
284}
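
mul_64_64() is plain schoolbook multiplication on 32-bit halves. A minimal userspace sketch of the same split, checked against unsigned __int128 (a GCC/Clang extension assumed here purely for verification):

#include <assert.h>
#include <stdint.h>

struct u128 { uint64_t lo, hi; };

static struct u128 mul_64_64_ref(uint64_t left, uint64_t right)
{
	uint64_t a0 = left & 0xffffffffull, a1 = left >> 32;
	uint64_t b0 = right & 0xffffffffull, b1 = right >> 32;
	uint64_t m0 = a0 * b0, m1 = a0 * b1, m2 = a1 * b0, m3 = a1 * b1;
	struct u128 r;

	m2 += m0 >> 32;
	m2 += m1;
	if (m2 < m1)			/* carry out of the middle column */
		m3 += 0x100000000ull;

	r.lo = (m0 & 0xffffffffull) | (m2 << 32);
	r.hi = m3 + (m2 >> 32);
	return r;
}

int main(void)
{
	uint64_t a = 0xdeadbeefcafebabeull, b = 0x0123456789abcdefull;
	unsigned __int128 p = (unsigned __int128)a * b;
	struct u128 r = mul_64_64_ref(a, b);

	assert(r.lo == (uint64_t)p && r.hi == (uint64_t)(p >> 64));
	return 0;
}
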
285
286static uint128_t add_128_128(uint128_t a, uint128_t b)
287{
288 uint128_t result;
289
290 result.m_low = a.m_low + b.m_low;
291 result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
292
293 return result;
294}
295
296static void vli_mult(u64 *result, const u64 *left, const u64 *right,
297 unsigned int ndigits)
298{
299 uint128_t r01 = { 0, 0 };
300 u64 r2 = 0;
301 unsigned int i, k;
302
303 /* Compute each digit of result in sequence, maintaining the
304 * carries.
305 */
306 for (k = 0; k < ndigits * 2 - 1; k++) {
307 unsigned int min;
308
309 if (k < ndigits)
310 min = 0;
311 else
312 min = (k + 1) - ndigits;
313
314 for (i = min; i <= k && i < ndigits; i++) {
315 uint128_t product;
316
317 product = mul_64_64(left[i], right[k - i]);
318
319 r01 = add_128_128(r01, product);
320 r2 += (r01.m_high < product.m_high);
321 }
322
323 result[k] = r01.m_low;
324 r01.m_low = r01.m_high;
325 r01.m_high = r2;
326 r2 = 0;
327 }
328
329 result[ndigits * 2 - 1] = r01.m_low;
330}
331
332static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
333{
334 uint128_t r01 = { 0, 0 };
335 u64 r2 = 0;
336 int i, k;
337
338 for (k = 0; k < ndigits * 2 - 1; k++) {
339 unsigned int min;
340
341 if (k < ndigits)
342 min = 0;
343 else
344 min = (k + 1) - ndigits;
345
346 for (i = min; i <= k && i <= k - i; i++) {
347 uint128_t product;
348
349 product = mul_64_64(left[i], left[k - i]);
350
351 if (i < k - i) {
352 r2 += product.m_high >> 63;
353 product.m_high = (product.m_high << 1) |
354 (product.m_low >> 63);
355 product.m_low <<= 1;
356 }
357
358 r01 = add_128_128(r01, product);
359 r2 += (r01.m_high < product.m_high);
360 }
361
362 result[k] = r01.m_low;
363 r01.m_low = r01.m_high;
364 r01.m_high = r2;
365 r2 = 0;
366 }
367
368 result[ndigits * 2 - 1] = r01.m_low;
369}
370
371/* Computes result = (left + right) % mod.
372 * Assumes that left < mod and right < mod, result != mod.
373 */
374static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
375 const u64 *mod, unsigned int ndigits)
376{
377 u64 carry;
378
379 carry = vli_add(result, left, right, ndigits);
380
381 /* result > mod (result = mod + remainder), so subtract mod to
382 * get remainder.
383 */
384 if (carry || vli_cmp(result, mod, ndigits) >= 0)
385 vli_sub(result, result, mod, ndigits);
386}
387
388/* Computes result = (left - right) % mod.
389 * Assumes that left < mod and right < mod, result != mod.
390 */
391static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
392 const u64 *mod, unsigned int ndigits)
393{
394 u64 borrow = vli_sub(result, left, right, ndigits);
395
396 /* In this case, result == -diff mod 2^N, where N = 64 * ndigits.
397 * Since -x mod m == m - x, we can get the correct result from
398 * result + mod (with overflow).
399 */
400 if (borrow)
401 vli_add(result, result, mod, ndigits);
402}
403
404/* Computes result = product % curve_prime.
405 * See algorithms 5 and 6 from
406 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
407 */
408static void vli_mmod_fast_192(u64 *result, const u64 *product,
409 const u64 *curve_prime, u64 *tmp)
410{
411 const unsigned int ndigits = 3;
412 int carry;
413
414 vli_set(result, product, ndigits);
415
416 vli_set(tmp, &product[3], ndigits);
417 carry = vli_add(result, result, tmp, ndigits);
418
419 tmp[0] = 0;
420 tmp[1] = product[3];
421 tmp[2] = product[4];
422 carry += vli_add(result, result, tmp, ndigits);
423
424 tmp[0] = tmp[1] = product[5];
425 tmp[2] = 0;
426 carry += vli_add(result, result, tmp, ndigits);
427
428 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
429 carry -= vli_sub(result, result, curve_prime, ndigits);
430}
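
The folding works because the P-192 prime is p = 2^192 - 2^64 - 1, so 2^192 ≡ 2^64 + 1 (mod p). Writing the six-digit product as low + 2^192 * (d3 + 2^64*d4 + 2^128*d5) and substituting repeatedly (the d5 term lands on 2^192 again) reduces the high half to exactly the three digit vectors added above: {d3, d4, d5}, {0, d3, d4} and {d5, d5, 0}. The trailing while loop then subtracts p until the result is fully reduced.
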
431
432/* Computes result = product % curve_prime
433 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
434 */
435static void vli_mmod_fast_256(u64 *result, const u64 *product,
436 const u64 *curve_prime, u64 *tmp)
437{
438 int carry;
439 const unsigned int ndigits = 4;
440
441 /* t */
442 vli_set(result, product, ndigits);
443
444 /* s1 */
445 tmp[0] = 0;
446 tmp[1] = product[5] & 0xffffffff00000000ull;
447 tmp[2] = product[6];
448 tmp[3] = product[7];
449 carry = vli_lshift(tmp, tmp, 1, ndigits);
450 carry += vli_add(result, result, tmp, ndigits);
451
452 /* s2 */
453 tmp[1] = product[6] << 32;
454 tmp[2] = (product[6] >> 32) | (product[7] << 32);
455 tmp[3] = product[7] >> 32;
456 carry += vli_lshift(tmp, tmp, 1, ndigits);
457 carry += vli_add(result, result, tmp, ndigits);
458
459 /* s3 */
460 tmp[0] = product[4];
461 tmp[1] = product[5] & 0xffffffff;
462 tmp[2] = 0;
463 tmp[3] = product[7];
464 carry += vli_add(result, result, tmp, ndigits);
465
466 /* s4 */
467 tmp[0] = (product[4] >> 32) | (product[5] << 32);
468 tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
469 tmp[2] = product[7];
470 tmp[3] = (product[6] >> 32) | (product[4] << 32);
471 carry += vli_add(result, result, tmp, ndigits);
472
473 /* d1 */
474 tmp[0] = (product[5] >> 32) | (product[6] << 32);
475 tmp[1] = (product[6] >> 32);
476 tmp[2] = 0;
477 tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
478 carry -= vli_sub(result, result, tmp, ndigits);
479
480 /* d2 */
481 tmp[0] = product[6];
482 tmp[1] = product[7];
483 tmp[2] = 0;
484 tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
485 carry -= vli_sub(result, result, tmp, ndigits);
486
487 /* d3 */
488 tmp[0] = (product[6] >> 32) | (product[7] << 32);
489 tmp[1] = (product[7] >> 32) | (product[4] << 32);
490 tmp[2] = (product[4] >> 32) | (product[5] << 32);
491 tmp[3] = (product[6] << 32);
492 carry -= vli_sub(result, result, tmp, ndigits);
493
494 /* d4 */
495 tmp[0] = product[7];
496 tmp[1] = product[4] & 0xffffffff00000000ull;
497 tmp[2] = product[5];
498 tmp[3] = product[6] & 0xffffffff00000000ull;
499 carry -= vli_sub(result, result, tmp, ndigits);
500
501 if (carry < 0) {
502 do {
503 carry += vli_add(result, result, curve_prime, ndigits);
504 } while (carry < 0);
505 } else {
506 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
507 carry -= vli_sub(result, result, curve_prime, ndigits);
508 }
509}
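
For reference, p256 = 2^256 - 2^224 + 2^192 + 2^96 - 1, and the sequence above is the standard reduction from the cited document: result = t + 2*s1 + 2*s2 + s3 + s4 - d1 - d2 - d3 - d4, each term a 256-bit vector assembled from 32-bit words of the 512-bit product. The signed carry counter then pulls the result back into [0, p) with a few conditional additions or subtractions of the prime.
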
510
511/* Computes result = product % curve_prime
512 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
513 */
514static bool vli_mmod_fast(u64 *result, u64 *product,
515 const u64 *curve_prime, unsigned int ndigits)
516{
517 u64 tmp[2 * ndigits];
518
519 switch (ndigits) {
520 case 3:
521 vli_mmod_fast_192(result, product, curve_prime, tmp);
522 break;
523 case 4:
524 vli_mmod_fast_256(result, product, curve_prime, tmp);
525 break;
526 default:
527 pr_err("unsupported digits size!\n");
528 return false;
529 }
530
531 return true;
532}
533
534/* Computes result = (left * right) % curve_prime. */
535static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
536 const u64 *curve_prime, unsigned int ndigits)
537{
538 u64 product[2 * ndigits];
539
540 vli_mult(product, left, right, ndigits);
541 vli_mmod_fast(result, product, curve_prime, ndigits);
542}
543
544/* Computes result = left^2 % curve_prime. */
545static void vli_mod_square_fast(u64 *result, const u64 *left,
546 const u64 *curve_prime, unsigned int ndigits)
547{
548 u64 product[2 * ndigits];
549
550 vli_square(product, left, ndigits);
551 vli_mmod_fast(result, product, curve_prime, ndigits);
552}
553
554#define EVEN(vli) (!(vli[0] & 1))
555/* Computes result = (1 / input) % mod. All VLIs are the same size.
556 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
557 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
558 */
559static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
560 unsigned int ndigits)
561{
562 u64 a[ndigits], b[ndigits];
563 u64 u[ndigits], v[ndigits];
564 u64 carry;
565 int cmp_result;
566
567 if (vli_is_zero(input, ndigits)) {
568 vli_clear(result, ndigits);
569 return;
570 }
571
572 vli_set(a, input, ndigits);
573 vli_set(b, mod, ndigits);
574 vli_clear(u, ndigits);
575 u[0] = 1;
576 vli_clear(v, ndigits);
577
578 while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
579 carry = 0;
580
581 if (EVEN(a)) {
582 vli_rshift1(a, ndigits);
583
584 if (!EVEN(u))
585 carry = vli_add(u, u, mod, ndigits);
586
587 vli_rshift1(u, ndigits);
588 if (carry)
589 u[ndigits - 1] |= 0x8000000000000000ull;
590 } else if (EVEN(b)) {
591 vli_rshift1(b, ndigits);
592
593 if (!EVEN(v))
594 carry = vli_add(v, v, mod, ndigits);
595
596 vli_rshift1(v, ndigits);
597 if (carry)
598 v[ndigits - 1] |= 0x8000000000000000ull;
599 } else if (cmp_result > 0) {
600 vli_sub(a, a, b, ndigits);
601 vli_rshift1(a, ndigits);
602
603 if (vli_cmp(u, v, ndigits) < 0)
604 vli_add(u, u, mod, ndigits);
605
606 vli_sub(u, u, v, ndigits);
607 if (!EVEN(u))
608 carry = vli_add(u, u, mod, ndigits);
609
610 vli_rshift1(u, ndigits);
611 if (carry)
612 u[ndigits - 1] |= 0x8000000000000000ull;
613 } else {
614 vli_sub(b, b, a, ndigits);
615 vli_rshift1(b, ndigits);
616
617 if (vli_cmp(v, u, ndigits) < 0)
618 vli_add(v, v, mod, ndigits);
619
620 vli_sub(v, v, u, ndigits);
621 if (!EVEN(v))
622 carry = vli_add(v, v, mod, ndigits);
623
624 vli_rshift1(v, ndigits);
625 if (carry)
626 v[ndigits - 1] |= 0x8000000000000000ull;
627 }
628 }
629
630 vli_set(result, u, ndigits);
631}
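
The loop above is the binary extended Euclidean algorithm: it maintains the invariants u * input ≡ a (mod mod) and v * input ≡ b (mod mod) while the halving and subtraction steps shrink a and b toward their gcd, so when a == b == 1 (mod is prime here) u holds the inverse. A tiny worked example mod 7: for input = 3 the loop terminates with u = 5, and indeed 3 * 5 = 15 ≡ 1 (mod 7).
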
632
633/* ------ Point operations ------ */
634
635/* Returns true if point is the point at infinity, false otherwise. */
636static bool ecc_point_is_zero(const struct ecc_point *point)
637{
638 return (vli_is_zero(point->x, point->ndigits) &&
639 vli_is_zero(point->y, point->ndigits));
640}
641
642/* Point multiplication algorithm using Montgomery's ladder with co-Z
643 * coordinates. From http://eprint.iacr.org/2011/338.pdf
644 */
645
646/* Double in place */
647static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
648 u64 *curve_prime, unsigned int ndigits)
649{
650 /* t1 = x, t2 = y, t3 = z */
651 u64 t4[ndigits];
652 u64 t5[ndigits];
653
654 if (vli_is_zero(z1, ndigits))
655 return;
656
657 /* t4 = y1^2 */
658 vli_mod_square_fast(t4, y1, curve_prime, ndigits);
659 /* t5 = x1*y1^2 = A */
660 vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
661 /* t4 = y1^4 */
662 vli_mod_square_fast(t4, t4, curve_prime, ndigits);
663 /* t2 = y1*z1 = z3 */
664 vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
665 /* t3 = z1^2 */
666 vli_mod_square_fast(z1, z1, curve_prime, ndigits);
667
668 /* t1 = x1 + z1^2 */
669 vli_mod_add(x1, x1, z1, curve_prime, ndigits);
670 /* t3 = 2*z1^2 */
671 vli_mod_add(z1, z1, z1, curve_prime, ndigits);
672 /* t3 = x1 - z1^2 */
673 vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
674 /* t1 = x1^2 - z1^4 */
675 vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);
676
677 /* t3 = 2*(x1^2 - z1^4) */
678 vli_mod_add(z1, x1, x1, curve_prime, ndigits);
679 /* t1 = 3*(x1^2 - z1^4) */
680 vli_mod_add(x1, x1, z1, curve_prime, ndigits);
681 if (vli_test_bit(x1, 0)) {
682 u64 carry = vli_add(x1, x1, curve_prime, ndigits);
683
684 vli_rshift1(x1, ndigits);
685 x1[ndigits - 1] |= carry << 63;
686 } else {
687 vli_rshift1(x1, ndigits);
688 }
689 /* t1 = 3/2*(x1^2 - z1^4) = B */
690
691 /* t3 = B^2 */
692 vli_mod_square_fast(z1, x1, curve_prime, ndigits);
693 /* t3 = B^2 - A */
694 vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
695 /* t3 = B^2 - 2A = x3 */
696 vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
697 /* t5 = A - x3 */
698 vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
699 /* t1 = B * (A - x3) */
700 vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
701 /* t4 = B * (A - x3) - y1^4 = y3 */
702 vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
703
704 vli_set(x1, z1, ndigits);
705 vli_set(z1, y1, ndigits);
706 vli_set(y1, t4, ndigits);
707}
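
The vli_test_bit(x1, 0) branch above is division by 2 modulo the odd prime: an even x is simply shifted right, while an odd x has p added first, making the sum even (any carry out becomes the shifted-in top bit), so the result is (x + p) / 2 ≡ x / 2 (mod p). This is what turns 3*(x1^2 - z1^4) into the B = 3/2*(x1^2 - z1^4) of the doubling formula.
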
708
709/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
710static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
711 unsigned int ndigits)
712{
713 u64 t1[ndigits];
714
715 vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */
716 vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
717 vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */
718 vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
719}
720
721/* P = (x1, y1) => 2P, (x2, y2) => P' */
722static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
723 u64 *p_initial_z, u64 *curve_prime,
724 unsigned int ndigits)
725{
726 u64 z[ndigits];
727
728 vli_set(x2, x1, ndigits);
729 vli_set(y2, y1, ndigits);
730
731 vli_clear(z, ndigits);
732 z[0] = 1;
733
734 if (p_initial_z)
735 vli_set(z, p_initial_z, ndigits);
736
737 apply_z(x1, y1, z, curve_prime, ndigits);
738
739 ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);
740
741 apply_z(x2, y2, z, curve_prime, ndigits);
742}
743
744/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
745 * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
746 * or P => P', Q => P + Q
747 */
748static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
749 unsigned int ndigits)
750{
751 /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
752 u64 t5[ndigits];
753
754 /* t5 = x2 - x1 */
755 vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
756 /* t5 = (x2 - x1)^2 = A */
757 vli_mod_square_fast(t5, t5, curve_prime, ndigits);
758 /* t1 = x1*A = B */
759 vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
760 /* t3 = x2*A = C */
761 vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
762 /* t4 = y2 - y1 */
763 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
764 /* t5 = (y2 - y1)^2 = D */
765 vli_mod_square_fast(t5, y2, curve_prime, ndigits);
766
767 /* t5 = D - B */
768 vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
769 /* t5 = D - B - C = x3 */
770 vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
771 /* t3 = C - B */
772 vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
773 /* t2 = y1*(C - B) */
774 vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
775 /* t3 = B - x3 */
776 vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
777 /* t4 = (y2 - y1)*(B - x3) */
778 vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
779 /* t4 = y3 */
780 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
781
782 vli_set(x2, t5, ndigits);
783}
784
785/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
786 * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
787 * or P => P - Q, Q => P + Q
788 */
789static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
790 unsigned int ndigits)
791{
792 /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
793 u64 t5[ndigits];
794 u64 t6[ndigits];
795 u64 t7[ndigits];
796
797 /* t5 = x2 - x1 */
798 vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
799 /* t5 = (x2 - x1)^2 = A */
800 vli_mod_square_fast(t5, t5, curve_prime, ndigits);
801 /* t1 = x1*A = B */
802 vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
803 /* t3 = x2*A = C */
804 vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
805 /* t4 = y2 + y1 */
806 vli_mod_add(t5, y2, y1, curve_prime, ndigits);
807 /* t4 = y2 - y1 */
808 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
809
810 /* t6 = C - B */
811 vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
812 /* t2 = y1 * (C - B) */
813 vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
814 /* t6 = B + C */
815 vli_mod_add(t6, x1, x2, curve_prime, ndigits);
816 /* t3 = (y2 - y1)^2 */
817 vli_mod_square_fast(x2, y2, curve_prime, ndigits);
818 /* t3 = x3 */
819 vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
820
821 /* t7 = B - x3 */
822 vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
823 /* t4 = (y2 - y1)*(B - x3) */
824 vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
825 /* t4 = y3 */
826 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
827
828 /* t7 = (y2 + y1)^2 = F */
829 vli_mod_square_fast(t7, t5, curve_prime, ndigits);
830 /* t7 = x3' */
831 vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
832 /* t6 = x3' - B */
833 vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
834 /* t6 = (y2 + y1)*(x3' - B) */
835 vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
836 /* t2 = y3' */
837 vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
838
839 vli_set(x1, t7, ndigits);
840}
841
842static void ecc_point_mult(struct ecc_point *result,
843 const struct ecc_point *point, const u64 *scalar,
844 u64 *initial_z, u64 *curve_prime,
845 unsigned int ndigits)
846{
847 /* R0 and R1 */
848 u64 rx[2][ndigits];
849 u64 ry[2][ndigits];
850 u64 z[ndigits];
851 int i, nb;
852 int num_bits = vli_num_bits(scalar, ndigits);
853
854 vli_set(rx[1], point->x, ndigits);
855 vli_set(ry[1], point->y, ndigits);
856
857 xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
858 ndigits);
859
860 for (i = num_bits - 2; i > 0; i--) {
861 nb = !vli_test_bit(scalar, i);
862 xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
863 ndigits);
864 xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
865 ndigits);
866 }
867
868 nb = !vli_test_bit(scalar, 0);
869 xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
870 ndigits);
871
872 /* Find final 1/Z value. */
873 /* X1 - X0 */
874 vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
875 /* Yb * (X1 - X0) */
876 vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
877 /* xP * Yb * (X1 - X0) */
878 vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);
879
880 /* 1 / (xP * Yb * (X1 - X0)) */
881 vli_mod_inv(z, z, curve_prime, point->ndigits);
882
883 /* yP / (xP * Yb * (X1 - X0)) */
884 vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
885 /* Xb * yP / (xP * Yb * (X1 - X0)) */
886 vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
887 /* End 1/Z calculation */
888
889 xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);
890
891 apply_z(rx[0], ry[0], z, curve_prime, ndigits);
892
893 vli_set(result->x, rx[0], ndigits);
894 vli_set(result->y, ry[0], ndigits);
895}
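
Note that every loop iteration above executes the same xycz_add_c()/xycz_add() pair; the scalar bit only decides which of R0/R1 plays which role. That fixed per-bit pattern is the point of the Montgomery ladder as a timing-side-channel hedge. The ladder's shape is easiest to see on plain integers, where doubling is *2 and point addition is +; a userspace sketch for illustration only:

#include <assert.h>
#include <stdint.h>

/* Montgomery-ladder computation of scalar * x, keeping the invariant
 * r1 == r0 + x after every step; same add/double pattern for each bit. */
static uint64_t ladder_mul(uint64_t scalar, uint64_t x)
{
	uint64_t r0 = 0, r1 = x;
	int i;

	for (i = 63; i >= 0; i--) {
		if ((scalar >> i) & 1) {
			r0 = r0 + r1;	/* "point addition" */
			r1 = 2 * r1;	/* "point doubling" */
		} else {
			r1 = r0 + r1;
			r0 = 2 * r0;
		}
	}
	return r0;
}

int main(void)
{
	assert(ladder_mul(1000003, 7) == 1000003ull * 7);
	return 0;
}
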
896
897static inline void ecc_swap_digits(const u64 *in, u64 *out,
898 unsigned int ndigits)
899{
900 int i;
901
902 for (i = 0; i < ndigits; i++)
903 out[i] = __swab64(in[ndigits - 1 - i]);
904}
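
ecc_swap_digits() converts between the big-endian byte strings seen at the key interface and the little-endian u64 digit arrays the math routines use: the digit order is reversed and each 64-bit word is byte-swapped. A userspace illustration (assumes a little-endian host; __builtin_bswap64 is a GCC/Clang builtin standing in for __swab64):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* 192-bit big-endian value 0x0102...18 */
	uint8_t be[24];
	uint64_t in[3], out[3];
	int i;

	for (i = 0; i < 24; i++)
		be[i] = i + 1;
	memcpy(in, be, sizeof(in));

	for (i = 0; i < 3; i++)
		out[i] = __builtin_bswap64(in[3 - 1 - i]);

	assert(out[0] == 0x1112131415161718ull);	/* least significant */
	assert(out[2] == 0x0102030405060708ull);	/* most significant */
	return 0;
}
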
905
906int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
907 const u8 *private_key, unsigned int private_key_len)
908{
909 int nbytes;
910 const struct ecc_curve *curve = ecc_get_curve(curve_id);
911
912 if (!private_key || !curve)
913 return -EINVAL;
914
915 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
916
917 if (private_key_len != nbytes)
918 return -EINVAL;
919
920 if (vli_is_zero((const u64 *)&private_key[0], ndigits))
921 return -EINVAL;
922
923 /* Make sure the private key is in the range [1, n-1]. */
924 if (vli_cmp(curve->n, (const u64 *)&private_key[0], ndigits) != 1)
925 return -EINVAL;
926
927 return 0;
928}
929
930int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
931 const u8 *private_key, unsigned int private_key_len,
932 u8 *public_key, unsigned int public_key_len)
933{
934 int ret = 0;
935 struct ecc_point *pk;
936 u64 priv[ndigits];
937 unsigned int nbytes;
938 const struct ecc_curve *curve = ecc_get_curve(curve_id);
939
940 if (!private_key || !curve) {
941 ret = -EINVAL;
942 goto out;
943 }
944
945 ecc_swap_digits((const u64 *)private_key, priv, ndigits);
946
947 pk = ecc_alloc_point(ndigits);
948 if (!pk) {
949 ret = -ENOMEM;
950 goto out;
951 }
952
953 ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
954 if (ecc_point_is_zero(pk)) {
955 ret = -EAGAIN;
956 goto err_free_point;
957 }
958
959 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
960 ecc_swap_digits(pk->x, (u64 *)public_key, ndigits);
961 ecc_swap_digits(pk->y, (u64 *)&public_key[nbytes], ndigits);
962
963err_free_point:
964 ecc_free_point(pk);
965out:
966 return ret;
967}
968
969int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
970 const u8 *private_key, unsigned int private_key_len,
971 const u8 *public_key, unsigned int public_key_len,
972 u8 *secret, unsigned int secret_len)
973{
974 int ret = 0;
975 struct ecc_point *product, *pk;
976 u64 priv[ndigits];
977 u64 rand_z[ndigits];
978 unsigned int nbytes;
979 const struct ecc_curve *curve = ecc_get_curve(curve_id);
980
981 if (!private_key || !public_key || !curve) {
982 ret = -EINVAL;
983 goto out;
984 }
985
986 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
987
988 get_random_bytes(rand_z, nbytes);
989
990 pk = ecc_alloc_point(ndigits);
991 if (!pk) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 product = ecc_alloc_point(ndigits);
997 if (!product) {
998 ret = -ENOMEM;
999 goto err_alloc_product;
1000 }
1001
1002 ecc_swap_digits((const u64 *)public_key, pk->x, ndigits);
1003 ecc_swap_digits((const u64 *)&public_key[nbytes], pk->y, ndigits);
1004 ecc_swap_digits((const u64 *)private_key, priv, ndigits);
1005
1006 ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
1007
1008 ecc_swap_digits(product->x, (u64 *)secret, ndigits);
1009
1010 if (ecc_point_is_zero(product))
1011 ret = -EFAULT;
1012
1013 ecc_free_point(product);
1014err_alloc_product:
1015 ecc_free_point(pk);
1016out:
1017 return ret;
1018}
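
Two observations on crypto_ecdh_shared_secret(): the random rand_z passed to ecc_point_mult() randomizes the initial projective representation of the peer's point, a further side-channel countermeasure; and the protocol works because scalar multiplication commutes,

	d_A * Q_B = d_A * (d_B * G) = d_B * (d_A * G) = d_B * Q_A,

so both parties derive the same x-coordinate. Per the note in ecc.h below, that coordinate should still be hashed before use as symmetric key material.
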
diff --git a/crypto/ecc.h b/crypto/ecc.h
new file mode 100644
index 000000000000..663d598c7406
--- /dev/null
+++ b/crypto/ecc.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright (c) 2013, Kenneth MacKay
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
18 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26#ifndef _CRYPTO_ECC_H
27#define _CRYPTO_ECC_H
28
29#define ECC_MAX_DIGITS 4 /* 256 bits */
30
31#define ECC_DIGITS_TO_BYTES_SHIFT 3
32
33/**
34 * ecc_is_key_valid() - Validate a given ECDH private key
35 *
36 * @curve_id: id representing the curve to use
37 * @ndigits: curve number of digits
38 * @private_key: private key to be used for the given curve
39 * @private_key_len: private key len
40 *
41 * Returns 0 if the key is acceptable, a negative value otherwise
42 */
43int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
44 const u8 *private_key, unsigned int private_key_len);
45
46/**
47 * ecdh_make_pub_key() - Compute an ECC public key
48 *
49 * @curve_id: id representing the curve to use
50 * @private_key: pregenerated private key for the given curve
51 * @private_key_len: length of private_key
52 * @public_key: buffer for storing the public key generated
53 * @public_key_len: length of the public_key buffer
54 *
55 * Returns 0 if the public key was generated successfully, a negative value
56 * if an error occurred.
57 */
58int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
59 const u8 *private_key, unsigned int private_key_len,
60 u8 *public_key, unsigned int public_key_len);
61
62/**
63 * crypto_ecdh_shared_secret() - Compute a shared secret
64 *
65 * @curve_id: id representing the curve to use
66 * @private_key: private key of part A
67 * @private_key_len: length of private_key
68 * @public_key: public key of counterpart B
69 * @public_key_len: length of public_key
70 * @secret: buffer for storing the calculated shared secret
71 * @secret_len: length of the secret buffer
72 *
73 * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
74 * before using it for symmetric encryption or HMAC.
75 *
76 * Returns 0 if the shared secret was generated successfully, a negative value
77 * if an error occurred.
78 */
79int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
80 const u8 *private_key, unsigned int private_key_len,
81 const u8 *public_key, unsigned int public_key_len,
82 u8 *secret, unsigned int secret_len);
83#endif
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
new file mode 100644
index 000000000000..03ae5f714028
--- /dev/null
+++ b/crypto/ecc_curve_defs.h
@@ -0,0 +1,57 @@
1#ifndef _CRYPTO_ECC_CURVE_DEFS_H
2#define _CRYPTO_ECC_CURVE_DEFS_H
3
4struct ecc_point {
5 u64 *x;
6 u64 *y;
7 u8 ndigits;
8};
9
10struct ecc_curve {
11 char *name;
12 struct ecc_point g;
13 u64 *p;
14 u64 *n;
15};
16
17/* NIST P-192 */
18static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
19 0x188DA80EB03090F6ull };
20static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
21 0x07192B95FFC8DA78ull };
22static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
23 0xFFFFFFFFFFFFFFFFull };
24static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
25 0xFFFFFFFFFFFFFFFFull };
26static struct ecc_curve nist_p192 = {
27 .name = "nist_192",
28 .g = {
29 .x = nist_p192_g_x,
30 .y = nist_p192_g_y,
31 .ndigits = 3,
32 },
33 .p = nist_p192_p,
34 .n = nist_p192_n
35};
36
37/* NIST P-256 */
38static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
39 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
40static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
41 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull };
42static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
43 0x0000000000000000ull, 0xFFFFFFFF00000001ull };
44static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
45 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
46static struct ecc_curve nist_p256 = {
47 .name = "nist_256",
48 .g = {
49 .x = nist_p256_g_x,
50 .y = nist_p256_g_y,
51 .ndigits = 4,
52 },
53 .p = nist_p256_p,
54 .n = nist_p256_n
55};
56
57#endif
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
new file mode 100644
index 000000000000..3de289806d67
--- /dev/null
+++ b/crypto/ecdh.c
@@ -0,0 +1,151 @@
1/* ECDH key-agreement protocol
2 *
3 * Copyright (c) 2016, Intel Corporation
4 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <crypto/internal/kpp.h>
14#include <crypto/kpp.h>
15#include <crypto/ecdh.h>
16#include <linux/scatterlist.h>
17#include "ecc.h"
18
19struct ecdh_ctx {
20 unsigned int curve_id;
21 unsigned int ndigits;
22 u64 private_key[ECC_MAX_DIGITS];
23 u64 public_key[2 * ECC_MAX_DIGITS];
24 u64 shared_secret[ECC_MAX_DIGITS];
25};
26
27static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
28{
29 return kpp_tfm_ctx(tfm);
30}
31
32static unsigned int ecdh_supported_curve(unsigned int curve_id)
33{
34 switch (curve_id) {
35 case ECC_CURVE_NIST_P192: return 3;
36 case ECC_CURVE_NIST_P256: return 4;
37 default: return 0;
38 }
39}
40
41static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
42{
43 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
44 struct ecdh params;
45 unsigned int ndigits;
46
47 if (crypto_ecdh_decode_key(buf, len, &params) < 0)
48 return -EINVAL;
49
50 ndigits = ecdh_supported_curve(params.curve_id);
51 if (!ndigits)
52 return -EINVAL;
53
54 ctx->curve_id = params.curve_id;
55 ctx->ndigits = ndigits;
56
57 if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
58 (const u8 *)params.key, params.key_size) < 0)
59 return -EINVAL;
60
61 memcpy(ctx->private_key, params.key, params.key_size);
62
63 return 0;
64}
65
66static int ecdh_compute_value(struct kpp_request *req)
67{
68 int ret = 0;
69 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
70 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
71 size_t copied, nbytes;
72 void *buf;
73
74 nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
75
76 if (req->src) {
77 copied = sg_copy_to_buffer(req->src, 1, ctx->public_key,
78 2 * nbytes);
79 if (copied != 2 * nbytes)
80 return -EINVAL;
81
82 ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
83 (const u8 *)ctx->private_key, nbytes,
84 (const u8 *)ctx->public_key, 2 * nbytes,
85 (u8 *)ctx->shared_secret, nbytes);
86
87 buf = ctx->shared_secret;
88 } else {
89 ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits,
90 (const u8 *)ctx->private_key, nbytes,
91 (u8 *)ctx->public_key,
92 sizeof(ctx->public_key));
93 buf = ctx->public_key;
94 /* The public part is a point, thus it has both coordinates */
95 nbytes *= 2;
96 }
97
98 if (ret < 0)
99 return ret;
100
101 copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
102 if (copied != nbytes)
103 return -EINVAL;
104
105 return ret;
106}
107
108static int ecdh_max_size(struct crypto_kpp *tfm)
109{
110 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
111 int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
112
113 /* Public key is made of two coordinates */
114 return 2 * nbytes;
115}
116
117static void no_exit_tfm(struct crypto_kpp *tfm)
118{
119 return;
120}
121
122static struct kpp_alg ecdh = {
123 .set_secret = ecdh_set_secret,
124 .generate_public_key = ecdh_compute_value,
125 .compute_shared_secret = ecdh_compute_value,
126 .max_size = ecdh_max_size,
127 .exit = no_exit_tfm,
128 .base = {
129 .cra_name = "ecdh",
130 .cra_driver_name = "ecdh-generic",
131 .cra_priority = 100,
132 .cra_module = THIS_MODULE,
133 .cra_ctxsize = sizeof(struct ecdh_ctx),
134 },
135};
136
137static int ecdh_init(void)
138{
139 return crypto_register_kpp(&ecdh);
140}
141
142static void ecdh_exit(void)
143{
144 crypto_unregister_kpp(&ecdh);
145}
146
147module_init(ecdh_init);
148module_exit(ecdh_exit);
149MODULE_ALIAS_CRYPTO("ecdh");
150MODULE_LICENSE("GPL");
151MODULE_DESCRIPTION("ECDH generic algorithm");
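
A rough sketch of driving this implementation through the kpp API from other kernel code (error handling is trimmed, the constant private key is a placeholder, and the generic ecdh is synchronous, so no completion callback is wired up):

#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int ecdh_demo(void)
{
	struct ecdh p = { .curve_id = ECC_CURVE_NIST_P192 };
	u8 priv[24] = { 1 };		/* placeholder; use real randomness */
	u8 pub[2 * 24];			/* both point coordinates */
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	void *buf;
	int len, err;

	p.key = (char *)priv;
	p.key_size = sizeof(priv);

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Pack the parameters and load them into the tfm. */
	len = crypto_ecdh_key_len(&p);
	buf = kmalloc(len, GFP_KERNEL);
	err = crypto_ecdh_encode_key(buf, len, &p);
	err = err ?: crypto_kpp_set_secret(tfm, buf, len);

	/* No src scatterlist means "generate my public key"; see
	 * ecdh_compute_value() above. */
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&dst, pub, sizeof(pub));
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, sizeof(pub));
	err = err ?: crypto_kpp_generate_public_key(req);

	/* With a src scatterlist holding the peer's public key, the same
	 * request path yields the shared secret instead. */

	kpp_request_free(req);
	kfree(buf);
	crypto_free_kpp(tfm);
	return err;
}
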
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
new file mode 100644
index 000000000000..3cd8a2414e60
--- /dev/null
+++ b/crypto/ecdh_helper.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright (c) 2016, Intel Corporation
3 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public Licence
7 * as published by the Free Software Foundation; either version
8 * 2 of the Licence, or (at your option) any later version.
9 */
10#include <linux/kernel.h>
11#include <linux/export.h>
12#include <linux/err.h>
13#include <linux/string.h>
14#include <crypto/ecdh.h>
15#include <crypto/kpp.h>
16
17#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))
18
19static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
20{
21 memcpy(dst, src, sz);
22 return dst + sz;
23}
24
25static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
26{
27 memcpy(dst, src, sz);
28 return src + sz;
29}
30
31int crypto_ecdh_key_len(const struct ecdh *params)
32{
33 return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
34}
35EXPORT_SYMBOL_GPL(crypto_ecdh_key_len);
36
37int crypto_ecdh_encode_key(char *buf, unsigned int len,
38 const struct ecdh *params)
39{
40 u8 *ptr = buf;
41 struct kpp_secret secret = {
42 .type = CRYPTO_KPP_SECRET_TYPE_ECDH,
43 .len = len
44 };
45
46 if (unlikely(!buf))
47 return -EINVAL;
48
49 if (len != crypto_ecdh_key_len(params))
50 return -EINVAL;
51
52 ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
53 ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
54 ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
55 ecdh_pack_data(ptr, params->key, params->key_size);
56
57 return 0;
58}
59EXPORT_SYMBOL_GPL(crypto_ecdh_encode_key);
60
61int crypto_ecdh_decode_key(const char *buf, unsigned int len,
62 struct ecdh *params)
63{
64 const u8 *ptr = buf;
65 struct kpp_secret secret;
66
67 if (unlikely(!buf || len < ECDH_KPP_SECRET_MIN_SIZE))
68 return -EINVAL;
69
70 ptr = ecdh_unpack_data(&secret, ptr, sizeof(secret));
71 if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
72 return -EINVAL;
73
74 ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
75 ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
76 if (secret.len != crypto_ecdh_key_len(params))
77 return -EINVAL;
78
79 /* Don't allocate memory. Set pointer to data
80 * within the given buffer
81 */
82 params->key = (void *)ptr;
83
84 return 0;
85}
86EXPORT_SYMBOL_GPL(crypto_ecdh_decode_key);
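
For reference, the buffer crypto_ecdh_encode_key() produces (and the decoder consumes) is laid out as follows, with field widths assuming the usual 2-byte shorts:

	offset 0: kpp_secret.type   (CRYPTO_KPP_SECRET_TYPE_ECDH)
	offset 2: kpp_secret.len    (total buffer length)
	offset 4: ecdh.curve_id
	offset 6: ecdh.key_size
	offset 8: key_size bytes of private key

which is why ECDH_KPP_SECRET_MIN_SIZE is sizeof(struct kpp_secret) + 2 * sizeof(short), and why the decoder can point params->key straight into the caller's buffer.
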
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index b96a84560b67..1b01fe98e91f 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -20,6 +20,7 @@
20 20
21#include <crypto/internal/geniv.h> 21#include <crypto/internal/geniv.h>
22#include <crypto/scatterwalk.h> 22#include <crypto/scatterwalk.h>
23#include <crypto/skcipher.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
@@ -112,13 +113,16 @@ static int echainiv_encrypt(struct aead_request *req)
112 info = req->iv; 113 info = req->iv;
113 114
114 if (req->src != req->dst) { 115 if (req->src != req->dst) {
115 struct blkcipher_desc desc = { 116 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
116 .tfm = ctx->null,
117 };
118 117
119 err = crypto_blkcipher_encrypt( 118 skcipher_request_set_tfm(nreq, ctx->sknull);
120 &desc, req->dst, req->src, 119 skcipher_request_set_callback(nreq, req->base.flags,
121 req->assoclen + req->cryptlen); 120 NULL, NULL);
121 skcipher_request_set_crypt(nreq, req->src, req->dst,
122 req->assoclen + req->cryptlen,
123 NULL);
124
125 err = crypto_skcipher_encrypt(nreq);
122 if (err) 126 if (err)
123 return err; 127 return err;
124 } 128 }
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
deleted file mode 100644
index 16dda72fc4f8..000000000000
--- a/crypto/eseqiv.c
+++ /dev/null
@@ -1,242 +0,0 @@
1/*
2 * eseqiv: Encrypted Sequence Number IV Generator
3 *
4 * This generator generates an IV based on a sequence number by xoring it
5 * with a salt and then encrypting it with the same key as used to encrypt
6 * the plain text. This algorithm requires that the block size be equal
7 * to the IV size. It is mainly useful for CBC.
8 *
9 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 */
17
18#include <crypto/internal/skcipher.h>
19#include <crypto/rng.h>
20#include <crypto/scatterwalk.h>
21#include <linux/err.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/scatterlist.h>
27#include <linux/spinlock.h>
28#include <linux/string.h>
29
30struct eseqiv_request_ctx {
31 struct scatterlist src[2];
32 struct scatterlist dst[2];
33 char tail[];
34};
35
36struct eseqiv_ctx {
37 spinlock_t lock;
38 unsigned int reqoff;
39 char salt[];
40};
41
42static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
43{
44 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
45 struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
46
47 memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
48 crypto_ablkcipher_alignmask(geniv) + 1),
49 crypto_ablkcipher_ivsize(geniv));
50}
51
52static void eseqiv_complete(struct crypto_async_request *base, int err)
53{
54 struct skcipher_givcrypt_request *req = base->data;
55
56 if (err)
57 goto out;
58
59 eseqiv_complete2(req);
60
61out:
62 skcipher_givcrypt_complete(req, err);
63}
64
65static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
66{
67 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
68 struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
69 struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
70 struct ablkcipher_request *subreq;
71 crypto_completion_t compl;
72 void *data;
73 struct scatterlist *osrc, *odst;
74 struct scatterlist *dst;
75 struct page *srcp;
76 struct page *dstp;
77 u8 *giv;
78 u8 *vsrc;
79 u8 *vdst;
80 __be64 seq;
81 unsigned int ivsize;
82 unsigned int len;
83 int err;
84
85 subreq = (void *)(reqctx->tail + ctx->reqoff);
86 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
87
88 giv = req->giv;
89 compl = req->creq.base.complete;
90 data = req->creq.base.data;
91
92 osrc = req->creq.src;
93 odst = req->creq.dst;
94 srcp = sg_page(osrc);
95 dstp = sg_page(odst);
96 vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
97 vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
98
99 ivsize = crypto_ablkcipher_ivsize(geniv);
100
101 if (vsrc != giv + ivsize && vdst != giv + ivsize) {
102 giv = PTR_ALIGN((u8 *)reqctx->tail,
103 crypto_ablkcipher_alignmask(geniv) + 1);
104 compl = eseqiv_complete;
105 data = req;
106 }
107
108 ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
109 data);
110
111 sg_init_table(reqctx->src, 2);
112 sg_set_buf(reqctx->src, giv, ivsize);
113 scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
114
115 dst = reqctx->src;
116 if (osrc != odst) {
117 sg_init_table(reqctx->dst, 2);
118 sg_set_buf(reqctx->dst, giv, ivsize);
119 scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
120
121 dst = reqctx->dst;
122 }
123
124 ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
125 req->creq.nbytes + ivsize,
126 req->creq.info);
127
128 memcpy(req->creq.info, ctx->salt, ivsize);
129
130 len = ivsize;
131 if (ivsize > sizeof(u64)) {
132 memset(req->giv, 0, ivsize - sizeof(u64));
133 len = sizeof(u64);
134 }
135 seq = cpu_to_be64(req->seq);
136 memcpy(req->giv + ivsize - len, &seq, len);
137
138 err = crypto_ablkcipher_encrypt(subreq);
139 if (err)
140 goto out;
141
142 if (giv != req->giv)
143 eseqiv_complete2(req);
144
145out:
146 return err;
147}
148
149static int eseqiv_init(struct crypto_tfm *tfm)
150{
151 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
152 struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
153 unsigned long alignmask;
154 unsigned int reqsize;
155 int err;
156
157 spin_lock_init(&ctx->lock);
158
159 alignmask = crypto_tfm_ctx_alignment() - 1;
160 reqsize = sizeof(struct eseqiv_request_ctx);
161
162 if (alignmask & reqsize) {
163 alignmask &= reqsize;
164 alignmask--;
165 }
166
167 alignmask = ~alignmask;
168 alignmask &= crypto_ablkcipher_alignmask(geniv);
169
170 reqsize += alignmask;
171 reqsize += crypto_ablkcipher_ivsize(geniv);
172 reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
173
174 ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
175
176 tfm->crt_ablkcipher.reqsize = reqsize +
177 sizeof(struct ablkcipher_request);
178
179 err = 0;
180 if (!crypto_get_default_rng()) {
181 crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
182 err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
183 crypto_ablkcipher_ivsize(geniv));
184 crypto_put_default_rng();
185 }
186
187 return err ?: skcipher_geniv_init(tfm);
188}
189
190static struct crypto_template eseqiv_tmpl;
191
192static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
193{
194 struct crypto_instance *inst;
195 int err;
196
197 inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
198 if (IS_ERR(inst))
199 goto out;
200
201 err = -EINVAL;
202 if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
203 goto free_inst;
204
205 inst->alg.cra_init = eseqiv_init;
206 inst->alg.cra_exit = skcipher_geniv_exit;
207
208 inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
209 inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
210
211out:
212 return inst;
213
214free_inst:
215 skcipher_geniv_free(inst);
216 inst = ERR_PTR(err);
217 goto out;
218}
219
220static struct crypto_template eseqiv_tmpl = {
221 .name = "eseqiv",
222 .alloc = eseqiv_alloc,
223 .free = skcipher_geniv_free,
224 .module = THIS_MODULE,
225};
226
227static int __init eseqiv_module_init(void)
228{
229 return crypto_register_template(&eseqiv_tmpl);
230}
231
232static void __exit eseqiv_module_exit(void)
233{
234 crypto_unregister_template(&eseqiv_tmpl);
235}
236
237module_init(eseqiv_module_init);
238module_exit(eseqiv_module_exit);
239
240MODULE_LICENSE("GPL");
241MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
242MODULE_ALIAS_CRYPTO("eseqiv");
diff --git a/crypto/gcm.c b/crypto/gcm.c
index bec329b3de8d..70a892e87ccb 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -29,7 +29,7 @@ struct gcm_instance_ctx {
29}; 29};
30 30
31struct crypto_gcm_ctx { 31struct crypto_gcm_ctx {
32 struct crypto_ablkcipher *ctr; 32 struct crypto_skcipher *ctr;
33 struct crypto_ahash *ghash; 33 struct crypto_ahash *ghash;
34}; 34};
35 35
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
50 50
51struct crypto_rfc4543_ctx { 51struct crypto_rfc4543_ctx {
52 struct crypto_aead *child; 52 struct crypto_aead *child;
53 struct crypto_blkcipher *null; 53 struct crypto_skcipher *null;
54 u8 nonce[4]; 54 u8 nonce[4];
55}; 55};
56 56
@@ -74,7 +74,7 @@ struct crypto_gcm_req_priv_ctx {
74 struct crypto_gcm_ghash_ctx ghash_ctx; 74 struct crypto_gcm_ghash_ctx ghash_ctx;
75 union { 75 union {
76 struct ahash_request ahreq; 76 struct ahash_request ahreq;
77 struct ablkcipher_request abreq; 77 struct skcipher_request skreq;
78 } u; 78 } u;
79}; 79};
80 80
@@ -114,7 +114,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
114{ 114{
115 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); 115 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
116 struct crypto_ahash *ghash = ctx->ghash; 116 struct crypto_ahash *ghash = ctx->ghash;
117 struct crypto_ablkcipher *ctr = ctx->ctr; 117 struct crypto_skcipher *ctr = ctx->ctr;
118 struct { 118 struct {
119 be128 hash; 119 be128 hash;
120 u8 iv[8]; 120 u8 iv[8];
@@ -122,35 +122,35 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
122 struct crypto_gcm_setkey_result result; 122 struct crypto_gcm_setkey_result result;
123 123
124 struct scatterlist sg[1]; 124 struct scatterlist sg[1];
125 struct ablkcipher_request req; 125 struct skcipher_request req;
126 } *data; 126 } *data;
127 int err; 127 int err;
128 128
129 crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); 129 crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
130 crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & 130 crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
131 CRYPTO_TFM_REQ_MASK); 131 CRYPTO_TFM_REQ_MASK);
132 err = crypto_ablkcipher_setkey(ctr, key, keylen); 132 err = crypto_skcipher_setkey(ctr, key, keylen);
133 crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & 133 crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
134 CRYPTO_TFM_RES_MASK); 134 CRYPTO_TFM_RES_MASK);
135 if (err) 135 if (err)
136 return err; 136 return err;
137 137
138 data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr), 138 data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
139 GFP_KERNEL); 139 GFP_KERNEL);
140 if (!data) 140 if (!data)
141 return -ENOMEM; 141 return -ENOMEM;
142 142
143 init_completion(&data->result.completion); 143 init_completion(&data->result.completion);
144 sg_init_one(data->sg, &data->hash, sizeof(data->hash)); 144 sg_init_one(data->sg, &data->hash, sizeof(data->hash));
145 ablkcipher_request_set_tfm(&data->req, ctr); 145 skcipher_request_set_tfm(&data->req, ctr);
146 ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | 146 skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
147 CRYPTO_TFM_REQ_MAY_BACKLOG, 147 CRYPTO_TFM_REQ_MAY_BACKLOG,
148 crypto_gcm_setkey_done, 148 crypto_gcm_setkey_done,
149 &data->result); 149 &data->result);
150 ablkcipher_request_set_crypt(&data->req, data->sg, data->sg, 150 skcipher_request_set_crypt(&data->req, data->sg, data->sg,
151 sizeof(data->hash), data->iv); 151 sizeof(data->hash), data->iv);
152 152
153 err = crypto_ablkcipher_encrypt(&data->req); 153 err = crypto_skcipher_encrypt(&data->req);
154 if (err == -EINPROGRESS || err == -EBUSY) { 154 if (err == -EINPROGRESS || err == -EBUSY) {
155 err = wait_for_completion_interruptible( 155 err = wait_for_completion_interruptible(
156 &data->result.completion); 156 &data->result.completion);
@@ -223,13 +223,13 @@ static void crypto_gcm_init_crypt(struct aead_request *req,
223 struct crypto_aead *aead = crypto_aead_reqtfm(req); 223 struct crypto_aead *aead = crypto_aead_reqtfm(req);
224 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); 224 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
225 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 225 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
226 struct ablkcipher_request *ablk_req = &pctx->u.abreq; 226 struct skcipher_request *skreq = &pctx->u.skreq;
227 struct scatterlist *dst; 227 struct scatterlist *dst;
228 228
229 dst = req->src == req->dst ? pctx->src : pctx->dst; 229 dst = req->src == req->dst ? pctx->src : pctx->dst;
230 230
231 ablkcipher_request_set_tfm(ablk_req, ctx->ctr); 231 skcipher_request_set_tfm(skreq, ctx->ctr);
232 ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, 232 skcipher_request_set_crypt(skreq, pctx->src, dst,
233 cryptlen + sizeof(pctx->auth_tag), 233 cryptlen + sizeof(pctx->auth_tag),
234 pctx->iv); 234 pctx->iv);
235} 235}
@@ -494,14 +494,14 @@ out:
494static int crypto_gcm_encrypt(struct aead_request *req) 494static int crypto_gcm_encrypt(struct aead_request *req)
495{ 495{
496 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 496 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
497 struct ablkcipher_request *abreq = &pctx->u.abreq; 497 struct skcipher_request *skreq = &pctx->u.skreq;
498 u32 flags = aead_request_flags(req); 498 u32 flags = aead_request_flags(req);
499 499
500 crypto_gcm_init_common(req); 500 crypto_gcm_init_common(req);
501 crypto_gcm_init_crypt(req, req->cryptlen); 501 crypto_gcm_init_crypt(req, req->cryptlen);
502 ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req); 502 skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);
503 503
504 return crypto_ablkcipher_encrypt(abreq) ?: 504 return crypto_skcipher_encrypt(skreq) ?:
505 gcm_encrypt_continue(req, flags); 505 gcm_encrypt_continue(req, flags);
506} 506}
507 507
@@ -533,12 +533,12 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
533static int gcm_dec_hash_continue(struct aead_request *req, u32 flags) 533static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
534{ 534{
535 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 535 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
536 struct ablkcipher_request *abreq = &pctx->u.abreq; 536 struct skcipher_request *skreq = &pctx->u.skreq;
537 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 537 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
538 538
539 crypto_gcm_init_crypt(req, gctx->cryptlen); 539 crypto_gcm_init_crypt(req, gctx->cryptlen);
540 ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req); 540 skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
541 return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req); 541 return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
542} 542}
543 543
544static int crypto_gcm_decrypt(struct aead_request *req) 544static int crypto_gcm_decrypt(struct aead_request *req)
@@ -566,7 +566,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
566 struct aead_instance *inst = aead_alg_instance(tfm); 566 struct aead_instance *inst = aead_alg_instance(tfm);
567 struct gcm_instance_ctx *ictx = aead_instance_ctx(inst); 567 struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
568 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); 568 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
569 struct crypto_ablkcipher *ctr; 569 struct crypto_skcipher *ctr;
570 struct crypto_ahash *ghash; 570 struct crypto_ahash *ghash;
571 unsigned long align; 571 unsigned long align;
572 int err; 572 int err;
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
575 if (IS_ERR(ghash)) 575 if (IS_ERR(ghash))
576 return PTR_ERR(ghash); 576 return PTR_ERR(ghash);
577 577
578 ctr = crypto_spawn_skcipher(&ictx->ctr); 578 ctr = crypto_spawn_skcipher2(&ictx->ctr);
579 err = PTR_ERR(ctr); 579 err = PTR_ERR(ctr);
580 if (IS_ERR(ctr)) 580 if (IS_ERR(ctr))
581 goto err_free_hash; 581 goto err_free_hash;
@@ -587,8 +587,8 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
587 align &= ~(crypto_tfm_ctx_alignment() - 1); 587 align &= ~(crypto_tfm_ctx_alignment() - 1);
588 crypto_aead_set_reqsize(tfm, 588 crypto_aead_set_reqsize(tfm,
589 align + offsetof(struct crypto_gcm_req_priv_ctx, u) + 589 align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
590 max(sizeof(struct ablkcipher_request) + 590 max(sizeof(struct skcipher_request) +
591 crypto_ablkcipher_reqsize(ctr), 591 crypto_skcipher_reqsize(ctr),
592 sizeof(struct ahash_request) + 592 sizeof(struct ahash_request) +
593 crypto_ahash_reqsize(ghash))); 593 crypto_ahash_reqsize(ghash)));
594 594
@@ -604,7 +604,7 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
604 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); 604 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
605 605
606 crypto_free_ahash(ctx->ghash); 606 crypto_free_ahash(ctx->ghash);
607 crypto_free_ablkcipher(ctx->ctr); 607 crypto_free_skcipher(ctx->ctr);
608} 608}
609 609
610static void crypto_gcm_free(struct aead_instance *inst) 610static void crypto_gcm_free(struct aead_instance *inst)
@@ -624,7 +624,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
624{ 624{
625 struct crypto_attr_type *algt; 625 struct crypto_attr_type *algt;
626 struct aead_instance *inst; 626 struct aead_instance *inst;
627 struct crypto_alg *ctr; 627 struct skcipher_alg *ctr;
628 struct crypto_alg *ghash_alg; 628 struct crypto_alg *ghash_alg;
629 struct hash_alg_common *ghash; 629 struct hash_alg_common *ghash;
630 struct gcm_instance_ctx *ctx; 630 struct gcm_instance_ctx *ctx;
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
639 639
640 ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, 640 ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
641 CRYPTO_ALG_TYPE_HASH, 641 CRYPTO_ALG_TYPE_HASH,
642 CRYPTO_ALG_TYPE_AHASH_MASK); 642 CRYPTO_ALG_TYPE_AHASH_MASK |
643 crypto_requires_sync(algt->type,
644 algt->mask));
643 if (IS_ERR(ghash_alg)) 645 if (IS_ERR(ghash_alg))
644 return PTR_ERR(ghash_alg); 646 return PTR_ERR(ghash_alg);
645 647
@@ -661,41 +663,42 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
661 goto err_drop_ghash; 663 goto err_drop_ghash;
662 664
663 crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); 665 crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
664 err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, 666 err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
665 crypto_requires_sync(algt->type, 667 crypto_requires_sync(algt->type,
666 algt->mask)); 668 algt->mask));
667 if (err) 669 if (err)
668 goto err_drop_ghash; 670 goto err_drop_ghash;
669 671
670 ctr = crypto_skcipher_spawn_alg(&ctx->ctr); 672 ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
671 673
672 /* We only support 16-byte blocks. */ 674 /* We only support 16-byte blocks. */
673 if (ctr->cra_ablkcipher.ivsize != 16) 675 if (crypto_skcipher_alg_ivsize(ctr) != 16)
674 goto out_put_ctr; 676 goto out_put_ctr;
675 677
676 /* Not a stream cipher? */ 678 /* Not a stream cipher? */
677 err = -EINVAL; 679 err = -EINVAL;
678 if (ctr->cra_blocksize != 1) 680 if (ctr->base.cra_blocksize != 1)
679 goto out_put_ctr; 681 goto out_put_ctr;
680 682
681 err = -ENAMETOOLONG; 683 err = -ENAMETOOLONG;
682 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 684 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
683 "gcm_base(%s,%s)", ctr->cra_driver_name, 685 "gcm_base(%s,%s)", ctr->base.cra_driver_name,
684 ghash_alg->cra_driver_name) >= 686 ghash_alg->cra_driver_name) >=
685 CRYPTO_MAX_ALG_NAME) 687 CRYPTO_MAX_ALG_NAME)
686 goto out_put_ctr; 688 goto out_put_ctr;
687 689
688 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); 690 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
689 691
690 inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) & 692 inst->alg.base.cra_flags = (ghash->base.cra_flags |
691 CRYPTO_ALG_ASYNC; 693 ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
692 inst->alg.base.cra_priority = (ghash->base.cra_priority + 694 inst->alg.base.cra_priority = (ghash->base.cra_priority +
693 ctr->cra_priority) / 2; 695 ctr->base.cra_priority) / 2;
694 inst->alg.base.cra_blocksize = 1; 696 inst->alg.base.cra_blocksize = 1;
695 inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | 697 inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
696 ctr->cra_alignmask; 698 ctr->base.cra_alignmask;
697 inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); 699 inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
698 inst->alg.ivsize = 12; 700 inst->alg.ivsize = 12;
701 inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
699 inst->alg.maxauthsize = 16; 702 inst->alg.maxauthsize = 16;
700 inst->alg.init = crypto_gcm_init_tfm; 703 inst->alg.init = crypto_gcm_init_tfm;
701 inst->alg.exit = crypto_gcm_exit_tfm; 704 inst->alg.exit = crypto_gcm_exit_tfm;
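
For orientation, the instance assembled above is what serves "gcm(aes)" lookups (resolved as gcm_base(ctr(aes),ghash)). A minimal consumer-side sketch of the resulting AEAD follows; it is illustrative only, assumes a synchronous implementation (a real caller must also handle -EINPROGRESS), and expects sg to cover assoclen + ptlen bytes plus 16 tag bytes of output space:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
			       u8 iv[12], struct scatterlist *sg,
			       unsigned int assoclen, unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req)
		goto out;

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, ptlen, iv);

	err = crypto_aead_encrypt(req);	/* tag is appended after the ciphertext */
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}
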
@@ -980,6 +983,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
980 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); 983 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
981 984
982 inst->alg.ivsize = 8; 985 inst->alg.ivsize = 8;
986 inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
983 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 987 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
984 988
985 inst->alg.init = crypto_rfc4106_init_tfm; 989 inst->alg.init = crypto_rfc4106_init_tfm;
@@ -1084,11 +1088,13 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
1084 unsigned int authsize = crypto_aead_authsize(aead); 1088 unsigned int authsize = crypto_aead_authsize(aead);
1085 unsigned int nbytes = req->assoclen + req->cryptlen - 1089 unsigned int nbytes = req->assoclen + req->cryptlen -
1086 (enc ? 0 : authsize); 1090 (enc ? 0 : authsize);
1087 struct blkcipher_desc desc = { 1091 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
1088 .tfm = ctx->null,
1089 };
1090 1092
1091 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); 1093 skcipher_request_set_tfm(nreq, ctx->null);
1094 skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
1095 skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
1096
1097 return crypto_skcipher_encrypt(nreq);
1092} 1098}
1093 1099
1094static int crypto_rfc4543_encrypt(struct aead_request *req) 1100static int crypto_rfc4543_encrypt(struct aead_request *req)
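
The blkcipher descriptor is gone; the null cipher is now driven through an skcipher request that lives on the stack. A generic sketch of that pattern (illustrative, not part of the patch; valid only for synchronous transforms, since the request must not outlive the stack frame):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_sync_encrypt(struct crypto_skcipher *tfm,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes, u8 *iv)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe the on-stack request state */

	return err;
}
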
@@ -1108,7 +1114,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
1108 struct crypto_aead_spawn *spawn = &ictx->aead; 1114 struct crypto_aead_spawn *spawn = &ictx->aead;
1109 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); 1115 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
1110 struct crypto_aead *aead; 1116 struct crypto_aead *aead;
1111 struct crypto_blkcipher *null; 1117 struct crypto_skcipher *null;
1112 unsigned long align; 1118 unsigned long align;
1113 int err = 0; 1119 int err = 0;
1114 1120
@@ -1116,7 +1122,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
1116 if (IS_ERR(aead)) 1122 if (IS_ERR(aead))
1117 return PTR_ERR(aead); 1123 return PTR_ERR(aead);
1118 1124
1119 null = crypto_get_default_null_skcipher(); 1125 null = crypto_get_default_null_skcipher2();
1120 err = PTR_ERR(null); 1126 err = PTR_ERR(null);
1121 if (IS_ERR(null)) 1127 if (IS_ERR(null))
1122 goto err_free_aead; 1128 goto err_free_aead;
@@ -1144,7 +1150,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
1144 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); 1150 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
1145 1151
1146 crypto_free_aead(ctx->child); 1152 crypto_free_aead(ctx->child);
1147 crypto_put_default_null_skcipher(); 1153 crypto_put_default_null_skcipher2();
1148} 1154}
1149 1155
1150static void crypto_rfc4543_free(struct aead_instance *inst) 1156static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1219,6 +1225,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
1219 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); 1225 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
1220 1226
1221 inst->alg.ivsize = 8; 1227 inst->alg.ivsize = 8;
1228 inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
1222 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 1229 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
1223 1230
1224 inst->alg.init = crypto_rfc4543_init_tfm; 1231 inst->alg.init = crypto_rfc4543_init_tfm;
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 597cedd3531c..c4938497eedb 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -87,24 +87,28 @@ void jent_memcpy(void *dest, const void *src, unsigned int n)
87 memcpy(dest, src, n); 87 memcpy(dest, src, n);
88} 88}
89 89
90/*
91 * Obtain a high-resolution time stamp value. The time stamp is used to measure
92 * the execution time of a given code path and its variations. Hence, the time
93 * stamp must have a sufficiently high resolution.
94 *
95 * Note, if the function returns zero because a given architecture does not
96 * implement a high-resolution time stamp, the RNG code's runtime test
97 * will detect it and will not produce output.
98 */
90void jent_get_nstime(__u64 *out) 99void jent_get_nstime(__u64 *out)
91{ 100{
92 struct timespec ts;
93 __u64 tmp = 0; 101 __u64 tmp = 0;
94 102
95 tmp = random_get_entropy(); 103 tmp = random_get_entropy();
96 104
97 /* 105 /*
98 * If random_get_entropy does not return a value (which is possible on, 106 * If random_get_entropy does not return a value, i.e. it is not
99 * for example, MIPS), invoke __getnstimeofday 107 * implemented for a given architecture, use a clock source.
100 * hoping that there are timers we can work with. 108 * hoping that there are timers we can work with.
101 */ 109 */
102 if ((0 == tmp) && 110 if (tmp == 0)
103 (0 == __getnstimeofday(&ts))) { 111 tmp = ktime_get_ns();
104 tmp = ts.tv_sec;
105 tmp = tmp << 32;
106 tmp = tmp | ts.tv_nsec;
107 }
108 112
109 *out = tmp; 113 *out = tmp;
110} 114}
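
The fallback keeps the contract stated in the new comment: a zero stamp means "no cycle counter here", and ktime_get_ns() stands in so the jitter measurement still sees a monotonic high-resolution value. A standalone sketch of the same pattern, with an added delta helper for illustration (the helper is hypothetical, not part of the driver):

#include <linux/timex.h>	/* random_get_entropy() */
#include <linux/ktime.h>	/* ktime_get_ns() */

static u64 example_get_nstime(void)
{
	u64 tmp = random_get_entropy();

	/* No arch cycle counter: fall back to the monotonic clock. */
	if (tmp == 0)
		tmp = ktime_get_ns();

	return tmp;
}

static u64 example_time_delta(void)
{
	u64 a = example_get_nstime();
	u64 b = example_get_nstime();

	return b - a;	/* execution-time jitter sample */
}
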
diff --git a/crypto/kpp.c b/crypto/kpp.c
new file mode 100644
index 000000000000..d36ce05eee43
--- /dev/null
+++ b/crypto/kpp.c
@@ -0,0 +1,123 @@
1/*
2 * Key-agreement Protocol Primitives (KPP)
3 *
4 * Copyright (c) 2016, Intel Corporation
5 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 */
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/seq_file.h>
17#include <linux/slab.h>
18#include <linux/string.h>
19#include <linux/crypto.h>
20#include <crypto/algapi.h>
21#include <linux/cryptouser.h>
22#include <net/netlink.h>
23#include <crypto/kpp.h>
24#include <crypto/internal/kpp.h>
25#include "internal.h"
26
27#ifdef CONFIG_NET
28static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
29{
30 struct crypto_report_kpp rkpp;
31
32 strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
33
34 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
35 sizeof(struct crypto_report_kpp), &rkpp))
36 goto nla_put_failure;
37 return 0;
38
39nla_put_failure:
40 return -EMSGSIZE;
41}
42#else
43static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
44{
45 return -ENOSYS;
46}
47#endif
48
49static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
50 __attribute__ ((unused));
51
52static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
53{
54 seq_puts(m, "type : kpp\n");
55}
56
57static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm)
58{
59 struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
60 struct kpp_alg *alg = crypto_kpp_alg(kpp);
61
62 alg->exit(kpp);
63}
64
65static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
66{
67 struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
68 struct kpp_alg *alg = crypto_kpp_alg(kpp);
69
70 if (alg->exit)
71 kpp->base.exit = crypto_kpp_exit_tfm;
72
73 if (alg->init)
74 return alg->init(kpp);
75
76 return 0;
77}
78
79static const struct crypto_type crypto_kpp_type = {
80 .extsize = crypto_alg_extsize,
81 .init_tfm = crypto_kpp_init_tfm,
82#ifdef CONFIG_PROC_FS
83 .show = crypto_kpp_show,
84#endif
85 .report = crypto_kpp_report,
86 .maskclear = ~CRYPTO_ALG_TYPE_MASK,
87 .maskset = CRYPTO_ALG_TYPE_MASK,
88 .type = CRYPTO_ALG_TYPE_KPP,
89 .tfmsize = offsetof(struct crypto_kpp, base),
90};
91
92struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
93{
94 return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask);
95}
96EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
97
98static void kpp_prepare_alg(struct kpp_alg *alg)
99{
100 struct crypto_alg *base = &alg->base;
101
102 base->cra_type = &crypto_kpp_type;
103 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
104 base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
105}
106
107int crypto_register_kpp(struct kpp_alg *alg)
108{
109 struct crypto_alg *base = &alg->base;
110
111 kpp_prepare_alg(alg);
112 return crypto_register_alg(base);
113}
114EXPORT_SYMBOL_GPL(crypto_register_kpp);
115
116void crypto_unregister_kpp(struct kpp_alg *alg)
117{
118 crypto_unregister_alg(&alg->base);
119}
120EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
121
122MODULE_LICENSE("GPL");
123MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
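
From the consumer side, the new type mirrors akcipher: allocate a tfm, feed it a secret, then drive scatterlist-based requests. A hedged sketch of generating a DH public key through this interface (names and flow are illustrative; it assumes the "dh" implementation and the packing helpers from crypto/dh_helper.c, and a synchronous completion):

#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_dh_public_key(const struct dh *params,
				 u8 *pub, unsigned int pub_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	char *packed;
	unsigned int packed_len;
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* p, g and the private key travel as one packed blob. */
	packed_len = crypto_dh_key_len(params);
	packed = kmalloc(packed_len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packed)
		goto free_tfm;

	err = crypto_dh_encode_key(packed, packed_len, params) ?:
	      crypto_kpp_set_secret(tfm, packed, packed_len);
	if (err)
		goto free_buf;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req)
		goto free_buf;

	sg_init_one(&dst, pub, pub_len);	/* pub must be a linear buffer */
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, pub_len);
	kpp_request_set_callback(req, 0, NULL, NULL);

	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
free_buf:
	kzfree(packed);
free_tfm:
	crypto_free_kpp(tfm);
	return err;
}
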
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index c4eb9da49d4f..86fb59b109a9 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -41,7 +41,7 @@ struct mcryptd_flush_list {
41static struct mcryptd_flush_list __percpu *mcryptd_flist; 41static struct mcryptd_flush_list __percpu *mcryptd_flist;
42 42
43struct hashd_instance_ctx { 43struct hashd_instance_ctx {
44 struct crypto_shash_spawn spawn; 44 struct crypto_ahash_spawn spawn;
45 struct mcryptd_queue *queue; 45 struct mcryptd_queue *queue;
46}; 46};
47 47
@@ -272,18 +272,18 @@ static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
272{ 272{
273 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 273 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
274 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); 274 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
275 struct crypto_shash_spawn *spawn = &ictx->spawn; 275 struct crypto_ahash_spawn *spawn = &ictx->spawn;
276 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 276 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
277 struct crypto_shash *hash; 277 struct crypto_ahash *hash;
278 278
279 hash = crypto_spawn_shash(spawn); 279 hash = crypto_spawn_ahash(spawn);
280 if (IS_ERR(hash)) 280 if (IS_ERR(hash))
281 return PTR_ERR(hash); 281 return PTR_ERR(hash);
282 282
283 ctx->child = hash; 283 ctx->child = hash;
284 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 284 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
285 sizeof(struct mcryptd_hash_request_ctx) + 285 sizeof(struct mcryptd_hash_request_ctx) +
286 crypto_shash_descsize(hash)); 286 crypto_ahash_reqsize(hash));
287 return 0; 287 return 0;
288} 288}
289 289
@@ -291,21 +291,21 @@ static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
291{ 291{
292 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 292 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
293 293
294 crypto_free_shash(ctx->child); 294 crypto_free_ahash(ctx->child);
295} 295}
296 296
297static int mcryptd_hash_setkey(struct crypto_ahash *parent, 297static int mcryptd_hash_setkey(struct crypto_ahash *parent,
298 const u8 *key, unsigned int keylen) 298 const u8 *key, unsigned int keylen)
299{ 299{
300 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); 300 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
301 struct crypto_shash *child = ctx->child; 301 struct crypto_ahash *child = ctx->child;
302 int err; 302 int err;
303 303
304 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); 304 crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
305 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & 305 crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
306 CRYPTO_TFM_REQ_MASK); 306 CRYPTO_TFM_REQ_MASK);
307 err = crypto_shash_setkey(child, key, keylen); 307 err = crypto_ahash_setkey(child, key, keylen);
308 crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & 308 crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
309 CRYPTO_TFM_RES_MASK); 309 CRYPTO_TFM_RES_MASK);
310 return err; 310 return err;
311} 311}
@@ -331,20 +331,20 @@ static int mcryptd_hash_enqueue(struct ahash_request *req,
331static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) 331static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
332{ 332{
333 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 333 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
334 struct crypto_shash *child = ctx->child; 334 struct crypto_ahash *child = ctx->child;
335 struct ahash_request *req = ahash_request_cast(req_async); 335 struct ahash_request *req = ahash_request_cast(req_async);
336 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 336 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
337 struct shash_desc *desc = &rctx->desc; 337 struct ahash_request *desc = &rctx->areq;
338 338
339 if (unlikely(err == -EINPROGRESS)) 339 if (unlikely(err == -EINPROGRESS))
340 goto out; 340 goto out;
341 341
342 desc->tfm = child; 342 ahash_request_set_tfm(desc, child);
343 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 343 ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
344 rctx->complete, req_async);
344 345
345 err = crypto_shash_init(desc); 346 rctx->out = req->result;
346 347 err = crypto_ahash_init(desc);
347 req->base.complete = rctx->complete;
348 348
349out: 349out:
350 local_bh_disable(); 350 local_bh_disable();
@@ -365,7 +365,8 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
365 if (unlikely(err == -EINPROGRESS)) 365 if (unlikely(err == -EINPROGRESS))
366 goto out; 366 goto out;
367 367
368 err = shash_ahash_mcryptd_update(req, &rctx->desc); 368 rctx->out = req->result;
369 err = ahash_mcryptd_update(&rctx->areq);
369 if (err) { 370 if (err) {
370 req->base.complete = rctx->complete; 371 req->base.complete = rctx->complete;
371 goto out; 372 goto out;
@@ -391,7 +392,8 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
391 if (unlikely(err == -EINPROGRESS)) 392 if (unlikely(err == -EINPROGRESS))
392 goto out; 393 goto out;
393 394
394 err = shash_ahash_mcryptd_final(req, &rctx->desc); 395 rctx->out = req->result;
396 err = ahash_mcryptd_final(&rctx->areq);
395 if (err) { 397 if (err) {
396 req->base.complete = rctx->complete; 398 req->base.complete = rctx->complete;
397 goto out; 399 goto out;
@@ -416,8 +418,8 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
416 418
417 if (unlikely(err == -EINPROGRESS)) 419 if (unlikely(err == -EINPROGRESS))
418 goto out; 420 goto out;
419 421 rctx->out = req->result;
420 err = shash_ahash_mcryptd_finup(req, &rctx->desc); 422 err = ahash_mcryptd_finup(&rctx->areq);
421 423
422 if (err) { 424 if (err) {
423 req->base.complete = rctx->complete; 425 req->base.complete = rctx->complete;
@@ -439,25 +441,21 @@ static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
439static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) 441static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
440{ 442{
441 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 443 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
442 struct crypto_shash *child = ctx->child; 444 struct crypto_ahash *child = ctx->child;
443 struct ahash_request *req = ahash_request_cast(req_async); 445 struct ahash_request *req = ahash_request_cast(req_async);
444 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 446 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
445 struct shash_desc *desc = &rctx->desc; 447 struct ahash_request *desc = &rctx->areq;
446 448
447 if (unlikely(err == -EINPROGRESS)) 449 if (unlikely(err == -EINPROGRESS))
448 goto out; 450 goto out;
449 451
450 desc->tfm = child; 452 ahash_request_set_tfm(desc, child);
451 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ 453 ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
452 454 rctx->complete, req_async);
453 err = shash_ahash_mcryptd_digest(req, desc);
454 455
455 if (err) { 456 rctx->out = req->result;
456 req->base.complete = rctx->complete; 457 err = ahash_mcryptd_digest(desc);
457 goto out;
458 }
459 458
460 return;
461out: 459out:
462 local_bh_disable(); 460 local_bh_disable();
463 rctx->complete(&req->base, err); 461 rctx->complete(&req->base, err);
@@ -473,14 +471,14 @@ static int mcryptd_hash_export(struct ahash_request *req, void *out)
473{ 471{
474 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 472 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
475 473
476 return crypto_shash_export(&rctx->desc, out); 474 return crypto_ahash_export(&rctx->areq, out);
477} 475}
478 476
479static int mcryptd_hash_import(struct ahash_request *req, const void *in) 477static int mcryptd_hash_import(struct ahash_request *req, const void *in)
480{ 478{
481 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 479 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
482 480
483 return crypto_shash_import(&rctx->desc, in); 481 return crypto_ahash_import(&rctx->areq, in);
484} 482}
485 483
486static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, 484static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -488,7 +486,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
488{ 486{
489 struct hashd_instance_ctx *ctx; 487 struct hashd_instance_ctx *ctx;
490 struct ahash_instance *inst; 488 struct ahash_instance *inst;
491 struct shash_alg *salg; 489 struct hash_alg_common *halg;
492 struct crypto_alg *alg; 490 struct crypto_alg *alg;
493 u32 type = 0; 491 u32 type = 0;
494 u32 mask = 0; 492 u32 mask = 0;
@@ -496,11 +494,11 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
496 494
497 mcryptd_check_internal(tb, &type, &mask); 495 mcryptd_check_internal(tb, &type, &mask);
498 496
499 salg = shash_attr_alg(tb[1], type, mask); 497 halg = ahash_attr_alg(tb[1], type, mask);
500 if (IS_ERR(salg)) 498 if (IS_ERR(halg))
501 return PTR_ERR(salg); 499 return PTR_ERR(halg);
502 500
503 alg = &salg->base; 501 alg = &halg->base;
504 pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); 502 pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
505 inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), 503 inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
506 sizeof(*ctx)); 504 sizeof(*ctx));
@@ -511,7 +509,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
511 ctx = ahash_instance_ctx(inst); 509 ctx = ahash_instance_ctx(inst);
512 ctx->queue = queue; 510 ctx->queue = queue;
513 511
514 err = crypto_init_shash_spawn(&ctx->spawn, salg, 512 err = crypto_init_ahash_spawn(&ctx->spawn, halg,
515 ahash_crypto_instance(inst)); 513 ahash_crypto_instance(inst));
516 if (err) 514 if (err)
517 goto out_free_inst; 515 goto out_free_inst;
@@ -521,8 +519,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
521 type |= CRYPTO_ALG_INTERNAL; 519 type |= CRYPTO_ALG_INTERNAL;
522 inst->alg.halg.base.cra_flags = type; 520 inst->alg.halg.base.cra_flags = type;
523 521
524 inst->alg.halg.digestsize = salg->digestsize; 522 inst->alg.halg.digestsize = halg->digestsize;
525 inst->alg.halg.statesize = salg->statesize; 523 inst->alg.halg.statesize = halg->statesize;
526 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); 524 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
527 525
528 inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; 526 inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
@@ -539,7 +537,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
539 537
540 err = ahash_register_instance(tmpl, inst); 538 err = ahash_register_instance(tmpl, inst);
541 if (err) { 539 if (err) {
542 crypto_drop_shash(&ctx->spawn); 540 crypto_drop_ahash(&ctx->spawn);
543out_free_inst: 541out_free_inst:
544 kfree(inst); 542 kfree(inst);
545 } 543 }
@@ -575,7 +573,7 @@ static void mcryptd_free(struct crypto_instance *inst)
575 573
576 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { 574 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
577 case CRYPTO_ALG_TYPE_AHASH: 575 case CRYPTO_ALG_TYPE_AHASH:
578 crypto_drop_shash(&hctx->spawn); 576 crypto_drop_ahash(&hctx->spawn);
579 kfree(ahash_instance(inst)); 577 kfree(ahash_instance(inst));
580 return; 578 return;
581 default: 579 default:
@@ -612,55 +610,38 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
612} 610}
613EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); 611EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
614 612
615int shash_ahash_mcryptd_digest(struct ahash_request *req, 613int ahash_mcryptd_digest(struct ahash_request *desc)
616 struct shash_desc *desc)
617{ 614{
618 int err; 615 int err;
619 616
620 err = crypto_shash_init(desc) ?: 617 err = crypto_ahash_init(desc) ?:
621 shash_ahash_mcryptd_finup(req, desc); 618 ahash_mcryptd_finup(desc);
622 619
623 return err; 620 return err;
624} 621}
625EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);
626 622
627int shash_ahash_mcryptd_update(struct ahash_request *req, 623int ahash_mcryptd_update(struct ahash_request *desc)
628 struct shash_desc *desc)
629{ 624{
630 struct crypto_shash *tfm = desc->tfm;
631 struct shash_alg *shash = crypto_shash_alg(tfm);
632
633 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 625 /* alignment is to be done by multi-buffer crypto algorithm if needed */
634 626
635 return shash->update(desc, NULL, 0); 627 return crypto_ahash_update(desc);
636} 628}
637EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);
638 629
639int shash_ahash_mcryptd_finup(struct ahash_request *req, 630int ahash_mcryptd_finup(struct ahash_request *desc)
640 struct shash_desc *desc)
641{ 631{
642 struct crypto_shash *tfm = desc->tfm;
643 struct shash_alg *shash = crypto_shash_alg(tfm);
644
645 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 632 /* alignment is to be done by multi-buffer crypto algorithm if needed */
646 633
647 return shash->finup(desc, NULL, 0, req->result); 634 return crypto_ahash_finup(desc);
648} 635}
649EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);
650 636
651int shash_ahash_mcryptd_final(struct ahash_request *req, 637int ahash_mcryptd_final(struct ahash_request *desc)
652 struct shash_desc *desc)
653{ 638{
654 struct crypto_shash *tfm = desc->tfm;
655 struct shash_alg *shash = crypto_shash_alg(tfm);
656
657 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 639 /* alignment is to be done by multi-buffer crypto algorithm if needed */
658 640
659 return shash->final(desc, req->result); 641 return crypto_ahash_final(desc);
660} 642}
661EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);
662 643
663struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) 644struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
664{ 645{
665 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 646 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
666 647
@@ -668,12 +649,12 @@ struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
668} 649}
669EXPORT_SYMBOL_GPL(mcryptd_ahash_child); 650EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
670 651
671struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) 652struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
672{ 653{
673 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 654 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
674 return &rctx->desc; 655 return &rctx->areq;
675} 656}
676EXPORT_SYMBOL_GPL(mcryptd_shash_desc); 657EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
677 658
678void mcryptd_free_ahash(struct mcryptd_ahash *tfm) 659void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
679{ 660{
@@ -681,7 +662,6 @@ void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
681} 662}
682EXPORT_SYMBOL_GPL(mcryptd_free_ahash); 663EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
683 664
684
685static int __init mcryptd_init(void) 665static int __init mcryptd_init(void)
686{ 666{
687 int err, cpu; 667 int err, cpu;
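
The conversion swaps the synchronous shash descriptor for a full ahash request, which is what lets mcryptd wrap the multibuffer (internal, asynchronous) hash drivers. The request pattern now used throughout the file reduces to this generic shape (illustrative sketch, assuming a caller-provided tfm and source data already mapped into sg):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_ahash_digest(struct crypto_ahash *tfm,
				struct scatterlist *sg,
				unsigned int nbytes, u8 *out)
{
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, nbytes);

	/* Same init ?: finup chaining as ahash_mcryptd_digest() above. */
	err = crypto_ahash_init(req) ?:
	      crypto_ahash_finup(req);

	ahash_request_free(req);
	return err;
}
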
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index ead8dc0d084e..877019a6d3ea 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -92,60 +92,66 @@ static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
92 92
93struct pkcs1pad_ctx { 93struct pkcs1pad_ctx {
94 struct crypto_akcipher *child; 94 struct crypto_akcipher *child;
95 const char *hash_name;
96 unsigned int key_size; 95 unsigned int key_size;
97}; 96};
98 97
99struct pkcs1pad_inst_ctx { 98struct pkcs1pad_inst_ctx {
100 struct crypto_akcipher_spawn spawn; 99 struct crypto_akcipher_spawn spawn;
101 const char *hash_name; 100 const struct rsa_asn1_template *digest_info;
102}; 101};
103 102
104struct pkcs1pad_request { 103struct pkcs1pad_request {
105 struct akcipher_request child_req; 104 struct scatterlist in_sg[2], out_sg[1];
106
107 struct scatterlist in_sg[3], out_sg[2];
108 uint8_t *in_buf, *out_buf; 105 uint8_t *in_buf, *out_buf;
106 struct akcipher_request child_req;
109}; 107};
110 108
111static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, 109static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
112 unsigned int keylen) 110 unsigned int keylen)
113{ 111{
114 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 112 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
115 int err, size; 113 int err;
114
115 ctx->key_size = 0;
116 116
117 err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); 117 err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
118 if (err)
119 return err;
118 120
119 if (!err) { 121 /* Find out new modulus size from rsa implementation */
120 /* Find out new modulus size from rsa implementation */ 122 err = crypto_akcipher_maxsize(ctx->child);
121 size = crypto_akcipher_maxsize(ctx->child); 123 if (err < 0)
124 return err;
122 125
123 ctx->key_size = size > 0 ? size : 0; 126 if (err > PAGE_SIZE)
124 if (size <= 0) 127 return -ENOTSUPP;
125 err = size;
126 }
127 128
128 return err; 129 ctx->key_size = err;
130 return 0;
129} 131}
130 132
131static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, 133static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
132 unsigned int keylen) 134 unsigned int keylen)
133{ 135{
134 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 136 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
135 int err, size; 137 int err;
138
139 ctx->key_size = 0;
136 140
137 err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); 141 err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
142 if (err)
143 return err;
138 144
139 if (!err) { 145 /* Find out new modulus size from rsa implementation */
140 /* Find out new modulus size from rsa implementation */ 146 err = crypto_akcipher_maxsize(ctx->child);
141 size = crypto_akcipher_maxsize(ctx->child); 147 if (err < 0)
148 return err;
142 149
143 ctx->key_size = size > 0 ? size : 0; 150 if (err > PAGE_SIZE)
144 if (size <= 0) 151 return -ENOTSUPP;
145 err = size;
146 }
147 152
148 return err; 153 ctx->key_size = err;
154 return 0;
149} 155}
150 156
151static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) 157static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
@@ -164,19 +170,10 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
164static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, 170static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
165 struct scatterlist *next) 171 struct scatterlist *next)
166{ 172{
167 int nsegs = next ? 1 : 0; 173 int nsegs = next ? 2 : 1;
168 174
169 if (offset_in_page(buf) + len <= PAGE_SIZE) { 175 sg_init_table(sg, nsegs);
170 nsegs += 1; 176 sg_set_buf(sg, buf, len);
171 sg_init_table(sg, nsegs);
172 sg_set_buf(sg, buf, len);
173 } else {
174 nsegs += 2;
175 sg_init_table(sg, nsegs);
176 sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
177 sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
178 offset_in_page(buf) + len - PAGE_SIZE);
179 }
180 177
181 if (next) 178 if (next)
182 sg_chain(sg, nsegs, next); 179 sg_chain(sg, nsegs, next);
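
The split-across-pages case is dead code for these buffers: they come from kmalloc(), which returns physically contiguous memory, so a single scatterlist entry always suffices and at most one chain entry is needed to splice in the caller's data. The resulting shape, as a standalone sketch (hypothetical helper name):

#include <linux/scatterlist.h>

static void example_chain_pad(struct scatterlist sg[2], void *pad,
			      unsigned int pad_len,
			      struct scatterlist *payload)
{
	sg_init_table(sg, 2);
	sg_set_buf(sg, pad, pad_len);
	sg_chain(sg, 2, payload);	/* second entry becomes the chain link */
}
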
@@ -187,37 +184,36 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
187 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 184 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
188 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 185 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
189 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 186 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
190 size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len; 187 unsigned int pad_len;
191 size_t chunk_len, pad_left; 188 unsigned int len;
192 struct sg_mapping_iter miter; 189 u8 *out_buf;
193 190
194 if (!err) { 191 if (err)
195 if (pad_len) { 192 goto out;
196 sg_miter_start(&miter, req->dst, 193
197 sg_nents_for_len(req->dst, pad_len), 194 len = req_ctx->child_req.dst_len;
198 SG_MITER_ATOMIC | SG_MITER_TO_SG); 195 pad_len = ctx->key_size - len;
199 196
200 pad_left = pad_len; 197 /* Four billion to one */
201 while (pad_left) { 198 if (likely(!pad_len))
202 sg_miter_next(&miter); 199 goto out;
203 200
204 chunk_len = min(miter.length, pad_left); 201 out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
205 memset(miter.addr, 0, chunk_len); 202 err = -ENOMEM;
206 pad_left -= chunk_len; 203 if (!out_buf)
207 } 204 goto out;
208 205
209 sg_miter_stop(&miter); 206 sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
210 } 207 out_buf + pad_len, len);
211 208 sg_copy_from_buffer(req->dst,
212 sg_pcopy_from_buffer(req->dst, 209 sg_nents_for_len(req->dst, ctx->key_size),
213 sg_nents_for_len(req->dst, ctx->key_size), 210 out_buf, ctx->key_size);
214 req_ctx->out_buf, req_ctx->child_req.dst_len, 211 kzfree(out_buf);
215 pad_len); 212
216 } 213out:
217 req->dst_len = ctx->key_size; 214 req->dst_len = ctx->key_size;
218 215
219 kfree(req_ctx->in_buf); 216 kfree(req_ctx->in_buf);
220 kzfree(req_ctx->out_buf);
221 217
222 return err; 218 return err;
223} 219}
@@ -257,21 +253,8 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
257 return -EOVERFLOW; 253 return -EOVERFLOW;
258 } 254 }
259 255
260 if (ctx->key_size > PAGE_SIZE)
261 return -ENOTSUPP;
262
263 /*
264 * Replace both input and output to add the padding in the input and
265 * the potential missing leading zeros in the output.
266 */
267 req_ctx->child_req.src = req_ctx->in_sg;
268 req_ctx->child_req.src_len = ctx->key_size - 1;
269 req_ctx->child_req.dst = req_ctx->out_sg;
270 req_ctx->child_req.dst_len = ctx->key_size;
271
272 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, 256 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
273 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 257 GFP_KERNEL);
274 GFP_KERNEL : GFP_ATOMIC);
275 if (!req_ctx->in_buf) 258 if (!req_ctx->in_buf)
276 return -ENOMEM; 259 return -ENOMEM;
277 260
@@ -284,9 +267,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
284 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, 267 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
285 ctx->key_size - 1 - req->src_len, req->src); 268 ctx->key_size - 1 - req->src_len, req->src);
286 269
287 req_ctx->out_buf = kmalloc(ctx->key_size, 270 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
288 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
289 GFP_KERNEL : GFP_ATOMIC);
290 if (!req_ctx->out_buf) { 271 if (!req_ctx->out_buf) {
291 kfree(req_ctx->in_buf); 272 kfree(req_ctx->in_buf);
292 return -ENOMEM; 273 return -ENOMEM;
@@ -299,6 +280,10 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
299 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 280 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
300 pkcs1pad_encrypt_sign_complete_cb, req); 281 pkcs1pad_encrypt_sign_complete_cb, req);
301 282
283 /* Reuse output buffer */
284 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
285 req->dst, ctx->key_size - 1, req->dst_len);
286
302 err = crypto_akcipher_encrypt(&req_ctx->child_req); 287 err = crypto_akcipher_encrypt(&req_ctx->child_req);
303 if (err != -EINPROGRESS && 288 if (err != -EINPROGRESS &&
304 (err != -EBUSY || 289 (err != -EBUSY ||
@@ -380,18 +365,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
380 if (!ctx->key_size || req->src_len != ctx->key_size) 365 if (!ctx->key_size || req->src_len != ctx->key_size)
381 return -EINVAL; 366 return -EINVAL;
382 367
383 if (ctx->key_size > PAGE_SIZE) 368 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
384 return -ENOTSUPP;
385
386 /* Reuse input buffer, output to a new buffer */
387 req_ctx->child_req.src = req->src;
388 req_ctx->child_req.src_len = req->src_len;
389 req_ctx->child_req.dst = req_ctx->out_sg;
390 req_ctx->child_req.dst_len = ctx->key_size ;
391
392 req_ctx->out_buf = kmalloc(ctx->key_size,
393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
394 GFP_KERNEL : GFP_ATOMIC);
395 if (!req_ctx->out_buf) 369 if (!req_ctx->out_buf)
396 return -ENOMEM; 370 return -ENOMEM;
397 371
@@ -402,6 +376,11 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 376 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
403 pkcs1pad_decrypt_complete_cb, req); 377 pkcs1pad_decrypt_complete_cb, req);
404 378
379 /* Reuse input buffer, output to a new buffer */
380 akcipher_request_set_crypt(&req_ctx->child_req, req->src,
381 req_ctx->out_sg, req->src_len,
382 ctx->key_size);
383
405 err = crypto_akcipher_decrypt(&req_ctx->child_req); 384 err = crypto_akcipher_decrypt(&req_ctx->child_req);
406 if (err != -EINPROGRESS && 385 if (err != -EINPROGRESS &&
407 (err != -EBUSY || 386 (err != -EBUSY ||
@@ -416,20 +395,16 @@ static int pkcs1pad_sign(struct akcipher_request *req)
416 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 395 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
417 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 396 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
418 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 397 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
419 const struct rsa_asn1_template *digest_info = NULL; 398 struct akcipher_instance *inst = akcipher_alg_instance(tfm);
399 struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
400 const struct rsa_asn1_template *digest_info = ictx->digest_info;
420 int err; 401 int err;
421 unsigned int ps_end, digest_size = 0; 402 unsigned int ps_end, digest_size = 0;
422 403
423 if (!ctx->key_size) 404 if (!ctx->key_size)
424 return -EINVAL; 405 return -EINVAL;
425 406
426 if (ctx->hash_name) { 407 digest_size = digest_info->size;
427 digest_info = rsa_lookup_asn1(ctx->hash_name);
428 if (!digest_info)
429 return -EINVAL;
430
431 digest_size = digest_info->size;
432 }
433 408
434 if (req->src_len + digest_size > ctx->key_size - 11) 409 if (req->src_len + digest_size > ctx->key_size - 11)
435 return -EOVERFLOW; 410 return -EOVERFLOW;
@@ -439,21 +414,8 @@ static int pkcs1pad_sign(struct akcipher_request *req)
439 return -EOVERFLOW; 414 return -EOVERFLOW;
440 } 415 }
441 416
442 if (ctx->key_size > PAGE_SIZE)
443 return -ENOTSUPP;
444
445 /*
446 * Replace both input and output to add the padding in the input and
447 * the potential missing leading zeros in the output.
448 */
449 req_ctx->child_req.src = req_ctx->in_sg;
450 req_ctx->child_req.src_len = ctx->key_size - 1;
451 req_ctx->child_req.dst = req_ctx->out_sg;
452 req_ctx->child_req.dst_len = ctx->key_size;
453
454 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, 417 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
455 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 418 GFP_KERNEL);
456 GFP_KERNEL : GFP_ATOMIC);
457 if (!req_ctx->in_buf) 419 if (!req_ctx->in_buf)
458 return -ENOMEM; 420 return -ENOMEM;
459 421
@@ -462,29 +424,20 @@ static int pkcs1pad_sign(struct akcipher_request *req)
462 memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); 424 memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
463 req_ctx->in_buf[ps_end] = 0x00; 425 req_ctx->in_buf[ps_end] = 0x00;
464 426
465 if (digest_info) { 427 memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
466 memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, 428 digest_info->size);
467 digest_info->size);
468 }
469 429
470 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, 430 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
471 ctx->key_size - 1 - req->src_len, req->src); 431 ctx->key_size - 1 - req->src_len, req->src);
472 432
473 req_ctx->out_buf = kmalloc(ctx->key_size,
474 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
475 GFP_KERNEL : GFP_ATOMIC);
476 if (!req_ctx->out_buf) {
477 kfree(req_ctx->in_buf);
478 return -ENOMEM;
479 }
480
481 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
482 ctx->key_size, NULL);
483
484 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 433 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
485 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 434 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
486 pkcs1pad_encrypt_sign_complete_cb, req); 435 pkcs1pad_encrypt_sign_complete_cb, req);
487 436
437 /* Reuse output buffer */
438 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
439 req->dst, ctx->key_size - 1, req->dst_len);
440
488 err = crypto_akcipher_sign(&req_ctx->child_req); 441 err = crypto_akcipher_sign(&req_ctx->child_req);
489 if (err != -EINPROGRESS && 442 if (err != -EINPROGRESS &&
490 (err != -EBUSY || 443 (err != -EBUSY ||
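
With the digest info resolved at instance-creation time, the sign path always emits the full EMSA-PKCS1-v1_5 encoding. Written out over a whole key_size block (the code above skips the leading 0x00 byte because the child request operates on key_size - 1 bytes), the layout is 0x00 || 0x01 || PS || 0x00 || DigestInfo || H(M). A sketch of that construction over a linear buffer (hypothetical helper; the caller must guarantee PS is at least 8 bytes):

#include <linux/string.h>
#include <linux/types.h>

static void example_emsa_pkcs1_v15(u8 *buf, unsigned int key_size,
				   const u8 *digest_info, unsigned int di_size,
				   const u8 *hash, unsigned int hash_size)
{
	/* Index of the 0x00 separator before DigestInfo || hash. */
	unsigned int ps_end = key_size - di_size - hash_size - 1;

	buf[0] = 0x00;
	buf[1] = 0x01;
	memset(buf + 2, 0xff, ps_end - 2);		/* PS */
	buf[ps_end] = 0x00;
	memcpy(buf + ps_end + 1, digest_info, di_size);
	memcpy(buf + ps_end + 1 + di_size, hash, hash_size);
}
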
@@ -499,56 +452,58 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
499 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 452 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
500 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 453 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
501 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 454 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
502 const struct rsa_asn1_template *digest_info; 455 struct akcipher_instance *inst = akcipher_alg_instance(tfm);
456 struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
457 const struct rsa_asn1_template *digest_info = ictx->digest_info;
458 unsigned int dst_len;
503 unsigned int pos; 459 unsigned int pos;
504 460 u8 *out_buf;
505 if (err == -EOVERFLOW)
506 /* Decrypted value had no leading 0 byte */
507 err = -EINVAL;
508 461
509 if (err) 462 if (err)
510 goto done; 463 goto done;
511 464
512 if (req_ctx->child_req.dst_len != ctx->key_size - 1) { 465 err = -EINVAL;
513 err = -EINVAL; 466 dst_len = req_ctx->child_req.dst_len;
467 if (dst_len < ctx->key_size - 1)
514 goto done; 468 goto done;
469
470 out_buf = req_ctx->out_buf;
471 if (dst_len == ctx->key_size) {
472 if (out_buf[0] != 0x00)
473 /* Decrypted value had no leading 0 byte */
474 goto done;
475
476 dst_len--;
477 out_buf++;
515 } 478 }
516 479
517 err = -EBADMSG; 480 err = -EBADMSG;
518 if (req_ctx->out_buf[0] != 0x01) 481 if (out_buf[0] != 0x01)
519 goto done; 482 goto done;
520 483
521 for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) 484 for (pos = 1; pos < dst_len; pos++)
522 if (req_ctx->out_buf[pos] != 0xff) 485 if (out_buf[pos] != 0xff)
523 break; 486 break;
524 487
525 if (pos < 9 || pos == req_ctx->child_req.dst_len || 488 if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
526 req_ctx->out_buf[pos] != 0x00)
527 goto done; 489 goto done;
528 pos++; 490 pos++;
529 491
530 if (ctx->hash_name) { 492 if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
531 digest_info = rsa_lookup_asn1(ctx->hash_name); 493 goto done;
532 if (!digest_info)
533 goto done;
534
535 if (memcmp(req_ctx->out_buf + pos, digest_info->data,
536 digest_info->size))
537 goto done;
538 494
539 pos += digest_info->size; 495 pos += digest_info->size;
540 }
541 496
542 err = 0; 497 err = 0;
543 498
544 if (req->dst_len < req_ctx->child_req.dst_len - pos) 499 if (req->dst_len < dst_len - pos)
545 err = -EOVERFLOW; 500 err = -EOVERFLOW;
546 req->dst_len = req_ctx->child_req.dst_len - pos; 501 req->dst_len = dst_len - pos;
547 502
548 if (!err) 503 if (!err)
549 sg_copy_from_buffer(req->dst, 504 sg_copy_from_buffer(req->dst,
550 sg_nents_for_len(req->dst, req->dst_len), 505 sg_nents_for_len(req->dst, req->dst_len),
551 req_ctx->out_buf + pos, req->dst_len); 506 out_buf + pos, req->dst_len);
552done: 507done:
553 kzfree(req_ctx->out_buf); 508 kzfree(req_ctx->out_buf);
554 509
@@ -588,18 +543,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
588 if (!ctx->key_size || req->src_len < ctx->key_size) 543 if (!ctx->key_size || req->src_len < ctx->key_size)
589 return -EINVAL; 544 return -EINVAL;
590 545
591 if (ctx->key_size > PAGE_SIZE) 546 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
592 return -ENOTSUPP;
593
594 /* Reuse input buffer, output to a new buffer */
595 req_ctx->child_req.src = req->src;
596 req_ctx->child_req.src_len = req->src_len;
597 req_ctx->child_req.dst = req_ctx->out_sg;
598 req_ctx->child_req.dst_len = ctx->key_size;
599
600 req_ctx->out_buf = kmalloc(ctx->key_size,
601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
602 GFP_KERNEL : GFP_ATOMIC);
603 if (!req_ctx->out_buf) 547 if (!req_ctx->out_buf)
604 return -ENOMEM; 548 return -ENOMEM;
605 549
@@ -610,6 +554,11 @@ static int pkcs1pad_verify(struct akcipher_request *req)
610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 554 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
611 pkcs1pad_verify_complete_cb, req); 555 pkcs1pad_verify_complete_cb, req);
612 556
557 /* Reuse input buffer, output to a new buffer */
558 akcipher_request_set_crypt(&req_ctx->child_req, req->src,
559 req_ctx->out_sg, req->src_len,
560 ctx->key_size);
561
613 err = crypto_akcipher_verify(&req_ctx->child_req); 562 err = crypto_akcipher_verify(&req_ctx->child_req);
614 if (err != -EINPROGRESS && 563 if (err != -EINPROGRESS &&
615 (err != -EBUSY || 564 (err != -EBUSY ||
@@ -626,12 +575,11 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
626 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 575 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
627 struct crypto_akcipher *child_tfm; 576 struct crypto_akcipher *child_tfm;
628 577
629 child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst)); 578 child_tfm = crypto_spawn_akcipher(&ictx->spawn);
630 if (IS_ERR(child_tfm)) 579 if (IS_ERR(child_tfm))
631 return PTR_ERR(child_tfm); 580 return PTR_ERR(child_tfm);
632 581
633 ctx->child = child_tfm; 582 ctx->child = child_tfm;
634 ctx->hash_name = ictx->hash_name;
635 return 0; 583 return 0;
636} 584}
637 585
@@ -648,12 +596,12 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
648 struct crypto_akcipher_spawn *spawn = &ctx->spawn; 596 struct crypto_akcipher_spawn *spawn = &ctx->spawn;
649 597
650 crypto_drop_akcipher(spawn); 598 crypto_drop_akcipher(spawn);
651 kfree(ctx->hash_name);
652 kfree(inst); 599 kfree(inst);
653} 600}
654 601
655static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) 602static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
656{ 603{
604 const struct rsa_asn1_template *digest_info;
657 struct crypto_attr_type *algt; 605 struct crypto_attr_type *algt;
658 struct akcipher_instance *inst; 606 struct akcipher_instance *inst;
659 struct pkcs1pad_inst_ctx *ctx; 607 struct pkcs1pad_inst_ctx *ctx;
@@ -676,7 +624,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
676 624
677 hash_name = crypto_attr_alg_name(tb[2]); 625 hash_name = crypto_attr_alg_name(tb[2]);
678 if (IS_ERR(hash_name)) 626 if (IS_ERR(hash_name))
679 hash_name = NULL; 627 return PTR_ERR(hash_name);
628
629 digest_info = rsa_lookup_asn1(hash_name);
630 if (!digest_info)
631 return -EINVAL;
680 632
681 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 633 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
682 if (!inst) 634 if (!inst)
@@ -684,7 +636,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
684 636
685 ctx = akcipher_instance_ctx(inst); 637 ctx = akcipher_instance_ctx(inst);
686 spawn = &ctx->spawn; 638 spawn = &ctx->spawn;
687 ctx->hash_name = hash_name ? kstrdup(hash_name, GFP_KERNEL) : NULL; 639 ctx->digest_info = digest_info;
688 640
689 crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); 641 crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
690 err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, 642 err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
@@ -696,27 +648,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
696 648
697 err = -ENAMETOOLONG; 649 err = -ENAMETOOLONG;
698 650
699 if (!hash_name) { 651 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
700 if (snprintf(inst->alg.base.cra_name, 652 "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
701 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 653 CRYPTO_MAX_ALG_NAME ||
702 rsa_alg->base.cra_name) >= 654 snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
703 CRYPTO_MAX_ALG_NAME || 655 "pkcs1pad(%s,%s)",
704 snprintf(inst->alg.base.cra_driver_name, 656 rsa_alg->base.cra_driver_name, hash_name) >=
705 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 657 CRYPTO_MAX_ALG_NAME)
706 rsa_alg->base.cra_driver_name) >=
707 CRYPTO_MAX_ALG_NAME)
708 goto out_drop_alg; 658 goto out_drop_alg;
709 } else {
710 if (snprintf(inst->alg.base.cra_name,
711 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
712 rsa_alg->base.cra_name, hash_name) >=
713 CRYPTO_MAX_ALG_NAME ||
714 snprintf(inst->alg.base.cra_driver_name,
715 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
716 rsa_alg->base.cra_driver_name, hash_name) >=
717 CRYPTO_MAX_ALG_NAME)
718 goto out_free_hash;
719 }
720 659
721 inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; 660 inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
722 inst->alg.base.cra_priority = rsa_alg->base.cra_priority; 661 inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
@@ -738,12 +677,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
738 677
739 err = akcipher_register_instance(tmpl, inst); 678 err = akcipher_register_instance(tmpl, inst);
740 if (err) 679 if (err)
741 goto out_free_hash; 680 goto out_drop_alg;
742 681
743 return 0; 682 return 0;
744 683
745out_free_hash:
746 kfree(ctx->hash_name);
747out_drop_alg: 684out_drop_alg:
748 crypto_drop_akcipher(spawn); 685 crypto_drop_akcipher(spawn);
749out_free_inst: 686out_free_inst:
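
One user-visible consequence of resolving the digest info up front: the hash parameter is now mandatory, so instances must be requested as e.g. "pkcs1pad(rsa,sha256)"; the old bare "pkcs1pad(rsa)" spelling fails at create time. Minimal allocation sketch (illustrative):

#include <crypto/akcipher.h>

static struct crypto_akcipher *example_alloc_pkcs1pad(void)
{
	/* Returns an ERR_PTR unless an "sha256" digest-info
	 * template is known to rsa_lookup_asn1(). */
	return crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
}
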
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 77d737f52147..4c280b6a3ea9 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -10,16 +10,23 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/mpi.h>
13#include <crypto/internal/rsa.h> 14#include <crypto/internal/rsa.h>
14#include <crypto/internal/akcipher.h> 15#include <crypto/internal/akcipher.h>
15#include <crypto/akcipher.h> 16#include <crypto/akcipher.h>
16#include <crypto/algapi.h> 17#include <crypto/algapi.h>
17 18
19struct rsa_mpi_key {
20 MPI n;
21 MPI e;
22 MPI d;
23};
24
18/* 25/*
19 * RSAEP function [RFC3447 sec 5.1.1] 26 * RSAEP function [RFC3447 sec 5.1.1]
20 * c = m^e mod n; 27 * c = m^e mod n;
21 */ 28 */
22static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m) 29static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
23{ 30{
24 /* (1) Validate 0 <= m < n */ 31 /* (1) Validate 0 <= m < n */
25 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) 32 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -33,7 +40,7 @@ static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
33 * RSADP function [RFC3447 sec 5.1.2] 40 * RSADP function [RFC3447 sec 5.1.2]
34 * m = c^d mod n; 41 * m = c^d mod n;
35 */ 42 */
36static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c) 43static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
37{ 44{
38 /* (1) Validate 0 <= c < n */ 45 /* (1) Validate 0 <= c < n */
39 if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) 46 if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
@@ -47,7 +54,7 @@ static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
47 * RSASP1 function [RFC3447 sec 5.2.1] 54 * RSASP1 function [RFC3447 sec 5.2.1]
48 * s = m^d mod n 55 * s = m^d mod n
49 */ 56 */
50static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m) 57static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
51{ 58{
52 /* (1) Validate 0 <= m < n */ 59 /* (1) Validate 0 <= m < n */
53 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) 60 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -61,7 +68,7 @@ static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
61 * RSAVP1 function [RFC3447 sec 5.2.2] 68 * RSAVP1 function [RFC3447 sec 5.2.2]
62 * m = s^e mod n; 69 * m = s^e mod n;
63 */ 70 */
64static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s) 71static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
65{ 72{
66 /* (1) Validate 0 <= s < n */ 73 /* (1) Validate 0 <= s < n */
67 if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0) 74 if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
@@ -71,7 +78,7 @@ static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
71 return mpi_powm(m, s, key->e, key->n); 78 return mpi_powm(m, s, key->e, key->n);
72} 79}
73 80
74static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm) 81static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
75{ 82{
76 return akcipher_tfm_ctx(tfm); 83 return akcipher_tfm_ctx(tfm);
77} 84}
@@ -79,7 +86,7 @@ static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
79static int rsa_enc(struct akcipher_request *req) 86static int rsa_enc(struct akcipher_request *req)
80{ 87{
81 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 88 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
82 const struct rsa_key *pkey = rsa_get_key(tfm); 89 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
83 MPI m, c = mpi_alloc(0); 90 MPI m, c = mpi_alloc(0);
84 int ret = 0; 91 int ret = 0;
85 int sign; 92 int sign;
@@ -101,7 +108,7 @@ static int rsa_enc(struct akcipher_request *req)
101 if (ret) 108 if (ret)
102 goto err_free_m; 109 goto err_free_m;
103 110
104 ret = mpi_write_to_sgl(c, req->dst, &req->dst_len, &sign); 111 ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign);
105 if (ret) 112 if (ret)
106 goto err_free_m; 113 goto err_free_m;
107 114
@@ -118,7 +125,7 @@ err_free_c:
118static int rsa_dec(struct akcipher_request *req) 125static int rsa_dec(struct akcipher_request *req)
119{ 126{
120 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 127 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
121 const struct rsa_key *pkey = rsa_get_key(tfm); 128 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
122 MPI c, m = mpi_alloc(0); 129 MPI c, m = mpi_alloc(0);
123 int ret = 0; 130 int ret = 0;
124 int sign; 131 int sign;
@@ -140,7 +147,7 @@ static int rsa_dec(struct akcipher_request *req)
140 if (ret) 147 if (ret)
141 goto err_free_c; 148 goto err_free_c;
142 149
143 ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign); 150 ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
144 if (ret) 151 if (ret)
145 goto err_free_c; 152 goto err_free_c;
146 153
@@ -156,7 +163,7 @@ err_free_m:
156static int rsa_sign(struct akcipher_request *req) 163static int rsa_sign(struct akcipher_request *req)
157{ 164{
158 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 165 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
159 const struct rsa_key *pkey = rsa_get_key(tfm); 166 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
160 MPI m, s = mpi_alloc(0); 167 MPI m, s = mpi_alloc(0);
161 int ret = 0; 168 int ret = 0;
162 int sign; 169 int sign;
@@ -178,7 +185,7 @@ static int rsa_sign(struct akcipher_request *req)
178 if (ret) 185 if (ret)
179 goto err_free_m; 186 goto err_free_m;
180 187
181 ret = mpi_write_to_sgl(s, req->dst, &req->dst_len, &sign); 188 ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
182 if (ret) 189 if (ret)
183 goto err_free_m; 190 goto err_free_m;
184 191
@@ -195,7 +202,7 @@ err_free_s:
195static int rsa_verify(struct akcipher_request *req) 202static int rsa_verify(struct akcipher_request *req)
196{ 203{
197 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 204 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
198 const struct rsa_key *pkey = rsa_get_key(tfm); 205 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
199 MPI s, m = mpi_alloc(0); 206 MPI s, m = mpi_alloc(0);
200 int ret = 0; 207 int ret = 0;
201 int sign; 208 int sign;
@@ -219,7 +226,7 @@ static int rsa_verify(struct akcipher_request *req)
219 if (ret) 226 if (ret)
220 goto err_free_s; 227 goto err_free_s;
221 228
222 ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign); 229 ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
223 if (ret) 230 if (ret)
224 goto err_free_s; 231 goto err_free_s;
225 232
@@ -233,6 +240,16 @@ err_free_m:
233 return ret; 240 return ret;
234} 241}
235 242
243static void rsa_free_mpi_key(struct rsa_mpi_key *key)
244{
245 mpi_free(key->d);
246 mpi_free(key->e);
247 mpi_free(key->n);
248 key->d = NULL;
249 key->e = NULL;
250 key->n = NULL;
251}
252
236static int rsa_check_key_length(unsigned int len) 253static int rsa_check_key_length(unsigned int len)
237{ 254{
238 switch (len) { 255 switch (len) {
@@ -251,49 +268,87 @@ static int rsa_check_key_length(unsigned int len)
251static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, 268static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
252 unsigned int keylen) 269 unsigned int keylen)
253{ 270{
254 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 271 struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
272 struct rsa_key raw_key = {0};
255 int ret; 273 int ret;
256 274
257 ret = rsa_parse_pub_key(pkey, key, keylen); 275 /* Free the old MPI key if any */
276 rsa_free_mpi_key(mpi_key);
277
278 ret = rsa_parse_pub_key(&raw_key, key, keylen);
258 if (ret) 279 if (ret)
259 return ret; 280 return ret;
260 281
261 if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { 282 mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
262 rsa_free_key(pkey); 283 if (!mpi_key->e)
263 ret = -EINVAL; 284 goto err;
285
286 mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
287 if (!mpi_key->n)
288 goto err;
289
290 if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
291 rsa_free_mpi_key(mpi_key);
292 return -EINVAL;
264 } 293 }
265 return ret; 294
295 return 0;
296
297err:
298 rsa_free_mpi_key(mpi_key);
299 return -ENOMEM;
266} 300}
267 301
268static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, 302static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
269 unsigned int keylen) 303 unsigned int keylen)
270{ 304{
271 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 305 struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
306 struct rsa_key raw_key = {0};
272 int ret; 307 int ret;
273 308
274 ret = rsa_parse_priv_key(pkey, key, keylen); 309 /* Free the old MPI key if any */
310 rsa_free_mpi_key(mpi_key);
311
312 ret = rsa_parse_priv_key(&raw_key, key, keylen);
275 if (ret) 313 if (ret)
276 return ret; 314 return ret;
277 315
278 if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { 316 mpi_key->d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
279 rsa_free_key(pkey); 317 if (!mpi_key->d)
280 ret = -EINVAL; 318 goto err;
319
320 mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
321 if (!mpi_key->e)
322 goto err;
323
324 mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
325 if (!mpi_key->n)
326 goto err;
327
328 if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
329 rsa_free_mpi_key(mpi_key);
330 return -EINVAL;
281 } 331 }
282 return ret; 332
333 return 0;
334
335err:
336 rsa_free_mpi_key(mpi_key);
337 return -ENOMEM;
283} 338}
284 339
285static int rsa_max_size(struct crypto_akcipher *tfm) 340static int rsa_max_size(struct crypto_akcipher *tfm)
286{ 341{
287 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 342 struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
288 343
289 return pkey->n ? mpi_get_size(pkey->n) : -EINVAL; 344 return pkey->n ? mpi_get_size(pkey->n) : -EINVAL;
290} 345}
291 346
292static void rsa_exit_tfm(struct crypto_akcipher *tfm) 347static void rsa_exit_tfm(struct crypto_akcipher *tfm)
293{ 348{
294 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 349 struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
295 350
296 rsa_free_key(pkey); 351 rsa_free_mpi_key(pkey);
297} 352}
298 353
299static struct akcipher_alg rsa = { 354static struct akcipher_alg rsa = {
@@ -310,7 +365,7 @@ static struct akcipher_alg rsa = {
310 .cra_driver_name = "rsa-generic", 365 .cra_driver_name = "rsa-generic",
311 .cra_priority = 100, 366 .cra_priority = 100,
312 .cra_module = THIS_MODULE, 367 .cra_module = THIS_MODULE,
313 .cra_ctxsize = sizeof(struct rsa_key), 368 .cra_ctxsize = sizeof(struct rsa_mpi_key),
314 }, 369 },
315}; 370};
316 371
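A minimal caller-side sketch of the reworked rsa-generic flow above (the
function name and key buffer are illustrative, not part of the patch):
rsa_set_pub_key() now parses the BER blob into raw pointers and converts
them to MPIs inside the tfm context, so repeated setkey calls free the
previous MPIs first via rsa_free_mpi_key().

        #include <crypto/akcipher.h>
        #include <linux/err.h>
        #include <linux/kernel.h>

        static int example_rsa_load_key(const void *ber, unsigned int ber_len)
        {
                struct crypto_akcipher *tfm;
                int ret;

                tfm = crypto_alloc_akcipher("rsa", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                /* parses the BER blob, then builds the e/n MPIs in the context */
                ret = crypto_akcipher_set_pub_key(tfm, ber, ber_len);
                if (!ret)
                        pr_info("rsa: modulus is %d bytes\n",
                                crypto_akcipher_maxsize(tfm));

                crypto_free_akcipher(tfm);
                return ret;
        }
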
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index d226f48d0907..4df6451e7543 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -22,20 +22,29 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
22 const void *value, size_t vlen) 22 const void *value, size_t vlen)
23{ 23{
24 struct rsa_key *key = context; 24 struct rsa_key *key = context;
25 const u8 *ptr = value;
26 size_t n_sz = vlen;
25 27
26 key->n = mpi_read_raw_data(value, vlen); 28 /* invalid key provided */
27 29 if (!value || !vlen)
28 if (!key->n)
29 return -ENOMEM;
30
31 /* In FIPS mode only allow key size 2K & 3K */
32 if (fips_enabled && (mpi_get_size(key->n) != 256 &&
33 mpi_get_size(key->n) != 384)) {
34 pr_err("RSA: key size not allowed in FIPS mode\n");
35 mpi_free(key->n);
36 key->n = NULL;
37 return -EINVAL; 30 return -EINVAL;
31
32 if (fips_enabled) {
33 while (!*ptr && n_sz) {
34 ptr++;
35 n_sz--;
36 }
37
38 /* In FIPS mode only allow key size 2K & 3K */
39 if (n_sz != 256 && n_sz != 384) {
40 pr_err("RSA: key size not allowed in FIPS mode\n");
41 return -EINVAL;
42 }
38 } 43 }
44
45 key->n = value;
46 key->n_sz = vlen;
47
39 return 0; 48 return 0;
40} 49}
41 50
@@ -44,10 +53,12 @@ int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
44{ 53{
45 struct rsa_key *key = context; 54 struct rsa_key *key = context;
46 55
47 key->e = mpi_read_raw_data(value, vlen); 56 /* invalid key provided */
57 if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
58 return -EINVAL;
48 59
49 if (!key->e) 60 key->e = value;
50 return -ENOMEM; 61 key->e_sz = vlen;
51 62
52 return 0; 63 return 0;
53} 64}
@@ -57,46 +68,95 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
57{ 68{
58 struct rsa_key *key = context; 69 struct rsa_key *key = context;
59 70
60 key->d = mpi_read_raw_data(value, vlen); 71 /* invalid key provided */
72 if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
73 return -EINVAL;
61 74
62 if (!key->d) 75 key->d = value;
63 return -ENOMEM; 76 key->d_sz = vlen;
64 77
65 /* In FIPS mode only allow key size 2K & 3K */ 78 return 0;
66 if (fips_enabled && (mpi_get_size(key->d) != 256 && 79}
67 mpi_get_size(key->d) != 384)) { 80
68 pr_err("RSA: key size not allowed in FIPS mode\n"); 81int rsa_get_p(void *context, size_t hdrlen, unsigned char tag,
69 mpi_free(key->d); 82 const void *value, size_t vlen)
70 key->d = NULL; 83{
84 struct rsa_key *key = context;
85
86 /* invalid key provided */
87 if (!value || !vlen || vlen > key->n_sz)
71 return -EINVAL; 88 return -EINVAL;
72 } 89
90 key->p = value;
91 key->p_sz = vlen;
92
73 return 0; 93 return 0;
74} 94}
75 95
76static void free_mpis(struct rsa_key *key) 96int rsa_get_q(void *context, size_t hdrlen, unsigned char tag,
97 const void *value, size_t vlen)
77{ 98{
78 mpi_free(key->n); 99 struct rsa_key *key = context;
79 mpi_free(key->e); 100
80 mpi_free(key->d); 101 /* invalid key provided */
81 key->n = NULL; 102 if (!value || !vlen || vlen > key->n_sz)
82 key->e = NULL; 103 return -EINVAL;
83 key->d = NULL; 104
105 key->q = value;
106 key->q_sz = vlen;
107
108 return 0;
84} 109}
85 110
86/** 111int rsa_get_dp(void *context, size_t hdrlen, unsigned char tag,
87 * rsa_free_key() - frees rsa key allocated by rsa_parse_key() 112 const void *value, size_t vlen)
88 * 113{
89 * @rsa_key: struct rsa_key key representation 114 struct rsa_key *key = context;
90 */ 115
91void rsa_free_key(struct rsa_key *key) 116 /* invalid key provided */
117 if (!value || !vlen || vlen > key->n_sz)
118 return -EINVAL;
119
120 key->dp = value;
121 key->dp_sz = vlen;
122
123 return 0;
124}
125
126int rsa_get_dq(void *context, size_t hdrlen, unsigned char tag,
127 const void *value, size_t vlen)
92{ 128{
93 free_mpis(key); 129 struct rsa_key *key = context;
130
131 /* invalid key provided */
132 if (!value || !vlen || vlen > key->n_sz)
133 return -EINVAL;
134
135 key->dq = value;
136 key->dq_sz = vlen;
137
138 return 0;
139}
140
141int rsa_get_qinv(void *context, size_t hdrlen, unsigned char tag,
142 const void *value, size_t vlen)
143{
144 struct rsa_key *key = context;
145
146 /* invalid key provided */
147 if (!value || !vlen || vlen > key->n_sz)
148 return -EINVAL;
149
150 key->qinv = value;
151 key->qinv_sz = vlen;
152
153 return 0;
94} 154}
95EXPORT_SYMBOL_GPL(rsa_free_key);
96 155
97/** 156/**
98 * rsa_parse_pub_key() - extracts an rsa public key from BER encoded buffer 157 * rsa_parse_pub_key() - decodes the BER encoded buffer and stores in the
99 * and stores it in the provided struct rsa_key 158 * provided struct rsa_key, pointers to the raw key as is,
159 * so that the caller can copy it or MPI parse it, etc.
100 * 160 *
101 * @rsa_key: struct rsa_key key representation 161 * @rsa_key: struct rsa_key key representation
102 * @key: key in BER format 162 * @key: key in BER format
@@ -107,23 +167,15 @@ EXPORT_SYMBOL_GPL(rsa_free_key);
107int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, 167int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
108 unsigned int key_len) 168 unsigned int key_len)
109{ 169{
110 int ret; 170 return asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
111
112 free_mpis(rsa_key);
113 ret = asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
114 if (ret < 0)
115 goto error;
116
117 return 0;
118error:
119 free_mpis(rsa_key);
120 return ret;
121} 171}
122EXPORT_SYMBOL_GPL(rsa_parse_pub_key); 172EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
123 173
124/** 174/**
125 * rsa_parse_pub_key() - extracts an rsa private key from BER encoded buffer 175 * rsa_parse_priv_key() - decodes the BER encoded buffer and stores in the
126 * and stores it in the provided struct rsa_key 176 * provided struct rsa_key, pointers to the raw key
177 * as is, so that the caller can copy it or MPI parse it,
178 * etc.
127 * 179 *
128 * @rsa_key: struct rsa_key key representation 180 * @rsa_key: struct rsa_key key representation
129 * @key: key in BER format 181 * @key: key in BER format
@@ -134,16 +186,6 @@ EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
134int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, 186int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
135 unsigned int key_len) 187 unsigned int key_len)
136{ 188{
137 int ret; 189 return asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
138
139 free_mpis(rsa_key);
140 ret = asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
141 if (ret < 0)
142 goto error;
143
144 return 0;
145error:
146 free_mpis(rsa_key);
147 return ret;
148} 190}
149EXPORT_SYMBOL_GPL(rsa_parse_priv_key); 191EXPORT_SYMBOL_GPL(rsa_parse_priv_key);
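With the helper no longer allocating MPIs, struct rsa_key only holds
pointers into the caller's BER buffer, so the buffer must stay alive while
the raw key is in use and there is nothing to free afterwards. A short
consumer sketch (hypothetical function name):

        #include <crypto/internal/rsa.h>
        #include <linux/mpi.h>

        static int example_read_modulus(const void *ber, unsigned int ber_len)
        {
                struct rsa_key raw_key = {};
                MPI n;
                int ret;

                ret = rsa_parse_pub_key(&raw_key, ber, ber_len);
                if (ret)
                        return ret;

                /* raw_key.n points into @ber; convert or copy as needed */
                n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
                if (!n)
                        return -ENOMEM;

                mpi_free(n);
                return 0;
        }
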
diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1
index 731aea5edb0c..4ce06758e8af 100644
--- a/crypto/rsaprivkey.asn1
+++ b/crypto/rsaprivkey.asn1
@@ -3,9 +3,9 @@ RsaPrivKey ::= SEQUENCE {
3 n INTEGER ({ rsa_get_n }), 3 n INTEGER ({ rsa_get_n }),
4 e INTEGER ({ rsa_get_e }), 4 e INTEGER ({ rsa_get_e }),
5 d INTEGER ({ rsa_get_d }), 5 d INTEGER ({ rsa_get_d }),
6 prime1 INTEGER, 6 prime1 INTEGER ({ rsa_get_p }),
7 prime2 INTEGER, 7 prime2 INTEGER ({ rsa_get_q }),
8 exponent1 INTEGER, 8 exponent1 INTEGER ({ rsa_get_dp }),
9 exponent2 INTEGER, 9 exponent2 INTEGER ({ rsa_get_dq }),
10 coefficient INTEGER 10 coefficient INTEGER ({ rsa_get_qinv })
11} 11}
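For reference, each action tag above compiles into a callback slot in the
generated decoder, so a full private-key parse is a single decoder call
(sketch; the generated header name follows the in-tree pattern):

        #include <crypto/internal/rsa.h>
        #include <linux/asn1_decoder.h>
        #include "rsaprivkey.asn1.h"    /* generated: rsaprivkey_decoder */

        static int example_decode_priv(struct rsa_key *key,
                                       const void *ber, size_t ber_len)
        {
                /* fires rsa_get_n/e/d/p/q/dp/dq/qinv for each INTEGER */
                return asn1_ber_decoder(&rsaprivkey_decoder, key, ber, ber_len);
        }
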
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c5e128..52ce17a3dd63 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -18,8 +18,6 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/pagemap.h>
22#include <linux/highmem.h>
23#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
24 22
25static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) 23static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
@@ -30,53 +28,6 @@ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
30 memcpy(dst, src, nbytes); 28 memcpy(dst, src, nbytes);
31} 29}
32 30
33void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
34{
35 walk->sg = sg;
36
37 BUG_ON(!sg->length);
38
39 walk->offset = sg->offset;
40}
41EXPORT_SYMBOL_GPL(scatterwalk_start);
42
43void *scatterwalk_map(struct scatter_walk *walk)
44{
45 return kmap_atomic(scatterwalk_page(walk)) +
46 offset_in_page(walk->offset);
47}
48EXPORT_SYMBOL_GPL(scatterwalk_map);
49
50static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
51 unsigned int more)
52{
53 if (out) {
54 struct page *page;
55
56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
57 /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
58 * PageSlab cannot be optimised away per se due to
59 * use of volatile pointer.
60 */
61 if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
62 flush_dcache_page(page);
63 }
64
65 if (more) {
66 walk->offset += PAGE_SIZE - 1;
67 walk->offset &= PAGE_MASK;
68 if (walk->offset >= walk->sg->offset + walk->sg->length)
69 scatterwalk_start(walk, sg_next(walk->sg));
70 }
71}
72
73void scatterwalk_done(struct scatter_walk *walk, int out, int more)
74{
75 if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
76 scatterwalk_pagedone(walk, out, more);
77}
78EXPORT_SYMBOL_GPL(scatterwalk_done);
79
80void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 31void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
81 size_t nbytes, int out) 32 size_t nbytes, int out)
82{ 33{
@@ -87,9 +38,11 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
87 if (len_this_page > nbytes) 38 if (len_this_page > nbytes)
88 len_this_page = nbytes; 39 len_this_page = nbytes;
89 40
90 vaddr = scatterwalk_map(walk); 41 if (out != 2) {
91 memcpy_dir(buf, vaddr, len_this_page, out); 42 vaddr = scatterwalk_map(walk);
92 scatterwalk_unmap(vaddr); 43 memcpy_dir(buf, vaddr, len_this_page, out);
44 scatterwalk_unmap(vaddr);
45 }
93 46
94 scatterwalk_advance(walk, len_this_page); 47 scatterwalk_advance(walk, len_this_page);
95 48
@@ -99,7 +52,7 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
99 buf += len_this_page; 52 buf += len_this_page;
100 nbytes -= len_this_page; 53 nbytes -= len_this_page;
101 54
102 scatterwalk_pagedone(walk, out, 1); 55 scatterwalk_pagedone(walk, out & 1, 1);
103 } 56 }
104} 57}
105EXPORT_SYMBOL_GPL(scatterwalk_copychunks); 58EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
@@ -125,28 +78,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
125} 78}
126EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); 79EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
127 80
128int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
129{
130 int offset = 0, n = 0;
131
132 /* num_bytes is too small */
133 if (num_bytes < sg->length)
134 return -1;
135
136 do {
137 offset += sg->length;
138 n++;
139 sg = sg_next(sg);
140
141 /* num_bytes is too large */
142 if (unlikely(!sg && (num_bytes < offset)))
143 return -1;
144 } while (sg && (num_bytes > offset));
145
146 return n;
147}
148EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
149
150struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], 81struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
151 struct scatterlist *src, 82 struct scatterlist *src,
152 unsigned int len) 83 unsigned int len)
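The "out" convention in scatterwalk_copychunks() after this change is 0 for
reading from the scatterlist, 1 for writing to it, and 2 for advancing the
walk without touching the data (the "out & 1" keeps the dcache flush logic
correct for the skip case). A short sketch of the skip usage (hypothetical
function name):

        #include <crypto/scatterwalk.h>
        #include <linux/types.h>

        static void example_skip_then_read(struct scatter_walk *walk,
                                           unsigned int skip,
                                           u8 *buf, unsigned int len)
        {
                scatterwalk_copychunks(NULL, walk, skip, 2);    /* advance only */
                scatterwalk_copychunks(buf, walk, len, 0);      /* then read */
        }
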
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 15a749a5cab7..c7049231861f 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -14,50 +14,17 @@
14 */ 14 */
15 15
16#include <crypto/internal/geniv.h> 16#include <crypto/internal/geniv.h>
17#include <crypto/internal/skcipher.h>
18#include <crypto/rng.h>
19#include <crypto/scatterwalk.h> 17#include <crypto/scatterwalk.h>
18#include <crypto/skcipher.h>
20#include <linux/err.h> 19#include <linux/err.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/kernel.h> 21#include <linux/kernel.h>
23#include <linux/module.h> 22#include <linux/module.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/spinlock.h>
26#include <linux/string.h> 24#include <linux/string.h>
27 25
28struct seqiv_ctx {
29 spinlock_t lock;
30 u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
31};
32
33static void seqiv_free(struct crypto_instance *inst); 26static void seqiv_free(struct crypto_instance *inst);
34 27
35static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
36{
37 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
38 struct crypto_ablkcipher *geniv;
39
40 if (err == -EINPROGRESS)
41 return;
42
43 if (err)
44 goto out;
45
46 geniv = skcipher_givcrypt_reqtfm(req);
47 memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
48
49out:
50 kfree(subreq->info);
51}
52
53static void seqiv_complete(struct crypto_async_request *base, int err)
54{
55 struct skcipher_givcrypt_request *req = base->data;
56
57 seqiv_complete2(req, err);
58 skcipher_givcrypt_complete(req, err);
59}
60
61static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) 28static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
62{ 29{
63 struct aead_request *subreq = aead_request_ctx(req); 30 struct aead_request *subreq = aead_request_ctx(req);
@@ -85,65 +52,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
85 aead_request_complete(req, err); 52 aead_request_complete(req, err);
86} 53}
87 54
88static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
89 unsigned int ivsize)
90{
91 unsigned int len = ivsize;
92
93 if (ivsize > sizeof(u64)) {
94 memset(info, 0, ivsize - sizeof(u64));
95 len = sizeof(u64);
96 }
97 seq = cpu_to_be64(seq);
98 memcpy(info + ivsize - len, &seq, len);
99 crypto_xor(info, ctx->salt, ivsize);
100}
101
102static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
103{
104 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
105 struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
106 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
107 crypto_completion_t compl;
108 void *data;
109 u8 *info;
110 unsigned int ivsize;
111 int err;
112
113 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
114
115 compl = req->creq.base.complete;
116 data = req->creq.base.data;
117 info = req->creq.info;
118
119 ivsize = crypto_ablkcipher_ivsize(geniv);
120
121 if (unlikely(!IS_ALIGNED((unsigned long)info,
122 crypto_ablkcipher_alignmask(geniv) + 1))) {
123 info = kmalloc(ivsize, req->creq.base.flags &
124 CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
125 GFP_ATOMIC);
126 if (!info)
127 return -ENOMEM;
128
129 compl = seqiv_complete;
130 data = req;
131 }
132
133 ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
134 data);
135 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
136 req->creq.nbytes, info);
137
138 seqiv_geniv(ctx, info, req->seq, ivsize);
139 memcpy(req->giv, info, ivsize);
140
141 err = crypto_ablkcipher_encrypt(subreq);
142 if (unlikely(info != req->creq.info))
143 seqiv_complete2(req, err);
144 return err;
145}
146
147static int seqiv_aead_encrypt(struct aead_request *req) 55static int seqiv_aead_encrypt(struct aead_request *req)
148{ 56{
149 struct crypto_aead *geniv = crypto_aead_reqtfm(req); 57 struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -165,12 +73,16 @@ static int seqiv_aead_encrypt(struct aead_request *req)
165 info = req->iv; 73 info = req->iv;
166 74
167 if (req->src != req->dst) { 75 if (req->src != req->dst) {
168 struct blkcipher_desc desc = { 76 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
169 .tfm = ctx->null, 77
170 }; 78 skcipher_request_set_tfm(nreq, ctx->sknull);
79 skcipher_request_set_callback(nreq, req->base.flags,
80 NULL, NULL);
81 skcipher_request_set_crypt(nreq, req->src, req->dst,
82 req->assoclen + req->cryptlen,
83 NULL);
171 84
172 err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, 85 err = crypto_skcipher_encrypt(nreq);
173 req->assoclen + req->cryptlen);
174 if (err) 86 if (err)
175 return err; 87 return err;
176 } 88 }
@@ -229,62 +141,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
229 return crypto_aead_decrypt(subreq); 141 return crypto_aead_decrypt(subreq);
230} 142}
231 143
232static int seqiv_init(struct crypto_tfm *tfm)
233{
234 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
235 struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
236 int err;
237
238 spin_lock_init(&ctx->lock);
239
240 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
241
242 err = 0;
243 if (!crypto_get_default_rng()) {
244 crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
245 err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
246 crypto_ablkcipher_ivsize(geniv));
247 crypto_put_default_rng();
248 }
249
250 return err ?: skcipher_geniv_init(tfm);
251}
252
253static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
254 struct rtattr **tb)
255{
256 struct crypto_instance *inst;
257 int err;
258
259 inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
260
261 if (IS_ERR(inst))
262 return PTR_ERR(inst);
263
264 err = -EINVAL;
265 if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
266 goto free_inst;
267
268 inst->alg.cra_init = seqiv_init;
269 inst->alg.cra_exit = skcipher_geniv_exit;
270
271 inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
272 inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
273
274 inst->alg.cra_alignmask |= __alignof__(u32) - 1;
275
276 err = crypto_register_instance(tmpl, inst);
277 if (err)
278 goto free_inst;
279
280out:
281 return err;
282
283free_inst:
284 skcipher_geniv_free(inst);
285 goto out;
286}
287
288static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) 144static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
289{ 145{
290 struct aead_instance *inst; 146 struct aead_instance *inst;
@@ -330,26 +186,20 @@ free_inst:
330static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) 186static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
331{ 187{
332 struct crypto_attr_type *algt; 188 struct crypto_attr_type *algt;
333 int err;
334 189
335 algt = crypto_get_attr_type(tb); 190 algt = crypto_get_attr_type(tb);
336 if (IS_ERR(algt)) 191 if (IS_ERR(algt))
337 return PTR_ERR(algt); 192 return PTR_ERR(algt);
338 193
339 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) 194 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
340 err = seqiv_ablkcipher_create(tmpl, tb); 195 return -EINVAL;
341 else
342 err = seqiv_aead_create(tmpl, tb);
343 196
344 return err; 197 return seqiv_aead_create(tmpl, tb);
345} 198}
346 199
347static void seqiv_free(struct crypto_instance *inst) 200static void seqiv_free(struct crypto_instance *inst)
348{ 201{
349 if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) 202 aead_geniv_free(aead_instance(inst));
350 skcipher_geniv_free(inst);
351 else
352 aead_geniv_free(aead_instance(inst));
353} 203}
354 204
355static struct crypto_template seqiv_tmpl = { 205static struct crypto_template seqiv_tmpl = {
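The src != dst copy above now goes through a null skcipher kept in the
geniv context (ctx->sknull) with an on-stack request instead of a
blkcipher descriptor. A standalone sketch of that pattern, with the tfm
allocated locally for illustration:

        #include <crypto/skcipher.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>

        static int example_null_copy(struct scatterlist *src,
                                     struct scatterlist *dst,
                                     unsigned int len)
        {
                struct crypto_skcipher *sknull;
                int err;

                sknull = crypto_alloc_skcipher("ecb(cipher_null)", 0,
                                               CRYPTO_ALG_ASYNC);
                if (IS_ERR(sknull))
                        return PTR_ERR(sknull);

                {
                        SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

                        skcipher_request_set_tfm(nreq, sknull);
                        skcipher_request_set_callback(nreq, 0, NULL, NULL);
                        skcipher_request_set_crypt(nreq, src, dst, len, NULL);
                        /* the null cipher makes this a scatterlist memcpy */
                        err = crypto_skcipher_encrypt(nreq);
                }

                crypto_free_skcipher(sknull);
                return err;
        }
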
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
new file mode 100644
index 000000000000..7e8ed96236ce
--- /dev/null
+++ b/crypto/sha3_generic.c
@@ -0,0 +1,300 @@
1/*
2 * Cryptographic API.
3 *
4 * SHA-3, as specified in
5 * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
6 *
7 * SHA-3 code by Jeff Garzik <jeff@garzik.org>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15#include <crypto/internal/hash.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <crypto/sha3.h>
20#include <asm/byteorder.h>
21
22#define KECCAK_ROUNDS 24
23
24#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
25
26static const u64 keccakf_rndc[24] = {
27 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
28 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
29 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
30 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
31 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
32 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
33 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
34 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
35};
36
37static const int keccakf_rotc[24] = {
38 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
39 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
40};
41
42static const int keccakf_piln[24] = {
43 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
44 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
45};
46
47/* update the state with given number of rounds */
48
49static void keccakf(u64 st[25])
50{
51 int i, j, round;
52 u64 t, bc[5];
53
54 for (round = 0; round < KECCAK_ROUNDS; round++) {
55
56 /* Theta */
57 for (i = 0; i < 5; i++)
58 bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
59 ^ st[i + 20];
60
61 for (i = 0; i < 5; i++) {
62 t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
63 for (j = 0; j < 25; j += 5)
64 st[j + i] ^= t;
65 }
66
67 /* Rho Pi */
68 t = st[1];
69 for (i = 0; i < 24; i++) {
70 j = keccakf_piln[i];
71 bc[0] = st[j];
72 st[j] = ROTL64(t, keccakf_rotc[i]);
73 t = bc[0];
74 }
75
76 /* Chi */
77 for (j = 0; j < 25; j += 5) {
78 for (i = 0; i < 5; i++)
79 bc[i] = st[j + i];
80 for (i = 0; i < 5; i++)
81 st[j + i] ^= (~bc[(i + 1) % 5]) &
82 bc[(i + 2) % 5];
83 }
84
85 /* Iota */
86 st[0] ^= keccakf_rndc[round];
87 }
88}
89
90static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
91{
92 memset(sctx, 0, sizeof(*sctx));
93 sctx->md_len = digest_sz;
94 sctx->rsiz = 200 - 2 * digest_sz;
95 sctx->rsizw = sctx->rsiz / 8;
96}
97
98static int sha3_224_init(struct shash_desc *desc)
99{
100 struct sha3_state *sctx = shash_desc_ctx(desc);
101
102 sha3_init(sctx, SHA3_224_DIGEST_SIZE);
103 return 0;
104}
105
106static int sha3_256_init(struct shash_desc *desc)
107{
108 struct sha3_state *sctx = shash_desc_ctx(desc);
109
110 sha3_init(sctx, SHA3_256_DIGEST_SIZE);
111 return 0;
112}
113
114static int sha3_384_init(struct shash_desc *desc)
115{
116 struct sha3_state *sctx = shash_desc_ctx(desc);
117
118 sha3_init(sctx, SHA3_384_DIGEST_SIZE);
119 return 0;
120}
121
122static int sha3_512_init(struct shash_desc *desc)
123{
124 struct sha3_state *sctx = shash_desc_ctx(desc);
125
126 sha3_init(sctx, SHA3_512_DIGEST_SIZE);
127 return 0;
128}
129
130static int sha3_update(struct shash_desc *desc, const u8 *data,
131 unsigned int len)
132{
133 struct sha3_state *sctx = shash_desc_ctx(desc);
134 unsigned int done;
135 const u8 *src;
136
137 done = 0;
138 src = data;
139
140 if ((sctx->partial + len) > (sctx->rsiz - 1)) {
141 if (sctx->partial) {
142 done = -sctx->partial;
143 memcpy(sctx->buf + sctx->partial, data,
144 done + sctx->rsiz);
145 src = sctx->buf;
146 }
147
148 do {
149 unsigned int i;
150
151 for (i = 0; i < sctx->rsizw; i++)
152 sctx->st[i] ^= ((u64 *) src)[i];
153 keccakf(sctx->st);
154
155 done += sctx->rsiz;
156 src = data + done;
157 } while (done + (sctx->rsiz - 1) < len);
158
159 sctx->partial = 0;
160 }
161 memcpy(sctx->buf + sctx->partial, src, len - done);
162 sctx->partial += (len - done);
163
164 return 0;
165}
166
167static int sha3_final(struct shash_desc *desc, u8 *out)
168{
169 struct sha3_state *sctx = shash_desc_ctx(desc);
170 unsigned int i, inlen = sctx->partial;
171
172 sctx->buf[inlen++] = 0x06;
173 memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
174 sctx->buf[sctx->rsiz - 1] |= 0x80;
175
176 for (i = 0; i < sctx->rsizw; i++)
177 sctx->st[i] ^= ((u64 *) sctx->buf)[i];
178
179 keccakf(sctx->st);
180
181 for (i = 0; i < sctx->rsizw; i++)
182 sctx->st[i] = cpu_to_le64(sctx->st[i]);
183
184 memcpy(out, sctx->st, sctx->md_len);
185
186 memset(sctx, 0, sizeof(*sctx));
187 return 0;
188}
189
190static struct shash_alg sha3_224 = {
191 .digestsize = SHA3_224_DIGEST_SIZE,
192 .init = sha3_224_init,
193 .update = sha3_update,
194 .final = sha3_final,
195 .descsize = sizeof(struct sha3_state),
196 .base = {
197 .cra_name = "sha3-224",
198 .cra_driver_name = "sha3-224-generic",
199 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
200 .cra_blocksize = SHA3_224_BLOCK_SIZE,
201 .cra_module = THIS_MODULE,
202 }
203};
204
205static struct shash_alg sha3_256 = {
206 .digestsize = SHA3_256_DIGEST_SIZE,
207 .init = sha3_256_init,
208 .update = sha3_update,
209 .final = sha3_final,
210 .descsize = sizeof(struct sha3_state),
211 .base = {
212 .cra_name = "sha3-256",
213 .cra_driver_name = "sha3-256-generic",
214 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
215 .cra_blocksize = SHA3_256_BLOCK_SIZE,
216 .cra_module = THIS_MODULE,
217 }
218};
219
220static struct shash_alg sha3_384 = {
221 .digestsize = SHA3_384_DIGEST_SIZE,
222 .init = sha3_384_init,
223 .update = sha3_update,
224 .final = sha3_final,
225 .descsize = sizeof(struct sha3_state),
226 .base = {
227 .cra_name = "sha3-384",
228 .cra_driver_name = "sha3-384-generic",
229 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
230 .cra_blocksize = SHA3_384_BLOCK_SIZE,
231 .cra_module = THIS_MODULE,
232 }
233};
234
235static struct shash_alg sha3_512 = {
236 .digestsize = SHA3_512_DIGEST_SIZE,
237 .init = sha3_512_init,
238 .update = sha3_update,
239 .final = sha3_final,
240 .descsize = sizeof(struct sha3_state),
241 .base = {
242 .cra_name = "sha3-512",
243 .cra_driver_name = "sha3-512-generic",
244 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
245 .cra_blocksize = SHA3_512_BLOCK_SIZE,
246 .cra_module = THIS_MODULE,
247 }
248};
249
250static int __init sha3_generic_mod_init(void)
251{
252 int ret;
253
254 ret = crypto_register_shash(&sha3_224);
255 if (ret < 0)
256 goto err_out;
257 ret = crypto_register_shash(&sha3_256);
258 if (ret < 0)
259 goto err_out_224;
260 ret = crypto_register_shash(&sha3_384);
261 if (ret < 0)
262 goto err_out_256;
263 ret = crypto_register_shash(&sha3_512);
264 if (ret < 0)
265 goto err_out_384;
266
267 return 0;
268
269err_out_384:
270 crypto_unregister_shash(&sha3_384);
271err_out_256:
272 crypto_unregister_shash(&sha3_256);
273err_out_224:
274 crypto_unregister_shash(&sha3_224);
275err_out:
276 return ret;
277}
278
279static void __exit sha3_generic_mod_fini(void)
280{
281 crypto_unregister_shash(&sha3_224);
282 crypto_unregister_shash(&sha3_256);
283 crypto_unregister_shash(&sha3_384);
284 crypto_unregister_shash(&sha3_512);
285}
286
287module_init(sha3_generic_mod_init);
288module_exit(sha3_generic_mod_fini);
289
290MODULE_LICENSE("GPL");
291MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
292
293MODULE_ALIAS_CRYPTO("sha3-224");
294MODULE_ALIAS_CRYPTO("sha3-224-generic");
295MODULE_ALIAS_CRYPTO("sha3-256");
296MODULE_ALIAS_CRYPTO("sha3-256-generic");
297MODULE_ALIAS_CRYPTO("sha3-384");
298MODULE_ALIAS_CRYPTO("sha3-384-generic");
299MODULE_ALIAS_CRYPTO("sha3-512");
300MODULE_ALIAS_CRYPTO("sha3-512-generic");
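A one-shot digest through the shash interface registered above (sketch;
buffer names are placeholders, and out must hold 32 bytes for sha3-256):

        #include <crypto/hash.h>
        #include <linux/err.h>

        static int example_sha3_256(const u8 *data, unsigned int len, u8 *out)
        {
                struct crypto_shash *tfm;
                int ret;

                tfm = crypto_alloc_shash("sha3-256", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                {
                        SHASH_DESC_ON_STACK(desc, tfm);

                        desc->tfm = tfm;
                        desc->flags = 0;
                        /* init + update + final in one call */
                        ret = crypto_shash_digest(desc, data, len, out);
                }

                crypto_free_shash(tfm);
                return ret;
        }
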
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 69230e9d4ac9..f7d0018dcaee 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -16,7 +16,11 @@
16 16
17#include <crypto/internal/skcipher.h> 17#include <crypto/internal/skcipher.h>
18#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/cryptouser.h>
19#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/rtnetlink.h>
22#include <linux/seq_file.h>
23#include <net/netlink.h>
20 24
21#include "internal.h" 25#include "internal.h"
22 26
@@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
25 if (alg->cra_type == &crypto_blkcipher_type) 29 if (alg->cra_type == &crypto_blkcipher_type)
26 return sizeof(struct crypto_blkcipher *); 30 return sizeof(struct crypto_blkcipher *);
27 31
28 BUG_ON(alg->cra_type != &crypto_ablkcipher_type && 32 if (alg->cra_type == &crypto_ablkcipher_type ||
29 alg->cra_type != &crypto_givcipher_type); 33 alg->cra_type == &crypto_givcipher_type)
34 return sizeof(struct crypto_ablkcipher *);
30 35
31 return sizeof(struct crypto_ablkcipher *); 36 return crypto_alg_extsize(alg);
32} 37}
33 38
34static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, 39static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
@@ -216,26 +221,118 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
216 return 0; 221 return 0;
217} 222}
218 223
224static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
225{
226 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
227 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
228
229 alg->exit(skcipher);
230}
231
219static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) 232static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
220{ 233{
234 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
235 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
236
221 if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) 237 if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
222 return crypto_init_skcipher_ops_blkcipher(tfm); 238 return crypto_init_skcipher_ops_blkcipher(tfm);
223 239
224 BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type && 240 if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
225 tfm->__crt_alg->cra_type != &crypto_givcipher_type); 241 tfm->__crt_alg->cra_type == &crypto_givcipher_type)
242 return crypto_init_skcipher_ops_ablkcipher(tfm);
243
244 skcipher->setkey = alg->setkey;
245 skcipher->encrypt = alg->encrypt;
246 skcipher->decrypt = alg->decrypt;
247 skcipher->ivsize = alg->ivsize;
248 skcipher->keysize = alg->max_keysize;
249
250 if (alg->exit)
251 skcipher->base.exit = crypto_skcipher_exit_tfm;
226 252
227 return crypto_init_skcipher_ops_ablkcipher(tfm); 253 if (alg->init)
254 return alg->init(skcipher);
255
256 return 0;
257}
258
259static void crypto_skcipher_free_instance(struct crypto_instance *inst)
260{
261 struct skcipher_instance *skcipher =
262 container_of(inst, struct skcipher_instance, s.base);
263
264 skcipher->free(skcipher);
265}
266
267static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
268 __attribute__ ((unused));
269static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
270{
271 struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
272 base);
273
274 seq_printf(m, "type : skcipher\n");
275 seq_printf(m, "async : %s\n",
276 alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
277 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
278 seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
279 seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
280 seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
281 seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
228} 282}
229 283
284#ifdef CONFIG_NET
285static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
286{
287 struct crypto_report_blkcipher rblkcipher;
288 struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
289 base);
290
291 strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
292 strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
293
294 rblkcipher.blocksize = alg->cra_blocksize;
295 rblkcipher.min_keysize = skcipher->min_keysize;
296 rblkcipher.max_keysize = skcipher->max_keysize;
297 rblkcipher.ivsize = skcipher->ivsize;
298
299 if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
300 sizeof(struct crypto_report_blkcipher), &rblkcipher))
301 goto nla_put_failure;
302 return 0;
303
304nla_put_failure:
305 return -EMSGSIZE;
306}
307#else
308static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
309{
310 return -ENOSYS;
311}
312#endif
313
230static const struct crypto_type crypto_skcipher_type2 = { 314static const struct crypto_type crypto_skcipher_type2 = {
231 .extsize = crypto_skcipher_extsize, 315 .extsize = crypto_skcipher_extsize,
232 .init_tfm = crypto_skcipher_init_tfm, 316 .init_tfm = crypto_skcipher_init_tfm,
317 .free = crypto_skcipher_free_instance,
318#ifdef CONFIG_PROC_FS
319 .show = crypto_skcipher_show,
320#endif
321 .report = crypto_skcipher_report,
233 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 322 .maskclear = ~CRYPTO_ALG_TYPE_MASK,
234 .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, 323 .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
235 .type = CRYPTO_ALG_TYPE_BLKCIPHER, 324 .type = CRYPTO_ALG_TYPE_SKCIPHER,
236 .tfmsize = offsetof(struct crypto_skcipher, base), 325 .tfmsize = offsetof(struct crypto_skcipher, base),
237}; 326};
238 327
328int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
329 const char *name, u32 type, u32 mask)
330{
331 spawn->base.frontend = &crypto_skcipher_type2;
332 return crypto_grab_spawn(&spawn->base, name, type, mask);
333}
334EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
335
239struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, 336struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
240 u32 type, u32 mask) 337 u32 type, u32 mask)
241{ 338{
@@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
243} 340}
244EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); 341EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
245 342
343int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
344{
345 return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
346 type, mask);
347}
348EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
349
350static int skcipher_prepare_alg(struct skcipher_alg *alg)
351{
352 struct crypto_alg *base = &alg->base;
353
354 if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
355 return -EINVAL;
356
357 if (!alg->chunksize)
358 alg->chunksize = base->cra_blocksize;
359
360 base->cra_type = &crypto_skcipher_type2;
361 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
362 base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
363
364 return 0;
365}
366
367int crypto_register_skcipher(struct skcipher_alg *alg)
368{
369 struct crypto_alg *base = &alg->base;
370 int err;
371
372 err = skcipher_prepare_alg(alg);
373 if (err)
374 return err;
375
376 return crypto_register_alg(base);
377}
378EXPORT_SYMBOL_GPL(crypto_register_skcipher);
379
380void crypto_unregister_skcipher(struct skcipher_alg *alg)
381{
382 crypto_unregister_alg(&alg->base);
383}
384EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
385
386int crypto_register_skciphers(struct skcipher_alg *algs, int count)
387{
388 int i, ret;
389
390 for (i = 0; i < count; i++) {
391 ret = crypto_register_skcipher(&algs[i]);
392 if (ret)
393 goto err;
394 }
395
396 return 0;
397
398err:
399 for (--i; i >= 0; --i)
400 crypto_unregister_skcipher(&algs[i]);
401
402 return ret;
403}
404EXPORT_SYMBOL_GPL(crypto_register_skciphers);
405
406void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
407{
408 int i;
409
410 for (i = count - 1; i >= 0; --i)
411 crypto_unregister_skcipher(&algs[i]);
412}
413EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
414
415int skcipher_register_instance(struct crypto_template *tmpl,
416 struct skcipher_instance *inst)
417{
418 int err;
419
420 err = skcipher_prepare_alg(&inst->alg);
421 if (err)
422 return err;
423
424 return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
425}
426EXPORT_SYMBOL_GPL(skcipher_register_instance);
427
246MODULE_LICENSE("GPL"); 428MODULE_LICENSE("GPL");
247MODULE_DESCRIPTION("Symmetric key cipher type"); 429MODULE_DESCRIPTION("Symmetric key cipher type");
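A driver-side sketch against the skcipher_alg registration interface added
above; the names and callbacks are placeholders, and .chunksize is left at
zero so skcipher_prepare_alg() defaults it to the block size:

        #include <crypto/internal/skcipher.h>
        #include <linux/module.h>

        static int example_crypt(struct skcipher_request *req)
        {
                return -ENOSYS;         /* placeholder transform */
        }

        static struct skcipher_alg example_alg = {
                .base = {
                        .cra_name        = "ecb(example)",
                        .cra_driver_name = "ecb-example",
                        .cra_priority    = 100,
                        .cra_blocksize   = 16,
                        .cra_module      = THIS_MODULE,
                },
                .min_keysize = 16,
                .max_keysize = 32,
                .encrypt     = example_crypt,
                .decrypt     = example_crypt,
        };

        /* paired with crypto_register_skcipher(&example_alg) on init and
         * crypto_unregister_skcipher(&example_alg) on exit */
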
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 579dce071463..ae22f05d5936 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -24,6 +24,7 @@
24 24
25#include <crypto/aead.h> 25#include <crypto/aead.h>
26#include <crypto/hash.h> 26#include <crypto/hash.h>
27#include <crypto/skcipher.h>
27#include <linux/err.h> 28#include <linux/err.h>
28#include <linux/fips.h> 29#include <linux/fips.h>
29#include <linux/init.h> 30#include <linux/init.h>
@@ -72,7 +73,8 @@ static char *check[] = {
72 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 73 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
73 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", 74 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
74 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", 75 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
75 "lzo", "cts", "zlib", NULL 76 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
77 NULL
76}; 78};
77 79
78struct tcrypt_result { 80struct tcrypt_result {
@@ -91,76 +93,6 @@ static void tcrypt_complete(struct crypto_async_request *req, int err)
91 complete(&res->completion); 93 complete(&res->completion);
92} 94}
93 95
94static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
95 struct scatterlist *sg, int blen, int secs)
96{
97 unsigned long start, end;
98 int bcount;
99 int ret;
100
101 for (start = jiffies, end = start + secs * HZ, bcount = 0;
102 time_before(jiffies, end); bcount++) {
103 if (enc)
104 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
105 else
106 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
107
108 if (ret)
109 return ret;
110 }
111
112 printk("%d operations in %d seconds (%ld bytes)\n",
113 bcount, secs, (long)bcount * blen);
114 return 0;
115}
116
117static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
118 struct scatterlist *sg, int blen)
119{
120 unsigned long cycles = 0;
121 int ret = 0;
122 int i;
123
124 local_irq_disable();
125
126 /* Warm-up run. */
127 for (i = 0; i < 4; i++) {
128 if (enc)
129 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
130 else
131 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
132
133 if (ret)
134 goto out;
135 }
136
137 /* The real thing. */
138 for (i = 0; i < 8; i++) {
139 cycles_t start, end;
140
141 start = get_cycles();
142 if (enc)
143 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
144 else
145 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
146 end = get_cycles();
147
148 if (ret)
149 goto out;
150
151 cycles += end - start;
152 }
153
154out:
155 local_irq_enable();
156
157 if (ret == 0)
158 printk("1 operation in %lu cycles (%d bytes)\n",
159 (cycles + 4) / 8, blen);
160
161 return ret;
162}
163
164static inline int do_one_aead_op(struct aead_request *req, int ret) 96static inline int do_one_aead_op(struct aead_request *req, int ret)
165{ 97{
166 if (ret == -EINPROGRESS || ret == -EBUSY) { 98 if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -454,127 +386,148 @@ out_noxbuf:
454 return; 386 return;
455} 387}
456 388
457static void test_cipher_speed(const char *algo, int enc, unsigned int secs, 389static void test_hash_sg_init(struct scatterlist *sg)
458 struct cipher_speed_template *template,
459 unsigned int tcount, u8 *keysize)
460{ 390{
461 unsigned int ret, i, j, iv_len; 391 int i;
462 const char *key;
463 char iv[128];
464 struct crypto_blkcipher *tfm;
465 struct blkcipher_desc desc;
466 const char *e;
467 u32 *b_size;
468 392
469 if (enc == ENCRYPT) 393 sg_init_table(sg, TVMEMSIZE);
470 e = "encryption"; 394 for (i = 0; i < TVMEMSIZE; i++) {
471 else 395 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
472 e = "decryption"; 396 memset(tvmem[i], 0xff, PAGE_SIZE);
397 }
398}
473 399
474 tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); 400static inline int do_one_ahash_op(struct ahash_request *req, int ret)
401{
402 if (ret == -EINPROGRESS || ret == -EBUSY) {
403 struct tcrypt_result *tr = req->base.data;
475 404
476 if (IS_ERR(tfm)) { 405 wait_for_completion(&tr->completion);
477 printk("failed to load transform for %s: %ld\n", algo, 406 reinit_completion(&tr->completion);
478 PTR_ERR(tfm)); 407 ret = tr->err;
408 }
409 return ret;
410}
411
412struct test_mb_ahash_data {
413 struct scatterlist sg[TVMEMSIZE];
414 char result[64];
415 struct ahash_request *req;
416 struct tcrypt_result tresult;
417 char *xbuf[XBUFSIZE];
418};
419
420static void test_mb_ahash_speed(const char *algo, unsigned int sec,
421 struct hash_speed *speed)
422{
423 struct test_mb_ahash_data *data;
424 struct crypto_ahash *tfm;
425 unsigned long start, end;
426 unsigned long cycles;
427 unsigned int i, j, k;
428 int ret;
429
430 data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
431 if (!data)
479 return; 432 return;
433
434 tfm = crypto_alloc_ahash(algo, 0, 0);
435 if (IS_ERR(tfm)) {
436 pr_err("failed to load transform for %s: %ld\n",
437 algo, PTR_ERR(tfm));
438 goto free_data;
480 } 439 }
481 desc.tfm = tfm;
482 desc.flags = 0;
483 440
484 printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, 441 for (i = 0; i < 8; ++i) {
485 get_driver_name(crypto_blkcipher, tfm), e); 442 if (testmgr_alloc_buf(data[i].xbuf))
443 goto out;
486 444
487 i = 0; 445 init_completion(&data[i].tresult.completion);
488 do {
489 446
490 b_size = block_sizes; 447 data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
491 do { 448 if (!data[i].req) {
492 struct scatterlist sg[TVMEMSIZE]; 449 pr_err("alg: hash: Failed to allocate request for %s\n",
450 algo);
451 goto out;
452 }
493 453
494 if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { 454 ahash_request_set_callback(data[i].req, 0,
495 printk("template (%u) too big for " 455 tcrypt_complete, &data[i].tresult);
496 "tvmem (%lu)\n", *keysize + *b_size, 456 test_hash_sg_init(data[i].sg);
497 TVMEMSIZE * PAGE_SIZE); 457 }
498 goto out;
499 }
500 458
501 printk("test %u (%d bit key, %d byte blocks): ", i, 459 pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
502 *keysize * 8, *b_size); 460 get_driver_name(crypto_ahash, tfm));
503 461
504 memset(tvmem[0], 0xff, PAGE_SIZE); 462 for (i = 0; speed[i].blen != 0; i++) {
463 /* For some reason this only tests digests. */
464 if (speed[i].blen != speed[i].plen)
465 continue;
505 466
506 /* set key, plain text and IV */ 467 if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
507 key = tvmem[0]; 468 pr_err("template (%u) too big for tvmem (%lu)\n",
508 for (j = 0; j < tcount; j++) { 469 speed[i].blen, TVMEMSIZE * PAGE_SIZE);
509 if (template[j].klen == *keysize) { 470 goto out;
510 key = template[j].key; 471 }
511 break;
512 }
513 }
514 472
515 ret = crypto_blkcipher_setkey(tfm, key, *keysize); 473 if (speed[i].klen)
516 if (ret) { 474 crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
517 printk("setkey() failed flags=%x\n",
518 crypto_blkcipher_get_flags(tfm));
519 goto out;
520 }
521 475
522 sg_init_table(sg, TVMEMSIZE); 476 for (k = 0; k < 8; k++)
523 sg_set_buf(sg, tvmem[0] + *keysize, 477 ahash_request_set_crypt(data[k].req, data[k].sg,
524 PAGE_SIZE - *keysize); 478 data[k].result, speed[i].blen);
525 for (j = 1; j < TVMEMSIZE; j++) {
526 sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
527 memset (tvmem[j], 0xff, PAGE_SIZE);
528 }
529 479
530 iv_len = crypto_blkcipher_ivsize(tfm); 480 pr_info("test%3u "
531 if (iv_len) { 481 "(%5u byte blocks,%5u bytes per update,%4u updates): ",
532 memset(&iv, 0xff, iv_len); 482 i, speed[i].blen, speed[i].plen,
533 crypto_blkcipher_set_iv(tfm, iv, iv_len); 483 speed[i].blen / speed[i].plen);
534 }
535 484
536 if (secs) 485 start = get_cycles();
537 ret = test_cipher_jiffies(&desc, enc, sg,
538 *b_size, secs);
539 else
540 ret = test_cipher_cycles(&desc, enc, sg,
541 *b_size);
542 486
543 if (ret) { 487 for (k = 0; k < 8; k++) {
544 printk("%s() failed flags=%x\n", e, desc.flags); 488 ret = crypto_ahash_digest(data[k].req);
545 break; 489 if (ret == -EINPROGRESS) {
490 ret = 0;
491 continue;
546 } 492 }
547 b_size++;
548 i++;
549 } while (*b_size);
550 keysize++;
551 } while (*keysize);
552 493
553out: 494 if (ret)
554 crypto_free_blkcipher(tfm); 495 break;
555}
556 496
557static void test_hash_sg_init(struct scatterlist *sg) 497 complete(&data[k].tresult.completion);
558{ 498 data[k].tresult.err = 0;
559 int i; 499 }
560 500
561 sg_init_table(sg, TVMEMSIZE); 501 for (j = 0; j < k; j++) {
562 for (i = 0; i < TVMEMSIZE; i++) { 502 struct tcrypt_result *tr = &data[j].tresult;
563 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
564 memset(tvmem[i], 0xff, PAGE_SIZE);
565 }
566}
567 503
568static inline int do_one_ahash_op(struct ahash_request *req, int ret) 504 wait_for_completion(&tr->completion);
569{ 505 if (tr->err)
570 if (ret == -EINPROGRESS || ret == -EBUSY) { 506 ret = tr->err;
571 struct tcrypt_result *tr = req->base.data; 507 }
572 508
573 wait_for_completion(&tr->completion); 509 end = get_cycles();
574 reinit_completion(&tr->completion); 510 cycles = end - start;
575 ret = tr->err; 511 pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
512 cycles, cycles / (8 * speed[i].blen));
513
514 if (ret) {
515 pr_err("At least one hashing failed ret=%d\n", ret);
516 break;
517 }
576 } 518 }
577 return ret; 519
520out:
521 for (k = 0; k < 8; ++k)
522 ahash_request_free(data[k].req);
523
524 for (k = 0; k < 8; ++k)
525 testmgr_free_buf(data[k].xbuf);
526
527 crypto_free_ahash(tfm);
528
529free_data:
530 kfree(data);
578} 531}
579 532
580static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, 533static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
@@ -812,7 +765,7 @@ static void test_hash_speed(const char *algo, unsigned int secs,
812 return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); 765 return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
813} 766}
814 767
815static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret) 768static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
816{ 769{
817 if (ret == -EINPROGRESS || ret == -EBUSY) { 770 if (ret == -EINPROGRESS || ret == -EBUSY) {
818 struct tcrypt_result *tr = req->base.data; 771 struct tcrypt_result *tr = req->base.data;
@@ -825,7 +778,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
825 return ret; 778 return ret;
826} 779}
827 780
828static int test_acipher_jiffies(struct ablkcipher_request *req, int enc, 781static int test_acipher_jiffies(struct skcipher_request *req, int enc,
829 int blen, int secs) 782 int blen, int secs)
830{ 783{
831 unsigned long start, end; 784 unsigned long start, end;
@@ -836,10 +789,10 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
836 time_before(jiffies, end); bcount++) { 789 time_before(jiffies, end); bcount++) {
837 if (enc) 790 if (enc)
838 ret = do_one_acipher_op(req, 791 ret = do_one_acipher_op(req,
839 crypto_ablkcipher_encrypt(req)); 792 crypto_skcipher_encrypt(req));
840 else 793 else
841 ret = do_one_acipher_op(req, 794 ret = do_one_acipher_op(req,
842 crypto_ablkcipher_decrypt(req)); 795 crypto_skcipher_decrypt(req));
843 796
844 if (ret) 797 if (ret)
845 return ret; 798 return ret;
@@ -850,7 +803,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
850 return 0; 803 return 0;
851} 804}
852 805
853static int test_acipher_cycles(struct ablkcipher_request *req, int enc, 806static int test_acipher_cycles(struct skcipher_request *req, int enc,
854 int blen) 807 int blen)
855{ 808{
856 unsigned long cycles = 0; 809 unsigned long cycles = 0;
@@ -861,10 +814,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
861 for (i = 0; i < 4; i++) { 814 for (i = 0; i < 4; i++) {
862 if (enc) 815 if (enc)
863 ret = do_one_acipher_op(req, 816 ret = do_one_acipher_op(req,
864 crypto_ablkcipher_encrypt(req)); 817 crypto_skcipher_encrypt(req));
865 else 818 else
866 ret = do_one_acipher_op(req, 819 ret = do_one_acipher_op(req,
867 crypto_ablkcipher_decrypt(req)); 820 crypto_skcipher_decrypt(req));
868 821
869 if (ret) 822 if (ret)
870 goto out; 823 goto out;
@@ -877,10 +830,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
877 start = get_cycles(); 830 start = get_cycles();
878 if (enc) 831 if (enc)
879 ret = do_one_acipher_op(req, 832 ret = do_one_acipher_op(req,
880 crypto_ablkcipher_encrypt(req)); 833 crypto_skcipher_encrypt(req));
881 else 834 else
882 ret = do_one_acipher_op(req, 835 ret = do_one_acipher_op(req,
883 crypto_ablkcipher_decrypt(req)); 836 crypto_skcipher_decrypt(req));
884 end = get_cycles(); 837 end = get_cycles();
885 838
886 if (ret) 839 if (ret)
@@ -897,16 +850,16 @@ out:
897 return ret; 850 return ret;
898} 851}
899 852
900static void test_acipher_speed(const char *algo, int enc, unsigned int secs, 853static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
901 struct cipher_speed_template *template, 854 struct cipher_speed_template *template,
902 unsigned int tcount, u8 *keysize) 855 unsigned int tcount, u8 *keysize, bool async)
903{ 856{
904 unsigned int ret, i, j, k, iv_len; 857 unsigned int ret, i, j, k, iv_len;
905 struct tcrypt_result tresult; 858 struct tcrypt_result tresult;
906 const char *key; 859 const char *key;
907 char iv[128]; 860 char iv[128];
908 struct ablkcipher_request *req; 861 struct skcipher_request *req;
909 struct crypto_ablkcipher *tfm; 862 struct crypto_skcipher *tfm;
910 const char *e; 863 const char *e;
911 u32 *b_size; 864 u32 *b_size;
912 865
@@ -917,7 +870,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
917 870
918 init_completion(&tresult.completion); 871 init_completion(&tresult.completion);
919 872
920 tfm = crypto_alloc_ablkcipher(algo, 0, 0); 873 tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
921 874
922 if (IS_ERR(tfm)) { 875 if (IS_ERR(tfm)) {
923 pr_err("failed to load transform for %s: %ld\n", algo, 876 pr_err("failed to load transform for %s: %ld\n", algo,
@@ -926,17 +879,17 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
926 } 879 }
927 880
928 pr_info("\ntesting speed of async %s (%s) %s\n", algo, 881 pr_info("\ntesting speed of async %s (%s) %s\n", algo,
929 get_driver_name(crypto_ablkcipher, tfm), e); 882 get_driver_name(crypto_skcipher, tfm), e);
930 883
931 req = ablkcipher_request_alloc(tfm, GFP_KERNEL); 884 req = skcipher_request_alloc(tfm, GFP_KERNEL);
932 if (!req) { 885 if (!req) {
933 pr_err("tcrypt: skcipher: Failed to allocate request for %s\n", 886 pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
934 algo); 887 algo);
935 goto out; 888 goto out;
936 } 889 }
937 890
938 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 891 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
939 tcrypt_complete, &tresult); 892 tcrypt_complete, &tresult);
940 893
941 i = 0; 894 i = 0;
942 do { 895 do {
@@ -966,12 +919,12 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
966 } 919 }
967 } 920 }
968 921
969 crypto_ablkcipher_clear_flags(tfm, ~0); 922 crypto_skcipher_clear_flags(tfm, ~0);
970 923
971 ret = crypto_ablkcipher_setkey(tfm, key, *keysize); 924 ret = crypto_skcipher_setkey(tfm, key, *keysize);
972 if (ret) { 925 if (ret) {
973 pr_err("setkey() failed flags=%x\n", 926 pr_err("setkey() failed flags=%x\n",
974 crypto_ablkcipher_get_flags(tfm)); 927 crypto_skcipher_get_flags(tfm));
975 goto out_free_req; 928 goto out_free_req;
976 } 929 }
977 930
@@ -995,11 +948,11 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
995 sg_set_buf(sg, tvmem[0] + *keysize, *b_size); 948 sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
996 } 949 }
997 950
998 iv_len = crypto_ablkcipher_ivsize(tfm); 951 iv_len = crypto_skcipher_ivsize(tfm);
999 if (iv_len) 952 if (iv_len)
1000 memset(&iv, 0xff, iv_len); 953 memset(&iv, 0xff, iv_len);
1001 954
1002 ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv); 955 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
1003 956
1004 if (secs) 957 if (secs)
1005 ret = test_acipher_jiffies(req, enc, 958 ret = test_acipher_jiffies(req, enc,
@@ -1010,7 +963,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
1010 963
1011 if (ret) { 964 if (ret) {
1012 pr_err("%s() failed flags=%x\n", e, 965 pr_err("%s() failed flags=%x\n", e,
1013 crypto_ablkcipher_get_flags(tfm)); 966 crypto_skcipher_get_flags(tfm));
1014 break; 967 break;
1015 } 968 }
1016 b_size++; 969 b_size++;
@@ -1020,9 +973,25 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
1020 } while (*keysize); 973 } while (*keysize);
1021 974
1022out_free_req: 975out_free_req:
1023 ablkcipher_request_free(req); 976 skcipher_request_free(req);
1024out: 977out:
1025 crypto_free_ablkcipher(tfm); 978 crypto_free_skcipher(tfm);
979}
980
981static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
982 struct cipher_speed_template *template,
983 unsigned int tcount, u8 *keysize)
984{
985 return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
986 true);
987}
988
989static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
990 struct cipher_speed_template *template,
991 unsigned int tcount, u8 *keysize)
992{
993 return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
994 false);
1026} 995}
1027 996
1028static void test_available(void) 997static void test_available(void)
@@ -1284,6 +1253,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1284 ret += tcrypt_test("crct10dif"); 1253 ret += tcrypt_test("crct10dif");
1285 break; 1254 break;
1286 1255
1256 case 48:
1257 ret += tcrypt_test("sha3-224");
1258 break;
1259
1260 case 49:
1261 ret += tcrypt_test("sha3-256");
1262 break;
1263
1264 case 50:
1265 ret += tcrypt_test("sha3-384");
1266 break;
1267
1268 case 51:
1269 ret += tcrypt_test("sha3-512");
1270 break;
1271
1287 case 100: 1272 case 100:
1288 ret += tcrypt_test("hmac(md5)"); 1273 ret += tcrypt_test("hmac(md5)");
1289 break; 1274 break;
@@ -1328,6 +1313,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1328 ret += tcrypt_test("hmac(crc32)"); 1313 ret += tcrypt_test("hmac(crc32)");
1329 break; 1314 break;
1330 1315
1316 case 111:
1317 ret += tcrypt_test("hmac(sha3-224)");
1318 break;
1319
1320 case 112:
1321 ret += tcrypt_test("hmac(sha3-256)");
1322 break;
1323
1324 case 113:
1325 ret += tcrypt_test("hmac(sha3-384)");
1326 break;
1327
1328 case 114:
1329 ret += tcrypt_test("hmac(sha3-512)");
1330 break;
1331
1331 case 150: 1332 case 150:
1332 ret += tcrypt_test("ansi_cprng"); 1333 ret += tcrypt_test("ansi_cprng");
1333 break; 1334 break;
@@ -1406,6 +1407,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1406 speed_template_32_48_64); 1407 speed_template_32_48_64);
1407 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, 1408 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
1408 speed_template_32_48_64); 1409 speed_template_32_48_64);
1410 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
1411 speed_template_16_24_32);
1412 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
1413 speed_template_16_24_32);
1409 test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, 1414 test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
1410 speed_template_16_24_32); 1415 speed_template_16_24_32);
1411 test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, 1416 test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
@@ -1691,6 +1696,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1691 test_hash_speed("poly1305", sec, poly1305_speed_template); 1696 test_hash_speed("poly1305", sec, poly1305_speed_template);
1692 if (mode > 300 && mode < 400) break; 1697 if (mode > 300 && mode < 400) break;
1693 1698
1699 case 322:
1700 test_hash_speed("sha3-224", sec, generic_hash_speed_template);
1701 if (mode > 300 && mode < 400) break;
1702
1703 case 323:
1704 test_hash_speed("sha3-256", sec, generic_hash_speed_template);
1705 if (mode > 300 && mode < 400) break;
1706
1707 case 324:
1708 test_hash_speed("sha3-384", sec, generic_hash_speed_template);
1709 if (mode > 300 && mode < 400) break;
1710
1711 case 325:
1712 test_hash_speed("sha3-512", sec, generic_hash_speed_template);
1713 if (mode > 300 && mode < 400) break;
1714
1694 case 399: 1715 case 399:
1695 break; 1716 break;
1696 1717
@@ -1770,6 +1791,35 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1770 test_ahash_speed("rmd320", sec, generic_hash_speed_template); 1791 test_ahash_speed("rmd320", sec, generic_hash_speed_template);
1771 if (mode > 400 && mode < 500) break; 1792 if (mode > 400 && mode < 500) break;
1772 1793
1794 case 418:
1795 test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
1796 if (mode > 400 && mode < 500) break;
1797
1798 case 419:
1799 test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
1800 if (mode > 400 && mode < 500) break;
1801
1802 case 420:
1803 test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
1804 if (mode > 400 && mode < 500) break;
1805
1806
1807 case 421:
1808 test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
1809 if (mode > 400 && mode < 500) break;
1810
1811 case 422:
1812 test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
1813 if (mode > 400 && mode < 500) break;
1814
1815 case 423:
1816 test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
1817 if (mode > 400 && mode < 500) break;
1818
1819 case 424:
1820 test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
1821 if (mode > 400 && mode < 500) break;
1822
1773 case 499: 1823 case 499:
1774 break; 1824 break;
1775 1825
@@ -1790,6 +1840,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1790 speed_template_32_48_64); 1840 speed_template_32_48_64);
1791 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, 1841 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
1792 speed_template_32_48_64); 1842 speed_template_32_48_64);
1843 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
1844 speed_template_16_24_32);
1845 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
1846 speed_template_16_24_32);
1793 test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, 1847 test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
1794 speed_template_16_24_32); 1848 speed_template_16_24_32);
1795 test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, 1849 test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c727fb0cb021..5c9d5a5e7b65 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -32,6 +32,7 @@
32#include <crypto/rng.h> 32#include <crypto/rng.h>
33#include <crypto/drbg.h> 33#include <crypto/drbg.h>
34#include <crypto/akcipher.h> 34#include <crypto/akcipher.h>
35#include <crypto/kpp.h>
35 36
36#include "internal.h" 37#include "internal.h"
37 38
@@ -120,6 +121,11 @@ struct akcipher_test_suite {
120 unsigned int count; 121 unsigned int count;
121}; 122};
122 123
124struct kpp_test_suite {
125 struct kpp_testvec *vecs;
126 unsigned int count;
127};
128
123struct alg_test_desc { 129struct alg_test_desc {
124 const char *alg; 130 const char *alg;
125 int (*test)(const struct alg_test_desc *desc, const char *driver, 131 int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -134,6 +140,7 @@ struct alg_test_desc {
134 struct cprng_test_suite cprng; 140 struct cprng_test_suite cprng;
135 struct drbg_test_suite drbg; 141 struct drbg_test_suite drbg;
136 struct akcipher_test_suite akcipher; 142 struct akcipher_test_suite akcipher;
143 struct kpp_test_suite kpp;
137 } suite; 144 } suite;
138}; 145};
139 146
@@ -1777,8 +1784,135 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1777 1784
1778} 1785}
1779 1786
1780static int do_test_rsa(struct crypto_akcipher *tfm, 1787static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1781 struct akcipher_testvec *vecs) 1788 const char *alg)
1789{
1790 struct kpp_request *req;
1791 void *input_buf = NULL;
1792 void *output_buf = NULL;
1793 struct tcrypt_result result;
1794 unsigned int out_len_max;
1795 int err = -ENOMEM;
1796 struct scatterlist src, dst;
1797
1798 req = kpp_request_alloc(tfm, GFP_KERNEL);
1799 if (!req)
1800 return err;
1801
1802 init_completion(&result.completion);
1803
1804 err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
1805 if (err < 0)
1806 goto free_req;
1807
1808 out_len_max = crypto_kpp_maxsize(tfm);
1809 output_buf = kzalloc(out_len_max, GFP_KERNEL);
1810 if (!output_buf) {
1811 err = -ENOMEM;
1812 goto free_req;
1813 }
1814
1815	/* No input set: the implementation uses the generator from the secret as base */
1816 kpp_request_set_input(req, NULL, 0);
1817 sg_init_one(&dst, output_buf, out_len_max);
1818 kpp_request_set_output(req, &dst, out_len_max);
1819 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1820 tcrypt_complete, &result);
1821
1822 /* Compute public key */
1823 err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
1824 if (err) {
1825 pr_err("alg: %s: generate public key test failed. err %d\n",
1826 alg, err);
1827 goto free_output;
1828 }
1829 /* Verify calculated public key */
1830 if (memcmp(vec->expected_a_public, sg_virt(req->dst),
1831 vec->expected_a_public_size)) {
1832 pr_err("alg: %s: generate public key test failed. Invalid output\n",
1833 alg);
1834 err = -EINVAL;
1835 goto free_output;
1836 }
1837
1838	/* Calculate the shared secret using the counterpart's (b) public key. */
1839 input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
1840 if (!input_buf) {
1841 err = -ENOMEM;
1842 goto free_output;
1843 }
1844
1845 memcpy(input_buf, vec->b_public, vec->b_public_size);
1846 sg_init_one(&src, input_buf, vec->b_public_size);
1847 sg_init_one(&dst, output_buf, out_len_max);
1848 kpp_request_set_input(req, &src, vec->b_public_size);
1849 kpp_request_set_output(req, &dst, out_len_max);
1850 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1851 tcrypt_complete, &result);
1852 err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
1853 if (err) {
1854 pr_err("alg: %s: compute shard secret test failed. err %d\n",
1855 alg, err);
1856 goto free_all;
1857 }
1858 /*
1859	 * Verify the shared secret, from which the user will derive
1860	 * a secret key with whatever hash or KDF it has chosen.
1861 */
1862 if (memcmp(vec->expected_ss, sg_virt(req->dst),
1863 vec->expected_ss_size)) {
1864 pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
1865 alg);
1866 err = -EINVAL;
1867 }
1868
1869free_all:
1870 kfree(input_buf);
1871free_output:
1872 kfree(output_buf);
1873free_req:
1874 kpp_request_free(req);
1875 return err;
1876}
1877
1878static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1879 struct kpp_testvec *vecs, unsigned int tcount)
1880{
1881 int ret, i;
1882
1883 for (i = 0; i < tcount; i++) {
1884 ret = do_test_kpp(tfm, vecs++, alg);
1885 if (ret) {
1886 pr_err("alg: %s: test failed on vector %d, err=%d\n",
1887 alg, i + 1, ret);
1888 return ret;
1889 }
1890 }
1891 return 0;
1892}
1893
1894static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
1895 u32 type, u32 mask)
1896{
1897 struct crypto_kpp *tfm;
1898 int err = 0;
1899
1900 tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1901 if (IS_ERR(tfm)) {
1902 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1903 driver, PTR_ERR(tfm));
1904 return PTR_ERR(tfm);
1905 }
1906 if (desc->suite.kpp.vecs)
1907 err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs,
1908 desc->suite.kpp.count);
1909
1910 crypto_free_kpp(tfm);
1911 return err;
1912}
1913
1914static int test_akcipher_one(struct crypto_akcipher *tfm,
1915 struct akcipher_testvec *vecs)
1782{ 1916{
1783 char *xbuf[XBUFSIZE]; 1917 char *xbuf[XBUFSIZE];
1784 struct akcipher_request *req; 1918 struct akcipher_request *req;
@@ -1807,6 +1941,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
1807 if (err) 1941 if (err)
1808 goto free_req; 1942 goto free_req;
1809 1943
1944 err = -ENOMEM;
1810 out_len_max = crypto_akcipher_maxsize(tfm); 1945 out_len_max = crypto_akcipher_maxsize(tfm);
1811 outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); 1946 outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
1812 if (!outbuf_enc) 1947 if (!outbuf_enc)
@@ -1829,17 +1964,18 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
1829 /* Run RSA encrypt - c = m^e mod n;*/ 1964 /* Run RSA encrypt - c = m^e mod n;*/
1830 err = wait_async_op(&result, crypto_akcipher_encrypt(req)); 1965 err = wait_async_op(&result, crypto_akcipher_encrypt(req));
1831 if (err) { 1966 if (err) {
1832 pr_err("alg: rsa: encrypt test failed. err %d\n", err); 1967 pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
1833 goto free_all; 1968 goto free_all;
1834 } 1969 }
1835 if (req->dst_len != vecs->c_size) { 1970 if (req->dst_len != vecs->c_size) {
1836 pr_err("alg: rsa: encrypt test failed. Invalid output len\n"); 1971 pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
1837 err = -EINVAL; 1972 err = -EINVAL;
1838 goto free_all; 1973 goto free_all;
1839 } 1974 }
1840 /* verify that encrypted message is equal to expected */ 1975 /* verify that encrypted message is equal to expected */
1841 if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { 1976 if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
1842 pr_err("alg: rsa: encrypt test failed. Invalid output\n"); 1977 pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
1978 hexdump(outbuf_enc, vecs->c_size);
1843 err = -EINVAL; 1979 err = -EINVAL;
1844 goto free_all; 1980 goto free_all;
1845 } 1981 }
@@ -1867,18 +2003,22 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
1867 /* Run RSA decrypt - m = c^d mod n;*/ 2003 /* Run RSA decrypt - m = c^d mod n;*/
1868 err = wait_async_op(&result, crypto_akcipher_decrypt(req)); 2004 err = wait_async_op(&result, crypto_akcipher_decrypt(req));
1869 if (err) { 2005 if (err) {
1870 pr_err("alg: rsa: decrypt test failed. err %d\n", err); 2006 pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
1871 goto free_all; 2007 goto free_all;
1872 } 2008 }
1873 out_len = req->dst_len; 2009 out_len = req->dst_len;
1874 if (out_len != vecs->m_size) { 2010 if (out_len < vecs->m_size) {
1875 pr_err("alg: rsa: decrypt test failed. Invalid output len\n"); 2011 pr_err("alg: akcipher: decrypt test failed. "
2012 "Invalid output len %u\n", out_len);
1876 err = -EINVAL; 2013 err = -EINVAL;
1877 goto free_all; 2014 goto free_all;
1878 } 2015 }
1879 /* verify that decrypted message is equal to the original msg */ 2016 /* verify that decrypted message is equal to the original msg */
1880 if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) { 2017 if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
1881 pr_err("alg: rsa: decrypt test failed. Invalid output\n"); 2018 memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
2019 vecs->m_size)) {
2020 pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
2021 hexdump(outbuf_dec, out_len);
1882 err = -EINVAL; 2022 err = -EINVAL;
1883 } 2023 }
1884free_all: 2024free_all:
@@ -1891,28 +2031,22 @@ free_xbuf:
1891 return err; 2031 return err;
1892} 2032}
1893 2033
1894static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs, 2034static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1895 unsigned int tcount) 2035 struct akcipher_testvec *vecs, unsigned int tcount)
1896{ 2036{
2037 const char *algo =
2038 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1897 int ret, i; 2039 int ret, i;
1898 2040
1899 for (i = 0; i < tcount; i++) { 2041 for (i = 0; i < tcount; i++) {
1900 ret = do_test_rsa(tfm, vecs++); 2042 ret = test_akcipher_one(tfm, vecs++);
1901 if (ret) { 2043 if (!ret)
1902 pr_err("alg: rsa: test failed on vector %d, err=%d\n", 2044 continue;
1903 i + 1, ret);
1904 return ret;
1905 }
1906 }
1907 return 0;
1908}
1909
1910static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1911 struct akcipher_testvec *vecs, unsigned int tcount)
1912{
1913 if (strncmp(alg, "rsa", 3) == 0)
1914 return test_rsa(tfm, vecs, tcount);
1915 2045
2046 pr_err("alg: akcipher: test %d failed for %s, err=%d\n",
2047 i + 1, algo, ret);
2048 return ret;
2049 }
1916 return 0; 2050 return 0;
1917} 2051}
1918 2052
@@ -2729,6 +2863,16 @@ static const struct alg_test_desc alg_test_descs[] = {
2729 } 2863 }
2730 } 2864 }
2731 }, { 2865 }, {
2866 .alg = "dh",
2867 .test = alg_test_kpp,
2868 .fips_allowed = 1,
2869 .suite = {
2870 .kpp = {
2871 .vecs = dh_tv_template,
2872 .count = DH_TEST_VECTORS
2873 }
2874 }
2875 }, {
2732 .alg = "digest_null", 2876 .alg = "digest_null",
2733 .test = alg_test_null, 2877 .test = alg_test_null,
2734 }, { 2878 }, {
@@ -3157,6 +3301,16 @@ static const struct alg_test_desc alg_test_descs[] = {
3157 } 3301 }
3158 } 3302 }
3159 }, { 3303 }, {
3304 .alg = "ecdh",
3305 .test = alg_test_kpp,
3306 .fips_allowed = 1,
3307 .suite = {
3308 .kpp = {
3309 .vecs = ecdh_tv_template,
3310 .count = ECDH_TEST_VECTORS
3311 }
3312 }
3313 }, {
3160 .alg = "gcm(aes)", 3314 .alg = "gcm(aes)",
3161 .test = alg_test_aead, 3315 .test = alg_test_aead,
3162 .fips_allowed = 1, 3316 .fips_allowed = 1,
@@ -3249,6 +3403,46 @@ static const struct alg_test_desc alg_test_descs[] = {
3249 } 3403 }
3250 } 3404 }
3251 }, { 3405 }, {
3406 .alg = "hmac(sha3-224)",
3407 .test = alg_test_hash,
3408 .fips_allowed = 1,
3409 .suite = {
3410 .hash = {
3411 .vecs = hmac_sha3_224_tv_template,
3412 .count = HMAC_SHA3_224_TEST_VECTORS
3413 }
3414 }
3415 }, {
3416 .alg = "hmac(sha3-256)",
3417 .test = alg_test_hash,
3418 .fips_allowed = 1,
3419 .suite = {
3420 .hash = {
3421 .vecs = hmac_sha3_256_tv_template,
3422 .count = HMAC_SHA3_256_TEST_VECTORS
3423 }
3424 }
3425 }, {
3426 .alg = "hmac(sha3-384)",
3427 .test = alg_test_hash,
3428 .fips_allowed = 1,
3429 .suite = {
3430 .hash = {
3431 .vecs = hmac_sha3_384_tv_template,
3432 .count = HMAC_SHA3_384_TEST_VECTORS
3433 }
3434 }
3435 }, {
3436 .alg = "hmac(sha3-512)",
3437 .test = alg_test_hash,
3438 .fips_allowed = 1,
3439 .suite = {
3440 .hash = {
3441 .vecs = hmac_sha3_512_tv_template,
3442 .count = HMAC_SHA3_512_TEST_VECTORS
3443 }
3444 }
3445 }, {
3252 .alg = "hmac(sha384)", 3446 .alg = "hmac(sha384)",
3253 .test = alg_test_hash, 3447 .test = alg_test_hash,
3254 .fips_allowed = 1, 3448 .fips_allowed = 1,
@@ -3659,6 +3853,46 @@ static const struct alg_test_desc alg_test_descs[] = {
3659 } 3853 }
3660 } 3854 }
3661 }, { 3855 }, {
3856 .alg = "sha3-224",
3857 .test = alg_test_hash,
3858 .fips_allowed = 1,
3859 .suite = {
3860 .hash = {
3861 .vecs = sha3_224_tv_template,
3862 .count = SHA3_224_TEST_VECTORS
3863 }
3864 }
3865 }, {
3866 .alg = "sha3-256",
3867 .test = alg_test_hash,
3868 .fips_allowed = 1,
3869 .suite = {
3870 .hash = {
3871 .vecs = sha3_256_tv_template,
3872 .count = SHA3_256_TEST_VECTORS
3873 }
3874 }
3875 }, {
3876 .alg = "sha3-384",
3877 .test = alg_test_hash,
3878 .fips_allowed = 1,
3879 .suite = {
3880 .hash = {
3881 .vecs = sha3_384_tv_template,
3882 .count = SHA3_384_TEST_VECTORS
3883 }
3884 }
3885 }, {
3886 .alg = "sha3-512",
3887 .test = alg_test_hash,
3888 .fips_allowed = 1,
3889 .suite = {
3890 .hash = {
3891 .vecs = sha3_512_tv_template,
3892 .count = SHA3_512_TEST_VECTORS
3893 }
3894 }
3895 }, {
3662 .alg = "sha384", 3896 .alg = "sha384",
3663 .test = alg_test_hash, 3897 .test = alg_test_hash,
3664 .fips_allowed = 1, 3898 .fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 487ec880e889..acb6bbff781a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -133,6 +133,17 @@ struct akcipher_testvec {
133 bool public_key_vec; 133 bool public_key_vec;
134}; 134};
135 135
136struct kpp_testvec {
137 unsigned char *secret;
138 unsigned char *b_public;
139 unsigned char *expected_a_public;
140 unsigned char *expected_ss;
141 unsigned short secret_size;
142 unsigned short b_public_size;
143 unsigned short expected_a_public_size;
144 unsigned short expected_ss_size;
145};
146
136static char zeroed_string[48]; 147static char zeroed_string[48];
137 148
138/* 149/*
@@ -141,7 +152,7 @@ static char zeroed_string[48];
141#ifdef CONFIG_CRYPTO_FIPS 152#ifdef CONFIG_CRYPTO_FIPS
142#define RSA_TEST_VECTORS 2 153#define RSA_TEST_VECTORS 2
143#else 154#else
144#define RSA_TEST_VECTORS 4 155#define RSA_TEST_VECTORS 5
145#endif 156#endif
146static struct akcipher_testvec rsa_tv_template[] = { 157static struct akcipher_testvec rsa_tv_template[] = {
147 { 158 {
@@ -327,6 +338,516 @@ static struct akcipher_testvec rsa_tv_template[] = {
327 .m_size = 8, 338 .m_size = 8,
328 .c_size = 256, 339 .c_size = 256,
329 .public_key_vec = true, 340 .public_key_vec = true,
341 }, {
342 .key =
343 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
344 "\x02\x01\x00" /* version integer of 1 byte */
345 "\x02\x82\x02\x01" /* modulus - integer of 513 bytes */
346 "\x00\xC3\x8B\x55\x7B\x73\x4D\xFF\xE9\x9B\xC6\xDC\x67\x3C\xB4\x8E"
347 "\xA0\x86\xED\xF2\xB9\x50\x5C\x54\x5C\xBA\xE4\xA1\xB2\xA7\xAE\x2F"
348 "\x1B\x7D\xF1\xFB\xAC\x79\xC5\xDF\x1A\x00\xC9\xB2\xC1\x61\x25\x33"
349 "\xE6\x9C\xE9\xCF\xD6\x27\xC4\x4E\x44\x30\x44\x5E\x08\xA1\x87\x52"
350 "\xCC\x6B\x97\x70\x8C\xBC\xA5\x06\x31\x0C\xD4\x2F\xD5\x7D\x26\x24"
351 "\xA2\xE2\xAC\x78\xF4\x53\x14\xCE\xF7\x19\x2E\xD7\xF7\xE6\x0C\xB9"
352 "\x56\x7F\x0B\xF1\xB1\xE2\x43\x70\xBD\x86\x1D\xA1\xCC\x2B\x19\x08"
353 "\x76\xEF\x91\xAC\xBF\x20\x24\x0D\x38\xC0\x89\xB8\x9A\x70\xB3\x64"
354 "\xD9\x8F\x80\x41\x10\x5B\x9F\xB1\xCB\x76\x43\x00\x21\x25\x36\xD4"
355 "\x19\xFC\x55\x95\x10\xE4\x26\x74\x98\x2C\xD9\xBD\x0B\x2B\x04\xC2"
356 "\xAC\x82\x38\xB4\xDD\x4C\x04\x7E\x51\x36\x40\x1E\x0B\xC4\x7C\x25"
357 "\xDD\x4B\xB2\xE7\x20\x0A\x57\xF9\xB4\x94\xC3\x08\x33\x22\x6F\x8B"
358 "\x48\xDB\x03\x68\x5A\x5B\xBA\xAE\xF3\xAD\xCF\xC3\x6D\xBA\xF1\x28"
359 "\x67\x7E\x6C\x79\x07\xDE\xFC\xED\xE7\x96\xE3\x6C\xE0\x2C\x87\xF8"
360 "\x02\x01\x28\x38\x43\x21\x53\x84\x69\x75\x78\x15\x7E\xEE\xD2\x1B"
361 "\xB9\x23\x40\xA8\x86\x1E\x38\x83\xB2\x73\x1D\x53\xFB\x9E\x2A\x8A"
362 "\xB2\x75\x35\x01\xC3\xC3\xC4\x94\xE8\x84\x86\x64\x81\xF4\x42\xAA"
363 "\x3C\x0E\xD6\x4F\xBC\x0A\x09\x2D\xE7\x1B\xD4\x10\xA8\x54\xEA\x89"
364 "\x84\x8A\xCB\xF7\x5A\x3C\xCA\x76\x08\x29\x62\xB4\x6A\x22\xDF\x14"
365 "\x95\x71\xFD\xB6\x86\x39\xB8\x8B\xF8\x91\x7F\x38\xAA\x14\xCD\xE5"
366 "\xF5\x1D\xC2\x6D\x53\x69\x52\x84\x7F\xA3\x1A\x5E\x26\x04\x83\x06"
367 "\x73\x52\x56\xCF\x76\x26\xC9\xDD\x75\xD7\xFC\xF4\x69\xD8\x7B\x55"
368 "\xB7\x68\x13\x53\xB9\xE7\x89\xC3\xE8\xD6\x6E\xA7\x6D\xEA\x81\xFD"
369 "\xC4\xB7\x05\x5A\xB7\x41\x0A\x23\x8E\x03\x8A\x1C\xAE\xD3\x1E\xCE"
370 "\xE3\x5E\xFC\x19\x4A\xEE\x61\x9B\x8E\xE5\xE5\xDD\x85\xF9\x41\xEC"
371 "\x14\x53\x92\xF7\xDD\x06\x85\x02\x91\xE3\xEB\x6C\x43\x03\xB1\x36"
372 "\x7B\x89\x5A\xA8\xEB\xFC\xD5\xA8\x35\xDC\x81\xD9\x5C\xBD\xCA\xDC"
373 "\x9B\x98\x0B\x06\x5D\x0C\x5B\xEE\xF3\xD5\xCC\x57\xC9\x71\x2F\x90"
374 "\x3B\x3C\xF0\x8E\x4E\x35\x48\xAE\x63\x74\xA9\xFC\x72\x75\x8E\x34"
375 "\xA8\xF2\x1F\xEA\xDF\x3A\x37\x2D\xE5\x39\x39\xF8\x57\x58\x3C\x04"
376 "\xFE\x87\x06\x98\xBC\x7B\xD3\x21\x36\x60\x25\x54\xA7\x3D\xFA\x91"
377 "\xCC\xA8\x0B\x92\x8E\xB4\xF7\x06\xFF\x1E\x95\xCB\x07\x76\x97\x3B"
378 "\x9D"
379 "\x02\x03\x01\x00\x01" /* public key integer of 3 bytes */
380 "\x02\x82\x02\x00" /* private key integer of 512 bytes */
381 "\x74\xA9\xE0\x6A\x32\xB4\xCA\x85\xD9\x86\x9F\x60\x88\x7B\x40\xCC"
382 "\xCD\x33\x91\xA8\xB6\x25\x1F\xBF\xE3\x51\x1C\x97\xB6\x2A\xD9\xB8"
383 "\x11\x40\x19\xE3\x21\x13\xC8\xB3\x7E\xDC\xD7\x65\x40\x4C\x2D\xD6"
384 "\xDC\xAF\x32\x6C\x96\x75\x2C\x2C\xCA\x8F\x3F\x7A\xEE\xC4\x09\xC6"
385 "\x24\x3A\xC9\xCF\x6D\x8D\x17\x50\x94\x52\xD3\xE7\x0F\x2F\x7E\x94"
386 "\x1F\xA0\xBE\xD9\x25\xE8\x38\x42\x7C\x27\xD2\x79\xF8\x2A\x87\x38"
387 "\xEF\xBB\x74\x8B\xA8\x6E\x8C\x08\xC6\xC7\x4F\x0C\xBC\x79\xC6\xEF"
388 "\x0E\xA7\x5E\xE4\xF8\x8C\x09\xC7\x5E\x37\xCC\x87\x77\xCD\xCF\xD1"
389 "\x6D\x28\x1B\xA9\x62\xC0\xB8\x16\xA7\x8B\xF9\xBB\xCC\xB4\x15\x7F"
390 "\x1B\x69\x03\xF2\x7B\xEB\xE5\x8C\x14\xD6\x23\x4F\x52\x6F\x18\xA6"
391 "\x4B\x5B\x01\xAD\x35\xF9\x48\x53\xB3\x86\x35\x66\xD7\xE7\x29\xC0"
392 "\x09\xB5\xC6\xE6\xFA\xC4\xDA\x19\xBE\xD7\x4D\x41\x14\xBE\x6F\xDF"
393 "\x1B\xAB\xC0\xCA\x88\x07\xAC\xF1\x7D\x35\x83\x67\x28\x2D\x50\xE9"
394 "\xCE\x27\x71\x5E\x1C\xCF\xD2\x30\x65\x79\x72\x2F\x9C\xE1\xD2\x39"
395 "\x7F\xEF\x3B\x01\xF2\x14\x1D\xDF\xBD\x51\xD3\xA1\x53\x62\xCF\x5F"
396 "\x79\x84\xCE\x06\x96\x69\x29\x49\x82\x1C\x71\x4A\xA1\x66\xC8\x2F"
397 "\xFD\x7B\x96\x7B\xFC\xC4\x26\x58\xC4\xFC\x7C\xAF\xB5\xE8\x95\x83"
398 "\x87\xCB\x46\xDE\x97\xA7\xB3\xA2\x54\x5B\xD7\xAF\xAB\xEB\xC8\xF3"
399 "\x55\x9D\x48\x2B\x30\x9C\xDC\x26\x4B\xC2\x89\x45\x13\xB2\x01\x9A"
400 "\xA4\x65\xC3\xEC\x24\x2D\x26\x97\xEB\x80\x8A\x9D\x03\xBC\x59\x66"
401 "\x9E\xE2\xBB\xBB\x63\x19\x64\x93\x11\x7B\x25\x65\x30\xCD\x5B\x4B"
402 "\x2C\xFF\xDC\x2D\x30\x87\x1F\x3C\x88\x07\xD0\xFC\x48\xCC\x05\x8A"
403 "\xA2\xC8\x39\x3E\xD5\x51\xBC\x0A\xBE\x6D\xA8\xA0\xF6\x88\x06\x79"
404 "\x13\xFF\x1B\x45\xDA\x54\xC9\x24\x25\x8A\x75\x0A\x26\xD1\x69\x81"
405 "\x14\x14\xD1\x79\x7D\x8E\x76\xF2\xE0\xEB\xDD\x0F\xDE\xC2\xEC\x80"
406 "\xD7\xDC\x16\x99\x92\xBE\xCB\x40\x0C\xCE\x7C\x3B\x46\xA2\x5B\x5D"
407 "\x0C\x45\xEB\xE1\x00\xDE\x72\x50\xB1\xA6\x0B\x76\xC5\x8D\xFC\x82"
408 "\x38\x6D\x99\x14\x1D\x1A\x4A\xD3\x7C\x53\xB8\x12\x46\xA2\x30\x38"
409 "\x82\xF4\x96\x6E\x8C\xCE\x47\x0D\xAF\x0A\x3B\x45\xB7\x43\x95\x43"
410 "\x9E\x02\x2C\x44\x07\x6D\x1F\x3C\x66\x89\x09\xB6\x1F\x06\x30\xCC"
411 "\xAD\xCE\x7D\x9A\xDE\x3E\xFB\x6C\xE4\x58\x43\xD2\x4F\xA5\x9E\x5E"
412 "\xA7\x7B\xAE\x3A\xF6\x7E\xD9\xDB\xD3\xF5\xC5\x41\xAF\xE6\x9C\x91"
413 "\x02\x82\x01\x01" /* prime1 - integer of 257 bytes */
414 "\x00\xE0\xA6\x6C\xF0\xA2\xF8\x81\x85\x36\x43\xD0\x13\x0B\x33\x8B"
415 "\x8F\x78\x3D\xAC\xC7\x5E\x46\x6A\x7F\x05\xAE\x3E\x26\x0A\xA6\xD0"
416 "\x51\xF3\xC8\x61\xF5\x77\x22\x48\x10\x87\x4C\xD5\xA4\xD5\xAE\x2D"
417 "\x4E\x7A\xFE\x1C\x31\xE7\x6B\xFF\xA4\x69\x20\xF9\x2A\x0B\x99\xBE"
418 "\x7C\x32\x68\xAD\xB0\xC6\x94\x81\x41\x75\xDC\x06\x78\x0A\xB4\xCF"
419 "\xCD\x1B\x2D\x31\xE4\x7B\xEA\xA8\x35\x99\x75\x57\xC6\x0E\xF6\x78"
420 "\x4F\xA0\x92\x4A\x00\x1B\xE7\x96\xF2\x5B\xFD\x2C\x0A\x0A\x13\x81"
421 "\xAF\xCB\x59\x87\x31\xD9\x83\x65\xF2\x22\x48\xD0\x03\x67\x39\xF6"
422 "\xFF\xA8\x36\x07\x3A\x68\xE3\x7B\xA9\x64\xFD\x9C\xF7\xB1\x3D\xBF"
423 "\x26\x5C\xCC\x7A\xFC\xA2\x8F\x51\xD1\xE1\xE2\x3C\xEC\x06\x75\x7C"
424 "\x34\xF9\xA9\x33\x70\x11\xAD\x5A\xDC\x5F\xCF\x50\xF6\x23\x2F\x39"
425 "\xAC\x92\x48\x53\x4D\x01\x96\x3C\xD8\xDC\x1F\x23\x23\x78\x80\x34"
426 "\x54\x14\x76\x8B\xB6\xBB\xFB\x88\x78\x31\x59\x28\xD2\xB1\x75\x17"
427 "\x88\x04\x4A\x78\x62\x18\x2E\xF5\xFB\x9B\xEF\x15\xD8\x16\x47\xC6"
428 "\x42\xB1\x02\xDA\x9E\xE3\x84\x90\xB4\x2D\xC3\xCE\x13\xC9\x12\x7D"
429 "\x3E\xCD\x39\x39\xC9\xAD\xA1\x1A\xE6\xD5\xAD\x5A\x09\x4D\x1B\x0C"
430 "\xAB"
431 "\x02\x82\x01\x01" /* prime 2 - integer of 257 bytes */
432 "\x00\xDE\xD5\x1B\xF6\xCD\x83\xB1\xC6\x47\x7E\xB9\xC0\x6B\xA9\xB8"
433 "\x02\xF3\xAE\x40\x5D\xFC\xD3\xE5\x4E\xF1\xE3\x39\x04\x52\x84\x89"
434 "\x40\x37\xBB\xC2\xCD\x7F\x71\x77\x17\xDF\x6A\x4C\x31\x24\x7F\xB9"
435 "\x7E\x7F\xC8\x43\x4A\x3C\xEB\x8D\x1B\x7F\x21\x51\x67\x45\x8F\xA0"
436 "\x36\x29\x3A\x18\x45\xA5\x32\xEC\x74\x88\x3C\x98\x5D\x67\x3B\xD7"
437 "\x51\x1F\xE9\xAE\x09\x01\xDE\xDE\x7C\xFB\x60\xD1\xA5\x6C\xE9\x6A"
438 "\x93\x04\x02\x3A\xBB\x67\x02\xB9\xFD\x23\xF0\x02\x2B\x49\x85\xC9"
439 "\x5B\xE7\x4B\xDF\xA3\xF4\xEE\x59\x4C\x45\xEF\x8B\xC1\x6B\xDE\xDE"
440 "\xBC\x1A\xFC\xD2\x76\x3F\x33\x74\xA9\x8E\xA3\x7E\x0C\xC6\xCE\x70"
441 "\xA1\x5B\xA6\x77\xEA\x76\xEB\x18\xCE\xB9\xD7\x78\x8D\xAE\x06\xBB"
442 "\xD3\x1F\x16\x0D\x05\xAB\x4F\xC6\x52\xC8\x6B\x36\x51\x7D\x1D\x27"
443 "\xAF\x88\x9A\x6F\xCC\x25\x2E\x74\x06\x72\xCE\x9E\xDB\xE0\x9D\x30"
444 "\xEF\x55\xA5\x58\x21\xA7\x42\x12\x2C\x2C\x23\x87\xC1\x0F\xE8\x51"
445 "\xDA\x53\xDA\xFC\x05\x36\xDF\x08\x0E\x08\x36\xBE\x5C\x86\x9E\xCA"
446 "\x68\x90\x33\x12\x0B\x14\x82\xAB\x90\x1A\xD4\x49\x32\x9C\xBD\xAA"
447 "\xAB\x4E\x38\xF1\xEE\xED\x3D\x3F\xE8\xBD\x48\x56\xA6\x64\xEE\xC8"
448 "\xD7"
449 "\x02\x82\x01\x01" /* exponent 1 - integer of 257 bytes */
450 "\x00\x96\x5E\x6F\x8F\x06\xD6\xE6\x03\x1F\x96\x76\x81\x38\xBF\x30"
451 "\xCC\x40\x84\xAF\xD0\xE7\x06\xA5\x24\x0E\xCE\x59\xA5\x26\xFE\x0F"
452 "\x74\xBB\x83\xC6\x26\x02\xAF\x3C\xA3\x6B\x9C\xFF\x68\x0C\xEB\x40"
453 "\x42\x46\xCB\x2E\x5E\x2C\xF4\x3A\x32\x77\x77\xED\xAF\xBA\x02\x17"
454 "\xE1\x93\xF0\x43\x4A\x8F\x31\x39\xEF\x72\x0F\x6B\x79\x10\x59\x84"
455 "\xBA\x5A\x55\x7F\x0E\xDB\xEE\xEE\xD6\xA9\xB8\x44\x9F\x3A\xC6\xB9"
456 "\x33\x3B\x5C\x90\x11\xD0\x9B\xCC\x8A\xBF\x0E\x10\x5B\x4B\xF1\x50"
457 "\x9E\x35\xB3\xE0\x6D\x7A\x95\x9C\x38\x5D\xC0\x75\x13\xC2\x15\xA7"
458 "\x81\xEA\xBA\xF7\x4D\x9E\x85\x9D\xF1\x7D\xBA\xD0\x45\x6F\x2A\xD0"
459 "\x76\xC2\x28\xD0\xAD\xA7\xB5\xDC\xE3\x6A\x99\xFF\x83\x50\xB3\x75"
460 "\x07\x14\x91\xAF\xEF\x74\xB5\x9F\x9A\xE0\xBA\xA9\x0B\x87\xF3\x85"
461 "\x5C\x40\xB2\x0E\xA7\xFD\xC6\xED\x45\x8E\xD9\x7C\xB0\xB2\x68\xC6"
462 "\x1D\xFD\x70\x78\x06\x41\x7F\x95\x12\x36\x9D\xE2\x58\x5D\x15\xEE"
463 "\x41\x49\xF5\xFA\xEC\x56\x19\xA0\xE6\xE0\xB2\x40\xE1\xD9\xD0\x03"
464 "\x22\x02\xCF\xD1\x3C\x07\x38\x65\x8F\x65\x0E\xAA\x32\xCE\x25\x05"
465 "\x16\x73\x51\xB9\x9F\x88\x0B\xCD\x30\xF3\x97\xCC\x2B\x6B\xA4\x0E"
466 "\x6F"
467 "\x02\x82\x01\x00" /* exponent 2 - integer of 256 bytes */
468 "\x2A\x5F\x3F\xB8\x08\x90\x58\x47\xA9\xE4\xB1\x11\xA3\xE7\x5B\xF4"
469 "\x43\xBE\x08\xC3\x56\x86\x3C\x7E\x6C\x84\x96\x9C\xF9\xCB\xF6\x05"
470 "\x5E\x13\xB8\x11\x37\x80\xAD\xF2\xBE\x2B\x0A\x5D\xF5\xE0\xCB\xB7"
471 "\x00\x39\x66\x82\x41\x5F\x51\x2F\xBF\x56\xE8\x91\xC8\xAA\x6C\xFE"
472 "\x9F\x8C\x4A\x7D\x43\xD2\x91\x1F\xFF\x9F\xF6\x21\x1C\xB6\x46\x55"
473 "\x48\xCA\x38\xAB\xC1\xCD\x4D\x65\x5A\xAF\xA8\x6D\xDA\x6D\xF0\x34"
474 "\x10\x79\x14\x0D\xFA\xA2\x8C\x17\x54\xB4\x18\xD5\x7E\x5F\x90\x50"
475 "\x87\x84\xE7\xFB\xD7\x61\x53\x5D\xAB\x96\xC7\x6E\x7A\x42\xA0\xFC"
476 "\x07\xED\xB7\x5F\x80\xD9\x19\xFF\xFB\xFD\x9E\xC4\x73\x31\x62\x3D"
477 "\x6C\x9E\x15\x03\x62\xA5\x85\xCC\x19\x8E\x9D\x7F\xE3\x6D\xA8\x5D"
478 "\x96\xF5\xAC\x78\x3D\x81\x27\xE7\x29\xF1\x29\x1D\x09\xBB\x77\x86"
479 "\x6B\x65\x62\x88\xE1\x31\x1A\x22\xF7\xC5\xCE\x73\x65\x1C\xBE\xE7"
480 "\x63\xD3\xD3\x14\x63\x27\xAF\x28\xF3\x23\xB6\x76\xC1\xBD\x9D\x82"
481 "\xF4\x9B\x19\x7D\x2C\x57\xF0\xC2\x2A\x51\xAE\x95\x0D\x8C\x38\x54"
482 "\xF5\xC6\xA0\x51\xB7\x0E\xB9\xEC\xE7\x0D\x22\xF6\x1A\xD3\xFE\x16"
483 "\x21\x03\xB7\x0D\x85\xD3\x35\xC9\xDD\xE4\x59\x85\xBE\x7F\xA1\x75"
484 "\x02\x82\x01\x01" /* coefficient - integer of 257 bytes */
485 "\x00\xB9\x48\xD2\x54\x2F\x19\x54\x64\xAE\x62\x80\x61\x89\x80\xB4"
486 "\x48\x0B\x8D\x7E\x1B\x0F\x50\x08\x82\x3F\xED\x75\x84\xB7\x13\xE4"
487 "\xF8\x8D\xA8\xBB\x54\x21\x4C\x5A\x54\x07\x16\x4B\xB4\xA4\x9E\x30"
488 "\xBF\x7A\x30\x1B\x39\x60\xA3\x21\x53\xFB\xB0\xDC\x0F\x7C\x2C\xFB"
489 "\xAA\x95\x7D\x51\x39\x28\x33\x1F\x25\x31\x53\xF5\xD2\x64\x2B\xF2"
490 "\x1E\xB3\xC0\x6A\x0B\xC9\xA4\x42\x64\x5C\xFB\x15\xA3\xE8\x4C\x3A"
491 "\x9C\x3C\xBE\xA3\x39\x83\x23\xE3\x6D\x18\xCC\xC2\xDC\x63\x8D\xBA"
492 "\x98\xE0\xE0\x31\x4A\x2B\x37\x9C\x4D\x6B\xF3\x9F\x51\xE4\x43\x5C"
493 "\x83\x5F\xBF\x5C\xFE\x92\x45\x01\xAF\xF5\xC2\xF4\xB7\x56\x93\xA5"
494 "\xF4\xAA\x67\x3C\x48\x37\xBD\x9A\x3C\xFE\xA5\x9A\xB0\xD1\x6B\x85"
495 "\xDD\x81\xD4\xFA\xAD\x31\x83\xA8\x22\x9B\xFD\xB4\x61\xDC\x7A\x51"
496 "\x59\x62\x10\x1B\x7E\x44\xA3\xFE\x90\x51\x5A\x3E\x02\x87\xAD\xFA"
497 "\xDD\x0B\x1F\x3D\x35\xAF\xEE\x13\x85\x51\xA7\x42\xC0\xEE\x9E\x20"
498 "\xE9\xD0\x29\xB2\xE4\x21\xE4\x6D\x62\xB9\xF4\x48\x4A\xD8\x46\x8E"
499 "\x61\xA6\x2C\x5D\xDF\x8F\x97\x2B\x3A\x75\x1D\x83\x17\x6F\xC6\xB0"
500 "\xDE\xFC\x14\x25\x06\x5A\x60\xBB\xB8\x21\x89\xD1\xEF\x57\xF1\x71"
501 "\x3D",
502 .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a",
503 .c =
504 "\x5c\xce\x9c\xd7\x9a\x9e\xa1\xfe\x7a\x82\x3c\x68\x27\x98\xe3\x5d"
505 "\xd5\xd7\x07\x29\xf5\xfb\xc3\x1a\x7f\x63\x1e\x62\x31\x3b\x19\x87"
506 "\x79\x4f\xec\x7b\xf3\xcb\xea\x9b\x95\x52\x3a\x40\xe5\x87\x7b\x72"
507 "\xd1\x72\xc9\xfb\x54\x63\xd8\xc9\xd7\x2c\xfc\x7b\xc3\x14\x1e\xbc"
508 "\x18\xb4\x34\xa1\xbf\x14\xb1\x37\x31\x6e\xf0\x1b\x35\x19\x54\x07"
509 "\xf7\x99\xec\x3e\x63\xe2\xcd\x61\x28\x65\xc3\xcd\xb1\x38\x36\xa5"
510 "\xb2\xd7\xb0\xdc\x1f\xf5\xef\x19\xc7\x53\x32\x2d\x1c\x26\xda\xe4"
511 "\x0d\xd6\x90\x7e\x28\xd8\xdc\xe4\x61\x05\xd2\x25\x90\x01\xd3\x96"
512 "\x6d\xa6\xcf\x58\x20\xbb\x03\xf4\x01\xbc\x79\xb9\x18\xd8\xb8\xba"
513 "\xbd\x93\xfc\xf2\x62\x5d\x8c\x66\x1e\x0e\x84\x59\x93\xdd\xe2\x93"
514 "\xa2\x62\x7d\x08\x82\x7a\xdd\xfc\xb8\xbc\xc5\x4f\x9c\x4e\xbf\xb4"
515 "\xfc\xf4\xc5\x01\xe8\x00\x70\x4d\x28\x26\xcc\x2e\xfe\x0e\x58\x41"
516 "\x8b\xec\xaf\x7c\x4b\x54\xd0\xa0\x64\xf9\x32\xf4\x2e\x47\x65\x0a"
517 "\x67\x88\x39\x3a\xdb\xb2\xdb\x7b\xb5\xf6\x17\xa8\xd9\xc6\x5e\x28"
518 "\x13\x82\x8a\x99\xdb\x60\x08\xa5\x23\x37\xfa\x88\x90\x31\xc8\x9d"
519 "\x8f\xec\xfb\x85\x9f\xb1\xce\xa6\x24\x50\x46\x44\x47\xcb\x65\xd1"
520 "\xdf\xc0\xb1\x6c\x90\x1f\x99\x8e\x4d\xd5\x9e\x31\x07\x66\x87\xdf"
521 "\x01\xaa\x56\x3c\x71\xe0\x2b\x6f\x67\x3b\x23\xed\xc2\xbd\x03\x30"
522 "\x79\x76\x02\x10\x10\x98\x85\x8a\xff\xfd\x0b\xda\xa5\xd9\x32\x48"
523 "\x02\xa0\x0b\xb9\x2a\x8a\x18\xca\xc6\x8f\x3f\xbb\x16\xb2\xaa\x98"
524 "\x27\xe3\x60\x43\xed\x15\x70\xd4\x57\x15\xfe\x19\xd4\x9b\x13\x78"
525 "\x8a\xf7\x21\xf1\xa2\xa2\x2d\xb3\x09\xcf\x44\x91\x6e\x08\x3a\x30"
526 "\x81\x3e\x90\x93\x8a\x67\x33\x00\x59\x54\x9a\x25\xd3\x49\x8e\x9f"
527 "\xc1\x4b\xe5\x86\xf3\x50\x4c\xbc\xc5\xd3\xf5\x3a\x54\xe1\x36\x3f"
528 "\xe2\x5a\xb4\x37\xc0\xeb\x70\x35\xec\xf6\xb7\xe8\x44\x3b\x7b\xf3"
529 "\xf1\xf2\x1e\xdb\x60\x7d\xd5\xbe\xf0\x71\x34\x90\x4c\xcb\xd4\x35"
530 "\x51\xc7\xdd\xd8\xc9\x81\xf5\x5d\x57\x46\x2c\xb1\x7b\x9b\xaa\xcb"
531 "\xd1\x22\x25\x49\x44\xa3\xd4\x6b\x29\x7b\xd8\xb2\x07\x93\xbf\x3d"
532 "\x52\x49\x84\x79\xef\xb8\xe5\xc4\xad\xca\xa8\xc6\xf6\xa6\x76\x70"
533 "\x5b\x0b\xe5\x83\xc6\x0e\xef\x55\xf2\xe7\xff\x04\xea\xe6\x13\xbe"
534 "\x40\xe1\x40\x45\x48\x66\x75\x31\xae\x35\x64\x91\x11\x6f\xda\xee"
535 "\x26\x86\x45\x6f\x0b\xd5\x9f\x03\xb1\x65\x5b\xdb\xa4\xe4\xf9\x45",
536 .key_len = 2349,
537 .m_size = 8,
538 .c_size = 512,
539 }
540};
541
542#define DH_TEST_VECTORS 2
543
544static struct kpp_testvec dh_tv_template[] = {
545 {
546 .secret =
547#ifdef __LITTLE_ENDIAN
548 "\x01\x00" /* type */
549 "\x11\x02" /* len */
550 "\x00\x01\x00\x00" /* key_size */
551 "\x00\x01\x00\x00" /* p_size */
552 "\x01\x00\x00\x00" /* g_size */
553#else
554 "\x00\x01" /* type */
555 "\x02\x11" /* len */
556 "\x00\x00\x01\x00" /* key_size */
557 "\x00\x00\x01\x00" /* p_size */
558 "\x00\x00\x00\x01" /* g_size */
559#endif
560 /* xa */
561 "\x44\xc1\x48\x36\xa7\x2b\x6f\x4e\x43\x03\x68\xad\x31\x00\xda\xf3"
562 "\x2a\x01\xa8\x32\x63\x5f\x89\x32\x1f\xdf\x4c\xa1\x6a\xbc\x10\x15"
563 "\x90\x35\xc9\x26\x41\xdf\x7b\xaa\x56\x56\x3d\x85\x44\xb5\xc0\x8e"
564 "\x37\x83\x06\x50\xb3\x5f\x0e\x28\x2c\xd5\x46\x15\xe3\xda\x7d\x74"
565 "\x87\x13\x91\x4f\xd4\x2d\xf6\xc7\x5e\x14\x2c\x11\xc2\x26\xb4\x3a"
566 "\xe3\xb2\x36\x20\x11\x3b\x22\xf2\x06\x65\x66\xe2\x57\x58\xf8\x22"
567 "\x1a\x94\xbd\x2b\x0e\x8c\x55\xad\x61\x23\x45\x2b\x19\x1e\x63\x3a"
568 "\x13\x61\xe3\xa0\x79\x70\x3e\x6d\x98\x32\xbc\x7f\x82\xc3\x11\xd8"
569 "\xeb\x53\xb5\xfc\xb5\xd5\x3c\x4a\xea\x92\x3e\x01\xce\x15\x65\xd4"
570 "\xaa\x85\xc1\x11\x90\x83\x31\x6e\xfe\xe7\x7f\x7d\xed\xab\xf9\x29"
571 "\xf8\xc7\xf1\x68\xc6\xb7\xe4\x1f\x2f\x28\xa0\xc9\x1a\x50\x64\x29"
572 "\x4b\x01\x6d\x1a\xda\x46\x63\x21\x07\x40\x8c\x8e\x4c\x6f\xb5\xe5"
573 "\x12\xf3\xc2\x1b\x48\x27\x5e\x27\x01\xb1\xaa\xed\x68\x9b\x83\x18"
574 "\x8f\xb1\xeb\x1f\x04\xd1\x3c\x79\xed\x4b\xf7\x0a\x33\xdc\xe0\xc6"
575 "\xd8\x02\x51\x59\x00\x74\x30\x07\x4c\x2d\xac\xe4\x13\xf1\x80\xf0"
576 "\xce\xfa\xff\xa9\xce\x29\x46\xdd\x9d\xad\xd1\xc3\xc6\x58\x1a\x63"
577 /* p */
578 "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81"
579 "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc"
580 "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89"
581 "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64"
582 "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3"
583 "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98"
584 "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26"
585 "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6"
586 "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06"
587 "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7"
588 "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2"
589 "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb"
590 "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f"
591 "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca"
592 "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67"
593 "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73"
594 /* g */
595 "\x02",
596 .b_public =
597 "\x2a\x67\x5c\xfd\x63\x5d\xc0\x97\x0a\x8b\xa2\x1f\xf8\x8a\xcb\x54"
598 "\xca\x2f\xd3\x49\x3f\x01\x8e\x87\xfe\xcc\x94\xa0\x3e\xd4\x26\x79"
599 "\x9a\x94\x3c\x11\x81\x58\x5c\x60\x3d\xf5\x98\x90\x89\x64\x62\x1f"
600 "\xbd\x05\x6d\x2b\xcd\x84\x40\x9b\x4a\x1f\xe0\x19\xf1\xca\x20\xb3"
601 "\x4e\xa0\x4f\x15\xcc\xa5\xfe\xa5\xb4\xf5\x0b\x18\x7a\x5a\x37\xaa"
602 "\x58\x00\x19\x7f\xe2\xa3\xd9\x1c\x44\x57\xcc\xde\x2e\xc1\x38\xea"
603 "\xeb\xe3\x90\x40\xc4\x6c\xf7\xcd\xe9\x22\x50\x71\xf5\x7c\xdb\x37"
604 "\x0e\x80\xc3\xed\x7e\xb1\x2b\x2f\xbe\x71\xa6\x11\xa5\x9d\xf5\x39"
605 "\xf1\xa2\xe5\x85\xbc\x25\x91\x4e\x84\x8d\x26\x9f\x4f\xe6\x0f\xa6"
606 "\x2b\x6b\xf9\x0d\xaf\x6f\xbb\xfa\x2d\x79\x15\x31\x57\xae\x19\x60"
607 "\x22\x0a\xf5\xfd\x98\x0e\xbf\x5d\x49\x75\x58\x37\xbc\x7f\xf5\x21"
608 "\x56\x1e\xd5\xb3\x50\x0b\xca\x96\xf3\xd1\x3f\xb3\x70\xa8\x6d\x63"
609 "\x48\xfb\x3d\xd7\x29\x91\x45\xb5\x48\xcd\xb6\x78\x30\xf2\x3f\x1e"
610 "\xd6\x22\xd6\x35\x9b\xf9\x1f\x85\xae\xab\x4b\xd7\xe0\xc7\x86\x67"
611 "\x3f\x05\x7f\xa6\x0d\x2f\x0d\xbf\x53\x5f\x4d\x2c\x6d\x5e\x57\x40"
612 "\x30\x3a\x23\x98\xf9\xb4\x32\xf5\x32\x83\xdd\x0b\xae\x33\x97\x2f",
613 .expected_a_public =
614 "\x5c\x24\xdf\xeb\x5b\x4b\xf8\xc5\xef\x39\x48\x82\xe0\x1e\x62\xee"
615 "\x8a\xae\xdf\x93\x6c\x2b\x16\x95\x92\x16\x3f\x16\x7b\x75\x03\x85"
616 "\xd9\xf1\x69\xc2\x14\x87\x45\xfc\xa4\x19\xf6\xf0\xa4\xf3\xec\xd4"
617 "\x6c\x5c\x03\x3b\x94\xc2\x2f\x92\xe4\xce\xb3\xe4\x72\xe8\x17\xe6"
618 "\x23\x7e\x00\x01\x09\x59\x13\xbf\xc1\x2f\x99\xa9\x07\xaa\x02\x23"
619 "\x4a\xca\x39\x4f\xbc\xec\x0f\x27\x4f\x19\x93\x6c\xb9\x30\x52\xfd"
620 "\x2b\x9d\x86\xf1\x06\x1e\xb6\x56\x27\x4a\xc9\x8a\xa7\x8a\x48\x5e"
621 "\xb5\x60\xcb\xdf\xff\x03\x26\x10\xbf\x90\x8f\x46\x60\xeb\x9b\x9a"
622 "\xd6\x6f\x44\x91\x03\x92\x18\x2c\x96\x5e\x40\x19\xfb\xf4\x4f\x3a"
623 "\x02\x7b\xaf\xcc\x22\x20\x79\xb9\xf8\x9f\x8f\x85\x6b\xec\x44\xbb"
624 "\xe6\xa8\x8e\xb1\xe8\x2c\xee\x64\xee\xf8\xbd\x00\xf3\xe2\x2b\x93"
625 "\xcd\xe7\xc4\xdf\xc9\x19\x46\xfe\xb6\x07\x73\xc1\x8a\x64\x79\x26"
626 "\xe7\x30\xad\x2a\xdf\xe6\x8f\x59\xf5\x81\xbf\x4a\x29\x91\xe7\xb7"
627 "\xcf\x48\x13\x27\x75\x79\x40\xd9\xd6\x32\x52\x4e\x6a\x86\xae\x6f"
628 "\xc2\xbf\xec\x1f\xc2\x69\xb2\xb6\x59\xe5\xa5\x17\xa4\x77\xb7\x62"
629 "\x46\xde\xe8\xd2\x89\x78\x9a\xef\xa3\xb5\x8f\x26\xec\x80\xda\x39",
630 .expected_ss =
631 "\x8f\xf3\xac\xa2\xea\x22\x11\x5c\x45\x65\x1a\x77\x75\x2e\xcf\x46"
632 "\x23\x14\x1e\x67\x53\x4d\x35\xb0\x38\x1d\x4e\xb9\x41\x9a\x21\x24"
633 "\x6e\x9f\x40\xfe\x90\x51\xb1\x06\xa4\x7b\x87\x17\x2f\xe7\x5e\x22"
634 "\xf0\x7b\x54\x84\x0a\xac\x0a\x90\xd2\xd7\xe8\x7f\xe7\xe3\x30\x75"
635 "\x01\x1f\x24\x75\x56\xbe\xcc\x8d\x1e\x68\x0c\x41\x72\xd3\xfa\xbb"
636 "\xe5\x9c\x60\xc7\x28\x77\x0c\xbe\x89\xab\x08\xd6\x21\xe7\x2e\x1a"
637 "\x58\x7a\xca\x4f\x22\xf3\x2b\x30\xfd\xf4\x98\xc1\xa3\xf8\xf6\xcc"
638 "\xa9\xe4\xdb\x5b\xee\xd5\x5c\x6f\x62\x4c\xd1\x1a\x02\x2a\x23\xe4"
639 "\xb5\x57\xf3\xf9\xec\x04\x83\x54\xfe\x08\x5e\x35\xac\xfb\xa8\x09"
640 "\x82\x32\x60\x11\xb2\x16\x62\x6b\xdf\xda\xde\x9c\xcb\x63\x44\x6c"
641 "\x59\x26\x6a\x8f\xb0\x24\xcb\xa6\x72\x48\x1e\xeb\xe0\xe1\x09\x44"
642 "\xdd\xee\x66\x6d\x84\xcf\xa5\xc1\xb8\x36\x74\xd3\x15\x96\xc3\xe4"
643 "\xc6\x5a\x4d\x23\x97\x0c\x5c\xcb\xa9\xf5\x29\xc2\x0e\xff\x93\x82"
644 "\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
645 "\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
646 "\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
647 .secret_size = 529,
648 .b_public_size = 256,
649 .expected_a_public_size = 256,
650 .expected_ss_size = 256,
651 },
652 {
653 .secret =
654#ifdef __LITTLE_ENDIAN
655 "\x01\x00" /* type */
656 "\x11\x02" /* len */
657 "\x00\x01\x00\x00" /* key_size */
658 "\x00\x01\x00\x00" /* p_size */
659 "\x01\x00\x00\x00" /* g_size */
660#else
661 "\x00\x01" /* type */
662 "\x02\x11" /* len */
663 "\x00\x00\x01\x00" /* key_size */
664 "\x00\x00\x01\x00" /* p_size */
665 "\x00\x00\x00\x01" /* g_size */
666#endif
667 /* xa */
668 "\x4d\x75\xa8\x6e\xba\x23\x3a\x0c\x63\x56\xc8\xc9\x5a\xa7\xd6\x0e"
669 "\xed\xae\x40\x78\x87\x47\x5f\xe0\xa7\x7b\xba\x84\x88\x67\x4e\xe5"
670 "\x3c\xcc\x5c\x6a\xe7\x4a\x20\xec\xbe\xcb\xf5\x52\x62\x9f\x37\x80"
671 "\x0c\x72\x7b\x83\x66\xa4\xf6\x7f\x95\x97\x1c\x6a\x5c\x7e\xf1\x67"
672 "\x37\xb3\x93\x39\x3d\x0b\x55\x35\xd9\xe5\x22\x04\x9f\xf8\xc1\x04"
673 "\xce\x13\xa5\xac\xe1\x75\x05\xd1\x2b\x53\xa2\x84\xef\xb1\x18\xf4"
674 "\x66\xdd\xea\xe6\x24\x69\x5a\x49\xe0\x7a\xd8\xdf\x1b\xb7\xf1\x6d"
675 "\x9b\x50\x2c\xc8\x1c\x1c\xa3\xb4\x37\xfb\x66\x3f\x67\x71\x73\xa9"
676 "\xff\x5f\xd9\xa2\x25\x6e\x25\x1b\x26\x54\xbf\x0c\xc6\xdb\xea\x0a"
677 "\x52\x6c\x16\x7c\x27\x68\x15\x71\x58\x73\x9d\xe6\xc2\x80\xaa\x97"
678 "\x31\x66\xfb\xa6\xfb\xfd\xd0\x9c\x1d\xbe\x81\x48\xf5\x9a\x32\xf1"
679 "\x69\x62\x18\x78\xae\x72\x36\xe6\x94\x27\xd1\xff\x18\x4f\x28\x6a"
680 "\x16\xbd\x6a\x60\xee\xe5\xf9\x6d\x16\xe4\xb8\xa6\x41\x9b\x23\x7e"
681 "\xf7\x9d\xd1\x1d\x03\x15\x66\x3a\xcf\xb6\x2c\x13\x96\x2c\x52\x21"
682 "\xe4\x2d\x48\x7a\x8a\x5d\xb2\x88\xed\x98\x61\x79\x8b\x6a\x1e\x5f"
683 "\xd0\x8a\x2d\x99\x5a\x2b\x0f\xbc\xef\x53\x8f\x32\xc1\xa2\x99\x26"
684 /* p */
685 "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81"
686 "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc"
687 "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89"
688 "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64"
689 "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3"
690 "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98"
691 "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26"
692 "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6"
693 "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06"
694 "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7"
695 "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2"
696 "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb"
697 "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f"
698 "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca"
699 "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67"
700 "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73"
701 /* g */
702 "\x02",
703 .b_public =
704 "\x99\x4d\xd9\x01\x84\x8e\x4a\x5b\xb8\xa5\x64\x8c\x6c\x00\x5c\x0e"
705 "\x1e\x1b\xee\x5d\x9f\x53\xe3\x16\x70\x01\xed\xbf\x4f\x14\x36\x6e"
706 "\xe4\x43\x45\x43\x49\xcc\xb1\xb0\x2a\xc0\x6f\x22\x55\x42\x17\x94"
707 "\x18\x83\xd7\x2a\x5c\x51\x54\xf8\x4e\x7c\x10\xda\x76\x68\x57\x77"
708 "\x1e\x62\x03\x30\x04\x7b\x4c\x39\x9c\x54\x01\x54\xec\xef\xb3\x55"
709 "\xa4\xc0\x24\x6d\x3d\xbd\xcc\x46\x5b\x00\x96\xc7\xea\x93\xd1\x3f"
710 "\xf2\x6a\x72\xe3\xf2\xc1\x92\x24\x5b\xda\x48\x70\x2c\xa9\x59\x97"
711 "\x19\xb1\xd6\x54\xb3\x9c\x2e\xb0\x63\x07\x9b\x5e\xac\xb5\xf2\xb1"
712 "\x5b\xf8\xf3\xd7\x2d\x37\x9b\x68\x6c\xf8\x90\x07\xbc\x37\x9a\xa5"
713 "\xe2\x91\x12\x25\x47\x77\xe3\x3d\xb2\x95\x69\x44\x0b\x91\x1e\xaf"
714 "\x7c\x8c\x7c\x34\x41\x6a\xab\x60\x6e\xc6\x52\xec\x7e\x94\x0a\x37"
715 "\xec\x98\x90\xdf\x3f\x02\xbd\x23\x52\xdd\xd9\xe5\x31\x80\x74\x25"
716 "\xb6\xd2\xd3\xcc\xd5\xcc\x6d\xf9\x7e\x4d\x78\xab\x77\x51\xfa\x77"
717 "\x19\x94\x49\x8c\x05\xd4\x75\xed\xd2\xb3\x64\x57\xe0\x52\x99\xc0"
718 "\x83\xe3\xbb\x5e\x2b\xf1\xd2\xc0\xb1\x37\x36\x0b\x7c\xb5\x63\x96"
719 "\x8e\xde\x04\x23\x11\x95\x62\x11\x9a\xce\x6f\x63\xc8\xd5\xd1\x8f",
720 .expected_a_public =
721 "\x90\x89\xe4\x82\xd6\x0a\xcf\x1a\xae\xce\x1b\x66\xa7\x19\x71\x18"
722 "\x8f\x95\x4b\x5b\x80\x45\x4a\x5a\x43\x99\x4d\x37\xcf\xa3\xa7\x28"
723 "\x9c\xc7\x73\xf1\xb2\x17\xf6\x99\xe3\x6b\x56\xcb\x3e\x35\x60\x7d"
724 "\x65\xc7\x84\x6b\x3e\x60\xee\xcd\xd2\x70\xe7\xc9\x32\x1c\xf0\xb4"
725 "\xf9\x52\xd9\x88\x75\xfd\x40\x2c\xa7\xbe\x19\x1c\x0a\xae\x93\xe1"
726 "\x71\xc7\xcd\x4f\x33\x5c\x10\x7d\x39\x56\xfc\x73\x84\xb2\x67\xc3"
727 "\x77\x26\x20\x97\x2b\xf8\x13\x43\x93\x9c\x9a\xa4\x08\xc7\x34\x83"
728 "\xe6\x98\x61\xe7\x16\x30\x2c\xb1\xdb\x2a\xb2\xcc\xc3\x02\xa5\x3c"
729 "\x71\x50\x14\x83\xc7\xbb\xa4\xbe\x98\x1b\xfe\xcb\x43\xe9\x97\x62"
730 "\xd6\xf0\x8c\xcb\x1c\xba\x1e\xa8\xa6\xa6\x50\xfc\x85\x7d\x47\xbf"
731 "\xf4\x3e\x23\xd3\x5f\xb2\x71\x3e\x40\x94\xaa\x87\x83\x2c\x6c\x8e"
732 "\x60\xfd\xdd\xf7\xf4\x76\x03\xd3\x1d\xec\x18\x51\xa3\xf2\x44\x1a"
733 "\x3f\xb4\x7c\x18\x0d\x68\x65\x92\x54\x0d\x2d\x81\x16\xf1\x84\x66"
734 "\x89\x92\xd0\x1a\x5e\x1f\x42\x46\x5b\xe5\x83\x86\x80\xd9\xcd\x3a"
735 "\x5a\x2f\xb9\x59\x9b\xe4\x43\x84\x64\xf3\x09\x1a\x0a\xa2\x64\x0f"
736 "\x77\x4e\x8d\x8b\xe6\x88\xd1\xfc\xaf\x8f\xdf\x1d\xbc\x31\xb3\xbd",
737 .expected_ss =
738 "\x34\xc3\x35\x14\x88\x46\x26\x23\x97\xbb\xdd\x28\x5c\x94\xf6\x47"
739 "\xca\xb3\x19\xaf\xca\x44\x9b\xc2\x7d\x89\xfd\x96\x14\xfd\x6d\x58"
740 "\xd8\xc4\x6b\x61\x2a\x0d\xf2\x36\x45\xc8\xe4\xa4\xed\x81\x53\x81"
741 "\x66\x1e\xe0\x5a\xb1\x78\x2d\x0b\x5c\xb4\xd1\xfc\x90\xc6\x9c\xdb"
742 "\x5a\x30\x0b\x14\x7d\xbe\xb3\x7d\xb1\xb2\x76\x3c\x6c\xef\x74\x6b"
743 "\xe7\x1f\x64\x0c\xab\x65\xe1\x76\x5c\x3d\x83\xb5\x8a\xfb\xaf\x0f"
744 "\xf2\x06\x14\x8f\xa0\xf6\xc1\x89\x78\xf2\xba\x72\x73\x3c\xf7\x76"
745 "\x21\x67\xbc\x24\x31\xb8\x09\x65\x0f\x0c\x02\x32\x4a\x98\x14\xfc"
746 "\x72\x2c\x25\x60\x68\x5f\x2f\x30\x1e\x5b\xf0\x3b\xd1\xa2\x87\xa0"
747 "\x54\xdf\xdb\xc0\xee\x0a\x0f\x47\xc9\x90\x20\x2c\xf9\xe3\x52\xad"
748 "\x27\x65\x8d\x54\x8d\xa8\xa1\xf3\xed\x15\xd4\x94\x28\x90\x31\x93"
749 "\x1b\xc0\x51\xbb\x43\x5d\x76\x3b\x1d\x2a\x71\x50\xea\x5d\x48\x94"
750 "\x7f\x6f\xf1\x48\xdb\x30\xe5\xae\x64\x79\xd9\x7a\xdb\xc6\xff\xd8"
751 "\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
752 "\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
753 "\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
754 .secret_size = 529,
755 .b_public_size = 256,
756 .expected_a_public_size = 256,
757 .expected_ss_size = 256,
758 }
759};
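
The .secret blobs above are not raw key material; they are the packed format that crypto_kpp_set_secret() expects: a small native-endian header (type, total length) followed by key_size/p_size/g_size and then xa, p and g back to back, which is why each vector carries a __LITTLE_ENDIAN/__BIG_ENDIAN split. A hedged sketch of building such a 529-byte blob, assuming the crypto_dh_encode_key()/crypto_dh_key_len() helpers from dh_helper.c in this series; the dh_xa/dh_p/dh_g buffers stand in for the vector data:

#include <crypto/dh.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical buffers standing in for xa, p and g above. */
static u8 dh_xa[256], dh_p[256], dh_g[1] = { 2 };

static void *pack_dh_secret(unsigned int *lenp)
{
	struct dh params = {
		.key = dh_xa, .key_size = sizeof(dh_xa),	/* private value xa */
		.p   = dh_p,  .p_size   = sizeof(dh_p),		/* 2048-bit prime p */
		.g   = dh_g,  .g_size   = sizeof(dh_g),		/* generator g = 2 */
	};
	unsigned int len = crypto_dh_key_len(&params);	/* 529 for these sizes */
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf || crypto_dh_encode_key(buf, len, &params)) {
		kfree(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}
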
760
761#ifdef CONFIG_CRYPTO_FIPS
762#define ECDH_TEST_VECTORS 1
763#else
764#define ECDH_TEST_VECTORS 2
765#endif
766static struct kpp_testvec ecdh_tv_template[] = {
767 {
768#ifndef CONFIG_CRYPTO_FIPS
769 .secret =
770#ifdef __LITTLE_ENDIAN
771 "\x02\x00" /* type */
772 "\x20\x00" /* len */
773 "\x01\x00" /* curve_id */
774 "\x18\x00" /* key_size */
775#else
776 "\x00\x02" /* type */
777 "\x00\x20" /* len */
778 "\x00\x01" /* curve_id */
779 "\x00\x18" /* key_size */
780#endif
781 "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
782 "\x4e\x19\x1e\x62\x1f\x23\x23\x31"
783 "\x36\x1e\xd3\x84\x2f\xcc\x21\x72",
784 .b_public =
785 "\xc3\xba\x67\x4b\x71\xec\xd0\x76"
786 "\x7a\x99\x75\x64\x36\x13\x9a\x94"
787 "\x5d\x8b\xdc\x60\x90\x91\xfd\x3f"
788 "\xb0\x1f\x8a\x0a\x68\xc6\x88\x6e"
789 "\x83\x87\xdd\x67\x09\xf8\x8d\x96"
790 "\x07\xd6\xbd\x1c\xe6\x8d\x9d\x67",
791 .expected_a_public =
792 "\x1a\x04\xdb\xa5\xe1\xdd\x4e\x79"
793 "\xa3\xe6\xef\x0e\x5c\x80\x49\x85"
794 "\xfa\x78\xb4\xef\x49\xbd\x4c\x7c"
795 "\x22\x90\x21\x02\xf9\x1b\x81\x5d"
796 "\x0c\x8a\xa8\x98\xd6\x27\x69\x88"
797 "\x5e\xbc\x94\xd8\x15\x9e\x21\xce",
798 .expected_ss =
799 "\xf4\x57\xcc\x4f\x1f\x4e\x31\xcc"
800 "\xe3\x40\x60\xc8\x06\x93\xc6\x2e"
801 "\x99\x80\x81\x28\xaf\xc5\x51\x74",
802 .secret_size = 32,
803 .b_public_size = 48,
804 .expected_a_public_size = 48,
805 .expected_ss_size = 24
806 }, {
807#endif
808 .secret =
809#ifdef __LITTLE_ENDIAN
810 "\x02\x00" /* type */
811 "\x28\x00" /* len */
812 "\x02\x00" /* curve_id */
813 "\x20\x00" /* key_size */
814#else
815 "\x00\x02" /* type */
816 "\x00\x28" /* len */
817 "\x00\x02" /* curve_id */
818 "\x00\x20" /* key_size */
819#endif
820 "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
821 "\xf6\x62\x1b\x6e\x43\x84\x3a\xa3"
822 "\x8b\xe0\x86\xc3\x20\x19\xda\x92"
823 "\x50\x53\x03\xe1\xc0\xea\xb8\x82",
824 .expected_a_public =
825 "\x1a\x7f\xeb\x52\x00\xbd\x3c\x31"
826 "\x7d\xb6\x70\xc1\x86\xa6\xc7\xc4"
827 "\x3b\xc5\x5f\x6c\x6f\x58\x3c\xf5"
828 "\xb6\x63\x82\x77\x33\x24\xa1\x5f"
829 "\x6a\xca\x43\x6f\xf7\x7e\xff\x02"
830 "\x37\x08\xcc\x40\x5e\x7a\xfd\x6a"
831 "\x6a\x02\x6e\x41\x87\x68\x38\x77"
832 "\xfa\xa9\x44\x43\x2d\xef\x09\xdf",
833 .expected_ss =
834 "\xea\x17\x6f\x7e\x6e\x57\x26\x38"
835 "\x8b\xfb\x41\xeb\xba\xc8\x6d\xa5"
836 "\xa8\x72\xd1\xff\xc9\x47\x3d\xaa"
837 "\x58\x43\x9f\x34\x0f\x8c\xf3\xc9",
838 .b_public =
839 "\xcc\xb4\xda\x74\xb1\x47\x3f\xea"
840 "\x6c\x70\x9e\x38\x2d\xc7\xaa\xb7"
841 "\x29\xb2\x47\x03\x19\xab\xdd\x34"
842 "\xbd\xa8\x2c\x93\xe1\xa4\x74\xd9"
843 "\x64\x63\xf7\x70\x20\x2f\xa4\xe6"
844 "\x9f\x4a\x38\xcc\xc0\x2c\x49\x2f"
845 "\xb1\x32\xbb\xaf\x22\x61\xda\xcb"
846 "\x6f\xdb\xa9\xaa\xfc\x77\x81\xf3",
847 .secret_size = 40,
848 .b_public_size = 64,
849 .expected_a_public_size = 64,
850 .expected_ss_size = 32
330 } 851 }
331}; 852};
332 853
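
The ECDH secrets use the same packing scheme with a different header body: type 2, total length, then curve_id (1 = NIST P-192, 2 = NIST P-256, matching the 48- and 64-byte public values above) and key_size, followed by the raw private key. A similar hedged sketch, assuming the crypto_ecdh_encode_key()/crypto_ecdh_key_len() helpers from ecdh_helper.c in this series; the private-key buffer is a stand-in:

#include <crypto/ecdh.h>
#include <linux/errno.h>

/* Hypothetical stand-in for the 32-byte P-256 private key above. */
static char ecdh_priv[32];

static int pack_ecdh_secret(char *buf, unsigned int len)
{
	struct ecdh params = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key      = ecdh_priv,
		.key_size = sizeof(ecdh_priv),
	};

	if (crypto_ecdh_key_len(&params) > len)	/* 40 for a 32-byte key */
		return -EINVAL;
	return crypto_ecdh_encode_key(buf, len, &params);
}
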
@@ -376,6 +897,131 @@ static struct hash_testvec md4_tv_template [] = {
376 }, 897 },
377}; 898};
378 899
900#define SHA3_224_TEST_VECTORS 3
901static struct hash_testvec sha3_224_tv_template[] = {
902 {
903 .plaintext = "",
904 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
905 "\x3b\x6e\x15\x45\x4f\x0e\xb1\xab"
906 "\xd4\x59\x7f\x9a\x1b\x07\x8e\x3f"
907 "\x5b\x5a\x6b\xc7",
908 }, {
909 .plaintext = "a",
910 .psize = 1,
911 .digest = "\x9e\x86\xff\x69\x55\x7c\xa9\x5f"
912 "\x40\x5f\x08\x12\x69\x68\x5b\x38"
913 "\xe3\xa8\x19\xb3\x09\xee\x94\x2f"
914 "\x48\x2b\x6a\x8b",
915 }, {
916 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
917 "jklmklmnlmnomnopnopq",
918 .psize = 56,
919 .digest = "\x8a\x24\x10\x8b\x15\x4a\xda\x21"
920 "\xc9\xfd\x55\x74\x49\x44\x79\xba"
921 "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
922 "\xd0\xfc\xce\x33",
923 },
924};
925
926#define SHA3_256_TEST_VECTORS 3
927static struct hash_testvec sha3_256_tv_template[] = {
928 {
929 .plaintext = "",
930 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
931 "\x51\xc1\x47\x56\xa0\x61\xd6\x62"
932 "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa"
933 "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a",
934 }, {
935 .plaintext = "a",
936 .psize = 1,
937 .digest = "\x80\x08\x4b\xf2\xfb\xa0\x24\x75"
938 "\x72\x6f\xeb\x2c\xab\x2d\x82\x15"
939 "\xea\xb1\x4b\xc6\xbd\xd8\xbf\xb2"
940 "\xc8\x15\x12\x57\x03\x2e\xcd\x8b",
941 }, {
942 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
943 "jklmklmnlmnomnopnopq",
944 .psize = 56,
945 .digest = "\x41\xc0\xdb\xa2\xa9\xd6\x24\x08"
946 "\x49\x10\x03\x76\xa8\x23\x5e\x2c"
947 "\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
948 "\xdb\x32\xdd\x97\x49\x6d\x33\x76",
949 },
950};
951
952
953#define SHA3_384_TEST_VECTORS 3
954static struct hash_testvec sha3_384_tv_template[] = {
955 {
956 .plaintext = "",
957 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
958 "\x01\x10\x7d\x85\x2e\x4c\x24\x85"
959 "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61"
960 "\x99\x5e\x71\xbb\xee\x98\x3a\x2a"
961 "\xc3\x71\x38\x31\x26\x4a\xdb\x47"
962 "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04",
963 }, {
964 .plaintext = "a",
965 .psize = 1,
966 .digest = "\x18\x15\xf7\x74\xf3\x20\x49\x1b"
967 "\x48\x56\x9e\xfe\xc7\x94\xd2\x49"
968 "\xee\xb5\x9a\xae\x46\xd2\x2b\xf7"
969 "\x7d\xaf\xe2\x5c\x5e\xdc\x28\xd7"
970 "\xea\x44\xf9\x3e\xe1\x23\x4a\xa8"
971 "\x8f\x61\xc9\x19\x12\xa4\xcc\xd9",
972 }, {
973 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
974 "jklmklmnlmnomnopnopq",
975 .psize = 56,
976 .digest = "\x99\x1c\x66\x57\x55\xeb\x3a\x4b"
977 "\x6b\xbd\xfb\x75\xc7\x8a\x49\x2e"
978 "\x8c\x56\xa2\x2c\x5c\x4d\x7e\x42"
979 "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
980 "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
981 "\x9e\xef\x51\xac\xd0\x65\x7c\x22",
982 },
983};
984
985
986#define SHA3_512_TEST_VECTORS 3
987static struct hash_testvec sha3_512_tv_template[] = {
988 {
989 .plaintext = "",
990 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
991 "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e"
992 "\x97\xc9\x82\x16\x4f\xe2\x58\x59"
993 "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6"
994 "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c"
995 "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58"
996 "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3"
997 "\x01\x75\x85\x86\x28\x1d\xcd\x26",
998 }, {
999 .plaintext = "a",
1000 .psize = 1,
1001 .digest = "\x69\x7f\x2d\x85\x61\x72\xcb\x83"
1002 "\x09\xd6\xb8\xb9\x7d\xac\x4d\xe3"
1003 "\x44\xb5\x49\xd4\xde\xe6\x1e\xdf"
1004 "\xb4\x96\x2d\x86\x98\xb7\xfa\x80"
1005 "\x3f\x4f\x93\xff\x24\x39\x35\x86"
1006 "\xe2\x8b\x5b\x95\x7a\xc3\xd1\xd3"
1007 "\x69\x42\x0c\xe5\x33\x32\x71\x2f"
1008 "\x99\x7b\xd3\x36\xd0\x9a\xb0\x2a",
1009 }, {
1010 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
1011 "jklmklmnlmnomnopnopq",
1012 .psize = 56,
1013 .digest = "\x04\xa3\x71\xe8\x4e\xcf\xb5\xb8"
1014 "\xb7\x7c\xb4\x86\x10\xfc\xa8\x18"
1015 "\x2d\xd4\x57\xce\x6f\x32\x6a\x0f"
1016 "\xd3\xd7\xec\x2f\x1e\x91\x63\x6d"
1017 "\xee\x69\x1f\xbe\x0c\x98\x53\x02"
1018 "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
1019 "\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
1020 "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
1021 },
1022};
1023
1024
379/* 1025/*
380 * MD5 test vectors from RFC1321 1026 * MD5 test vectors from RFC1321
381 */ 1027 */
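
The SHA3 digests above match the published FIPS 202 values (e.g. the well-known empty-string digests) and can be reproduced in-kernel through the ordinary shash interface once sha3_generic is available. A minimal hedged self-check of the sha3-256("a") vector; check_sha3_256() is a hypothetical helper, not part of the patch:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/string.h>

/* sha3-256("a") from the vector above. */
static const u8 sha3_256_a[32] = {
	0x80, 0x08, 0x4b, 0xf2, 0xfb, 0xa0, 0x24, 0x75,
	0x72, 0x6f, 0xeb, 0x2c, 0xab, 0x2d, 0x82, 0x15,
	0xea, 0xb1, 0x4b, 0xc6, 0xbd, 0xd8, 0xbf, 0xb2,
	0xc8, 0x15, 0x12, 0x57, 0x03, 0x2e, 0xcd, 0x8b,
};

static int check_sha3_256(void)
{
	struct crypto_shash *tfm;
	u8 out[32];
	int err;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, (const u8 *)"a", 1, out);
	}

	crypto_free_shash(tfm);
	if (!err && memcmp(out, sha3_256_a, sizeof(out)))
		err = -EINVAL;
	return err;
}
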
@@ -3246,6 +3892,394 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
3246 }, 3892 },
3247}; 3893};
3248 3894
3895#define HMAC_SHA3_224_TEST_VECTORS 4
3896
3897static struct hash_testvec hmac_sha3_224_tv_template[] = {
3898 {
3899 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3900 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3901 "\x0b\x0b\x0b\x0b",
3902 .ksize = 20,
3903 .plaintext = "Hi There",
3904 .psize = 8,
3905 .digest = "\x3b\x16\x54\x6b\xbc\x7b\xe2\x70"
3906 "\x6a\x03\x1d\xca\xfd\x56\x37\x3d"
3907 "\x98\x84\x36\x76\x41\xd8\xc5\x9a"
3908 "\xf3\xc8\x60\xf7",
3909 }, {
3910 .key = "Jefe",
3911 .ksize = 4,
3912 .plaintext = "what do ya want for nothing?",
3913 .psize = 28,
3914 .digest = "\x7f\xdb\x8d\xd8\x8b\xd2\xf6\x0d"
3915 "\x1b\x79\x86\x34\xad\x38\x68\x11"
3916 "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b"
3917 "\xba\xce\x5e\x66",
3918 .np = 4,
3919 .tap = { 7, 7, 7, 7 }
3920 }, {
3921 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3922 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3923 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3924 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3925 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3926 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3927 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3928 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3929 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3930 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3931 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3932 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3933 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3934 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3935 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3936 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3937 "\xaa\xaa\xaa",
3938 .ksize = 131,
3939 .plaintext = "Test Using Large"
3940 "r Than Block-Siz"
3941 "e Key - Hash Key"
3942 " First",
3943 .psize = 54,
3944 .digest = "\xb4\xa1\xf0\x4c\x00\x28\x7a\x9b"
3945 "\x7f\x60\x75\xb3\x13\xd2\x79\xb8"
3946 "\x33\xbc\x8f\x75\x12\x43\x52\xd0"
3947 "\x5f\xb9\x99\x5f",
3948 }, {
3949 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3950 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3951 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3952 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3953 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3954 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3955 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3956 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3957 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3958 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3959 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3960 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3961 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3962 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3963 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3964 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3965 "\xaa\xaa\xaa",
3966 .ksize = 131,
3967 .plaintext =
3968 "This is a test u"
3969 "sing a larger th"
3970 "an block-size ke"
3971 "y and a larger t"
3972 "han block-size d"
3973 "ata. The key nee"
3974 "ds to be hashed "
3975 "before being use"
3976 "d by the HMAC al"
3977 "gorithm.",
3978 .psize = 152,
3979 .digest = "\x05\xd8\xcd\x6d\x00\xfa\xea\x8d"
3980 "\x1e\xb6\x8a\xde\x28\x73\x0b\xbd"
3981 "\x3c\xba\xb6\x92\x9f\x0a\x08\x6b"
3982 "\x29\xcd\x62\xa0",
3983 },
3984};
3985
3986#define HMAC_SHA3_256_TEST_VECTORS 4
3987
3988static struct hash_testvec hmac_sha3_256_tv_template[] = {
3989 {
3990 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3991 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3992 "\x0b\x0b\x0b\x0b",
3993 .ksize = 20,
3994 .plaintext = "Hi There",
3995 .psize = 8,
3996 .digest = "\xba\x85\x19\x23\x10\xdf\xfa\x96"
3997 "\xe2\xa3\xa4\x0e\x69\x77\x43\x51"
3998 "\x14\x0b\xb7\x18\x5e\x12\x02\xcd"
3999 "\xcc\x91\x75\x89\xf9\x5e\x16\xbb",
4000 }, {
4001 .key = "Jefe",
4002 .ksize = 4,
4003 .plaintext = "what do ya want for nothing?",
4004 .psize = 28,
4005 .digest = "\xc7\xd4\x07\x2e\x78\x88\x77\xae"
4006 "\x35\x96\xbb\xb0\xda\x73\xb8\x87"
4007 "\xc9\x17\x1f\x93\x09\x5b\x29\x4a"
4008 "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5",
4009 .np = 4,
4010 .tap = { 7, 7, 7, 7 }
4011 }, {
4012 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4013 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4014 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4015 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4016 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4017 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4018 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4019 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4020 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4021 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4022 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4023 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4024 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4025 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4026 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4027 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4028 "\xaa\xaa\xaa",
4029 .ksize = 131,
4030 .plaintext = "Test Using Large"
4031 "r Than Block-Siz"
4032 "e Key - Hash Key"
4033 " First",
4034 .psize = 54,
4035 .digest = "\xed\x73\xa3\x74\xb9\x6c\x00\x52"
4036 "\x35\xf9\x48\x03\x2f\x09\x67\x4a"
4037 "\x58\xc0\xce\x55\x5c\xfc\x1f\x22"
4038 "\x3b\x02\x35\x65\x60\x31\x2c\x3b",
4039 }, {
4040 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4041 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4042 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4043 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4044 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4045 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4046 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4047 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4048 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4049 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4050 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4051 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4052 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4053 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4054 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4055 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4056 "\xaa\xaa\xaa",
4057 .ksize = 131,
4058 .plaintext =
4059 "This is a test u"
4060 "sing a larger th"
4061 "an block-size ke"
4062 "y and a larger t"
4063 "han block-size d"
4064 "ata. The key nee"
4065 "ds to be hashed "
4066 "before being use"
4067 "d by the HMAC al"
4068 "gorithm.",
4069 .psize = 152,
4070 .digest = "\x65\xc5\xb0\x6d\x4c\x3d\xe3\x2a"
4071 "\x7a\xef\x87\x63\x26\x1e\x49\xad"
4072 "\xb6\xe2\x29\x3e\xc8\xe7\xc6\x1e"
4073 "\x8d\xe6\x17\x01\xfc\x63\xe1\x23",
4074 },
4075};
4076
4077#define HMAC_SHA3_384_TEST_VECTORS 4
4078
4079static struct hash_testvec hmac_sha3_384_tv_template[] = {
4080 {
4081 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4082 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4083 "\x0b\x0b\x0b\x0b",
4084 .ksize = 20,
4085 .plaintext = "Hi There",
4086 .psize = 8,
4087 .digest = "\x68\xd2\xdc\xf7\xfd\x4d\xdd\x0a"
4088 "\x22\x40\xc8\xa4\x37\x30\x5f\x61"
4089 "\xfb\x73\x34\xcf\xb5\xd0\x22\x6e"
4090 "\x1b\xc2\x7d\xc1\x0a\x2e\x72\x3a"
4091 "\x20\xd3\x70\xb4\x77\x43\x13\x0e"
4092 "\x26\xac\x7e\x3d\x53\x28\x86\xbd",
4093 }, {
4094 .key = "Jefe",
4095 .ksize = 4,
4096 .plaintext = "what do ya want for nothing?",
4097 .psize = 28,
4098 .digest = "\xf1\x10\x1f\x8c\xbf\x97\x66\xfd"
4099 "\x67\x64\xd2\xed\x61\x90\x3f\x21"
4100 "\xca\x9b\x18\xf5\x7c\xf3\xe1\xa2"
4101 "\x3c\xa1\x35\x08\xa9\x32\x43\xce"
4102 "\x48\xc0\x45\xdc\x00\x7f\x26\xa2"
4103 "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a",
4104 .np = 4,
4105 .tap = { 7, 7, 7, 7 }
4106 }, {
4107 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4108 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4109 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4110 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4111 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4112 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4113 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4114 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4115 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4116 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4117 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4118 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4119 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4120 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4121 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4122 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4123 "\xaa\xaa\xaa",
4124 .ksize = 131,
4125 .plaintext = "Test Using Large"
4126 "r Than Block-Siz"
4127 "e Key - Hash Key"
4128 " First",
4129 .psize = 54,
4130 .digest = "\x0f\xc1\x95\x13\xbf\x6b\xd8\x78"
4131 "\x03\x70\x16\x70\x6a\x0e\x57\xbc"
4132 "\x52\x81\x39\x83\x6b\x9a\x42\xc3"
4133 "\xd4\x19\xe4\x98\xe0\xe1\xfb\x96"
4134 "\x16\xfd\x66\x91\x38\xd3\x3a\x11"
4135 "\x05\xe0\x7c\x72\xb6\x95\x3b\xcc",
4136 }, {
4137 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4138 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4139 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4140 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4141 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4142 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4143 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4144 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4145 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4146 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4147 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4148 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4149 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4150 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4151 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4152 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4153 "\xaa\xaa\xaa",
4154 .ksize = 131,
4155 .plaintext =
4156 "This is a test u"
4157 "sing a larger th"
4158 "an block-size ke"
4159 "y and a larger t"
4160 "han block-size d"
4161 "ata. The key nee"
4162 "ds to be hashed "
4163 "before being use"
4164 "d by the HMAC al"
4165 "gorithm.",
4166 .psize = 152,
4167 .digest = "\x02\x6f\xdf\x6b\x50\x74\x1e\x37"
4168 "\x38\x99\xc9\xf7\xd5\x40\x6d\x4e"
4169 "\xb0\x9f\xc6\x66\x56\x36\xfc\x1a"
4170 "\x53\x00\x29\xdd\xf5\xcf\x3c\xa5"
4171 "\xa9\x00\xed\xce\x01\xf5\xf6\x1e"
4172 "\x2f\x40\x8c\xdf\x2f\xd3\xe7\xe8",
4173 },
4174};
4175
4176#define HMAC_SHA3_512_TEST_VECTORS 4
4177
4178static struct hash_testvec hmac_sha3_512_tv_template[] = {
4179 {
4180 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4181 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4182 "\x0b\x0b\x0b\x0b",
4183 .ksize = 20,
4184 .plaintext = "Hi There",
4185 .psize = 8,
4186 .digest = "\xeb\x3f\xbd\x4b\x2e\xaa\xb8\xf5"
4187 "\xc5\x04\xbd\x3a\x41\x46\x5a\xac"
4188 "\xec\x15\x77\x0a\x7c\xab\xac\x53"
4189 "\x1e\x48\x2f\x86\x0b\x5e\xc7\xba"
4190 "\x47\xcc\xb2\xc6\xf2\xaf\xce\x8f"
4191 "\x88\xd2\x2b\x6d\xc6\x13\x80\xf2"
4192 "\x3a\x66\x8f\xd3\x88\x8b\xb8\x05"
4193 "\x37\xc0\xa0\xb8\x64\x07\x68\x9e",
4194 }, {
4195 .key = "Jefe",
4196 .ksize = 4,
4197 .plaintext = "what do ya want for nothing?",
4198 .psize = 28,
4199 .digest = "\x5a\x4b\xfe\xab\x61\x66\x42\x7c"
4200 "\x7a\x36\x47\xb7\x47\x29\x2b\x83"
4201 "\x84\x53\x7c\xdb\x89\xaf\xb3\xbf"
4202 "\x56\x65\xe4\xc5\xe7\x09\x35\x0b"
4203 "\x28\x7b\xae\xc9\x21\xfd\x7c\xa0"
4204 "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e"
4205 "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83"
4206 "\x96\x02\x75\xbe\xb4\xe6\x20\x24",
4207 .np = 4,
4208 .tap = { 7, 7, 7, 7 }
4209 }, {
4210 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4211 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4212 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4213 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4214 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4215 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4216 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4217 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4218 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4219 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4220 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4221 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4222 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4223 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4224 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4225 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4226 "\xaa\xaa\xaa",
4227 .ksize = 131,
4228 .plaintext = "Test Using Large"
4229 "r Than Block-Siz"
4230 "e Key - Hash Key"
4231 " First",
4232 .psize = 54,
4233 .digest = "\x00\xf7\x51\xa9\xe5\x06\x95\xb0"
4234 "\x90\xed\x69\x11\xa4\xb6\x55\x24"
4235 "\x95\x1c\xdc\x15\xa7\x3a\x5d\x58"
4236 "\xbb\x55\x21\x5e\xa2\xcd\x83\x9a"
4237 "\xc7\x9d\x2b\x44\xa3\x9b\xaf\xab"
4238 "\x27\xe8\x3f\xde\x9e\x11\xf6\x34"
4239 "\x0b\x11\xd9\x91\xb1\xb9\x1b\xf2"
4240 "\xee\xe7\xfc\x87\x24\x26\xc3\xa4",
4241 }, {
4242 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4243 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4244 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4245 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4246 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4247 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4248 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4249 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4250 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4251 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4252 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4253 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4254 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4255 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4256 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4257 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4258 "\xaa\xaa\xaa",
4259 .ksize = 131,
4260 .plaintext =
4261 "This is a test u"
4262 "sing a larger th"
4263 "an block-size ke"
4264 "y and a larger t"
4265 "han block-size d"
4266 "ata. The key nee"
4267 "ds to be hashed "
4268 "before being use"
4269 "d by the HMAC al"
4270 "gorithm.",
4271 .psize = 152,
4272 .digest = "\x38\xa4\x56\xa0\x04\xbd\x10\xd3"
4273 "\x2c\x9a\xb8\x33\x66\x84\x11\x28"
4274 "\x62\xc3\xdb\x61\xad\xcc\xa3\x18"
4275 "\x29\x35\x5e\xaf\x46\xfd\x5c\x73"
4276 "\xd0\x6a\x1f\x0d\x13\xfe\xc9\xa6"
4277 "\x52\xfb\x38\x11\xb5\x77\xb1\xb1"
4278 "\xd1\xb9\x78\x9f\x97\xae\x5b\x83"
4279 "\xc6\xf4\x4d\xfc\xf1\xd6\x7e\xba",
4280 },
4281};
4282
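A minimal sketch of how one of these entries can be driven through the kernel's synchronous hash (shash) API; the helper name below is hypothetical, and the in-tree harness in testmgr.c is more thorough (it runs the vectors through the ahash interface and, for entries with .np set, splits .plaintext into .tap-sized scatterlist fragments to exercise partial-block handling). "hmac(sha3-512)" instantiates the generic HMAC template around the sha3-512 shash added by this series.

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/string.h>

    /* Hypothetical helper: run one hash_testvec through hmac(sha3-512)
     * in a single crypto_shash_digest() call and compare the result
     * against the precomputed .digest field. */
    static int check_hmac_sha3_512_vec(const struct hash_testvec *vec)
    {
    	struct crypto_shash *tfm;
    	u8 out[64];	/* SHA3-512 digest size */
    	int err;

    	tfm = crypto_alloc_shash("hmac(sha3-512)", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	err = crypto_shash_setkey(tfm, (const u8 *)vec->key, vec->ksize);
    	if (!err) {
    		SHASH_DESC_ON_STACK(desc, tfm);

    		desc->tfm = tfm;
    		desc->flags = 0;	/* no request flags needed here */
    		err = crypto_shash_digest(desc,
    					  (const u8 *)vec->plaintext,
    					  vec->psize, out);
    	}
    	if (!err && memcmp(out, vec->digest, sizeof(out)))
    		err = -EINVAL;	/* digest mismatch */

    	crypto_free_shash(tfm);
    	return err;
    }

Called as check_hmac_sha3_512_vec(&hmac_sha3_512_tv_template[0]), this should return 0 for every entry above; the same pattern applies to the 256- and 384-bit templates with the digest buffer sized accordingly.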
3249/* 4283/*
3250 * Poly1305 test vectors from RFC7539 A.3. 4284 * Poly1305 test vectors from RFC7539 A.3.
3251 */ 4285 */