author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 13:42:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 13:42:15 -0400
commit    cb906953d2c3fd450655d9fa833f03690ad50c23 (patch)
tree      06c5665afb24baee3ac49f62db61ca97918079b4 /crypto
parent    6c373ca89399c5a3f7ef210ad8f63dc3437da345 (diff)
parent    3abafaf2192b1712079edfd4232b19877d6f41a5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 4.1:

  New interfaces:
   - user-space interface for AEAD
   - user-space interface for RNG (i.e., pseudo RNG)

  New hashes:
   - ARMv8 SHA1/256
   - ARMv8 AES
   - ARMv8 GHASH
   - ARM assembler and NEON SHA256
   - MIPS OCTEON SHA1/256/512
   - MIPS img-hash SHA1/256 and MD5
   - Power 8 VMX AES/CBC/CTR/GHASH
   - PPC assembler AES, SHA1/256 and MD5
   - Broadcom IPROC RNG driver

  Cleanups/fixes:
   - prevent internal helper algos from being exposed to user-space
   - merge common code from assembly/C SHA implementations
   - misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (169 commits)
  crypto: arm - workaround for building with old binutils
  crypto: arm/sha256 - avoid sha256 code on ARMv7-M
  crypto: x86/sha512_ssse3 - move SHA-384/512 SSSE3 implementation to base layer
  crypto: x86/sha256_ssse3 - move SHA-224/256 SSSE3 implementation to base layer
  crypto: x86/sha1_ssse3 - move SHA-1 SSSE3 implementation to base layer
  crypto: arm64/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer
  crypto: arm64/sha1-ce - move SHA-1 ARMv8 implementation to base layer
  crypto: arm/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer
  crypto: arm/sha256 - move SHA-224/256 ASM/NEON implementation to base layer
  crypto: arm/sha1-ce - move SHA-1 ARMv8 implementation to base layer
  crypto: arm/sha1_neon - move SHA-1 NEON implementation to base layer
  crypto: arm/sha1 - move SHA-1 ARM asm implementation to base layer
  crypto: sha512-generic - move to generic glue implementation
  crypto: sha256-generic - move to generic glue implementation
  crypto: sha1-generic - move to generic glue implementation
  crypto: sha512 - implement base layer for SHA-512
  crypto: sha256 - implement base layer for SHA-256
  crypto: sha1 - implement base layer for SHA-1
  crypto: api - remove instance when test failed
  crypto: api - Move alg ref count init to crypto_check_alg
  ...
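The headline item in this pull is the new user-space AEAD interface (algif_aead, added in full below). As a rough illustration of what that interface enables, the following is a minimal user-space sketch, not part of this merge, that encrypts one block with AES-GCM through an AF_ALG socket. It relies only on the constants from <linux/if_alg.h>; the "gcm(aes)" name, key and plaintext are made up, the IV and associated data are left at their zero/empty defaults, and error handling is omitted.

/*
 * Illustrative sketch (not part of this merge): encrypt a buffer with
 * AES-GCM through the algif_aead socket interface.  Without ALG_SET_IV
 * and ALG_SET_AEAD_ASSOCLEN the kernel uses a zero IV and empty AAD.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };          /* dummy 128-bit key */
	unsigned char pt[16]  = "example plain";
	unsigned char ct[16 + 16];              /* ciphertext + 16-byte tag */
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* program the key and the authentication tag size into the tfm */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

	opfd = accept(tfmfd, NULL, 0);

	/* the requested operation travels as ancillary data */
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));             /* ciphertext || auth tag */

	close(opfd);
	close(tfmfd);
	return 0;
}

The same socket pattern applies to decryption with ALG_OP_DECRYPT, in which case the authentication tag is appended to the input, matching the memory layout documented in algif_aead.c below.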
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig             142
-rw-r--r--  crypto/Makefile              1
-rw-r--r--  crypto/ablk_helper.c         3
-rw-r--r--  crypto/algapi.c             42
-rw-r--r--  crypto/algif_aead.c        666
-rw-r--r--  crypto/algif_rng.c           2
-rw-r--r--  crypto/ansi_cprng.c          6
-rw-r--r--  crypto/api.c                10
-rw-r--r--  crypto/cryptd.c             49
-rw-r--r--  crypto/crypto_user.c        39
-rw-r--r--  crypto/drbg.c               64
-rw-r--r--  crypto/mcryptd.c            25
-rw-r--r--  crypto/proc.c                3
-rw-r--r--  crypto/sha1_generic.c      102
-rw-r--r--  crypto/sha256_generic.c    133
-rw-r--r--  crypto/sha512_generic.c    123
-rw-r--r--  crypto/tcrypt.c              4
-rw-r--r--  crypto/testmgr.c            24
18 files changed, 967 insertions, 471 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 50f4da44a304..8aaf298a80e1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -436,6 +436,14 @@ config CRYPTO_MD5_OCTEON
 	  MD5 message digest algorithm (RFC1321) implemented
 	  using OCTEON crypto instructions, when available.
 
+config CRYPTO_MD5_PPC
+	tristate "MD5 digest algorithm (PPC)"
+	depends on PPC
+	select CRYPTO_HASH
+	help
+	  MD5 message digest algorithm (RFC1321) implemented
+	  in PPC assembler.
+
 config CRYPTO_MD5_SPARC64
 	tristate "MD5 digest algorithm (SPARC64)"
 	depends on SPARC64
@@ -546,34 +554,23 @@ config CRYPTO_SHA512_SSSE3
 	  Extensions version 1 (AVX1), or Advanced Vector Extensions
 	  version 2 (AVX2) instructions, when available.
 
-config CRYPTO_SHA1_SPARC64
-	tristate "SHA1 digest algorithm (SPARC64)"
-	depends on SPARC64
-	select CRYPTO_SHA1
-	select CRYPTO_HASH
-	help
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using sparc64 crypto instructions, when available.
-
-config CRYPTO_SHA1_ARM
-	tristate "SHA1 digest algorithm (ARM-asm)"
-	depends on ARM
+config CRYPTO_SHA1_OCTEON
+	tristate "SHA1 digest algorithm (OCTEON)"
+	depends on CPU_CAVIUM_OCTEON
 	select CRYPTO_SHA1
 	select CRYPTO_HASH
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using optimized ARM assembler.
+	  using OCTEON crypto instructions, when available.
 
-config CRYPTO_SHA1_ARM_NEON
-	tristate "SHA1 digest algorithm (ARM NEON)"
-	depends on ARM && KERNEL_MODE_NEON
-	select CRYPTO_SHA1_ARM
+config CRYPTO_SHA1_SPARC64
+	tristate "SHA1 digest algorithm (SPARC64)"
+	depends on SPARC64
 	select CRYPTO_SHA1
 	select CRYPTO_HASH
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using optimized ARM NEON assembly, when NEON instructions are
-	  available.
+	  using sparc64 crypto instructions, when available.
 
 config CRYPTO_SHA1_PPC
 	tristate "SHA1 digest algorithm (powerpc)"
@@ -582,6 +579,13 @@ config CRYPTO_SHA1_PPC
 	  This is the powerpc hardware accelerated implementation of the
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
+config CRYPTO_SHA1_PPC_SPE
+	tristate "SHA1 digest algorithm (PPC SPE)"
+	depends on PPC && SPE
+	help
+	  SHA-1 secure hash standard (DFIPS 180-4) implemented
+	  using powerpc SPE SIMD instruction set.
+
 config CRYPTO_SHA1_MB
 	tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
 	depends on X86 && 64BIT
@@ -610,6 +614,24 @@ config CRYPTO_SHA256
 	  This code also includes SHA-224, a 224 bit hash with 112 bits
 	  of security against collision attacks.
 
+config CRYPTO_SHA256_PPC_SPE
+	tristate "SHA224 and SHA256 digest algorithm (PPC SPE)"
+	depends on PPC && SPE
+	select CRYPTO_SHA256
+	select CRYPTO_HASH
+	help
+	  SHA224 and SHA256 secure hash standard (DFIPS 180-2)
+	  implemented using powerpc SPE SIMD instruction set.
+
+config CRYPTO_SHA256_OCTEON
+	tristate "SHA224 and SHA256 digest algorithm (OCTEON)"
+	depends on CPU_CAVIUM_OCTEON
+	select CRYPTO_SHA256
+	select CRYPTO_HASH
+	help
+	  SHA-256 secure hash standard (DFIPS 180-2) implemented
+	  using OCTEON crypto instructions, when available.
+
 config CRYPTO_SHA256_SPARC64
 	tristate "SHA224 and SHA256 digest algorithm (SPARC64)"
 	depends on SPARC64
@@ -631,29 +653,23 @@ config CRYPTO_SHA512
 	  This code also includes SHA-384, a 384 bit hash with 192 bits
 	  of security against collision attacks.
 
-config CRYPTO_SHA512_SPARC64
-	tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
-	depends on SPARC64
+config CRYPTO_SHA512_OCTEON
+	tristate "SHA384 and SHA512 digest algorithms (OCTEON)"
+	depends on CPU_CAVIUM_OCTEON
 	select CRYPTO_SHA512
 	select CRYPTO_HASH
 	help
 	  SHA-512 secure hash standard (DFIPS 180-2) implemented
-	  using sparc64 crypto instructions, when available.
+	  using OCTEON crypto instructions, when available.
 
-config CRYPTO_SHA512_ARM_NEON
-	tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
-	depends on ARM && KERNEL_MODE_NEON
+config CRYPTO_SHA512_SPARC64
+	tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
+	depends on SPARC64
 	select CRYPTO_SHA512
 	select CRYPTO_HASH
 	help
 	  SHA-512 secure hash standard (DFIPS 180-2) implemented
-	  using ARM NEON instructions, when available.
-
-	  This version of SHA implements a 512 bit hash with 256 bits of
-	  security against collision attacks.
-
-	  This code also includes SHA-384, a 384 bit hash with 192 bits
-	  of security against collision attacks.
+	  using sparc64 crypto instructions, when available.
 
 config CRYPTO_TGR192
 	tristate "Tiger digest algorithms"
@@ -817,45 +833,18 @@ config CRYPTO_AES_SPARC64
 	  for some popular block cipher mode is supported too, including
 	  ECB and CBC.
 
-config CRYPTO_AES_ARM
-	tristate "AES cipher algorithms (ARM-asm)"
-	depends on ARM
-	select CRYPTO_ALGAPI
-	select CRYPTO_AES
-	help
-	  Use optimized AES assembler routines for ARM platforms.
-
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
-
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
-
-	  The AES specifies three key sizes: 128, 192 and 256 bits
-
-	  See <http://csrc.nist.gov/encryption/aes/> for more information.
-
-config CRYPTO_AES_ARM_BS
-	tristate "Bit sliced AES using NEON instructions"
-	depends on ARM && KERNEL_MODE_NEON
-	select CRYPTO_ALGAPI
-	select CRYPTO_AES_ARM
-	select CRYPTO_ABLK_HELPER
+config CRYPTO_AES_PPC_SPE
+	tristate "AES cipher algorithms (PPC SPE)"
+	depends on PPC && SPE
 	help
-	  Use a faster and more secure NEON based implementation of AES in CBC,
-	  CTR and XTS modes
-
-	  Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
-	  and for XTS mode encryption, CBC and XTS mode decryption speedup is
-	  around 25%. (CBC encryption speed is not affected by this driver.)
-	  This implementation does not rely on any lookup tables so it is
-	  believed to be invulnerable to cache timing attacks.
+	  AES cipher algorithms (FIPS-197). Additionally the acceleration
+	  for popular block cipher modes ECB, CBC, CTR and XTS is supported.
+	  This module should only be used for low power (router) devices
+	  without hardware AES acceleration (e.g. caam crypto). It reduces the
+	  size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
+	  timining attacks. Nevertheless it might be not as secure as other
+	  architecture specific assembler implementations that work on 1KB
+	  tables or 256 bytes S-boxes.
 
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
@@ -1199,7 +1188,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
 	  Keys are allowed to be from 0 to 256 bits in length, in steps
 	  of 8 bits.
 
-	  This module provides Serpent cipher algorithm that processes eigth
+	  This module provides Serpent cipher algorithm that processes eight
 	  blocks parallel using SSE2 instruction set.
 
 	  See also:
@@ -1523,6 +1512,15 @@ config CRYPTO_USER_API_RNG
 	  This option enables the user-spaces interface for random
 	  number generator algorithms.
 
+config CRYPTO_USER_API_AEAD
+	tristate "User-space interface for AEAD cipher algorithms"
+	depends on NET
+	select CRYPTO_AEAD
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-spaces interface for AEAD
+	  cipher algorithms.
+
 config CRYPTO_HASH_INFO
 	bool
 
diff --git a/crypto/Makefile b/crypto/Makefile
index ba19465f9ad3..97b7d3ac87e7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
+obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index ffe7278d4bd8..e1fcf53bb931 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -124,7 +124,8 @@ int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
 	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct cryptd_ablkcipher *cryptd_tfm;
 
-	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
+	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, CRYPTO_ALG_INTERNAL,
+					     CRYPTO_ALG_INTERNAL);
 	if (IS_ERR(cryptd_tfm))
 		return PTR_ERR(cryptd_tfm);
 
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 83b04e0884b1..2d0a1c64ce39 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -64,6 +64,8 @@ static int crypto_check_alg(struct crypto_alg *alg)
 	if (alg->cra_priority < 0)
 		return -EINVAL;
 
+	atomic_set(&alg->cra_refcnt, 1);
+
 	return crypto_set_driver_name(alg);
 }
 
@@ -99,10 +101,9 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
 	return &n->list == stack ? top : &n->inst->alg.cra_users;
 }
 
-static void crypto_remove_spawn(struct crypto_spawn *spawn,
-				struct list_head *list)
+static void crypto_remove_instance(struct crypto_instance *inst,
+				   struct list_head *list)
 {
-	struct crypto_instance *inst = spawn->inst;
 	struct crypto_template *tmpl = inst->tmpl;
 
 	if (crypto_is_dead(&inst->alg))
@@ -167,7 +168,7 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 		if (spawn->alg)
 			list_move(&spawn->list, &spawn->alg->cra_users);
 		else
-			crypto_remove_spawn(spawn, list);
+			crypto_remove_instance(spawn->inst, list);
 	}
 }
 EXPORT_SYMBOL_GPL(crypto_remove_spawns);
@@ -188,7 +189,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
 
 	ret = -EEXIST;
 
-	atomic_set(&alg->cra_refcnt, 1);
 	list_for_each_entry(q, &crypto_alg_list, cra_list) {
 		if (q == alg)
 			goto err;
@@ -523,7 +523,10 @@ int crypto_register_instance(struct crypto_template *tmpl,
 
 	err = crypto_check_alg(&inst->alg);
 	if (err)
-		goto err;
+		return err;
+
+	if (unlikely(!crypto_mod_get(&inst->alg)))
+		return -EAGAIN;
 
 	inst->alg.cra_module = tmpl->module;
 	inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
@@ -545,37 +548,30 @@ unlock:
 		goto err;
 
 	crypto_wait_for_test(larval);
+
+	/* Remove instance if test failed */
+	if (!(inst->alg.cra_flags & CRYPTO_ALG_TESTED))
+		crypto_unregister_instance(inst);
 	err = 0;
 
 err:
+	crypto_mod_put(&inst->alg);
 	return err;
 }
 EXPORT_SYMBOL_GPL(crypto_register_instance);
 
-int crypto_unregister_instance(struct crypto_alg *alg)
+int crypto_unregister_instance(struct crypto_instance *inst)
 {
-	int err;
-	struct crypto_instance *inst = (void *)alg;
-	struct crypto_template *tmpl = inst->tmpl;
-	LIST_HEAD(users);
-
-	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
-		return -EINVAL;
-
-	BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+	LIST_HEAD(list);
 
 	down_write(&crypto_alg_sem);
 
-	hlist_del_init(&inst->list);
-	err = crypto_remove_alg(alg, &users);
+	crypto_remove_spawns(&inst->alg, &list, NULL);
+	crypto_remove_instance(inst, &list);
 
 	up_write(&crypto_alg_sem);
 
-	if (err)
-		return err;
-
-	tmpl->free(inst);
-	crypto_remove_final(&users);
+	crypto_remove_final(&list);
 
 	return 0;
 }
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
new file mode 100644
index 000000000000..527d27b023ab
--- /dev/null
+++ b/crypto/algif_aead.c
@@ -0,0 +1,666 @@
1/*
2 * algif_aead: User-space interface for AEAD algorithms
3 *
4 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
5 *
6 * This file provides the user-space API for AEAD ciphers.
7 *
8 * This file is derived from algif_skcipher.c.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 */
15
16#include <crypto/scatterwalk.h>
17#include <crypto/if_alg.h>
18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/net.h>
24#include <net/sock.h>
25
26struct aead_sg_list {
27 unsigned int cur;
28 struct scatterlist sg[ALG_MAX_PAGES];
29};
30
31struct aead_ctx {
32 struct aead_sg_list tsgl;
33 /*
34 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
35 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
36 * bytes
37 */
38#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
39 struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
40
41 void *iv;
42
43 struct af_alg_completion completion;
44
45 unsigned long used;
46
47 unsigned int len;
48 bool more;
49 bool merge;
50 bool enc;
51
52 size_t aead_assoclen;
53 struct aead_request aead_req;
54};
55
56static inline int aead_sndbuf(struct sock *sk)
57{
58 struct alg_sock *ask = alg_sk(sk);
59 struct aead_ctx *ctx = ask->private;
60
61 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
62 ctx->used, 0);
63}
64
65static inline bool aead_writable(struct sock *sk)
66{
67 return PAGE_SIZE <= aead_sndbuf(sk);
68}
69
70static inline bool aead_sufficient_data(struct aead_ctx *ctx)
71{
72 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
73
74 return (ctx->used >= (ctx->aead_assoclen + (ctx->enc ? 0 : as)));
75}
76
77static void aead_put_sgl(struct sock *sk)
78{
79 struct alg_sock *ask = alg_sk(sk);
80 struct aead_ctx *ctx = ask->private;
81 struct aead_sg_list *sgl = &ctx->tsgl;
82 struct scatterlist *sg = sgl->sg;
83 unsigned int i;
84
85 for (i = 0; i < sgl->cur; i++) {
86 if (!sg_page(sg + i))
87 continue;
88
89 put_page(sg_page(sg + i));
90 sg_assign_page(sg + i, NULL);
91 }
92 sgl->cur = 0;
93 ctx->used = 0;
94 ctx->more = 0;
95 ctx->merge = 0;
96}
97
98static void aead_wmem_wakeup(struct sock *sk)
99{
100 struct socket_wq *wq;
101
102 if (!aead_writable(sk))
103 return;
104
105 rcu_read_lock();
106 wq = rcu_dereference(sk->sk_wq);
107 if (wq_has_sleeper(wq))
108 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
109 POLLRDNORM |
110 POLLRDBAND);
111 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
112 rcu_read_unlock();
113}
114
115static int aead_wait_for_data(struct sock *sk, unsigned flags)
116{
117 struct alg_sock *ask = alg_sk(sk);
118 struct aead_ctx *ctx = ask->private;
119 long timeout;
120 DEFINE_WAIT(wait);
121 int err = -ERESTARTSYS;
122
123 if (flags & MSG_DONTWAIT)
124 return -EAGAIN;
125
126 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
127
128 for (;;) {
129 if (signal_pending(current))
130 break;
131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
132 timeout = MAX_SCHEDULE_TIMEOUT;
133 if (sk_wait_event(sk, &timeout, !ctx->more)) {
134 err = 0;
135 break;
136 }
137 }
138 finish_wait(sk_sleep(sk), &wait);
139
140 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
141
142 return err;
143}
144
145static void aead_data_wakeup(struct sock *sk)
146{
147 struct alg_sock *ask = alg_sk(sk);
148 struct aead_ctx *ctx = ask->private;
149 struct socket_wq *wq;
150
151 if (ctx->more)
152 return;
153 if (!ctx->used)
154 return;
155
156 rcu_read_lock();
157 wq = rcu_dereference(sk->sk_wq);
158 if (wq_has_sleeper(wq))
159 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
160 POLLRDNORM |
161 POLLRDBAND);
162 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
163 rcu_read_unlock();
164}
165
166static int aead_sendmsg(struct kiocb *unused, struct socket *sock,
167 struct msghdr *msg, size_t size)
168{
169 struct sock *sk = sock->sk;
170 struct alg_sock *ask = alg_sk(sk);
171 struct aead_ctx *ctx = ask->private;
172 unsigned ivsize =
173 crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
174 struct aead_sg_list *sgl = &ctx->tsgl;
175 struct af_alg_control con = {};
176 long copied = 0;
177 bool enc = 0;
178 bool init = 0;
179 int err = -EINVAL;
180
181 if (msg->msg_controllen) {
182 err = af_alg_cmsg_send(msg, &con);
183 if (err)
184 return err;
185
186 init = 1;
187 switch (con.op) {
188 case ALG_OP_ENCRYPT:
189 enc = 1;
190 break;
191 case ALG_OP_DECRYPT:
192 enc = 0;
193 break;
194 default:
195 return -EINVAL;
196 }
197
198 if (con.iv && con.iv->ivlen != ivsize)
199 return -EINVAL;
200 }
201
202 lock_sock(sk);
203 if (!ctx->more && ctx->used)
204 goto unlock;
205
206 if (init) {
207 ctx->enc = enc;
208 if (con.iv)
209 memcpy(ctx->iv, con.iv->iv, ivsize);
210
211 ctx->aead_assoclen = con.aead_assoclen;
212 }
213
214 while (size) {
215 unsigned long len = size;
216 struct scatterlist *sg = NULL;
217
218 /* use the existing memory in an allocated page */
219 if (ctx->merge) {
220 sg = sgl->sg + sgl->cur - 1;
221 len = min_t(unsigned long, len,
222 PAGE_SIZE - sg->offset - sg->length);
223 err = memcpy_from_msg(page_address(sg_page(sg)) +
224 sg->offset + sg->length,
225 msg, len);
226 if (err)
227 goto unlock;
228
229 sg->length += len;
230 ctx->merge = (sg->offset + sg->length) &
231 (PAGE_SIZE - 1);
232
233 ctx->used += len;
234 copied += len;
235 size -= len;
236 continue;
237 }
238
239 if (!aead_writable(sk)) {
240 /* user space sent too much data */
241 aead_put_sgl(sk);
242 err = -EMSGSIZE;
243 goto unlock;
244 }
245
246 /* allocate a new page */
247 len = min_t(unsigned long, size, aead_sndbuf(sk));
248 while (len) {
249 int plen = 0;
250
251 if (sgl->cur >= ALG_MAX_PAGES) {
252 aead_put_sgl(sk);
253 err = -E2BIG;
254 goto unlock;
255 }
256
257 sg = sgl->sg + sgl->cur;
258 plen = min_t(int, len, PAGE_SIZE);
259
260 sg_assign_page(sg, alloc_page(GFP_KERNEL));
261 err = -ENOMEM;
262 if (!sg_page(sg))
263 goto unlock;
264
265 err = memcpy_from_msg(page_address(sg_page(sg)),
266 msg, plen);
267 if (err) {
268 __free_page(sg_page(sg));
269 sg_assign_page(sg, NULL);
270 goto unlock;
271 }
272
273 sg->offset = 0;
274 sg->length = plen;
275 len -= plen;
276 ctx->used += plen;
277 copied += plen;
278 sgl->cur++;
279 size -= plen;
280 ctx->merge = plen & (PAGE_SIZE - 1);
281 }
282 }
283
284 err = 0;
285
286 ctx->more = msg->msg_flags & MSG_MORE;
287 if (!ctx->more && !aead_sufficient_data(ctx)) {
288 aead_put_sgl(sk);
289 err = -EMSGSIZE;
290 }
291
292unlock:
293 aead_data_wakeup(sk);
294 release_sock(sk);
295
296 return err ?: copied;
297}
298
299static ssize_t aead_sendpage(struct socket *sock, struct page *page,
300 int offset, size_t size, int flags)
301{
302 struct sock *sk = sock->sk;
303 struct alg_sock *ask = alg_sk(sk);
304 struct aead_ctx *ctx = ask->private;
305 struct aead_sg_list *sgl = &ctx->tsgl;
306 int err = -EINVAL;
307
308 if (flags & MSG_SENDPAGE_NOTLAST)
309 flags |= MSG_MORE;
310
311 if (sgl->cur >= ALG_MAX_PAGES)
312 return -E2BIG;
313
314 lock_sock(sk);
315 if (!ctx->more && ctx->used)
316 goto unlock;
317
318 if (!size)
319 goto done;
320
321 if (!aead_writable(sk)) {
322 /* user space sent too much data */
323 aead_put_sgl(sk);
324 err = -EMSGSIZE;
325 goto unlock;
326 }
327
328 ctx->merge = 0;
329
330 get_page(page);
331 sg_set_page(sgl->sg + sgl->cur, page, size, offset);
332 sgl->cur++;
333 ctx->used += size;
334
335 err = 0;
336
337done:
338 ctx->more = flags & MSG_MORE;
339 if (!ctx->more && !aead_sufficient_data(ctx)) {
340 aead_put_sgl(sk);
341 err = -EMSGSIZE;
342 }
343
344unlock:
345 aead_data_wakeup(sk);
346 release_sock(sk);
347
348 return err ?: size;
349}
350
351static int aead_recvmsg(struct kiocb *unused, struct socket *sock,
352 struct msghdr *msg, size_t ignored, int flags)
353{
354 struct sock *sk = sock->sk;
355 struct alg_sock *ask = alg_sk(sk);
356 struct aead_ctx *ctx = ask->private;
357 unsigned bs = crypto_aead_blocksize(crypto_aead_reqtfm(&ctx->aead_req));
358 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
359 struct aead_sg_list *sgl = &ctx->tsgl;
360 struct scatterlist *sg = NULL;
361 struct scatterlist assoc[ALG_MAX_PAGES];
362 size_t assoclen = 0;
363 unsigned int i = 0;
364 int err = -EINVAL;
365 unsigned long used = 0;
366 size_t outlen = 0;
367 size_t usedpages = 0;
368 unsigned int cnt = 0;
369
370 /* Limit number of IOV blocks to be accessed below */
371 if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
372 return -ENOMSG;
373
374 lock_sock(sk);
375
376 /*
377 * AEAD memory structure: For encryption, the tag is appended to the
378 * ciphertext which implies that the memory allocated for the ciphertext
379 * must be increased by the tag length. For decryption, the tag
380 * is expected to be concatenated to the ciphertext. The plaintext
381 * therefore has a memory size of the ciphertext minus the tag length.
382 *
383 * The memory structure for cipher operation has the following
384 * structure:
385 * AEAD encryption input: assoc data || plaintext
386 * AEAD encryption output: cipherntext || auth tag
387 * AEAD decryption input: assoc data || ciphertext || auth tag
388 * AEAD decryption output: plaintext
389 */
390
391 if (ctx->more) {
392 err = aead_wait_for_data(sk, flags);
393 if (err)
394 goto unlock;
395 }
396
397 used = ctx->used;
398
399 /*
400 * Make sure sufficient data is present -- note, the same check is
401 * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
402 * shall provide an information to the data sender that something is
403 * wrong, but they are irrelevant to maintain the kernel integrity.
404 * We need this check here too in case user space decides to not honor
405 * the error message in sendmsg/sendpage and still call recvmsg. This
406 * check here protects the kernel integrity.
407 */
408 if (!aead_sufficient_data(ctx))
409 goto unlock;
410
411 /*
412 * The cipher operation input data is reduced by the associated data
413 * length as this data is processed separately later on.
414 */
415 used -= ctx->aead_assoclen;
416
417 if (ctx->enc) {
418 /* round up output buffer to multiple of block size */
419 outlen = ((used + bs - 1) / bs * bs);
420 /* add the size needed for the auth tag to be created */
421 outlen += as;
422 } else {
423 /* output data size is input without the authentication tag */
424 outlen = used - as;
425 /* round up output buffer to multiple of block size */
426 outlen = ((outlen + bs - 1) / bs * bs);
427 }
428
429 /* convert iovecs of output buffers into scatterlists */
430 while (iov_iter_count(&msg->msg_iter)) {
431 size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
432 (outlen - usedpages));
433
434 /* make one iovec available as scatterlist */
435 err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
436 seglen);
437 if (err < 0)
438 goto unlock;
439 usedpages += err;
440 /* chain the new scatterlist with initial list */
441 if (cnt)
442 scatterwalk_crypto_chain(ctx->rsgl[0].sg,
443 ctx->rsgl[cnt].sg, 1,
444 sg_nents(ctx->rsgl[cnt-1].sg));
445 /* we do not need more iovecs as we have sufficient memory */
446 if (outlen <= usedpages)
447 break;
448 iov_iter_advance(&msg->msg_iter, err);
449 cnt++;
450 }
451
452 err = -EINVAL;
453 /* ensure output buffer is sufficiently large */
454 if (usedpages < outlen)
455 goto unlock;
456
457 sg_init_table(assoc, ALG_MAX_PAGES);
458 assoclen = ctx->aead_assoclen;
459 /*
460 * Split scatterlist into two: first part becomes AD, second part
461 * is plaintext / ciphertext. The first part is assigned to assoc
462 * scatterlist. When this loop finishes, sg points to the start of the
463 * plaintext / ciphertext.
464 */
465 for (i = 0; i < ctx->tsgl.cur; i++) {
466 sg = sgl->sg + i;
467 if (sg->length <= assoclen) {
468 /* AD is larger than one page */
469 sg_set_page(assoc + i, sg_page(sg),
470 sg->length, sg->offset);
471 assoclen -= sg->length;
472 if (i >= ctx->tsgl.cur)
473 goto unlock;
474 } else if (!assoclen) {
475 /* current page is to start of plaintext / ciphertext */
476 if (i)
477 /* AD terminates at page boundary */
478 sg_mark_end(assoc + i - 1);
479 else
480 /* AD size is zero */
481 sg_mark_end(assoc);
482 break;
483 } else {
484 /* AD does not terminate at page boundary */
485 sg_set_page(assoc + i, sg_page(sg),
486 assoclen, sg->offset);
487 sg_mark_end(assoc + i);
488 /* plaintext / ciphertext starts after AD */
489 sg->length -= assoclen;
490 sg->offset += assoclen;
491 break;
492 }
493 }
494
495 aead_request_set_assoc(&ctx->aead_req, assoc, ctx->aead_assoclen);
496 aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used,
497 ctx->iv);
498
499 err = af_alg_wait_for_completion(ctx->enc ?
500 crypto_aead_encrypt(&ctx->aead_req) :
501 crypto_aead_decrypt(&ctx->aead_req),
502 &ctx->completion);
503
504 if (err) {
505 /* EBADMSG implies a valid cipher operation took place */
506 if (err == -EBADMSG)
507 aead_put_sgl(sk);
508 goto unlock;
509 }
510
511 aead_put_sgl(sk);
512
513 err = 0;
514
515unlock:
516 for (i = 0; i < cnt; i++)
517 af_alg_free_sg(&ctx->rsgl[i]);
518
519 aead_wmem_wakeup(sk);
520 release_sock(sk);
521
522 return err ? err : outlen;
523}
524
525static unsigned int aead_poll(struct file *file, struct socket *sock,
526 poll_table *wait)
527{
528 struct sock *sk = sock->sk;
529 struct alg_sock *ask = alg_sk(sk);
530 struct aead_ctx *ctx = ask->private;
531 unsigned int mask;
532
533 sock_poll_wait(file, sk_sleep(sk), wait);
534 mask = 0;
535
536 if (!ctx->more)
537 mask |= POLLIN | POLLRDNORM;
538
539 if (aead_writable(sk))
540 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
541
542 return mask;
543}
544
545static struct proto_ops algif_aead_ops = {
546 .family = PF_ALG,
547
548 .connect = sock_no_connect,
549 .socketpair = sock_no_socketpair,
550 .getname = sock_no_getname,
551 .ioctl = sock_no_ioctl,
552 .listen = sock_no_listen,
553 .shutdown = sock_no_shutdown,
554 .getsockopt = sock_no_getsockopt,
555 .mmap = sock_no_mmap,
556 .bind = sock_no_bind,
557 .accept = sock_no_accept,
558 .setsockopt = sock_no_setsockopt,
559
560 .release = af_alg_release,
561 .sendmsg = aead_sendmsg,
562 .sendpage = aead_sendpage,
563 .recvmsg = aead_recvmsg,
564 .poll = aead_poll,
565};
566
567static void *aead_bind(const char *name, u32 type, u32 mask)
568{
569 return crypto_alloc_aead(name, type, mask);
570}
571
572static void aead_release(void *private)
573{
574 crypto_free_aead(private);
575}
576
577static int aead_setauthsize(void *private, unsigned int authsize)
578{
579 return crypto_aead_setauthsize(private, authsize);
580}
581
582static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
583{
584 return crypto_aead_setkey(private, key, keylen);
585}
586
587static void aead_sock_destruct(struct sock *sk)
588{
589 struct alg_sock *ask = alg_sk(sk);
590 struct aead_ctx *ctx = ask->private;
591 unsigned int ivlen = crypto_aead_ivsize(
592 crypto_aead_reqtfm(&ctx->aead_req));
593
594 aead_put_sgl(sk);
595 sock_kzfree_s(sk, ctx->iv, ivlen);
596 sock_kfree_s(sk, ctx, ctx->len);
597 af_alg_release_parent(sk);
598}
599
600static int aead_accept_parent(void *private, struct sock *sk)
601{
602 struct aead_ctx *ctx;
603 struct alg_sock *ask = alg_sk(sk);
604 unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
605 unsigned int ivlen = crypto_aead_ivsize(private);
606
607 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
608 if (!ctx)
609 return -ENOMEM;
610 memset(ctx, 0, len);
611
612 ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
613 if (!ctx->iv) {
614 sock_kfree_s(sk, ctx, len);
615 return -ENOMEM;
616 }
617 memset(ctx->iv, 0, ivlen);
618
619 ctx->len = len;
620 ctx->used = 0;
621 ctx->more = 0;
622 ctx->merge = 0;
623 ctx->enc = 0;
624 ctx->tsgl.cur = 0;
625 ctx->aead_assoclen = 0;
626 af_alg_init_completion(&ctx->completion);
627 sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
628
629 ask->private = ctx;
630
631 aead_request_set_tfm(&ctx->aead_req, private);
632 aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
633 af_alg_complete, &ctx->completion);
634
635 sk->sk_destruct = aead_sock_destruct;
636
637 return 0;
638}
639
640static const struct af_alg_type algif_type_aead = {
641 .bind = aead_bind,
642 .release = aead_release,
643 .setkey = aead_setkey,
644 .setauthsize = aead_setauthsize,
645 .accept = aead_accept_parent,
646 .ops = &algif_aead_ops,
647 .name = "aead",
648 .owner = THIS_MODULE
649};
650
651static int __init algif_aead_init(void)
652{
653 return af_alg_register_type(&algif_type_aead);
654}
655
656static void __exit algif_aead_exit(void)
657{
658 int err = af_alg_unregister_type(&algif_type_aead);
659 BUG_ON(err);
660}
661
662module_init(algif_aead_init);
663module_exit(algif_aead_exit);
664MODULE_LICENSE("GPL");
665MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
666MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 3acba0a7cd55..8109aaad2726 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -87,7 +87,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 		return genlen;
 
 	err = memcpy_to_msg(msg, result, len);
-	memzero_explicit(result, genlen);
+	memzero_explicit(result, len);
 
 	return err ? err : len;
 }
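The file fixed above, algif_rng, is the other user-space interface highlighted in the pull message. For context, here is a minimal user-space sketch (illustrative only, not from this merge) of pulling random bytes from the kernel RNG through AF_ALG; "stdrng" is the conventional name that resolves to the default in-kernel DRBG, and error handling is omitted.

/*
 * Illustrative sketch (not part of this merge): read random bytes from
 * the kernel through the algif_rng socket interface.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",
		.salg_name   = "stdrng",
	};
	unsigned char buf[64];
	int tfmfd, opfd;
	ssize_t n;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);

	n = read(opfd, buf, sizeof(buf));	/* each read is capped by algif_rng */
	printf("got %zd random bytes\n", n);

	close(opfd);
	close(tfmfd);
	return 0;
}

Each read is capped by algif_rng (128 bytes in this version), so larger requests need to loop over read().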
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 6f5bebc9bf01..765fe7609348 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -210,7 +210,11 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
 		byte_count = DEFAULT_BLK_SZ;
 	}
 
-	err = byte_count;
+	/*
+	 * Return 0 in case of success as mandated by the kernel
+	 * crypto API interface definition.
+	 */
+	err = 0;
 
 	dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
 			byte_count, ctx);
diff --git a/crypto/api.c b/crypto/api.c
index 2a81e98a0021..afe4610afc4b 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -257,6 +257,16 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
 		mask |= CRYPTO_ALG_TESTED;
 	}
 
+	/*
+	 * If the internal flag is set for a cipher, require a caller to
+	 * to invoke the cipher with the internal flag to use that cipher.
+	 * Also, if a caller wants to allocate a cipher that may or may
+	 * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
+	 * !(mask & CRYPTO_ALG_INTERNAL).
+	 */
+	if (!((type | mask) & CRYPTO_ALG_INTERNAL))
+		mask |= CRYPTO_ALG_INTERNAL;
+
 	larval = crypto_larval_lookup(name, type, mask);
 	if (IS_ERR(larval) || !crypto_is_larval(larval))
 		return larval;
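The lookup change above is what keeps helpers flagged CRYPTO_ALG_INTERNAL (see the cryptd/mcryptd and /proc/crypto changes below) hidden from ordinary allocations. As a hedged sketch of the two calling conventions seen from kernel code, using crypto_alloc_ahash(); the internal driver name is made up for illustration:

/*
 * Illustrative sketch (not from this patch): allocating algorithms once
 * crypto_alg_mod_lookup() enforces CRYPTO_ALG_INTERNAL.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_ahash *get_helper_hash(void)
{
	/*
	 * Plain lookup: the core adds CRYPTO_ALG_INTERNAL to the mask,
	 * so an internal-only helper implementation is never returned.
	 */
	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);

	if (!IS_ERR(tfm))
		return tfm;

	/*
	 * Explicit lookup of an internal helper, as done by wrappers such
	 * as cryptd/mcryptd: both type and mask carry CRYPTO_ALG_INTERNAL.
	 * The driver name here is hypothetical.
	 */
	return crypto_alloc_ahash("__example-internal-sha1",
				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
}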
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 650afac10fd7..b0602ba03111 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -168,6 +168,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 	return ictx->queue;
 }
 
+static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
+					 u32 *mask)
+{
+	struct crypto_attr_type *algt;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return;
+	if ((algt->type & CRYPTO_ALG_INTERNAL))
+		*type |= CRYPTO_ALG_INTERNAL;
+	if ((algt->mask & CRYPTO_ALG_INTERNAL))
+		*mask |= CRYPTO_ALG_INTERNAL;
+}
+
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
 				   const u8 *key, unsigned int keylen)
 {
@@ -321,10 +335,13 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl,
 	struct cryptd_instance_ctx *ctx;
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
+	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
+	u32 mask = CRYPTO_ALG_TYPE_MASK;
 	int err;
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
+	cryptd_check_internal(tb, &type, &mask);
+
+	alg = crypto_get_attr_alg(tb, type, mask);
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
@@ -341,7 +358,10 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl,
 	if (err)
 		goto out_free_inst;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+		type |= CRYPTO_ALG_INTERNAL;
+	inst->alg.cra_flags = type;
 	inst->alg.cra_type = &crypto_ablkcipher_type;
 
 	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
@@ -577,9 +597,13 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	struct ahash_instance *inst;
 	struct shash_alg *salg;
 	struct crypto_alg *alg;
+	u32 type = 0;
+	u32 mask = 0;
 	int err;
 
-	salg = shash_attr_alg(tb[1], 0, 0);
+	cryptd_check_internal(tb, &type, &mask);
+
+	salg = shash_attr_alg(tb[1], type, mask);
 	if (IS_ERR(salg))
 		return PTR_ERR(salg);
 
@@ -598,7 +622,10 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	if (err)
 		goto out_free_inst;
 
-	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+	type = CRYPTO_ALG_ASYNC;
+	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+		type |= CRYPTO_ALG_INTERNAL;
+	inst->alg.halg.base.cra_flags = type;
 
 	inst->alg.halg.digestsize = salg->digestsize;
 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
@@ -719,10 +746,13 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
 	struct aead_instance_ctx *ctx;
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
+	u32 type = CRYPTO_ALG_TYPE_AEAD;
+	u32 mask = CRYPTO_ALG_TYPE_MASK;
 	int err;
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
-				  CRYPTO_ALG_TYPE_MASK);
+	cryptd_check_internal(tb, &type, &mask);
+
+	alg = crypto_get_attr_alg(tb, type, mask);
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
@@ -739,7 +769,10 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
 	if (err)
 		goto out_free_inst;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
+		type |= CRYPTO_ALG_INTERNAL;
+	inst->alg.cra_flags = type;
 	inst->alg.cra_type = alg->cra_type;
 	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 	inst->alg.cra_init = cryptd_aead_init_tfm;
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index c5148a35ae0a..41dfe762b7fb 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -62,10 +62,14 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
 		else if (!exact)
 			match = !strcmp(q->cra_name, p->cru_name);
 
-		if (match) {
-			alg = q;
-			break;
-		}
+		if (!match)
+			continue;
+
+		if (unlikely(!crypto_mod_get(q)))
+			continue;
+
+		alg = q;
+		break;
 	}
 
 	up_read(&crypto_alg_sem);
@@ -205,9 +209,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
 	if (!alg)
 		return -ENOENT;
 
+	err = -ENOMEM;
 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
 	if (!skb)
-		return -ENOMEM;
+		goto drop_alg;
 
 	info.in_skb = in_skb;
 	info.out_skb = skb;
@@ -215,6 +220,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
 	info.nlmsg_flags = 0;
 
 	err = crypto_report_alg(alg, &info);
+
+drop_alg:
+	crypto_mod_put(alg);
+
 	if (err)
 		return err;
 
@@ -284,6 +293,7 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	up_write(&crypto_alg_sem);
 
+	crypto_mod_put(alg);
 	crypto_remove_final(&list);
 
 	return 0;
@@ -294,6 +304,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
 	struct crypto_alg *alg;
 	struct crypto_user_alg *p = nlmsg_data(nlh);
+	int err;
 
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
@@ -310,13 +321,19 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	 * if we try to unregister. Unregistering such an algorithm without
 	 * removing the module is not possible, so we restrict to crypto
 	 * instances that are build from templates. */
+	err = -EINVAL;
 	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
-		return -EINVAL;
+		goto drop_alg;
 
-	if (atomic_read(&alg->cra_refcnt) != 1)
-		return -EBUSY;
+	err = -EBUSY;
+	if (atomic_read(&alg->cra_refcnt) > 2)
+		goto drop_alg;
 
-	return crypto_unregister_instance(alg);
+	err = crypto_unregister_instance((struct crypto_instance *)alg);
+
+drop_alg:
+	crypto_mod_put(alg);
+	return err;
 }
 
 static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
@@ -395,8 +412,10 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return -EINVAL;
 
 	alg = crypto_alg_match(p, exact);
-	if (alg)
+	if (alg) {
+		crypto_mod_put(alg);
 		return -EEXIST;
+	}
 
 	if (strlen(p->cru_driver_name))
 		name = p->cru_driver_name;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index d8ff16e5c322..b69409cb7e6a 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -119,19 +119,19 @@ static const struct drbg_core drbg_cores[] = {
 		.statelen = 32, /* 256 bits as defined in 10.2.1 */
 		.blocklen_bytes = 16,
 		.cra_name = "ctr_aes128",
-		.backend_cra_name = "ecb(aes)",
+		.backend_cra_name = "aes",
 	}, {
 		.flags = DRBG_CTR | DRBG_STRENGTH192,
 		.statelen = 40, /* 320 bits as defined in 10.2.1 */
 		.blocklen_bytes = 16,
 		.cra_name = "ctr_aes192",
-		.backend_cra_name = "ecb(aes)",
+		.backend_cra_name = "aes",
 	}, {
 		.flags = DRBG_CTR | DRBG_STRENGTH256,
 		.statelen = 48, /* 384 bits as defined in 10.2.1 */
 		.blocklen_bytes = 16,
 		.cra_name = "ctr_aes256",
-		.backend_cra_name = "ecb(aes)",
+		.backend_cra_name = "aes",
 	},
 #endif /* CONFIG_CRYPTO_DRBG_CTR */
 #ifdef CONFIG_CRYPTO_DRBG_HASH
@@ -308,9 +308,6 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
 
 	drbg_string_fill(&data, out, drbg_blocklen(drbg));
 
-	/* 10.4.3 step 1 */
-	memset(out, 0, drbg_blocklen(drbg));
-
 	/* 10.4.3 step 2 / 4 */
 	list_for_each_entry(curr, in, list) {
 		const unsigned char *pos = curr->buf;
@@ -406,7 +403,6 @@ static int drbg_ctr_df(struct drbg_state *drbg,
 
 	memset(pad, 0, drbg_blocklen(drbg));
 	memset(iv, 0, drbg_blocklen(drbg));
-	memset(temp, 0, drbg_statelen(drbg));
 
 	/* 10.4.2 step 1 is implicit as we work byte-wise */
 
@@ -523,7 +519,6 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
 	unsigned int len = 0;
 	struct drbg_string cipherin;
 
-	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
 	if (3 > reseed)
 		memset(df_data, 0, drbg_statelen(drbg));
 
@@ -585,8 +580,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 	int ret = 0;
 	struct drbg_string data;
 
-	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
-
 	/* 10.2.1.5.2 step 2 */
 	if (addtl && !list_empty(addtl)) {
 		ret = drbg_ctr_update(drbg, addtl, 2);
@@ -761,7 +754,6 @@ static struct drbg_state_ops drbg_hmac_ops = {
 	.generate = drbg_hmac_generate,
 	.crypto_init = drbg_init_hash_kernel,
 	.crypto_fini = drbg_fini_hash_kernel,
-
 };
 #endif /* CONFIG_CRYPTO_DRBG_HMAC */
 
@@ -838,8 +830,6 @@ static int drbg_hash_df(struct drbg_state *drbg,
 	unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
 	struct drbg_string data;
 
-	memset(tmp, 0, drbg_blocklen(drbg));
-
 	/* 10.4.1 step 3 */
 	input[0] = 1;
 	drbg_cpu_to_be32((outlen * 8), &input[1]);
@@ -879,7 +869,6 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
 	unsigned char *V = drbg->scratchpad;
 	unsigned char prefix = DRBG_PREFIX1;
 
-	memset(drbg->scratchpad, 0, drbg_statelen(drbg));
 	if (!seed)
 		return -EINVAL;
 
@@ -921,9 +910,6 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
 	LIST_HEAD(datalist);
 	unsigned char prefix = DRBG_PREFIX2;
 
-	/* this is value w as per documentation */
-	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
-
 	/* 10.1.1.4 step 2 */
 	if (!addtl || list_empty(addtl))
 		return 0;
@@ -959,9 +945,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 	struct drbg_string data;
 	LIST_HEAD(datalist);
 
-	memset(src, 0, drbg_statelen(drbg));
-	memset(dst, 0, drbg_blocklen(drbg));
-
 	/* 10.1.1.4 step hashgen 2 */
 	memcpy(src, drbg->V, drbg_statelen(drbg));
 
@@ -1018,7 +1001,6 @@ static int drbg_hash_generate(struct drbg_state *drbg,
 	len = drbg_hash_hashgen(drbg, buf, buflen);
 
 	/* this is the value H as documented in 10.1.1.4 */
-	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	/* 10.1.1.4 step 4 */
 	drbg_string_fill(&data1, &prefix, 1);
 	list_add_tail(&data1.list, &datalist);
@@ -1298,7 +1280,7 @@ static void drbg_restore_shadow(struct drbg_state *drbg,
  * as defined in SP800-90A. The additional input is mixed into
  * the state in addition to the pulled entropy.
  *
- * return: generated number of bytes
+ * return: 0 when all bytes are generated; < 0 in case of an error
  */
 static int drbg_generate(struct drbg_state *drbg,
 			 unsigned char *buf, unsigned int buflen,
@@ -1437,6 +1419,11 @@ static int drbg_generate(struct drbg_state *drbg,
 	}
 #endif
 
+	/*
+	 * All operations were successful, return 0 as mandated by
+	 * the kernel crypto API interface.
+	 */
+	len = 0;
 err:
 	shadow->d_ops->crypto_fini(shadow);
 	drbg_restore_shadow(drbg, &shadow);
@@ -1644,24 +1631,24 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
 static int drbg_init_sym_kernel(struct drbg_state *drbg)
 {
 	int ret = 0;
-	struct crypto_blkcipher *tfm;
+	struct crypto_cipher *tfm;
 
-	tfm = crypto_alloc_blkcipher(drbg->core->backend_cra_name, 0, 0);
+	tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
 	if (IS_ERR(tfm)) {
 		pr_info("DRBG: could not allocate cipher TFM handle\n");
 		return PTR_ERR(tfm);
 	}
-	BUG_ON(drbg_blocklen(drbg) != crypto_blkcipher_blocksize(tfm));
+	BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
 	drbg->priv_data = tfm;
 	return ret;
 }
 
 static int drbg_fini_sym_kernel(struct drbg_state *drbg)
 {
-	struct crypto_blkcipher *tfm =
-		(struct crypto_blkcipher *)drbg->priv_data;
+	struct crypto_cipher *tfm =
+		(struct crypto_cipher *)drbg->priv_data;
 	if (tfm)
-		crypto_free_blkcipher(tfm);
+		crypto_free_cipher(tfm);
 	drbg->priv_data = NULL;
 	return 0;
 }
@@ -1669,21 +1656,14 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
 static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
 			  unsigned char *outval, const struct drbg_string *in)
 {
-	int ret = 0;
-	struct scatterlist sg_in, sg_out;
-	struct blkcipher_desc desc;
-	struct crypto_blkcipher *tfm =
-		(struct crypto_blkcipher *)drbg->priv_data;
-
-	desc.tfm = tfm;
-	desc.flags = 0;
-	crypto_blkcipher_setkey(tfm, key, (drbg_keylen(drbg)));
-	/* there is only component in *in */
-	sg_init_one(&sg_in, in->buf, in->len);
-	sg_init_one(&sg_out, outval, drbg_blocklen(drbg));
-	ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, in->len);
+	struct crypto_cipher *tfm =
+		(struct crypto_cipher *)drbg->priv_data;
 
-	return ret;
+	crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
+	/* there is only component in *in */
+	BUG_ON(in->len < drbg_blocklen(drbg));
+	crypto_cipher_encrypt_one(tfm, outval, in->buf);
+	return 0;
 }
 #endif /* CONFIG_CRYPTO_DRBG_CTR */
 
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index a8e870444ea9..fe5b495a434d 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -258,6 +258,20 @@ out_free_inst:
 	goto out;
 }
 
+static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
+					  u32 *mask)
+{
+	struct crypto_attr_type *algt;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return;
+	if ((algt->type & CRYPTO_ALG_INTERNAL))
+		*type |= CRYPTO_ALG_INTERNAL;
+	if ((algt->mask & CRYPTO_ALG_INTERNAL))
+		*mask |= CRYPTO_ALG_INTERNAL;
+}
+
 static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -480,9 +494,13 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
480 struct ahash_instance *inst; 494 struct ahash_instance *inst;
481 struct shash_alg *salg; 495 struct shash_alg *salg;
482 struct crypto_alg *alg; 496 struct crypto_alg *alg;
497 u32 type = 0;
498 u32 mask = 0;
483 int err; 499 int err;
484 500
485 salg = shash_attr_alg(tb[1], 0, 0); 501 mcryptd_check_internal(tb, &type, &mask);
502
503 salg = shash_attr_alg(tb[1], type, mask);
486 if (IS_ERR(salg)) 504 if (IS_ERR(salg))
487 return PTR_ERR(salg); 505 return PTR_ERR(salg);
488 506
@@ -502,7 +520,10 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
502 if (err) 520 if (err)
503 goto out_free_inst; 521 goto out_free_inst;
504 522
505 inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; 523 type = CRYPTO_ALG_ASYNC;
524 if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
525 type |= CRYPTO_ALG_INTERNAL;
526 inst->alg.halg.base.cra_flags = type;
506 527
507 inst->alg.halg.digestsize = salg->digestsize; 528 inst->alg.halg.digestsize = salg->digestsize;
508 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); 529 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
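
The mcryptd changes make the multibuffer wrapper honour the new CRYPTO_ALG_INTERNAL marking: the bit is copied from the template attributes into the shash lookup, and from the wrapped algorithm into the instance's cra_flags, so a wrapper around an internal helper is itself internal. A toy model of that flag propagation follows; wrapper_flags() is an illustrative name, and the flag values are assumed to mirror include/linux/crypto.h from this series.

#include <stdint.h>
#include <stdio.h>

#define CRYPTO_ALG_ASYNC    0x00000080
#define CRYPTO_ALG_INTERNAL 0x00002000

/* What the hunk above computes for inst->alg.halg.base.cra_flags. */
static uint32_t wrapper_flags(uint32_t wrapped_cra_flags)
{
        uint32_t type = CRYPTO_ALG_ASYNC;

        if (wrapped_cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        return type;
}

int main(void)
{
        printf("plain shash    -> 0x%08x\n", (unsigned)wrapper_flags(0));
        printf("internal shash -> 0x%08x\n",
               (unsigned)wrapper_flags(CRYPTO_ALG_INTERNAL));
        return 0;
}
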
diff --git a/crypto/proc.c b/crypto/proc.c
index 4a0a7aad2204..4ffe73b51612 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -89,6 +89,9 @@ static int c_show(struct seq_file *m, void *p)
89 seq_printf(m, "selftest : %s\n", 89 seq_printf(m, "selftest : %s\n",
90 (alg->cra_flags & CRYPTO_ALG_TESTED) ? 90 (alg->cra_flags & CRYPTO_ALG_TESTED) ?
91 "passed" : "unknown"); 91 "passed" : "unknown");
92 seq_printf(m, "internal : %s\n",
93 (alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
94 "yes" : "no");
92 95
93 if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 96 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
94 seq_printf(m, "type : larval\n"); 97 seq_printf(m, "type : larval\n");
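
With the extra seq_printf() above, every /proc/crypto record now reports whether an algorithm is an internal helper. A rough user-space scan is sketched below, assuming the usual "field : value" record layout of /proc/crypto; the program is illustrative and not part of the patch.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256], name[256] = "";
        FILE *f = fopen("/proc/crypto", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "name : %255s", name) == 1)
                        continue;       /* remember the current record's name */
                if (strstr(line, "internal") && strstr(line, "yes"))
                        printf("internal algorithm: %s\n", name);
        }
        fclose(f);
        return 0;
}
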
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index a3e50c37eb6f..39e3acc438d9 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -23,111 +23,49 @@
23#include <linux/cryptohash.h> 23#include <linux/cryptohash.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <crypto/sha.h> 25#include <crypto/sha.h>
26#include <crypto/sha1_base.h>
26#include <asm/byteorder.h> 27#include <asm/byteorder.h>
27 28
28static int sha1_init(struct shash_desc *desc) 29static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
30 int blocks)
29{ 31{
30 struct sha1_state *sctx = shash_desc_ctx(desc); 32 u32 temp[SHA_WORKSPACE_WORDS];
31 33
32 *sctx = (struct sha1_state){ 34 while (blocks--) {
33 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, 35 sha_transform(sst->state, src, temp);
34 }; 36 src += SHA1_BLOCK_SIZE;
35 37 }
36 return 0; 38 memzero_explicit(temp, sizeof(temp));
37} 39}
38 40
39int crypto_sha1_update(struct shash_desc *desc, const u8 *data, 41int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
40 unsigned int len) 42 unsigned int len)
41{ 43{
42 struct sha1_state *sctx = shash_desc_ctx(desc); 44 return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
43 unsigned int partial, done;
44 const u8 *src;
45
46 partial = sctx->count % SHA1_BLOCK_SIZE;
47 sctx->count += len;
48 done = 0;
49 src = data;
50
51 if ((partial + len) >= SHA1_BLOCK_SIZE) {
52 u32 temp[SHA_WORKSPACE_WORDS];
53
54 if (partial) {
55 done = -partial;
56 memcpy(sctx->buffer + partial, data,
57 done + SHA1_BLOCK_SIZE);
58 src = sctx->buffer;
59 }
60
61 do {
62 sha_transform(sctx->state, src, temp);
63 done += SHA1_BLOCK_SIZE;
64 src = data + done;
65 } while (done + SHA1_BLOCK_SIZE <= len);
66
67 memzero_explicit(temp, sizeof(temp));
68 partial = 0;
69 }
70 memcpy(sctx->buffer + partial, src, len - done);
71
72 return 0;
73} 45}
74EXPORT_SYMBOL(crypto_sha1_update); 46EXPORT_SYMBOL(crypto_sha1_update);
75 47
76
77/* Add padding and return the message digest. */
78static int sha1_final(struct shash_desc *desc, u8 *out) 48static int sha1_final(struct shash_desc *desc, u8 *out)
79{ 49{
80 struct sha1_state *sctx = shash_desc_ctx(desc); 50 sha1_base_do_finalize(desc, sha1_generic_block_fn);
81 __be32 *dst = (__be32 *)out; 51 return sha1_base_finish(desc, out);
82 u32 i, index, padlen;
83 __be64 bits;
84 static const u8 padding[64] = { 0x80, };
85
86 bits = cpu_to_be64(sctx->count << 3);
87
88 /* Pad out to 56 mod 64 */
89 index = sctx->count & 0x3f;
90 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
91 crypto_sha1_update(desc, padding, padlen);
92
93 /* Append length */
94 crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
95
96 /* Store state in digest */
97 for (i = 0; i < 5; i++)
98 dst[i] = cpu_to_be32(sctx->state[i]);
99
100 /* Wipe context */
101 memset(sctx, 0, sizeof *sctx);
102
103 return 0;
104} 52}
105 53
106static int sha1_export(struct shash_desc *desc, void *out) 54int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
55 unsigned int len, u8 *out)
107{ 56{
108 struct sha1_state *sctx = shash_desc_ctx(desc); 57 sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
109 58 return sha1_final(desc, out);
110 memcpy(out, sctx, sizeof(*sctx));
111 return 0;
112}
113
114static int sha1_import(struct shash_desc *desc, const void *in)
115{
116 struct sha1_state *sctx = shash_desc_ctx(desc);
117
118 memcpy(sctx, in, sizeof(*sctx));
119 return 0;
120} 59}
60EXPORT_SYMBOL(crypto_sha1_finup);
121 61
122static struct shash_alg alg = { 62static struct shash_alg alg = {
123 .digestsize = SHA1_DIGEST_SIZE, 63 .digestsize = SHA1_DIGEST_SIZE,
124 .init = sha1_init, 64 .init = sha1_base_init,
125 .update = crypto_sha1_update, 65 .update = crypto_sha1_update,
126 .final = sha1_final, 66 .final = sha1_final,
127 .export = sha1_export, 67 .finup = crypto_sha1_finup,
128 .import = sha1_import,
129 .descsize = sizeof(struct sha1_state), 68 .descsize = sizeof(struct sha1_state),
130 .statesize = sizeof(struct sha1_state),
131 .base = { 69 .base = {
132 .cra_name = "sha1", 70 .cra_name = "sha1",
133 .cra_driver_name= "sha1-generic", 71 .cra_driver_name= "sha1-generic",
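
The sha1_generic.c rewrite is the template for all the "move to base layer" commits in this pull: the algorithm keeps only a block function (sha1_generic_block_fn() above), while the shared helpers in include/crypto/sha1_base.h own the partial-block buffer, the byte counter, padding and finalisation. Below is a self-contained sketch of that split, with a toy checksum standing in for the SHA-1 transform and a simplified finalisation that omits the MD padding and length encoding; toy_state, base_do_update(), base_do_finalize() and checksum_block() are illustrative names.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

struct toy_state {
        uint64_t count;                 /* total bytes hashed so far */
        uint32_t state;                 /* toy "digest" state */
        uint8_t  buf[BLOCK_SIZE];       /* buffered partial block */
};

typedef void (*block_fn_t)(struct toy_state *st, const uint8_t *src, int blocks);

/* Stand-in for the per-algorithm block transform (sha1_generic_block_fn). */
static void checksum_block(struct toy_state *st, const uint8_t *src, int blocks)
{
        while (blocks--) {
                for (int i = 0; i < BLOCK_SIZE; i++)
                        st->state = st->state * 31 + src[i];
                src += BLOCK_SIZE;
        }
}

/* Shape of sha1_base_do_update(): buffer partial input, feed whole
 * blocks to the callback, keep the tail for the next call. */
static void base_do_update(struct toy_state *st, const uint8_t *data,
                           size_t len, block_fn_t block_fn)
{
        size_t partial = st->count % BLOCK_SIZE;

        st->count += len;
        if (partial + len >= BLOCK_SIZE) {
                if (partial) {
                        size_t fill = BLOCK_SIZE - partial;

                        memcpy(st->buf + partial, data, fill);
                        block_fn(st, st->buf, 1);
                        data += fill;
                        len -= fill;
                }
                if (len >= BLOCK_SIZE) {
                        block_fn(st, data, (int)(len / BLOCK_SIZE));
                        data += len - len % BLOCK_SIZE;
                        len %= BLOCK_SIZE;
                }
                partial = 0;
        }
        memcpy(st->buf + partial, data, len);
}

/* Shape of sha1_base_do_finalize(); a real implementation would append
 * the 0x80 pad byte and the 64-bit bit length before the last block. */
static uint32_t base_do_finalize(struct toy_state *st, block_fn_t block_fn)
{
        size_t partial = st->count % BLOCK_SIZE;

        memset(st->buf + partial, 0, BLOCK_SIZE - partial);
        block_fn(st, st->buf, 1);
        return st->state;
}

int main(void)
{
        struct toy_state st = { 0 };
        const char msg[] = "one update/finalize helper now serves every SHA driver";

        base_do_update(&st, (const uint8_t *)msg, sizeof(msg) - 1, checksum_block);
        printf("%08x\n", (unsigned)base_do_finalize(&st, checksum_block));
        return 0;
}

crypto_sha1_finup() in the hunk above is then just the update step followed by the final step, which is why the assembler and SIMD front ends elsewhere in this pull shrink to a block function plus glue.
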
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index b001ff5c2efc..78431163ed3c 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -23,6 +23,7 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <crypto/sha.h> 25#include <crypto/sha.h>
26#include <crypto/sha256_base.h>
26#include <asm/byteorder.h> 27#include <asm/byteorder.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28 29
@@ -214,138 +215,43 @@ static void sha256_transform(u32 *state, const u8 *input)
214 memzero_explicit(W, 64 * sizeof(u32)); 215 memzero_explicit(W, 64 * sizeof(u32));
215} 216}
216 217
217static int sha224_init(struct shash_desc *desc) 218static void sha256_generic_block_fn(struct sha256_state *sst, u8 const *src,
219 int blocks)
218{ 220{
219 struct sha256_state *sctx = shash_desc_ctx(desc); 221 while (blocks--) {
220 sctx->state[0] = SHA224_H0; 222 sha256_transform(sst->state, src);
221 sctx->state[1] = SHA224_H1; 223 src += SHA256_BLOCK_SIZE;
222 sctx->state[2] = SHA224_H2; 224 }
223 sctx->state[3] = SHA224_H3;
224 sctx->state[4] = SHA224_H4;
225 sctx->state[5] = SHA224_H5;
226 sctx->state[6] = SHA224_H6;
227 sctx->state[7] = SHA224_H7;
228 sctx->count = 0;
229
230 return 0;
231}
232
233static int sha256_init(struct shash_desc *desc)
234{
235 struct sha256_state *sctx = shash_desc_ctx(desc);
236 sctx->state[0] = SHA256_H0;
237 sctx->state[1] = SHA256_H1;
238 sctx->state[2] = SHA256_H2;
239 sctx->state[3] = SHA256_H3;
240 sctx->state[4] = SHA256_H4;
241 sctx->state[5] = SHA256_H5;
242 sctx->state[6] = SHA256_H6;
243 sctx->state[7] = SHA256_H7;
244 sctx->count = 0;
245
246 return 0;
247} 225}
248 226
249int crypto_sha256_update(struct shash_desc *desc, const u8 *data, 227int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
250 unsigned int len) 228 unsigned int len)
251{ 229{
252 struct sha256_state *sctx = shash_desc_ctx(desc); 230 return sha256_base_do_update(desc, data, len, sha256_generic_block_fn);
253 unsigned int partial, done;
254 const u8 *src;
255
256 partial = sctx->count & 0x3f;
257 sctx->count += len;
258 done = 0;
259 src = data;
260
261 if ((partial + len) > 63) {
262 if (partial) {
263 done = -partial;
264 memcpy(sctx->buf + partial, data, done + 64);
265 src = sctx->buf;
266 }
267
268 do {
269 sha256_transform(sctx->state, src);
270 done += 64;
271 src = data + done;
272 } while (done + 63 < len);
273
274 partial = 0;
275 }
276 memcpy(sctx->buf + partial, src, len - done);
277
278 return 0;
279} 231}
280EXPORT_SYMBOL(crypto_sha256_update); 232EXPORT_SYMBOL(crypto_sha256_update);
281 233
282static int sha256_final(struct shash_desc *desc, u8 *out) 234static int sha256_final(struct shash_desc *desc, u8 *out)
283{ 235{
284 struct sha256_state *sctx = shash_desc_ctx(desc); 236 sha256_base_do_finalize(desc, sha256_generic_block_fn);
285 __be32 *dst = (__be32 *)out; 237 return sha256_base_finish(desc, out);
286 __be64 bits;
287 unsigned int index, pad_len;
288 int i;
289 static const u8 padding[64] = { 0x80, };
290
291 /* Save number of bits */
292 bits = cpu_to_be64(sctx->count << 3);
293
294 /* Pad out to 56 mod 64. */
295 index = sctx->count & 0x3f;
296 pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
297 crypto_sha256_update(desc, padding, pad_len);
298
299 /* Append length (before padding) */
300 crypto_sha256_update(desc, (const u8 *)&bits, sizeof(bits));
301
302 /* Store state in digest */
303 for (i = 0; i < 8; i++)
304 dst[i] = cpu_to_be32(sctx->state[i]);
305
306 /* Zeroize sensitive information. */
307 memset(sctx, 0, sizeof(*sctx));
308
309 return 0;
310} 238}
311 239
312static int sha224_final(struct shash_desc *desc, u8 *hash) 240int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
241 unsigned int len, u8 *hash)
313{ 242{
314 u8 D[SHA256_DIGEST_SIZE]; 243 sha256_base_do_update(desc, data, len, sha256_generic_block_fn);
315 244 return sha256_final(desc, hash);
316 sha256_final(desc, D);
317
318 memcpy(hash, D, SHA224_DIGEST_SIZE);
319 memzero_explicit(D, SHA256_DIGEST_SIZE);
320
321 return 0;
322}
323
324static int sha256_export(struct shash_desc *desc, void *out)
325{
326 struct sha256_state *sctx = shash_desc_ctx(desc);
327
328 memcpy(out, sctx, sizeof(*sctx));
329 return 0;
330}
331
332static int sha256_import(struct shash_desc *desc, const void *in)
333{
334 struct sha256_state *sctx = shash_desc_ctx(desc);
335
336 memcpy(sctx, in, sizeof(*sctx));
337 return 0;
338} 245}
246EXPORT_SYMBOL(crypto_sha256_finup);
339 247
340static struct shash_alg sha256_algs[2] = { { 248static struct shash_alg sha256_algs[2] = { {
341 .digestsize = SHA256_DIGEST_SIZE, 249 .digestsize = SHA256_DIGEST_SIZE,
342 .init = sha256_init, 250 .init = sha256_base_init,
343 .update = crypto_sha256_update, 251 .update = crypto_sha256_update,
344 .final = sha256_final, 252 .final = sha256_final,
345 .export = sha256_export, 253 .finup = crypto_sha256_finup,
346 .import = sha256_import,
347 .descsize = sizeof(struct sha256_state), 254 .descsize = sizeof(struct sha256_state),
348 .statesize = sizeof(struct sha256_state),
349 .base = { 255 .base = {
350 .cra_name = "sha256", 256 .cra_name = "sha256",
351 .cra_driver_name= "sha256-generic", 257 .cra_driver_name= "sha256-generic",
@@ -355,9 +261,10 @@ static struct shash_alg sha256_algs[2] = { {
355 } 261 }
356}, { 262}, {
357 .digestsize = SHA224_DIGEST_SIZE, 263 .digestsize = SHA224_DIGEST_SIZE,
358 .init = sha224_init, 264 .init = sha224_base_init,
359 .update = crypto_sha256_update, 265 .update = crypto_sha256_update,
360 .final = sha224_final, 266 .final = sha256_final,
267 .finup = crypto_sha256_finup,
361 .descsize = sizeof(struct sha256_state), 268 .descsize = sizeof(struct sha256_state),
362 .base = { 269 .base = {
363 .cra_name = "sha224", 270 .cra_name = "sha224",
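
Note that sha224_final disappears: SHA-224 now declares its smaller digestsize and reuses sha256_final, because the base layer's finish helper (sha256_base_finish() in include/crypto/sha256_base.h) emits only as many state words as the declared digest size. A toy illustration of that truncation follows; finish() is an illustrative name.

#include <stdint.h>
#include <stdio.h>

/* Emit only digest_size bytes of the shared 8-word state, big-endian. */
static void finish(const uint32_t state[8], uint8_t *out, unsigned int digest_size)
{
        for (unsigned int i = 0; i < digest_size / 4; i++) {
                uint32_t w = state[i];

                out[4 * i + 0] = (uint8_t)(w >> 24);
                out[4 * i + 1] = (uint8_t)(w >> 16);
                out[4 * i + 2] = (uint8_t)(w >> 8);
                out[4 * i + 3] = (uint8_t)w;
        }
}

int main(void)
{
        uint32_t state[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t sha256_out[32], sha224_out[28];

        finish(state, sha256_out, sizeof(sha256_out));  /* all 8 words */
        finish(state, sha224_out, sizeof(sha224_out));  /* first 7 words only */
        printf("emitted %zu and %zu bytes\n", sizeof(sha256_out), sizeof(sha224_out));
        return 0;
}
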
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 1c3c3767e079..eba965d18bfc 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -18,6 +18,7 @@
18#include <linux/crypto.h> 18#include <linux/crypto.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <crypto/sha.h> 20#include <crypto/sha.h>
21#include <crypto/sha512_base.h>
21#include <linux/percpu.h> 22#include <linux/percpu.h>
22#include <asm/byteorder.h> 23#include <asm/byteorder.h>
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
@@ -130,125 +131,42 @@ sha512_transform(u64 *state, const u8 *input)
130 a = b = c = d = e = f = g = h = t1 = t2 = 0; 131 a = b = c = d = e = f = g = h = t1 = t2 = 0;
131} 132}
132 133
133static int 134static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
134sha512_init(struct shash_desc *desc) 135 int blocks)
135{ 136{
136 struct sha512_state *sctx = shash_desc_ctx(desc); 137 while (blocks--) {
137 sctx->state[0] = SHA512_H0; 138 sha512_transform(sst->state, src);
138 sctx->state[1] = SHA512_H1; 139 src += SHA512_BLOCK_SIZE;
139 sctx->state[2] = SHA512_H2; 140 }
140 sctx->state[3] = SHA512_H3;
141 sctx->state[4] = SHA512_H4;
142 sctx->state[5] = SHA512_H5;
143 sctx->state[6] = SHA512_H6;
144 sctx->state[7] = SHA512_H7;
145 sctx->count[0] = sctx->count[1] = 0;
146
147 return 0;
148}
149
150static int
151sha384_init(struct shash_desc *desc)
152{
153 struct sha512_state *sctx = shash_desc_ctx(desc);
154 sctx->state[0] = SHA384_H0;
155 sctx->state[1] = SHA384_H1;
156 sctx->state[2] = SHA384_H2;
157 sctx->state[3] = SHA384_H3;
158 sctx->state[4] = SHA384_H4;
159 sctx->state[5] = SHA384_H5;
160 sctx->state[6] = SHA384_H6;
161 sctx->state[7] = SHA384_H7;
162 sctx->count[0] = sctx->count[1] = 0;
163
164 return 0;
165} 141}
166 142
167int crypto_sha512_update(struct shash_desc *desc, const u8 *data, 143int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
168 unsigned int len) 144 unsigned int len)
169{ 145{
170 struct sha512_state *sctx = shash_desc_ctx(desc); 146 return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
171
172 unsigned int i, index, part_len;
173
174 /* Compute number of bytes mod 128 */
175 index = sctx->count[0] & 0x7f;
176
177 /* Update number of bytes */
178 if ((sctx->count[0] += len) < len)
179 sctx->count[1]++;
180
181 part_len = 128 - index;
182
183 /* Transform as many times as possible. */
184 if (len >= part_len) {
185 memcpy(&sctx->buf[index], data, part_len);
186 sha512_transform(sctx->state, sctx->buf);
187
188 for (i = part_len; i + 127 < len; i+=128)
189 sha512_transform(sctx->state, &data[i]);
190
191 index = 0;
192 } else {
193 i = 0;
194 }
195
196 /* Buffer remaining input */
197 memcpy(&sctx->buf[index], &data[i], len - i);
198
199 return 0;
200} 147}
201EXPORT_SYMBOL(crypto_sha512_update); 148EXPORT_SYMBOL(crypto_sha512_update);
202 149
203static int 150static int sha512_final(struct shash_desc *desc, u8 *hash)
204sha512_final(struct shash_desc *desc, u8 *hash)
205{ 151{
206 struct sha512_state *sctx = shash_desc_ctx(desc); 152 sha512_base_do_finalize(desc, sha512_generic_block_fn);
207 static u8 padding[128] = { 0x80, }; 153 return sha512_base_finish(desc, hash);
208 __be64 *dst = (__be64 *)hash;
209 __be64 bits[2];
210 unsigned int index, pad_len;
211 int i;
212
213 /* Save number of bits */
214 bits[1] = cpu_to_be64(sctx->count[0] << 3);
215 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
216
217 /* Pad out to 112 mod 128. */
218 index = sctx->count[0] & 0x7f;
219 pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
220 crypto_sha512_update(desc, padding, pad_len);
221
222 /* Append length (before padding) */
223 crypto_sha512_update(desc, (const u8 *)bits, sizeof(bits));
224
225 /* Store state in digest */
226 for (i = 0; i < 8; i++)
227 dst[i] = cpu_to_be64(sctx->state[i]);
228
229 /* Zeroize sensitive information. */
230 memset(sctx, 0, sizeof(struct sha512_state));
231
232 return 0;
233} 154}
234 155
235static int sha384_final(struct shash_desc *desc, u8 *hash) 156int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
157 unsigned int len, u8 *hash)
236{ 158{
237 u8 D[64]; 159 sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
238 160 return sha512_final(desc, hash);
239 sha512_final(desc, D);
240
241 memcpy(hash, D, 48);
242 memzero_explicit(D, 64);
243
244 return 0;
245} 161}
162EXPORT_SYMBOL(crypto_sha512_finup);
246 163
247static struct shash_alg sha512_algs[2] = { { 164static struct shash_alg sha512_algs[2] = { {
248 .digestsize = SHA512_DIGEST_SIZE, 165 .digestsize = SHA512_DIGEST_SIZE,
249 .init = sha512_init, 166 .init = sha512_base_init,
250 .update = crypto_sha512_update, 167 .update = crypto_sha512_update,
251 .final = sha512_final, 168 .final = sha512_final,
169 .finup = crypto_sha512_finup,
252 .descsize = sizeof(struct sha512_state), 170 .descsize = sizeof(struct sha512_state),
253 .base = { 171 .base = {
254 .cra_name = "sha512", 172 .cra_name = "sha512",
@@ -259,9 +177,10 @@ static struct shash_alg sha512_algs[2] = { {
259 } 177 }
260}, { 178}, {
261 .digestsize = SHA384_DIGEST_SIZE, 179 .digestsize = SHA384_DIGEST_SIZE,
262 .init = sha384_init, 180 .init = sha384_base_init,
263 .update = crypto_sha512_update, 181 .update = crypto_sha512_update,
264 .final = sha384_final, 182 .final = sha512_final,
183 .finup = crypto_sha512_finup,
265 .descsize = sizeof(struct sha512_state), 184 .descsize = sizeof(struct sha512_state),
266 .base = { 185 .base = {
267 .cra_name = "sha384", 186 .cra_name = "sha384",
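
sha512_generic.c follows the same pattern, with SHA-384 reusing sha512_final exactly as SHA-224 reuses sha256_final. The 128-bit length bookkeeping the removed sha512_final() carried out by hand now lives in the shared base layer; the arithmetic itself is unchanged, as this small check of the removed expression shows (the counter values are arbitrary).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t count[2] = { 0x3000000000000001ULL, 0x5ULL }; /* low, high byte counts */
        uint64_t bits_lo = count[0] << 3;                      /* count[0] * 8, low 64 bits */
        uint64_t bits_hi = (count[1] << 3) | (count[0] >> 61); /* carry the top 3 bits up */

        printf("message length in bits = 0x%016llx%016llx\n",
               (unsigned long long)bits_hi, (unsigned long long)bits_lo);
        return 0;
}
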
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 4b9e23fa4204..1a2800107fc8 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1155,9 +1155,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
1155 goto out_free_req; 1155 goto out_free_req;
1156 } 1156 }
1157 1157
1158 sg_init_table(sg, TVMEMSIZE);
1159
1160 k = *keysize + *b_size; 1158 k = *keysize + *b_size;
1159 sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));
1160
1161 if (k > PAGE_SIZE) { 1161 if (k > PAGE_SIZE) {
1162 sg_set_buf(sg, tvmem[0] + *keysize, 1162 sg_set_buf(sg, tvmem[0] + *keysize,
1163 PAGE_SIZE - *keysize); 1163 PAGE_SIZE - *keysize);
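
The tcrypt change sizes the scatterlist table from the buffer the speed test actually uses (key plus payload) rather than the fixed TVMEMSIZE. The entry count is a simple round-up division over the page size; a quick check with illustrative sizes (PAGE_SIZE assumed to be 4096 here):

#include <stdio.h>

#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int keysize = 32, b_size = 8192;       /* illustrative test sizes */
        unsigned int k = keysize + b_size;

        printf("k = %u bytes -> %u scatterlist entries\n",
               k, DIV_ROUND_UP(k, PAGE_SIZE));
        return 0;
}
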
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f4ed6d4205e7..f9bce3d7ee7f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1474,11 +1474,11 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1474 for (j = 0; j < template[i].loops; j++) { 1474 for (j = 0; j < template[i].loops; j++) {
1475 err = crypto_rng_get_bytes(tfm, result, 1475 err = crypto_rng_get_bytes(tfm, result,
1476 template[i].rlen); 1476 template[i].rlen);
1477 if (err != template[i].rlen) { 1477 if (err < 0) {
1478 printk(KERN_ERR "alg: cprng: Failed to obtain " 1478 printk(KERN_ERR "alg: cprng: Failed to obtain "
1479 "the correct amount of random data for " 1479 "the correct amount of random data for "
1480 "%s (requested %d, got %d)\n", algo, 1480 "%s (requested %d)\n", algo,
1481 template[i].rlen, err); 1481 template[i].rlen);
1482 goto out; 1482 goto out;
1483 } 1483 }
1484 } 1484 }
@@ -1505,7 +1505,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
1505 struct crypto_aead *tfm; 1505 struct crypto_aead *tfm;
1506 int err = 0; 1506 int err = 0;
1507 1507
1508 tfm = crypto_alloc_aead(driver, type, mask); 1508 tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1509 if (IS_ERR(tfm)) { 1509 if (IS_ERR(tfm)) {
1510 printk(KERN_ERR "alg: aead: Failed to load transform for %s: " 1510 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1511 "%ld\n", driver, PTR_ERR(tfm)); 1511 "%ld\n", driver, PTR_ERR(tfm));
@@ -1534,7 +1534,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
1534 struct crypto_cipher *tfm; 1534 struct crypto_cipher *tfm;
1535 int err = 0; 1535 int err = 0;
1536 1536
1537 tfm = crypto_alloc_cipher(driver, type, mask); 1537 tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1538 if (IS_ERR(tfm)) { 1538 if (IS_ERR(tfm)) {
1539 printk(KERN_ERR "alg: cipher: Failed to load transform for " 1539 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1540 "%s: %ld\n", driver, PTR_ERR(tfm)); 1540 "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1563,7 +1563,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
1563 struct crypto_ablkcipher *tfm; 1563 struct crypto_ablkcipher *tfm;
1564 int err = 0; 1564 int err = 0;
1565 1565
1566 tfm = crypto_alloc_ablkcipher(driver, type, mask); 1566 tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1567 if (IS_ERR(tfm)) { 1567 if (IS_ERR(tfm)) {
1568 printk(KERN_ERR "alg: skcipher: Failed to load transform for " 1568 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1569 "%s: %ld\n", driver, PTR_ERR(tfm)); 1569 "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1636,7 +1636,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1636 struct crypto_ahash *tfm; 1636 struct crypto_ahash *tfm;
1637 int err; 1637 int err;
1638 1638
1639 tfm = crypto_alloc_ahash(driver, type, mask); 1639 tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1640 if (IS_ERR(tfm)) { 1640 if (IS_ERR(tfm)) {
1641 printk(KERN_ERR "alg: hash: Failed to load transform for %s: " 1641 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1642 "%ld\n", driver, PTR_ERR(tfm)); 1642 "%ld\n", driver, PTR_ERR(tfm));
@@ -1664,7 +1664,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
1664 if (err) 1664 if (err)
1665 goto out; 1665 goto out;
1666 1666
1667 tfm = crypto_alloc_shash(driver, type, mask); 1667 tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1668 if (IS_ERR(tfm)) { 1668 if (IS_ERR(tfm)) {
1669 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " 1669 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1670 "%ld\n", driver, PTR_ERR(tfm)); 1670 "%ld\n", driver, PTR_ERR(tfm));
@@ -1706,7 +1706,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1706 struct crypto_rng *rng; 1706 struct crypto_rng *rng;
1707 int err; 1707 int err;
1708 1708
1709 rng = crypto_alloc_rng(driver, type, mask); 1709 rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1710 if (IS_ERR(rng)) { 1710 if (IS_ERR(rng)) {
1711 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " 1711 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1712 "%ld\n", driver, PTR_ERR(rng)); 1712 "%ld\n", driver, PTR_ERR(rng));
@@ -1733,7 +1733,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1733 if (!buf) 1733 if (!buf)
1734 return -ENOMEM; 1734 return -ENOMEM;
1735 1735
1736 drng = crypto_alloc_rng(driver, type, mask); 1736 drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1737 if (IS_ERR(drng)) { 1737 if (IS_ERR(drng)) {
1738 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " 1738 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1739 "%s\n", driver); 1739 "%s\n", driver);
@@ -1759,7 +1759,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1759 ret = crypto_drbg_get_bytes_addtl(drng, 1759 ret = crypto_drbg_get_bytes_addtl(drng,
1760 buf, test->expectedlen, &addtl); 1760 buf, test->expectedlen, &addtl);
1761 } 1761 }
1762 if (ret <= 0) { 1762 if (ret < 0) {
1763 printk(KERN_ERR "alg: drbg: could not obtain random data for " 1763 printk(KERN_ERR "alg: drbg: could not obtain random data for "
1764 "driver %s\n", driver); 1764 "driver %s\n", driver);
1765 goto outbuf; 1765 goto outbuf;
@@ -1774,7 +1774,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1774 ret = crypto_drbg_get_bytes_addtl(drng, 1774 ret = crypto_drbg_get_bytes_addtl(drng,
1775 buf, test->expectedlen, &addtl); 1775 buf, test->expectedlen, &addtl);
1776 } 1776 }
1777 if (ret <= 0) { 1777 if (ret < 0) {
1778 printk(KERN_ERR "alg: drbg: could not obtain random data for " 1778 printk(KERN_ERR "alg: drbg: could not obtain random data for "
1779 "driver %s\n", driver); 1779 "driver %s\n", driver);
1780 goto outbuf; 1780 goto outbuf;
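
Two things change throughout testmgr.c: the CPRNG and DRBG checks now treat only negative returns as errors, and every transform is allocated with CRYPTO_ALG_INTERNAL ORed into the type so the self-tests can still reach algorithms that are otherwise hidden from normal allocation. Below is a toy model of the lookup rule this relies on, added to crypto/api.c earlier in this series; matches() is a simplified stand-in for the real flag comparison, and the flag value is assumed to mirror include/linux/crypto.h.

#include <stdint.h>
#include <stdio.h>

#define CRYPTO_ALG_INTERNAL 0x00002000

/* Simplified flag matching: unless the caller mentions CRYPTO_ALG_INTERNAL
 * in type or mask, internal algorithms are masked out of the lookup. */
static int matches(uint32_t alg_flags, uint32_t type, uint32_t mask)
{
        if (!((type | mask) & CRYPTO_ALG_INTERNAL))
                mask |= CRYPTO_ALG_INTERNAL;

        return !((alg_flags ^ type) & mask);
}

int main(void)
{
        uint32_t internal = CRYPTO_ALG_INTERNAL, regular = 0;

        printf("ordinary caller, internal alg: %d\n", matches(internal, 0, 0));
        printf("ordinary caller, regular alg : %d\n", matches(regular, 0, 0));
        printf("testmgr caller,  internal alg: %d\n",
               matches(internal, CRYPTO_ALG_INTERNAL, 0));
        printf("testmgr caller,  regular alg : %d\n",
               matches(regular, CRYPTO_ALG_INTERNAL, 0));
        return 0;
}
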