Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig            |  18
-rw-r--r--  crypto/Makefile           |   1
-rw-r--r--  crypto/ablkcipher.c       |   7
-rw-r--r--  crypto/aead.c             |   3
-rw-r--r--  crypto/af_alg.c           |  11
-rw-r--r--  crypto/ahash.c            |   3
-rw-r--r--  crypto/algapi.c           |   1
-rw-r--r--  crypto/algif_rng.c        | 192
-rw-r--r--  crypto/algif_skcipher.c   |   8
-rw-r--r--  crypto/cts.c              |   5
-rw-r--r--  crypto/drbg.c             |  34
-rw-r--r--  crypto/scatterwalk.c      |   6
-rw-r--r--  crypto/seqiv.c            |  12
-rw-r--r--  crypto/tcrypt.c           |  37
-rw-r--r--  crypto/testmgr.c          |  58
15 files changed, 316 insertions(+), 80 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 87bbc9c1e681..50f4da44a304 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -427,6 +427,15 @@ config CRYPTO_MD5
 	help
 	  MD5 message digest algorithm (RFC1321).
 
+config CRYPTO_MD5_OCTEON
+	tristate "MD5 digest algorithm (OCTEON)"
+	depends on CPU_CAVIUM_OCTEON
+	select CRYPTO_MD5
+	select CRYPTO_HASH
+	help
+	  MD5 message digest algorithm (RFC1321) implemented
+	  using OCTEON crypto instructions, when available.
+
 config CRYPTO_MD5_SPARC64
 	tristate "MD5 digest algorithm (SPARC64)"
 	depends on SPARC64
@@ -1505,6 +1514,15 @@ config CRYPTO_USER_API_SKCIPHER
 	  This option enables the user-spaces interface for symmetric
 	  key cipher algorithms.
 
+config CRYPTO_USER_API_RNG
+	tristate "User-space interface for random number generator algorithms"
+	depends on NET
+	select CRYPTO_RNG
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-spaces interface for random
+	  number generator algorithms.
+
 config CRYPTO_HASH_INFO
 	bool
 
diff --git a/crypto/Makefile b/crypto/Makefile
index 1445b9100c05..ba19465f9ad3 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 40886c489903..db201bca1581 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -69,6 +69,7 @@ static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
 static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
 {
 	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
 	return max(start, end_page);
 }
 
@@ -86,7 +87,7 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
 		if (n == len_this_page)
 			break;
 		n -= len_this_page;
-		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
 	}
 
 	return bsize;
@@ -284,6 +285,7 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
 	walk->iv = req->info;
 	if (unlikely(((unsigned long)walk->iv & alignmask))) {
 		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+
 		if (err)
 			return err;
 	}
@@ -589,7 +591,8 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
 	if (IS_ERR(inst))
 		goto put_tmpl;
 
-	if ((err = crypto_register_instance(tmpl, inst))) {
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
 		tmpl->free(inst);
 		goto put_tmpl;
 	}
diff --git a/crypto/aead.c b/crypto/aead.c
index 547491e35c63..222271070b49 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -448,7 +448,8 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
 	if (IS_ERR(inst))
 		goto put_tmpl;
 
-	if ((err = crypto_register_instance(tmpl, inst))) {
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
 		tmpl->free(inst);
 		goto put_tmpl;
 	}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 3e80d8b8be45..7f8b7edcadca 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -188,7 +188,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey,
 	err = type->setkey(ask->private, key, keylen);
 
 out:
-	sock_kfree_s(sk, key, keylen);
+	sock_kzfree_s(sk, key, keylen);
 
 	return err;
 }
@@ -215,6 +215,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
 			goto unlock;
 
 		err = alg_setkey(sk, optval, optlen);
+		break;
+	case ALG_SET_AEAD_AUTHSIZE:
+		if (sock->state == SS_CONNECTED)
+			goto unlock;
+		if (!type->setauthsize)
+			goto unlock;
+		err = type->setauthsize(ask->private, optlen);
 	}
 
 unlock:
@@ -387,7 +394,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 		if (cmsg->cmsg_level != SOL_ALG)
 			continue;
 
-		switch(cmsg->cmsg_type) {
+		switch (cmsg->cmsg_type) {
 		case ALG_SET_IV:
 			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
 				return -EINVAL;
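
The new ALG_SET_AEAD_AUTHSIZE case takes the requested tag length from optlen (optval is unused) and must be issued on the tfm socket before accept(). A minimal user-space sketch of how a caller might use it, assuming an algif "aead" type that implements setauthsize is available; the "gcm(aes)" name, 16-byte key and 12-byte tag below are illustrative only:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",		/* assumes an AEAD algif type is registered */
		.salg_name   = "gcm(aes)",	/* illustrative algorithm name */
	};
	unsigned char key[16] = { 0 };		/* illustrative key */
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0)
		return 1;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	/* tag length travels in optlen; lands in the ALG_SET_AEAD_AUTHSIZE case above */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 12);
	close(tfmfd);
	return 0;
}
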
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f6a36a52d738..8acb886032ae 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -55,6 +55,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 
 	if (offset & alignmask) {
 		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+
 		if (nbytes > unaligned)
 			nbytes = unaligned;
 	}
@@ -120,7 +121,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 	if (!walk->total)
 		return 0;
 
-	walk->sg = scatterwalk_sg_next(walk->sg);
+	walk->sg = sg_next(walk->sg);
 
 	return hash_walk_new_entry(walk);
 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 71a8143e23b1..83b04e0884b1 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -473,6 +473,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 	list = &tmpl->instances;
 	hlist_for_each_entry(inst, list, list) {
 		int err = crypto_remove_alg(&inst->alg, &users);
+
 		BUG_ON(err);
 	}
 
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
new file mode 100644
index 000000000000..67f612cfed97
--- /dev/null
+++ b/crypto/algif_rng.c
@@ -0,0 +1,192 @@
+/*
+ * algif_rng: User-space interface for random number generators
+ *
+ * This file provides the user-space API for random number generators.
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL2
+ * are required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <crypto/rng.h>
+#include <linux/random.h>
+#include <crypto/if_alg.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("User-space interface for random number generators");
+
+struct rng_ctx {
+#define MAXSIZE 128
+	unsigned int len;
+	struct crypto_rng *drng;
+};
+
+static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
+		       struct msghdr *msg, size_t len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct rng_ctx *ctx = ask->private;
+	int err = -EFAULT;
+	int genlen = 0;
+	u8 result[MAXSIZE];
+
+	if (len == 0)
+		return 0;
+	if (len > MAXSIZE)
+		len = MAXSIZE;
+
+	/*
+	 * although not strictly needed, this is a precaution against coding
+	 * errors
+	 */
+	memset(result, 0, len);
+
+	/*
+	 * The enforcement of a proper seeding of an RNG is done within an
+	 * RNG implementation. Some RNGs (DRBG, krng) do not need specific
+	 * seeding as they automatically seed. The X9.31 DRNG will return
+	 * an error if it was not seeded properly.
+	 */
+	genlen = crypto_rng_get_bytes(ctx->drng, result, len);
+	if (genlen < 0)
+		return genlen;
+
+	err = memcpy_to_msg(msg, result, len);
+	memzero_explicit(result, genlen);
+
+	return err ? err : len;
+}
+
+static struct proto_ops algif_rng_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+	.poll		=	sock_no_poll,
+	.sendmsg	=	sock_no_sendmsg,
+	.sendpage	=	sock_no_sendpage,
+
+	.release	=	af_alg_release,
+	.recvmsg	=	rng_recvmsg,
+};
+
+static void *rng_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_rng(name, type, mask);
+}
+
+static void rng_release(void *private)
+{
+	crypto_free_rng(private);
+}
+
+static void rng_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct rng_ctx *ctx = ask->private;
+
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int rng_accept_parent(void *private, struct sock *sk)
+{
+	struct rng_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned int len = sizeof(*ctx);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->len = len;
+
+	/*
+	 * No seeding done at that point -- if multiple accepts are
+	 * done on one RNG instance, each resulting FD points to the same
+	 * state of the RNG.
+	 */
+
+	ctx->drng = private;
+	ask->private = ctx;
+	sk->sk_destruct = rng_sock_destruct;
+
+	return 0;
+}
+
+static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
+{
+	/*
+	 * Check whether seedlen is of sufficient size is done in RNG
+	 * implementations.
+	 */
+	return crypto_rng_reset(private, (u8 *)seed, seedlen);
+}
+
+static const struct af_alg_type algif_type_rng = {
+	.bind		=	rng_bind,
+	.release	=	rng_release,
+	.accept		=	rng_accept_parent,
+	.setkey		=	rng_setkey,
+	.ops		=	&algif_rng_ops,
+	.name		=	"rng",
+	.owner		=	THIS_MODULE
+};
+
+static int __init rng_init(void)
+{
+	return af_alg_register_type(&algif_type_rng);
+}
+
+static void __exit rng_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_rng);
+	BUG_ON(err);
+}
+
+module_init(rng_init);
+module_exit(rng_exit);
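
As a usage sketch (not part of the patch): user space binds an "rng" socket, may seed it through ALG_SET_KEY (which ends up in rng_setkey() above, i.e. crypto_rng_reset()), then accept()s and read()s at most MAXSIZE (128) bytes per call from rng_recvmsg(). The "stdrng" name and 16-byte seed below are illustrative; seeding requirements depend on the bound RNG:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",
		.salg_name   = "stdrng",	/* any registered RNG name */
	};
	unsigned char seed[16] = { 0 };		/* illustrative seed only */
	unsigned char buf[64];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	/* ALG_SET_KEY maps to rng_setkey(), i.e. crypto_rng_reset() */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, seed, sizeof(seed));
	opfd = accept(tfmfd, NULL, 0);
	/* each read() is served by rng_recvmsg(); capped at MAXSIZE bytes */
	if (opfd >= 0 && read(opfd, buf, sizeof(buf)) == sizeof(buf))
		printf("got %zu random bytes\n", sizeof(buf));
	close(opfd);
	close(tfmfd);
	return 0;
}
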
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6fc12c3fc4b9..0c8a1e5ccadf 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -330,6 +330,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 
 		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
 		sg = sgl->sg;
+		sg_unmark_end(sg + sgl->cur);
 		do {
 			i = sgl->cur;
 			plen = min_t(int, len, PAGE_SIZE);
@@ -355,6 +356,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 			sgl->cur++;
 		} while (len && sgl->cur < MAX_SGL_ENTS);
 
+		if (!size)
+			sg_mark_end(sg + sgl->cur - 1);
+
 		ctx->merge = plen & (PAGE_SIZE - 1);
 	}
 
@@ -401,6 +405,10 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 	ctx->merge = 0;
 	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
 
+	if (sgl->cur)
+		sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+	sg_mark_end(sgl->sg + sgl->cur);
 	get_page(page);
 	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
 	sgl->cur++;
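
The hunks above maintain the scatterlist end marker by hand: the previous tail is unmarked before more entries are appended, and the new tail is marked so that sg_next()-based walkers terminate correctly. A small illustrative helper (assumed, not from this patch) showing the same discipline with the generic scatterlist API:

#include <linux/scatterlist.h>

/*
 * Illustrative only: append one more buffer to a partially filled
 * scatterlist table, keeping the end marker on the last used slot.
 */
static void sgl_append(struct scatterlist *sgl, unsigned int *cur,
		       void *buf, unsigned int len)
{
	if (*cur)
		sg_unmark_end(&sgl[*cur - 1]);	/* old tail is no longer the end */
	sg_set_buf(&sgl[*cur], buf, len);
	sg_mark_end(&sgl[*cur]);		/* new tail terminates the list */
	(*cur)++;
}
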
diff --git a/crypto/cts.c b/crypto/cts.c
index bd9405820e8a..e467ec0acf9f 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -290,6 +290,9 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
 	if (!is_power_of_2(alg->cra_blocksize))
 		goto out_put_alg;
 
+	if (strncmp(alg->cra_name, "cbc(", 4))
+		goto out_put_alg;
+
 	inst = crypto_alloc_instance("cts", alg);
 	if (IS_ERR(inst))
 		goto out_put_alg;
@@ -307,8 +310,6 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
 	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
 	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 
-	inst->alg.cra_blkcipher.geniv = "seqiv";
-
 	inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx);
 
 	inst->alg.cra_init = crypto_cts_init_tfm;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index d748a1d0ca24..d8ff16e5c322 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,7 +98,6 @@
  */
 
 #include <crypto/drbg.h>
-#include <linux/string.h>
 
 /***************************************************************
  * Backend cipher definitions available to DRBG
@@ -223,15 +222,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
  * function. Thus, the function implicitly knows the size of the
  * buffer.
  *
- * The FIPS test can be called in an endless loop until it returns
- * true. Although the code looks like a potential for a deadlock, it
- * is not the case, because returning a false cannot mathematically
- * occur (except once when a reseed took place and the updated state
- * would is now set up such that the generation of new value returns
- * an identical one -- this is most unlikely and would happen only once).
- * Thus, if this function repeatedly returns false and thus would cause
- * a deadlock, the integrity of the entire kernel is lost.
- *
  * @drbg DRBG handle
  * @buf output buffer of random data to be checked
  *
@@ -258,6 +248,8 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg,
 		return false;
 	}
 	ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+	if (!ret)
+		panic("DRBG continuous self test failed\n");
 	memcpy(drbg->prev, buf, drbg_blocklen(drbg));
 	/* the test shall pass when the two compared values are not equal */
 	return ret != 0;
@@ -498,9 +490,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
 	ret = 0;
 
 out:
-	memzero_explicit(iv, drbg_blocklen(drbg));
-	memzero_explicit(temp, drbg_statelen(drbg));
-	memzero_explicit(pad, drbg_blocklen(drbg));
+	memset(iv, 0, drbg_blocklen(drbg));
+	memset(temp, 0, drbg_statelen(drbg));
+	memset(pad, 0, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -574,9 +566,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
 	ret = 0;
 
 out:
-	memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
+	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
 	if (2 != reseed)
-		memzero_explicit(df_data, drbg_statelen(drbg));
+		memset(df_data, 0, drbg_statelen(drbg));
 	return ret;
 }
 
@@ -634,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 	len = ret;
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	return len;
 }
 
@@ -872,7 +864,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
 	}
 
 out:
-	memzero_explicit(tmp, drbg_blocklen(drbg));
+	memset(tmp, 0, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -916,7 +908,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
 	ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
+	memset(drbg->scratchpad, 0, drbg_statelen(drbg));
 	return ret;
 }
 
@@ -951,7 +943,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
 			  drbg->scratchpad, drbg_blocklen(drbg));
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -998,7 +990,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 	}
 
 out:
-	memzero_explicit(drbg->scratchpad,
+	memset(drbg->scratchpad, 0,
 		(drbg_statelen(drbg) + drbg_blocklen(drbg)));
 	return len;
 }
@@ -1047,7 +1039,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
 	drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	return len;
 }
 
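
For context on the memzero_explicit()/memset() distinction these hunks touch: memzero_explicit() only differs from memset() in that the compiler may not optimize the store away, which matters when the cleared buffer dies immediately afterwards. An illustrative stack-buffer case (assumed example, not from this patch):

#include <linux/types.h>
#include <linux/string.h>

static void handle_secret(void)
{
	u8 key[32];

	/* ... derive and use key ... */

	/*
	 * A plain memset(key, 0, sizeof(key)) here could be removed by
	 * dead-store elimination because 'key' goes out of scope next;
	 * memzero_explicit() is guaranteed to survive that optimization.
	 */
	memzero_explicit(key, sizeof(key));
}
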
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 79ca2278c2a3..3bd749c7bb70 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 		walk->offset += PAGE_SIZE - 1;
 		walk->offset &= PAGE_MASK;
 		if (walk->offset >= walk->sg->offset + walk->sg->length)
-			scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
+			scatterwalk_start(walk, sg_next(walk->sg));
 	}
 }
 
@@ -116,7 +116,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
 			break;
 
 		offset += sg->length;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	scatterwalk_advance(&walk, start - offset);
@@ -136,7 +136,7 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
 	do {
 		offset += sg->length;
 		n++;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 
 		/* num_bytes is too large */
 		if (unlikely(!sg && (num_bytes < offset)))
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 9daa854cc485..b7bb9a2f4a31 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -267,6 +267,12 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
 	if (IS_ERR(inst))
 		goto out;
 
+	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
+		skcipher_geniv_free(inst);
+		inst = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
 	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
 
 	inst->alg.cra_init = seqiv_init;
@@ -287,6 +293,12 @@ static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
 	if (IS_ERR(inst))
 		goto out;
 
+	if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
+		aead_geniv_free(inst);
+		inst = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
 	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
 
 	inst->alg.cra_init = seqiv_aead_init;
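
The new checks reject IVs shorter than 8 bytes because the seqiv generator embeds a 64-bit sequence number in the IV, so anything smaller cannot hold it. Roughly how such an IV is derived (simplified sketch under that assumption, not the exact seqiv code):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/algapi.h>

/* simplified sketch of sequence-number based IV generation */
static void seqiv_sketch(u8 *iv, const u8 *salt, u64 seq, unsigned int ivsize)
{
	__be64 nseq = cpu_to_be64(seq);

	memset(iv, 0, ivsize - sizeof(nseq));	/* only valid for ivsize >= 8 */
	memcpy(iv + ivsize - sizeof(nseq), &nseq, sizeof(nseq));
	crypto_xor(iv, salt, ivsize);		/* whiten with the per-tfm salt */
}
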
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1d864e988ea9..4b9e23fa4204 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -250,19 +250,19 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
 	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
 	int k, rem;
 
-	np = (np > XBUFSIZE) ? XBUFSIZE : np;
-	rem = buflen % PAGE_SIZE;
 	if (np > XBUFSIZE) {
 		rem = PAGE_SIZE;
 		np = XBUFSIZE;
+	} else {
+		rem = buflen % PAGE_SIZE;
 	}
+
 	sg_init_table(sg, np);
-	for (k = 0; k < np; ++k) {
-		if (k == (np-1))
-			sg_set_buf(&sg[k], xbuf[k], rem);
-		else
-			sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
-	}
+	np--;
+	for (k = 0; k < np; k++)
+		sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
+
+	sg_set_buf(&sg[k], xbuf[k], rem);
 }
 
 static void test_aead_speed(const char *algo, int enc, unsigned int secs,
@@ -280,16 +280,20 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	struct scatterlist *sgout;
 	const char *e;
 	void *assoc;
-	char iv[MAX_IVLEN];
+	char *iv;
 	char *xbuf[XBUFSIZE];
 	char *xoutbuf[XBUFSIZE];
 	char *axbuf[XBUFSIZE];
 	unsigned int *b_size;
 	unsigned int iv_len;
 
+	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+	if (!iv)
+		return;
+
 	if (aad_size >= PAGE_SIZE) {
 		pr_err("associate data length (%u) too big\n", aad_size);
-		return;
+		goto out_noxbuf;
 	}
 
 	if (enc == ENCRYPT)
@@ -355,7 +359,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 
 		iv_len = crypto_aead_ivsize(tfm);
 		if (iv_len)
-			memset(&iv, 0xff, iv_len);
+			memset(iv, 0xff, iv_len);
 
 		crypto_aead_clear_flags(tfm, ~0);
 		printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
@@ -408,6 +412,7 @@ out_nooutbuf:
 out_noaxbuf:
 	testmgr_free_buf(xbuf);
 out_noxbuf:
+	kfree(iv);
 	return;
 }
 
@@ -764,10 +769,9 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		struct tcrypt_result *tr = req->base.data;
 
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
+		wait_for_completion(&tr->completion);
 		reinit_completion(&tr->completion);
+		ret = tr->err;
 	}
 	return ret;
 }
@@ -993,10 +997,9 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		struct tcrypt_result *tr = req->base.data;
 
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
+		wait_for_completion(&tr->completion);
 		reinit_completion(&tr->completion);
+		ret = tr->err;
 	}
 
 	return ret;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 037368d34586..f4ed6d4205e7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -181,10 +181,9 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
 static int wait_async_op(struct tcrypt_result *tr, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
+		wait_for_completion(&tr->completion);
 		reinit_completion(&tr->completion);
+		ret = tr->err;
 	}
 	return ret;
 }
@@ -353,12 +352,11 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&tresult.completion);
-			if (!ret && !(ret = tresult.err)) {
-				reinit_completion(&tresult.completion);
+			wait_for_completion(&tresult.completion);
+			reinit_completion(&tresult.completion);
+			ret = tresult.err;
+			if (!ret)
 				break;
-			}
 			/* fall through */
 		default:
 			printk(KERN_ERR "alg: hash: digest failed "
@@ -431,7 +429,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 	struct scatterlist *sgout;
 	const char *e, *d;
 	struct tcrypt_result result;
-	unsigned int authsize;
+	unsigned int authsize, iv_len;
 	void *input;
 	void *output;
 	void *assoc;
@@ -502,10 +500,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
 		memcpy(input, template[i].input, template[i].ilen);
 		memcpy(assoc, template[i].assoc, template[i].alen);
+		iv_len = crypto_aead_ivsize(tfm);
 		if (template[i].iv)
-			memcpy(iv, template[i].iv, MAX_IVLEN);
+			memcpy(iv, template[i].iv, iv_len);
 		else
-			memset(iv, 0, MAX_IVLEN);
+			memset(iv, 0, iv_len);
 
 		crypto_aead_clear_flags(tfm, ~0);
 		if (template[i].wk)
@@ -569,12 +568,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !(ret = result.err)) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 		case -EBADMSG:
 			if (template[i].novrfy)
 				/* verification failure was expected */
@@ -720,12 +718,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !(ret = result.err)) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 		case -EBADMSG:
 			if (template[i].novrfy)
 				/* verification failure was expected */
@@ -1002,12 +999,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !((ret = result.err))) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 			/* fall through */
 		default:
 			pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
@@ -1097,12 +1093,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !((ret = result.err))) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 			/* fall through */
 		default:
 			pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
@@ -3299,6 +3294,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}, {
 		.alg = "rfc4106(gcm(aes))",
 		.test = alg_test_aead,
+		.fips_allowed = 1,
 		.suite = {
 			.aead = {
 				.enc = {