Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig    |  63
-rw-r--r--  crypto/Makefile   |   7
-rw-r--r--  crypto/ahash.c    | 194
-rw-r--r--  crypto/api.c      |   8
-rw-r--r--  crypto/camellia.c |  84
-rw-r--r--  crypto/chainiv.c  |  10
-rw-r--r--  crypto/crc32c.c   | 128
-rw-r--r--  crypto/cryptd.c   | 253
-rw-r--r--  crypto/digest.c   |  83
-rw-r--r--  crypto/hash.c     | 102
-rw-r--r--  crypto/hmac.c     |  16
-rw-r--r--  crypto/internal.h |   1
-rw-r--r--  crypto/prng.c     | 410
-rw-r--r--  crypto/prng.h     |  27
-rw-r--r--  crypto/ripemd.h   |  43
-rw-r--r--  crypto/rmd128.c   | 325
-rw-r--r--  crypto/rmd160.c   | 369
-rw-r--r--  crypto/rmd256.c   | 344
-rw-r--r--  crypto/rmd320.c   | 393
-rw-r--r--  crypto/tcrypt.c   | 198
-rw-r--r--  crypto/tcrypt.h   | 526
21 files changed, 3441 insertions, 143 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 864456c140fe..ea503572fcbe 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -65,6 +65,7 @@ config CRYPTO_NULL
65config CRYPTO_CRYPTD 65config CRYPTO_CRYPTD
66 tristate "Software async crypto daemon" 66 tristate "Software async crypto daemon"
67 select CRYPTO_BLKCIPHER 67 select CRYPTO_BLKCIPHER
68 select CRYPTO_HASH
68 select CRYPTO_MANAGER 69 select CRYPTO_MANAGER
69 help 70 help
70 This is a generic software asynchronous crypto daemon that 71 This is a generic software asynchronous crypto daemon that
@@ -212,7 +213,7 @@ comment "Digest"
212 213
213config CRYPTO_CRC32C 214config CRYPTO_CRC32C
214 tristate "CRC32c CRC algorithm" 215 tristate "CRC32c CRC algorithm"
215 select CRYPTO_ALGAPI 216 select CRYPTO_HASH
216 select LIBCRC32C 217 select LIBCRC32C
217 help 218 help
218 Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used 219 Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
241 should not be used for other purposes because of the weakness 242 should not be used for other purposes because of the weakness
242 of the algorithm. 243 of the algorithm.
243 244
245config CRYPTO_RMD128
246 tristate "RIPEMD-128 digest algorithm"
247 select CRYPTO_ALGAPI
248 help
249 RIPEMD-128 (ISO/IEC 10118-3:2004).
250
251 RIPEMD-128 is a 128-bit cryptographic hash function. It should only
252 be used as a secure replacement for RIPEMD. For other use cases,
253 RIPEMD-160 should be used.
254
255 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
256 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
257
258config CRYPTO_RMD160
259 tristate "RIPEMD-160 digest algorithm"
260 select CRYPTO_ALGAPI
261 help
262 RIPEMD-160 (ISO/IEC 10118-3:2004).
263
264 RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
265 to be used as a secure replacement for the 128-bit hash functions
266 MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128).
267
268 Its speed is comparable to SHA1 and there are no known attacks against
269 RIPEMD-160.
270
271 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
272 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
273
274config CRYPTO_RMD256
275 tristate "RIPEMD-256 digest algorithm"
276 select CRYPTO_ALGAPI
277 help
278 RIPEMD-256 is an optional extension of RIPEMD-128 with a 256-bit hash.
279 It is intended for applications that require longer hash results without
280 needing a larger security level than RIPEMD-128.
281
282 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
283 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
284
285config CRYPTO_RMD320
286 tristate "RIPEMD-320 digest algorithm"
287 select CRYPTO_ALGAPI
288 help
289 RIPEMD-320 is an optional extension of RIPEMD-160 with a 320-bit hash.
290 It is intended for applications that require longer hash results without
291 needing a larger security level than RIPEMD-160.
292
293 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
294 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
295
244config CRYPTO_SHA1 296config CRYPTO_SHA1
245 tristate "SHA1 digest algorithm" 297 tristate "SHA1 digest algorithm"
246 select CRYPTO_ALGAPI 298 select CRYPTO_ALGAPI
@@ -614,6 +666,15 @@ config CRYPTO_LZO
614 help 666 help
615 This is the LZO algorithm. 667 This is the LZO algorithm.
616 668
669comment "Random Number Generation"
670
671config CRYPTO_PRNG
672 tristate "Pseudo Random Number Generation for Cryptographic modules"
673 help
674 This option enables the generic pseudo random number generator
675 for cryptographic modules. It uses the algorithm specified in
676 ANSI X9.31 A.2.4.
677
617source "drivers/crypto/Kconfig" 678source "drivers/crypto/Kconfig"
618 679
619endif # if CRYPTO 680endif # if CRYPTO
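For reference, the generation step that the new CRYPTO_PRNG option refers to (ANSI X9.31 A.2.4) is implemented by _get_more_prng_bytes() in crypto/prng.c further down in this patch. A rough sketch of one step, with E_K() denoting AES-128 encryption under the PRNG key and DT/I/V matching the fields of struct prng_context:

/*
 * One ANSI X9.31 A.2.4 generation step (sketch, not part of the patch):
 *
 *     I = E_K(DT)         stage 0: encrypt the counter value DT
 *     R = E_K(I ^ V)      stage 1: R is the pseudo random block returned
 *     V = E_K(R ^ I)      stage 2: derive the next seed vector V
 *     advance DT          before the next call
 *
 * Stage 2 also rejects an R identical to the previously returned block
 * (the continuous repetition check in _get_more_prng_bytes()).
 */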
diff --git a/crypto/Makefile b/crypto/Makefile
index ca024418f4fb..ef61b3b64660 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
19obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o 19obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
20 20
21crypto_hash-objs := hash.o 21crypto_hash-objs := hash.o
22crypto_hash-objs += ahash.o
22obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o 23obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
23 24
24obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o 25obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
27obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o 28obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
28obj-$(CONFIG_CRYPTO_MD4) += md4.o 29obj-$(CONFIG_CRYPTO_MD4) += md4.o
29obj-$(CONFIG_CRYPTO_MD5) += md5.o 30obj-$(CONFIG_CRYPTO_MD5) += md5.o
31obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
32obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
33obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
34obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
30obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o 35obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
31obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o 36obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
32obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o 37obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
64obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o 69obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
65obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o 70obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
66obj-$(CONFIG_CRYPTO_LZO) += lzo.o 71obj-$(CONFIG_CRYPTO_LZO) += lzo.o
67 72obj-$(CONFIG_CRYPTO_PRNG) += prng.o
68obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o 73obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
69 74
70# 75#
diff --git a/crypto/ahash.c b/crypto/ahash.c
new file mode 100644
index 000000000000..27128f2c687a
--- /dev/null
+++ b/crypto/ahash.c
@@ -0,0 +1,194 @@
1/*
2 * Asynchronous Cryptographic Hash operations.
3 *
4 * This is the asynchronous version of hash.c with notification of
5 * completion via a callback.
6 *
7 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15
16#include <crypto/internal/hash.h>
17#include <crypto/scatterwalk.h>
18#include <linux/err.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/seq_file.h>
24
25#include "internal.h"
26
27static int hash_walk_next(struct crypto_hash_walk *walk)
28{
29 unsigned int alignmask = walk->alignmask;
30 unsigned int offset = walk->offset;
31 unsigned int nbytes = min(walk->entrylen,
32 ((unsigned int)(PAGE_SIZE)) - offset);
33
34 walk->data = crypto_kmap(walk->pg, 0);
35 walk->data += offset;
36
37 if (offset & alignmask)
38 nbytes = alignmask + 1 - (offset & alignmask);
39
40 walk->entrylen -= nbytes;
41 return nbytes;
42}
43
44static int hash_walk_new_entry(struct crypto_hash_walk *walk)
45{
46 struct scatterlist *sg;
47
48 sg = walk->sg;
49 walk->pg = sg_page(sg);
50 walk->offset = sg->offset;
51 walk->entrylen = sg->length;
52
53 if (walk->entrylen > walk->total)
54 walk->entrylen = walk->total;
55 walk->total -= walk->entrylen;
56
57 return hash_walk_next(walk);
58}
59
60int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
61{
62 unsigned int alignmask = walk->alignmask;
63 unsigned int nbytes = walk->entrylen;
64
65 walk->data -= walk->offset;
66
67 if (nbytes && walk->offset & alignmask && !err) {
68 walk->offset += alignmask - 1;
69 walk->offset = ALIGN(walk->offset, alignmask + 1);
70 walk->data += walk->offset;
71
72 nbytes = min(nbytes,
73 ((unsigned int)(PAGE_SIZE)) - walk->offset);
74 walk->entrylen -= nbytes;
75
76 return nbytes;
77 }
78
79 crypto_kunmap(walk->data, 0);
80 crypto_yield(walk->flags);
81
82 if (err)
83 return err;
84
85 walk->offset = 0;
86
87 if (nbytes)
88 return hash_walk_next(walk);
89
90 if (!walk->total)
91 return 0;
92
93 walk->sg = scatterwalk_sg_next(walk->sg);
94
95 return hash_walk_new_entry(walk);
96}
97EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
98
99int crypto_hash_walk_first(struct ahash_request *req,
100 struct crypto_hash_walk *walk)
101{
102 walk->total = req->nbytes;
103
104 if (!walk->total)
105 return 0;
106
107 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
108 walk->sg = req->src;
109 walk->flags = req->base.flags;
110
111 return hash_walk_new_entry(walk);
112}
113EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
114
115static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
116 unsigned int keylen)
117{
118 struct ahash_alg *ahash = crypto_ahash_alg(tfm);
119 unsigned long alignmask = crypto_ahash_alignmask(tfm);
120 int ret;
121 u8 *buffer, *alignbuffer;
122 unsigned long absize;
123
124 absize = keylen + alignmask;
125 buffer = kmalloc(absize, GFP_ATOMIC);
126 if (!buffer)
127 return -ENOMEM;
128
129 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
130 memcpy(alignbuffer, key, keylen);
131 ret = ahash->setkey(tfm, alignbuffer, keylen);
132 memset(alignbuffer, 0, keylen);
133 kfree(buffer);
134 return ret;
135}
136
137static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
138 unsigned int keylen)
139{
140 struct ahash_alg *ahash = crypto_ahash_alg(tfm);
141 unsigned long alignmask = crypto_ahash_alignmask(tfm);
142
143 if ((unsigned long)key & alignmask)
144 return ahash_setkey_unaligned(tfm, key, keylen);
145
146 return ahash->setkey(tfm, key, keylen);
147}
148
149static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
150 u32 mask)
151{
152 return alg->cra_ctxsize;
153}
154
155static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
156{
157 struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
158 struct ahash_tfm *crt = &tfm->crt_ahash;
159
160 if (alg->digestsize > PAGE_SIZE / 8)
161 return -EINVAL;
162
163 crt->init = alg->init;
164 crt->update = alg->update;
165 crt->final = alg->final;
166 crt->digest = alg->digest;
167 crt->setkey = ahash_setkey;
168 crt->digestsize = alg->digestsize;
169
170 return 0;
171}
172
173static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
174 __attribute__ ((unused));
175static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
176{
177 seq_printf(m, "type : ahash\n");
178 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
179 "yes" : "no");
180 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
181 seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
182}
183
184const struct crypto_type crypto_ahash_type = {
185 .ctxsize = crypto_ahash_ctxsize,
186 .init = crypto_init_ahash_ops,
187#ifdef CONFIG_PROC_FS
188 .show = crypto_ahash_show,
189#endif
190};
191EXPORT_SYMBOL_GPL(crypto_ahash_type);
192
193MODULE_LICENSE("GPL");
194MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
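The file above only adds the provider-side plumbing; a minimal caller-side sketch of the new interface follows. It assumes the crypto_alloc_ahash()/ahash_request_*() helpers added to include/crypto/hash.h in the companion patch, uses the hypothetical names sketch_done() and sketch_digest(), and omits proper handling of a backlogged (-EBUSY) request.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Completion callback: wake up the waiter stashed in req->data. */
static void sketch_done(struct crypto_async_request *req, int err)
{
	complete(req->data);
}

/* Hash one scatterlist with the asynchronous interface and wait for it. */
static int sketch_digest(struct scatterlist *sg, unsigned int len, u8 *out)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sketch_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait);
		err = 0;	/* real code would recheck the final status */
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}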
diff --git a/crypto/api.c b/crypto/api.c
index 0a0f41ef255f..d06e33270abe 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
235 return crypto_init_cipher_ops(tfm); 235 return crypto_init_cipher_ops(tfm);
236 236
237 case CRYPTO_ALG_TYPE_DIGEST: 237 case CRYPTO_ALG_TYPE_DIGEST:
238 return crypto_init_digest_ops(tfm); 238 if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
239 239 CRYPTO_ALG_TYPE_HASH_MASK)
240 return crypto_init_digest_ops_async(tfm);
241 else
242 return crypto_init_digest_ops(tfm);
243
240 case CRYPTO_ALG_TYPE_COMPRESS: 244 case CRYPTO_ALG_TYPE_COMPRESS:
241 return crypto_init_compress_ops(tfm); 245 return crypto_init_compress_ops(tfm);
242 246
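In short, the api.c hunk above keeps the legacy behaviour for callers that explicitly ask for the synchronous hash type and routes everything else to the new asynchronous adapter:

/* Decision made in crypto_init_ops() for a CRYPTO_ALG_TYPE_DIGEST algorithm:
 *
 *     caller's mask includes the full CRYPTO_ALG_TYPE_HASH_MASK
 *         -> crypto_init_digest_ops(tfm)        legacy crypto_hash interface
 *     any weaker mask (e.g. an ahash lookup)
 *         -> crypto_init_digest_ops_async(tfm)  new crt_ahash interface
 */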
diff --git a/crypto/camellia.c b/crypto/camellia.c
index 493fee7e0a8b..b1cc4de6493c 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -35,6 +35,8 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/bitops.h>
39#include <asm/unaligned.h>
38 40
39static const u32 camellia_sp1110[256] = { 41static const u32 camellia_sp1110[256] = {
40 0x70707000,0x82828200,0x2c2c2c00,0xececec00, 42 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
335/* 337/*
336 * macros 338 * macros
337 */ 339 */
338#define GETU32(v, pt) \
339 do { \
340 /* latest breed of gcc is clever enough to use move */ \
341 memcpy(&(v), (pt), 4); \
342 (v) = be32_to_cpu(v); \
343 } while(0)
344
345/* rotation right shift 1byte */
346#define ROR8(x) (((x) >> 8) + ((x) << 24))
347/* rotation left shift 1bit */
348#define ROL1(x) (((x) << 1) + ((x) >> 31))
349/* rotation left shift 1byte */
350#define ROL8(x) (((x) << 8) + ((x) >> 24))
351
352#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ 340#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
353 do { \ 341 do { \
354 w0 = ll; \ 342 w0 = ll; \
@@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = {
383 ^ camellia_sp3033[(u8)(il >> 8)] \ 371 ^ camellia_sp3033[(u8)(il >> 8)] \
384 ^ camellia_sp4404[(u8)(il )]; \ 372 ^ camellia_sp4404[(u8)(il )]; \
385 yl ^= yr; \ 373 yl ^= yr; \
386 yr = ROR8(yr); \ 374 yr = ror32(yr, 8); \
387 yr ^= yl; \ 375 yr ^= yl; \
388 } while(0) 376 } while(0)
389 377
@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
405 subL[7] ^= subL[1]; subR[7] ^= subR[1]; 393 subL[7] ^= subL[1]; subR[7] ^= subR[1];
406 subL[1] ^= subR[1] & ~subR[9]; 394 subL[1] ^= subR[1] & ~subR[9];
407 dw = subL[1] & subL[9], 395 dw = subL[1] & subL[9],
408 subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ 396 subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
409 /* round 8 */ 397 /* round 8 */
410 subL[11] ^= subL[1]; subR[11] ^= subR[1]; 398 subL[11] ^= subL[1]; subR[11] ^= subR[1];
411 /* round 10 */ 399 /* round 10 */
@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
414 subL[15] ^= subL[1]; subR[15] ^= subR[1]; 402 subL[15] ^= subL[1]; subR[15] ^= subR[1];
415 subL[1] ^= subR[1] & ~subR[17]; 403 subL[1] ^= subR[1] & ~subR[17];
416 dw = subL[1] & subL[17], 404 dw = subL[1] & subL[17],
417 subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ 405 subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
418 /* round 14 */ 406 /* round 14 */
419 subL[19] ^= subL[1]; subR[19] ^= subR[1]; 407 subL[19] ^= subL[1]; subR[19] ^= subR[1];
420 /* round 16 */ 408 /* round 16 */
@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
430 } else { 418 } else {
431 subL[1] ^= subR[1] & ~subR[25]; 419 subL[1] ^= subR[1] & ~subR[25];
432 dw = subL[1] & subL[25], 420 dw = subL[1] & subL[25],
433 subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ 421 subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
434 /* round 20 */ 422 /* round 20 */
435 subL[27] ^= subL[1]; subR[27] ^= subR[1]; 423 subL[27] ^= subL[1]; subR[27] ^= subR[1];
436 /* round 22 */ 424 /* round 22 */
@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
450 subL[26] ^= kw4l; subR[26] ^= kw4r; 438 subL[26] ^= kw4l; subR[26] ^= kw4r;
451 kw4l ^= kw4r & ~subR[24]; 439 kw4l ^= kw4r & ~subR[24];
452 dw = kw4l & subL[24], 440 dw = kw4l & subL[24],
453 kw4r ^= ROL1(dw); /* modified for FL(kl5) */ 441 kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
454 } 442 }
455 /* round 17 */ 443 /* round 17 */
456 subL[22] ^= kw4l; subR[22] ^= kw4r; 444 subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
460 subL[18] ^= kw4l; subR[18] ^= kw4r; 448 subL[18] ^= kw4l; subR[18] ^= kw4r;
461 kw4l ^= kw4r & ~subR[16]; 449 kw4l ^= kw4r & ~subR[16];
462 dw = kw4l & subL[16], 450 dw = kw4l & subL[16],
463 kw4r ^= ROL1(dw); /* modified for FL(kl3) */ 451 kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
464 /* round 11 */ 452 /* round 11 */
465 subL[14] ^= kw4l; subR[14] ^= kw4r; 453 subL[14] ^= kw4l; subR[14] ^= kw4r;
466 /* round 9 */ 454 /* round 9 */
@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
469 subL[10] ^= kw4l; subR[10] ^= kw4r; 457 subL[10] ^= kw4l; subR[10] ^= kw4r;
470 kw4l ^= kw4r & ~subR[8]; 458 kw4l ^= kw4r & ~subR[8];
471 dw = kw4l & subL[8], 459 dw = kw4l & subL[8],
472 kw4r ^= ROL1(dw); /* modified for FL(kl1) */ 460 kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
473 /* round 5 */ 461 /* round 5 */
474 subL[6] ^= kw4l; subR[6] ^= kw4r; 462 subL[6] ^= kw4l; subR[6] ^= kw4r;
475 /* round 3 */ 463 /* round 3 */
@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
494 SUBKEY_R(6) = subR[5] ^ subR[7]; 482 SUBKEY_R(6) = subR[5] ^ subR[7];
495 tl = subL[10] ^ (subR[10] & ~subR[8]); 483 tl = subL[10] ^ (subR[10] & ~subR[8]);
496 dw = tl & subL[8], /* FL(kl1) */ 484 dw = tl & subL[8], /* FL(kl1) */
497 tr = subR[10] ^ ROL1(dw); 485 tr = subR[10] ^ rol32(dw, 1);
498 SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ 486 SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
499 SUBKEY_R(7) = subR[6] ^ tr; 487 SUBKEY_R(7) = subR[6] ^ tr;
500 SUBKEY_L(8) = subL[8]; /* FL(kl1) */ 488 SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
503 SUBKEY_R(9) = subR[9]; 491 SUBKEY_R(9) = subR[9];
504 tl = subL[7] ^ (subR[7] & ~subR[9]); 492 tl = subL[7] ^ (subR[7] & ~subR[9]);
505 dw = tl & subL[9], /* FLinv(kl2) */ 493 dw = tl & subL[9], /* FLinv(kl2) */
506 tr = subR[7] ^ ROL1(dw); 494 tr = subR[7] ^ rol32(dw, 1);
507 SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ 495 SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
508 SUBKEY_R(10) = tr ^ subR[11]; 496 SUBKEY_R(10) = tr ^ subR[11];
509 SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ 497 SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
516 SUBKEY_R(14) = subR[13] ^ subR[15]; 504 SUBKEY_R(14) = subR[13] ^ subR[15];
517 tl = subL[18] ^ (subR[18] & ~subR[16]); 505 tl = subL[18] ^ (subR[18] & ~subR[16]);
518 dw = tl & subL[16], /* FL(kl3) */ 506 dw = tl & subL[16], /* FL(kl3) */
519 tr = subR[18] ^ ROL1(dw); 507 tr = subR[18] ^ rol32(dw, 1);
520 SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ 508 SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
521 SUBKEY_R(15) = subR[14] ^ tr; 509 SUBKEY_R(15) = subR[14] ^ tr;
522 SUBKEY_L(16) = subL[16]; /* FL(kl3) */ 510 SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
525 SUBKEY_R(17) = subR[17]; 513 SUBKEY_R(17) = subR[17];
526 tl = subL[15] ^ (subR[15] & ~subR[17]); 514 tl = subL[15] ^ (subR[15] & ~subR[17]);
527 dw = tl & subL[17], /* FLinv(kl4) */ 515 dw = tl & subL[17], /* FLinv(kl4) */
528 tr = subR[15] ^ ROL1(dw); 516 tr = subR[15] ^ rol32(dw, 1);
529 SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ 517 SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
530 SUBKEY_R(18) = tr ^ subR[19]; 518 SUBKEY_R(18) = tr ^ subR[19];
531 SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ 519 SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
544 } else { 532 } else {
545 tl = subL[26] ^ (subR[26] & ~subR[24]); 533 tl = subL[26] ^ (subR[26] & ~subR[24]);
546 dw = tl & subL[24], /* FL(kl5) */ 534 dw = tl & subL[24], /* FL(kl5) */
547 tr = subR[26] ^ ROL1(dw); 535 tr = subR[26] ^ rol32(dw, 1);
548 SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ 536 SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
549 SUBKEY_R(23) = subR[22] ^ tr; 537 SUBKEY_R(23) = subR[22] ^ tr;
550 SUBKEY_L(24) = subL[24]; /* FL(kl5) */ 538 SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
553 SUBKEY_R(25) = subR[25]; 541 SUBKEY_R(25) = subR[25];
554 tl = subL[23] ^ (subR[23] & ~subR[25]); 542 tl = subL[23] ^ (subR[23] & ~subR[25]);
555 dw = tl & subL[25], /* FLinv(kl6) */ 543 dw = tl & subL[25], /* FLinv(kl6) */
556 tr = subR[23] ^ ROL1(dw); 544 tr = subR[23] ^ rol32(dw, 1);
557 SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ 545 SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
558 SUBKEY_R(26) = tr ^ subR[27]; 546 SUBKEY_R(26) = tr ^ subR[27];
559 SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ 547 SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
573 /* apply the inverse of the last half of P-function */ 561 /* apply the inverse of the last half of P-function */
574 i = 2; 562 i = 2;
575 do { 563 do {
576 dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */ 564 dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
577 SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; 565 SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
578 dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */ 566 dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
579 SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; 567 SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
580 dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */ 568 dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
581 SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; 569 SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
582 dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */ 570 dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
583 SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; 571 SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
584 dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */ 572 dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
585 SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; 573 SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
586 dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */ 574 dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
587 SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; 575 SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
588 i += 8; 576 i += 8;
589 } while (i < max); 577 } while (i < max);
@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
599 /** 587 /**
600 * k == kll || klr || krl || krr (|| is concatenation) 588 * k == kll || klr || krl || krr (|| is concatenation)
601 */ 589 */
602 GETU32(kll, key ); 590 kll = get_unaligned_be32(key);
603 GETU32(klr, key + 4); 591 klr = get_unaligned_be32(key + 4);
604 GETU32(krl, key + 8); 592 krl = get_unaligned_be32(key + 8);
605 GETU32(krr, key + 12); 593 krr = get_unaligned_be32(key + 12);
606 594
607 /* generate KL dependent subkeys */ 595 /* generate KL dependent subkeys */
608 /* kw1 */ 596 /* kw1 */
@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
707 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) 695 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
708 * (|| is concatenation) 696 * (|| is concatenation)
709 */ 697 */
710 GETU32(kll, key ); 698 kll = get_unaligned_be32(key);
711 GETU32(klr, key + 4); 699 klr = get_unaligned_be32(key + 4);
712 GETU32(krl, key + 8); 700 krl = get_unaligned_be32(key + 8);
713 GETU32(krr, key + 12); 701 krr = get_unaligned_be32(key + 12);
714 GETU32(krll, key + 16); 702 krll = get_unaligned_be32(key + 16);
715 GETU32(krlr, key + 20); 703 krlr = get_unaligned_be32(key + 20);
716 GETU32(krrl, key + 24); 704 krrl = get_unaligned_be32(key + 24);
717 GETU32(krrr, key + 28); 705 krrr = get_unaligned_be32(key + 28);
718 706
719 /* generate KL dependent subkeys */ 707 /* generate KL dependent subkeys */
720 /* kw1 */ 708 /* kw1 */
@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
870 t0 &= ll; \ 858 t0 &= ll; \
871 t2 |= rr; \ 859 t2 |= rr; \
872 rl ^= t2; \ 860 rl ^= t2; \
873 lr ^= ROL1(t0); \ 861 lr ^= rol32(t0, 1); \
874 t3 = krl; \ 862 t3 = krl; \
875 t1 = klr; \ 863 t1 = klr; \
876 t3 &= rl; \ 864 t3 &= rl; \
877 t1 |= lr; \ 865 t1 |= lr; \
878 ll ^= t1; \ 866 ll ^= t1; \
879 rr ^= ROL1(t3); \ 867 rr ^= rol32(t3, 1); \
880 } while(0) 868 } while(0)
881 869
882#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ 870#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
@@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
892 il ^= kl; \ 880 il ^= kl; \
893 ir ^= il ^ kr; \ 881 ir ^= il ^ kr; \
894 yl ^= ir; \ 882 yl ^= ir; \
895 yr ^= ROR8(il) ^ ir; \ 883 yr ^= ror32(il, 8) ^ ir; \
896 } while(0) 884 } while(0)
897 885
898/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ 886/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
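The camellia.c changes above are mechanical: the file-local helpers are dropped in favour of the generic kernel ones. For reference:

/* Removed camellia.c macros and their generic replacements in this patch:
 *
 *     ROR8(x)        ->  ror32(x, 8)                  (<linux/bitops.h>)
 *     ROL1(x)        ->  rol32(x, 1)
 *     ROL8(x)        ->  rol32(x, 8)
 *     GETU32(v, pt)  ->  v = get_unaligned_be32(pt)   (<asm/unaligned.h>)
 */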
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 6da3f577e4db..9affadee3287 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -117,6 +117,7 @@ static int chainiv_init(struct crypto_tfm *tfm)
117static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) 117static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
118{ 118{
119 int queued; 119 int queued;
120 int err = ctx->err;
120 121
121 if (!ctx->queue.qlen) { 122 if (!ctx->queue.qlen) {
122 smp_mb__before_clear_bit(); 123 smp_mb__before_clear_bit();
@@ -131,7 +132,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
131 BUG_ON(!queued); 132 BUG_ON(!queued);
132 133
133out: 134out:
134 return ctx->err; 135 return err;
135} 136}
136 137
137static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) 138static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
@@ -227,6 +228,7 @@ static void async_chainiv_do_postponed(struct work_struct *work)
227 postponed); 228 postponed);
228 struct skcipher_givcrypt_request *req; 229 struct skcipher_givcrypt_request *req;
229 struct ablkcipher_request *subreq; 230 struct ablkcipher_request *subreq;
231 int err;
230 232
231 /* Only handle one request at a time to avoid hogging keventd. */ 233 /* Only handle one request at a time to avoid hogging keventd. */
232 spin_lock_bh(&ctx->lock); 234 spin_lock_bh(&ctx->lock);
@@ -241,7 +243,11 @@ static void async_chainiv_do_postponed(struct work_struct *work)
241 subreq = skcipher_givcrypt_reqctx(req); 243 subreq = skcipher_givcrypt_reqctx(req);
242 subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP; 244 subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
243 245
244 async_chainiv_givencrypt_tail(req); 246 err = async_chainiv_givencrypt_tail(req);
247
248 local_bh_disable();
249 skcipher_givcrypt_complete(req, err);
250 local_bh_enable();
245} 251}
246 252
247static int async_chainiv_init(struct crypto_tfm *tfm) 253static int async_chainiv_init(struct crypto_tfm *tfm)
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 0dcf64a74e68..a882d9e4e63e 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -5,20 +5,23 @@
5 * 5 *
6 * This module file is a wrapper to invoke the lib/crc32c routines. 6 * This module file is a wrapper to invoke the lib/crc32c routines.
7 * 7 *
8 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
9 *
8 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option) 12 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version. 13 * any later version.
12 * 14 *
13 */ 15 */
16
17#include <crypto/internal/hash.h>
14#include <linux/init.h> 18#include <linux/init.h>
15#include <linux/module.h> 19#include <linux/module.h>
16#include <linux/string.h> 20#include <linux/string.h>
17#include <linux/crypto.h>
18#include <linux/crc32c.h> 21#include <linux/crc32c.h>
19#include <linux/kernel.h> 22#include <linux/kernel.h>
20 23
21#define CHKSUM_BLOCK_SIZE 32 24#define CHKSUM_BLOCK_SIZE 1
22#define CHKSUM_DIGEST_SIZE 4 25#define CHKSUM_DIGEST_SIZE 4
23 26
24struct chksum_ctx { 27struct chksum_ctx {
@@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
71 *(__le32 *)out = ~cpu_to_le32(mctx->crc); 74 *(__le32 *)out = ~cpu_to_le32(mctx->crc);
72} 75}
73 76
74static int crc32c_cra_init(struct crypto_tfm *tfm) 77static int crc32c_cra_init_old(struct crypto_tfm *tfm)
75{ 78{
76 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 79 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
77 80
@@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
79 return 0; 82 return 0;
80} 83}
81 84
82static struct crypto_alg alg = { 85static struct crypto_alg old_alg = {
83 .cra_name = "crc32c", 86 .cra_name = "crc32c",
84 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 87 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
85 .cra_blocksize = CHKSUM_BLOCK_SIZE, 88 .cra_blocksize = CHKSUM_BLOCK_SIZE,
86 .cra_ctxsize = sizeof(struct chksum_ctx), 89 .cra_ctxsize = sizeof(struct chksum_ctx),
87 .cra_module = THIS_MODULE, 90 .cra_module = THIS_MODULE,
88 .cra_list = LIST_HEAD_INIT(alg.cra_list), 91 .cra_list = LIST_HEAD_INIT(old_alg.cra_list),
89 .cra_init = crc32c_cra_init, 92 .cra_init = crc32c_cra_init_old,
90 .cra_u = { 93 .cra_u = {
91 .digest = { 94 .digest = {
92 .dia_digestsize= CHKSUM_DIGEST_SIZE, 95 .dia_digestsize= CHKSUM_DIGEST_SIZE,
@@ -98,14 +101,125 @@ static struct crypto_alg alg = {
98 } 101 }
99}; 102};
100 103
104/*
105 * Setting the seed allows arbitrary accumulators and flexible XOR policy
106 * If your algorithm starts with ~0, then XOR with ~0 before you set
107 * the seed.
108 */
109static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
110 unsigned int keylen)
111{
112 u32 *mctx = crypto_ahash_ctx(hash);
113
114 if (keylen != sizeof(u32)) {
115 crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
116 return -EINVAL;
117 }
118 *mctx = le32_to_cpup((__le32 *)key);
119 return 0;
120}
121
122static int crc32c_init(struct ahash_request *req)
123{
124 u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
125 u32 *crcp = ahash_request_ctx(req);
126
127 *crcp = *mctx;
128 return 0;
129}
130
131static int crc32c_update(struct ahash_request *req)
132{
133 struct crypto_hash_walk walk;
134 u32 *crcp = ahash_request_ctx(req);
135 u32 crc = *crcp;
136 int nbytes;
137
138 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
139 nbytes = crypto_hash_walk_done(&walk, 0))
140 crc = crc32c(crc, walk.data, nbytes);
141
142 *crcp = crc;
143 return 0;
144}
145
146static int crc32c_final(struct ahash_request *req)
147{
148 u32 *crcp = ahash_request_ctx(req);
149
150 *(__le32 *)req->result = ~cpu_to_le32p(crcp);
151 return 0;
152}
153
154static int crc32c_digest(struct ahash_request *req)
155{
156 struct crypto_hash_walk walk;
157 u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
158 u32 crc = *mctx;
159 int nbytes;
160
161 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
162 nbytes = crypto_hash_walk_done(&walk, 0))
163 crc = crc32c(crc, walk.data, nbytes);
164
165 *(__le32 *)req->result = ~cpu_to_le32(crc);
166 return 0;
167}
168
169static int crc32c_cra_init(struct crypto_tfm *tfm)
170{
171 u32 *key = crypto_tfm_ctx(tfm);
172
173 *key = ~0;
174
175 tfm->crt_ahash.reqsize = sizeof(u32);
176
177 return 0;
178}
179
180static struct crypto_alg alg = {
181 .cra_name = "crc32c",
182 .cra_driver_name = "crc32c-generic",
183 .cra_priority = 100,
184 .cra_flags = CRYPTO_ALG_TYPE_AHASH,
185 .cra_blocksize = CHKSUM_BLOCK_SIZE,
186 .cra_alignmask = 3,
187 .cra_ctxsize = sizeof(u32),
188 .cra_module = THIS_MODULE,
189 .cra_list = LIST_HEAD_INIT(alg.cra_list),
190 .cra_init = crc32c_cra_init,
191 .cra_type = &crypto_ahash_type,
192 .cra_u = {
193 .ahash = {
194 .digestsize = CHKSUM_DIGEST_SIZE,
195 .setkey = crc32c_setkey,
196 .init = crc32c_init,
197 .update = crc32c_update,
198 .final = crc32c_final,
199 .digest = crc32c_digest,
200 }
201 }
202};
203
101static int __init crc32c_mod_init(void) 204static int __init crc32c_mod_init(void)
102{ 205{
103 return crypto_register_alg(&alg); 206 int err;
207
208 err = crypto_register_alg(&old_alg);
209 if (err)
210 return err;
211
212 err = crypto_register_alg(&alg);
213 if (err)
214 crypto_unregister_alg(&old_alg);
215
216 return err;
104} 217}
105 218
106static void __exit crc32c_mod_fini(void) 219static void __exit crc32c_mod_fini(void)
107{ 220{
108 crypto_unregister_alg(&alg); 221 crypto_unregister_alg(&alg);
222 crypto_unregister_alg(&old_alg);
109} 223}
110 224
111module_init(crc32c_mod_init); 225module_init(crc32c_mod_init);
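The setkey comment in the new crc32c code allows a computation to be resumed from an earlier result. A hypothetical illustration (the helper name is made up; the key is passed in little-endian order, as crc32c_setkey() expects):

/* Sketch: resume crc32c from a previously finalized value.  crc32c_final()
 * XORs the accumulator with ~0, so undo that before seeding. */
static int crc32c_resume_sketch(struct crypto_ahash *tfm, u32 prev_crc)
{
	__le32 seed = cpu_to_le32(prev_crc ^ ~0u);	/* raw accumulator */

	/* Stored by crc32c_setkey(); crc32c_init() copies it into each
	 * new request's accumulator. */
	return crypto_ahash_setkey(tfm, (u8 *)&seed, sizeof(seed));
}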
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index b150de562057..d29e06b350ff 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <crypto/algapi.h> 13#include <crypto/algapi.h>
14#include <crypto/internal/hash.h>
14#include <linux/err.h> 15#include <linux/err.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
@@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
45 crypto_completion_t complete; 46 crypto_completion_t complete;
46}; 47};
47 48
49struct cryptd_hash_ctx {
50 struct crypto_hash *child;
51};
52
53struct cryptd_hash_request_ctx {
54 crypto_completion_t complete;
55};
48 56
49static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm) 57static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
50{ 58{
@@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
82 90
83 rctx = ablkcipher_request_ctx(req); 91 rctx = ablkcipher_request_ctx(req);
84 92
85 if (unlikely(err == -EINPROGRESS)) { 93 if (unlikely(err == -EINPROGRESS))
86 rctx->complete(&req->base, err); 94 goto out;
87 return;
88 }
89 95
90 desc.tfm = child; 96 desc.tfm = child;
91 desc.info = req->info; 97 desc.info = req->info;
@@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
95 101
96 req->base.complete = rctx->complete; 102 req->base.complete = rctx->complete;
97 103
104out:
98 local_bh_disable(); 105 local_bh_disable();
99 req->base.complete(&req->base, err); 106 rctx->complete(&req->base, err);
100 local_bh_enable(); 107 local_bh_enable();
101} 108}
102 109
@@ -261,6 +268,240 @@ out_put_alg:
261 return inst; 268 return inst;
262} 269}
263 270
271static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
272{
273 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
274 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
275 struct crypto_spawn *spawn = &ictx->spawn;
276 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
277 struct crypto_hash *cipher;
278
279 cipher = crypto_spawn_hash(spawn);
280 if (IS_ERR(cipher))
281 return PTR_ERR(cipher);
282
283 ctx->child = cipher;
284 tfm->crt_ahash.reqsize =
285 sizeof(struct cryptd_hash_request_ctx);
286 return 0;
287}
288
289static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
290{
291 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
292 struct cryptd_state *state = cryptd_get_state(tfm);
293 int active;
294
295 mutex_lock(&state->mutex);
296 active = ahash_tfm_in_queue(&state->queue,
297 __crypto_ahash_cast(tfm));
298 mutex_unlock(&state->mutex);
299
300 BUG_ON(active);
301
302 crypto_free_hash(ctx->child);
303}
304
305static int cryptd_hash_setkey(struct crypto_ahash *parent,
306 const u8 *key, unsigned int keylen)
307{
308 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
309 struct crypto_hash *child = ctx->child;
310 int err;
311
312 crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
313 crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
314 CRYPTO_TFM_REQ_MASK);
315 err = crypto_hash_setkey(child, key, keylen);
316 crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
317 CRYPTO_TFM_RES_MASK);
318 return err;
319}
320
321static int cryptd_hash_enqueue(struct ahash_request *req,
322 crypto_completion_t complete)
323{
324 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
325 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
326 struct cryptd_state *state =
327 cryptd_get_state(crypto_ahash_tfm(tfm));
328 int err;
329
330 rctx->complete = req->base.complete;
331 req->base.complete = complete;
332
333 spin_lock_bh(&state->lock);
334 err = ahash_enqueue_request(&state->queue, req);
335 spin_unlock_bh(&state->lock);
336
337 wake_up_process(state->task);
338 return err;
339}
340
341static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
342{
343 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
344 struct crypto_hash *child = ctx->child;
345 struct ahash_request *req = ahash_request_cast(req_async);
346 struct cryptd_hash_request_ctx *rctx;
347 struct hash_desc desc;
348
349 rctx = ahash_request_ctx(req);
350
351 if (unlikely(err == -EINPROGRESS))
352 goto out;
353
354 desc.tfm = child;
355 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
356
357 err = crypto_hash_crt(child)->init(&desc);
358
359 req->base.complete = rctx->complete;
360
361out:
362 local_bh_disable();
363 rctx->complete(&req->base, err);
364 local_bh_enable();
365}
366
367static int cryptd_hash_init_enqueue(struct ahash_request *req)
368{
369 return cryptd_hash_enqueue(req, cryptd_hash_init);
370}
371
372static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
373{
374 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
375 struct crypto_hash *child = ctx->child;
376 struct ahash_request *req = ahash_request_cast(req_async);
377 struct cryptd_hash_request_ctx *rctx;
378 struct hash_desc desc;
379
380 rctx = ahash_request_ctx(req);
381
382 if (unlikely(err == -EINPROGRESS))
383 goto out;
384
385 desc.tfm = child;
386 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
387
388 err = crypto_hash_crt(child)->update(&desc,
389 req->src,
390 req->nbytes);
391
392 req->base.complete = rctx->complete;
393
394out:
395 local_bh_disable();
396 rctx->complete(&req->base, err);
397 local_bh_enable();
398}
399
400static int cryptd_hash_update_enqueue(struct ahash_request *req)
401{
402 return cryptd_hash_enqueue(req, cryptd_hash_update);
403}
404
405static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
406{
407 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
408 struct crypto_hash *child = ctx->child;
409 struct ahash_request *req = ahash_request_cast(req_async);
410 struct cryptd_hash_request_ctx *rctx;
411 struct hash_desc desc;
412
413 rctx = ahash_request_ctx(req);
414
415 if (unlikely(err == -EINPROGRESS))
416 goto out;
417
418 desc.tfm = child;
419 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
420
421 err = crypto_hash_crt(child)->final(&desc, req->result);
422
423 req->base.complete = rctx->complete;
424
425out:
426 local_bh_disable();
427 rctx->complete(&req->base, err);
428 local_bh_enable();
429}
430
431static int cryptd_hash_final_enqueue(struct ahash_request *req)
432{
433 return cryptd_hash_enqueue(req, cryptd_hash_final);
434}
435
436static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
437{
438 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
439 struct crypto_hash *child = ctx->child;
440 struct ahash_request *req = ahash_request_cast(req_async);
441 struct cryptd_hash_request_ctx *rctx;
442 struct hash_desc desc;
443
444 rctx = ahash_request_ctx(req);
445
446 if (unlikely(err == -EINPROGRESS))
447 goto out;
448
449 desc.tfm = child;
450 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
451
452 err = crypto_hash_crt(child)->digest(&desc,
453 req->src,
454 req->nbytes,
455 req->result);
456
457 req->base.complete = rctx->complete;
458
459out:
460 local_bh_disable();
461 rctx->complete(&req->base, err);
462 local_bh_enable();
463}
464
465static int cryptd_hash_digest_enqueue(struct ahash_request *req)
466{
467 return cryptd_hash_enqueue(req, cryptd_hash_digest);
468}
469
470static struct crypto_instance *cryptd_alloc_hash(
471 struct rtattr **tb, struct cryptd_state *state)
472{
473 struct crypto_instance *inst;
474 struct crypto_alg *alg;
475
476 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
477 CRYPTO_ALG_TYPE_HASH_MASK);
478 if (IS_ERR(alg))
479 return ERR_PTR(PTR_ERR(alg));
480
481 inst = cryptd_alloc_instance(alg, state);
482 if (IS_ERR(inst))
483 goto out_put_alg;
484
485 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
486 inst->alg.cra_type = &crypto_ahash_type;
487
488 inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
489 inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
490
491 inst->alg.cra_init = cryptd_hash_init_tfm;
492 inst->alg.cra_exit = cryptd_hash_exit_tfm;
493
494 inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
495 inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
496 inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
497 inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
498 inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
499
500out_put_alg:
501 crypto_mod_put(alg);
502 return inst;
503}
504
264static struct cryptd_state state; 505static struct cryptd_state state;
265 506
266static struct crypto_instance *cryptd_alloc(struct rtattr **tb) 507static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
@@ -274,6 +515,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
274 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { 515 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
275 case CRYPTO_ALG_TYPE_BLKCIPHER: 516 case CRYPTO_ALG_TYPE_BLKCIPHER:
276 return cryptd_alloc_blkcipher(tb, &state); 517 return cryptd_alloc_blkcipher(tb, &state);
518 case CRYPTO_ALG_TYPE_DIGEST:
519 return cryptd_alloc_hash(tb, &state);
277 } 520 }
278 521
279 return ERR_PTR(-EINVAL); 522 return ERR_PTR(-EINVAL);
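With cryptd_alloc_hash() in place, the existing "cryptd" template can wrap a synchronous hash and expose it as an asynchronous one. A hypothetical sketch of instantiating it (assuming crypto_alloc_ahash() from include/crypto/hash.h and a built-in sha1 implementation):

#include <crypto/hash.h>

/* Requests queued on this tfm run in the cryptd kernel thread and complete
 * through the callback set with ahash_request_set_callback(). */
static struct crypto_ahash *sketch_cryptd_sha1(void)
{
	return crypto_alloc_ahash("cryptd(sha1)", 0, 0);
}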
diff --git a/crypto/digest.c b/crypto/digest.c
index b526cc348b79..ac0919460d14 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -12,6 +12,7 @@
12 * 12 *
13 */ 13 */
14 14
15#include <crypto/internal/hash.h>
15#include <crypto/scatterwalk.h> 16#include <crypto/scatterwalk.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
@@ -141,7 +142,7 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
141 struct hash_tfm *ops = &tfm->crt_hash; 142 struct hash_tfm *ops = &tfm->crt_hash;
142 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; 143 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
143 144
144 if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) 145 if (dalg->dia_digestsize > PAGE_SIZE / 8)
145 return -EINVAL; 146 return -EINVAL;
146 147
147 ops->init = init; 148 ops->init = init;
@@ -157,3 +158,83 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
157void crypto_exit_digest_ops(struct crypto_tfm *tfm) 158void crypto_exit_digest_ops(struct crypto_tfm *tfm)
158{ 159{
159} 160}
161
162static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
163 unsigned int keylen)
164{
165 crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
166 return -ENOSYS;
167}
168
169static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
170 unsigned int keylen)
171{
172 struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
173 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
174
175 crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
176 return dalg->dia_setkey(tfm, key, keylen);
177}
178
179static int digest_async_init(struct ahash_request *req)
180{
181 struct crypto_tfm *tfm = req->base.tfm;
182 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
183
184 dalg->dia_init(tfm);
185 return 0;
186}
187
188static int digest_async_update(struct ahash_request *req)
189{
190 struct crypto_tfm *tfm = req->base.tfm;
191 struct hash_desc desc = {
192 .tfm = __crypto_hash_cast(tfm),
193 .flags = req->base.flags,
194 };
195
196 update(&desc, req->src, req->nbytes);
197 return 0;
198}
199
200static int digest_async_final(struct ahash_request *req)
201{
202 struct crypto_tfm *tfm = req->base.tfm;
203 struct hash_desc desc = {
204 .tfm = __crypto_hash_cast(tfm),
205 .flags = req->base.flags,
206 };
207
208 final(&desc, req->result);
209 return 0;
210}
211
212static int digest_async_digest(struct ahash_request *req)
213{
214 struct crypto_tfm *tfm = req->base.tfm;
215 struct hash_desc desc = {
216 .tfm = __crypto_hash_cast(tfm),
217 .flags = req->base.flags,
218 };
219
220 return digest(&desc, req->src, req->nbytes, req->result);
221}
222
223int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
224{
225 struct ahash_tfm *crt = &tfm->crt_ahash;
226 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
227
228 if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
229 return -EINVAL;
230
231 crt->init = digest_async_init;
232 crt->update = digest_async_update;
233 crt->final = digest_async_final;
234 crt->digest = digest_async_digest;
235 crt->setkey = dalg->dia_setkey ? digest_async_setkey :
236 digest_async_nosetkey;
237 crt->digestsize = dalg->dia_digestsize;
238
239 return 0;
240}
diff --git a/crypto/hash.c b/crypto/hash.c
index 7dcff671c19b..cb86b19fd105 100644
--- a/crypto/hash.c
+++ b/crypto/hash.c
@@ -9,6 +9,7 @@
9 * any later version. 9 * any later version.
10 */ 10 */
11 11
12#include <crypto/internal/hash.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/module.h> 15#include <linux/module.h>
@@ -59,24 +60,107 @@ static int hash_setkey(struct crypto_hash *crt, const u8 *key,
59 return alg->setkey(crt, key, keylen); 60 return alg->setkey(crt, key, keylen);
60} 61}
61 62
62static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) 63static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
64 unsigned int keylen)
65{
66 struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
67 struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
68 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
69
70 return alg->setkey(tfm_hash, key, keylen);
71}
72
73static int hash_async_init(struct ahash_request *req)
74{
75 struct crypto_tfm *tfm = req->base.tfm;
76 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
77 struct hash_desc desc = {
78 .tfm = __crypto_hash_cast(tfm),
79 .flags = req->base.flags,
80 };
81
82 return alg->init(&desc);
83}
84
85static int hash_async_update(struct ahash_request *req)
86{
87 struct crypto_tfm *tfm = req->base.tfm;
88 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
89 struct hash_desc desc = {
90 .tfm = __crypto_hash_cast(tfm),
91 .flags = req->base.flags,
92 };
93
94 return alg->update(&desc, req->src, req->nbytes);
95}
96
97static int hash_async_final(struct ahash_request *req)
98{
99 struct crypto_tfm *tfm = req->base.tfm;
100 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
101 struct hash_desc desc = {
102 .tfm = __crypto_hash_cast(tfm),
103 .flags = req->base.flags,
104 };
105
106 return alg->final(&desc, req->result);
107}
108
109static int hash_async_digest(struct ahash_request *req)
110{
111 struct crypto_tfm *tfm = req->base.tfm;
112 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
113 struct hash_desc desc = {
114 .tfm = __crypto_hash_cast(tfm),
115 .flags = req->base.flags,
116 };
117
118 return alg->digest(&desc, req->src, req->nbytes, req->result);
119}
120
121static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
122{
123 struct ahash_tfm *crt = &tfm->crt_ahash;
124 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
125
126 crt->init = hash_async_init;
127 crt->update = hash_async_update;
128 crt->final = hash_async_final;
129 crt->digest = hash_async_digest;
130 crt->setkey = hash_async_setkey;
131 crt->digestsize = alg->digestsize;
132
133 return 0;
134}
135
136static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
63{ 137{
64 struct hash_tfm *crt = &tfm->crt_hash; 138 struct hash_tfm *crt = &tfm->crt_hash;
65 struct hash_alg *alg = &tfm->__crt_alg->cra_hash; 139 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
66 140
67 if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) 141 crt->init = alg->init;
68 return -EINVAL; 142 crt->update = alg->update;
69 143 crt->final = alg->final;
70 crt->init = alg->init; 144 crt->digest = alg->digest;
71 crt->update = alg->update; 145 crt->setkey = hash_setkey;
72 crt->final = alg->final;
73 crt->digest = alg->digest;
74 crt->setkey = hash_setkey;
75 crt->digestsize = alg->digestsize; 146 crt->digestsize = alg->digestsize;
76 147
77 return 0; 148 return 0;
78} 149}
79 150
151static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
152{
153 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
154
155 if (alg->digestsize > PAGE_SIZE / 8)
156 return -EINVAL;
157
158 if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
159 return crypto_init_hash_ops_async(tfm);
160 else
161 return crypto_init_hash_ops_sync(tfm);
162}
163
80static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) 164static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
81 __attribute__ ((unused)); 165 __attribute__ ((unused));
82static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) 166static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 14c6351e639d..7ff2d6a8c7d0 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -226,6 +226,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
226 struct crypto_instance *inst; 226 struct crypto_instance *inst;
227 struct crypto_alg *alg; 227 struct crypto_alg *alg;
228 int err; 228 int err;
229 int ds;
229 230
230 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); 231 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
231 if (err) 232 if (err)
@@ -236,6 +237,13 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
236 if (IS_ERR(alg)) 237 if (IS_ERR(alg))
237 return ERR_CAST(alg); 238 return ERR_CAST(alg);
238 239
240 inst = ERR_PTR(-EINVAL);
241 ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
242 CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
243 alg->cra_digest.dia_digestsize;
244 if (ds > alg->cra_blocksize)
245 goto out_put_alg;
246
239 inst = crypto_alloc_instance("hmac", alg); 247 inst = crypto_alloc_instance("hmac", alg);
240 if (IS_ERR(inst)) 248 if (IS_ERR(inst))
241 goto out_put_alg; 249 goto out_put_alg;
@@ -246,14 +254,10 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
246 inst->alg.cra_alignmask = alg->cra_alignmask; 254 inst->alg.cra_alignmask = alg->cra_alignmask;
247 inst->alg.cra_type = &crypto_hash_type; 255 inst->alg.cra_type = &crypto_hash_type;
248 256
249 inst->alg.cra_hash.digestsize = 257 inst->alg.cra_hash.digestsize = ds;
250 (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
251 CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
252 alg->cra_digest.dia_digestsize;
253 258
254 inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + 259 inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
255 ALIGN(inst->alg.cra_blocksize * 2 + 260 ALIGN(inst->alg.cra_blocksize * 2 + ds,
256 inst->alg.cra_hash.digestsize,
257 sizeof(void *)); 261 sizeof(void *));
258 262
259 inst->alg.cra_init = hmac_init_tfm; 263 inst->alg.cra_init = hmac_init_tfm;
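The new digest-size check in hmac_alloc() refuses to build the template around a digest that is wider than its own block size; for instance, with the crc32c block size changed to 1 earlier in this patch:

/* Examples of the hmac_alloc() digest-size check:
 *
 *     hmac(sha1)      ds = 20, cra_blocksize = 64  ->  accepted
 *     hmac(crc32c)    ds =  4, cra_blocksize =  1  ->  rejected (-EINVAL)
 */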
diff --git a/crypto/internal.h b/crypto/internal.h
index 32f4c2145603..683fcb2d91f4 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
86struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); 86struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
87 87
88int crypto_init_digest_ops(struct crypto_tfm *tfm); 88int crypto_init_digest_ops(struct crypto_tfm *tfm);
89int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
89int crypto_init_cipher_ops(struct crypto_tfm *tfm); 90int crypto_init_cipher_ops(struct crypto_tfm *tfm);
90int crypto_init_compress_ops(struct crypto_tfm *tfm); 91int crypto_init_compress_ops(struct crypto_tfm *tfm);
91 92
diff --git a/crypto/prng.c b/crypto/prng.c
new file mode 100644
index 000000000000..24e4f3282c56
--- /dev/null
+++ b/crypto/prng.c
@@ -0,0 +1,410 @@
1/*
2 * PRNG: Pseudo Random Number Generator
3 * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
4 * AES 128 cipher in RFC3686 ctr mode
5 *
6 * (C) Neil Horman <nhorman@tuxdriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 *
14 */
15
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/slab.h>
21#include <linux/fs.h>
22#include <linux/scatterlist.h>
23#include <linux/string.h>
24#include <linux/crypto.h>
25#include <linux/highmem.h>
26#include <linux/moduleparam.h>
27#include <linux/jiffies.h>
28#include <linux/timex.h>
29#include <linux/interrupt.h>
30#include <linux/miscdevice.h>
31#include "prng.h"
32
33#define TEST_PRNG_ON_START 0
34
35#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
36#define DEFAULT_PRNG_KSZ 20
37#define DEFAULT_PRNG_IV "defaultv"
38#define DEFAULT_PRNG_IVSZ 8
39#define DEFAULT_BLK_SZ 16
40#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
41
42/*
43 * Flags for the prng_context flags field
44 */
45
46#define PRNG_FIXED_SIZE 0x1
47#define PRNG_NEED_RESET 0x2
48
49/*
50 * Note: DT is our counter value
51 * I is our intermediate value
52 * V is our seed vector
53 * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
54 * for implementation details
55 */
56
57
58struct prng_context {
59 char *prng_key;
60 char *prng_iv;
61 spinlock_t prng_lock;
62 unsigned char rand_data[DEFAULT_BLK_SZ];
63 unsigned char last_rand_data[DEFAULT_BLK_SZ];
64 unsigned char DT[DEFAULT_BLK_SZ];
65 unsigned char I[DEFAULT_BLK_SZ];
66 unsigned char V[DEFAULT_BLK_SZ];
67 u32 rand_data_valid;
68 struct crypto_blkcipher *tfm;
69 u32 flags;
70};
71
72static int dbg;
73
74static void hexdump(char *note, unsigned char *buf, unsigned int len)
75{
76 if (dbg) {
77 printk(KERN_CRIT "%s", note);
78 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
79 16, 1,
80 buf, len, false);
81 }
82}
83
84#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0)
85
86static void xor_vectors(unsigned char *in1, unsigned char *in2,
87 unsigned char *out, unsigned int size)
88{
89 int i;
90
91 for (i=0;i<size;i++)
92 out[i] = in1[i] ^ in2[i];
93
94}
95/*
96 * Returns DEFAULT_BLK_SZ bytes of random data per call
97 * returns 0 if generation succeeded, <0 if something went wrong
98 */
99static int _get_more_prng_bytes(struct prng_context *ctx)
100{
101 int i;
102 struct blkcipher_desc desc;
103 struct scatterlist sg_in, sg_out;
104 int ret;
105 unsigned char tmp[DEFAULT_BLK_SZ];
106
107 desc.tfm = ctx->tfm;
108 desc.flags = 0;
109
110
111 dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
112
113 hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
114 hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
115 hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
116
117 /*
118 * This algorithm is a 3 stage state machine
119 */
120 for (i=0;i<3;i++) {
121
122 desc.tfm = ctx->tfm;
123 desc.flags = 0;
124 switch (i) {
125 case 0:
126 /*
127 * Start by encrypting the counter value
128 * This gives us an intermediate value I
129 */
130 memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
131 sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
132 hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
133 break;
134 case 1:
135
136 /*
137 * Next xor I with our secret vector V
138 * encrypt that result to obtain our
139 * pseudo random data which we output
140 */
141 xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
142 sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
143 hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
144 break;
145 case 2:
146 /*
147			 * First check that we didn't produce the same random data
148			 * as we did on the previous iteration (continuity check)
149 */
150 if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
151 printk(KERN_ERR "ctx %p Failed repetition check!\n",
152 ctx);
153 ctx->flags |= PRNG_NEED_RESET;
154 return -1;
155 }
156 memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
157
158 /*
159 * Lastly xor the random data with I
160 * and encrypt that to obtain a new secret vector V
161 */
162 xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
163 sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
164 hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
165 break;
166 }
167
168 /* Initialize our input buffer */
169 sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
170
171 /* do the encryption */
172 ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
173
174 /* And check the result */
175 if (ret) {
176 dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx);
177 ctx->rand_data_valid = DEFAULT_BLK_SZ;
178 return -1;
179 }
180
181 }
182
183 /*
184 * Now update our DT value
185 */
186	for (i = DEFAULT_BLK_SZ - 1; i > 0; i--) {
187 ctx->DT[i] = ctx->DT[i-1];
188 }
189 ctx->DT[0] += 1;
190
191 dbgprint("Returning new block for context %p\n",ctx);
192 ctx->rand_data_valid = 0;
193
194 hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
195 hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
196 hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
197 hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
198
199 return 0;
200}
201
202/* Our exported functions */
203int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
204{
205 unsigned long flags;
206 unsigned char *ptr = buf;
207 unsigned int byte_count = (unsigned int)nbytes;
208 int err;
209
210
211 if (nbytes < 0)
212 return -EINVAL;
213
214 spin_lock_irqsave(&ctx->prng_lock, flags);
215
216 err = -EFAULT;
217 if (ctx->flags & PRNG_NEED_RESET)
218 goto done;
219
220 /*
221 * If the FIXED_SIZE flag is on, only return whole blocks of
222 * pseudo random data
223 */
224 err = -EINVAL;
225 if (ctx->flags & PRNG_FIXED_SIZE) {
226 if (nbytes < DEFAULT_BLK_SZ)
227 goto done;
228 byte_count = DEFAULT_BLK_SZ;
229 }
230
231 err = byte_count;
232
233 dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx);
234
235
236remainder:
237 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
238 if (_get_more_prng_bytes(ctx) < 0) {
239 memset(buf, 0, nbytes);
240 err = -EFAULT;
241 goto done;
242 }
243 }
244
245 /*
246 * Copy up to the next whole block size
247 */
248 if (byte_count < DEFAULT_BLK_SZ) {
249 for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
250 *ptr = ctx->rand_data[ctx->rand_data_valid];
251 ptr++;
252 byte_count--;
253 if (byte_count == 0)
254 goto done;
255 }
256 }
257
258 /*
259 * Now copy whole blocks
260 */
261	for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
262 if (_get_more_prng_bytes(ctx) < 0) {
263 memset(buf, 0, nbytes);
264 err = -1;
265 goto done;
266 }
267 memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
268 ctx->rand_data_valid += DEFAULT_BLK_SZ;
269 ptr += DEFAULT_BLK_SZ;
270 }
271
272 /*
273 * Now copy any extra partial data
274 */
275 if (byte_count)
276 goto remainder;
277
278done:
279 spin_unlock_irqrestore(&ctx->prng_lock, flags);
280 dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx);
281 return err;
282}
283EXPORT_SYMBOL_GPL(get_prng_bytes);
284
285struct prng_context *alloc_prng_context(void)
286{
287	struct prng_context *ctx = kzalloc(sizeof(struct prng_context), GFP_KERNEL);
288
289 spin_lock_init(&ctx->prng_lock);
290
291 if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
292 kfree(ctx);
293 ctx = NULL;
294 }
295
296 dbgprint(KERN_CRIT "returning context %p\n",ctx);
297 return ctx;
298}
299
300EXPORT_SYMBOL_GPL(alloc_prng_context);
301
302void free_prng_context(struct prng_context *ctx)
303{
304 crypto_free_blkcipher(ctx->tfm);
305 kfree(ctx);
306}
307EXPORT_SYMBOL_GPL(free_prng_context);
308
309int reset_prng_context(struct prng_context *ctx,
310 unsigned char *key, unsigned char *iv,
311 unsigned char *V, unsigned char *DT)
312{
313 int ret;
314 int iv_len;
315 int rc = -EFAULT;
316
317 spin_lock(&ctx->prng_lock);
318 ctx->flags |= PRNG_NEED_RESET;
319
320 if (key)
321 memcpy(ctx->prng_key,key,strlen(ctx->prng_key));
322 else
323 ctx->prng_key = DEFAULT_PRNG_KEY;
324
325 if (iv)
326 memcpy(ctx->prng_iv,iv, strlen(ctx->prng_iv));
327 else
328 ctx->prng_iv = DEFAULT_PRNG_IV;
329
330 if (V)
331 memcpy(ctx->V,V,DEFAULT_BLK_SZ);
332 else
333 memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ);
334
335 if (DT)
336 memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
337 else
338 memset(ctx->DT, 0, DEFAULT_BLK_SZ);
339
340 memset(ctx->rand_data,0,DEFAULT_BLK_SZ);
341 memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ);
342
343 if (ctx->tfm)
344 crypto_free_blkcipher(ctx->tfm);
345
346 ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))",0,0);
347 if (!ctx->tfm) {
348 dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n",ctx->tfm);
349 goto out;
350 }
351
352 ctx->rand_data_valid = DEFAULT_BLK_SZ;
353
354 ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
355 if (ret) {
356 dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
357 crypto_blkcipher_get_flags(ctx->tfm));
358 crypto_free_blkcipher(ctx->tfm);
359 goto out;
360 }
361
362 iv_len = crypto_blkcipher_ivsize(ctx->tfm);
363 if (iv_len) {
364 crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
365 }
366 rc = 0;
367 ctx->flags &= ~PRNG_NEED_RESET;
368out:
369 spin_unlock(&ctx->prng_lock);
370
371 return rc;
372
373}
374EXPORT_SYMBOL_GPL(reset_prng_context);
375
376/* Module initialization */
377static int __init prng_mod_init(void)
378{
379
380#if TEST_PRNG_ON_START
381 int i;
382 unsigned char tmpbuf[DEFAULT_BLK_SZ];
383
384 struct prng_context *ctx = alloc_prng_context();
385 if (ctx == NULL)
386 return -EFAULT;
387	for (i = 0; i < 16; i++) {
388 if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
389 free_prng_context(ctx);
390 return -EFAULT;
391 }
392 }
393 free_prng_context(ctx);
394#endif
395
396 return 0;
397}
398
399static void __exit prng_mod_fini(void)
400{
401 return;
402}
403
404MODULE_LICENSE("GPL");
405MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
406MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
407module_param(dbg, int, 0);
408MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
409module_init(prng_mod_init);
410module_exit(prng_mod_fini);
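
The three encryption stages that _get_more_prng_bytes() steps through are the ANSI X9.31 A.2.4 round: I = E_K(DT), R = E_K(I ^ V), V' = E_K(R ^ I). The fragment below restates that round in standalone userspace C with the block cipher abstracted behind a function pointer; it is a sketch for illustration only, not the kernel blkcipher path used above, and the encrypt_block_fn type and x931_round name are invented for this example.

#include <stdint.h>

#define BLK 16

/* Any 16-byte block encryption under a fixed key; stands in for the
 * rfc3686(ctr(aes)) transform the module allocates (hypothetical type). */
typedef void (*encrypt_block_fn)(const uint8_t in[BLK], uint8_t out[BLK]);

/* One X9.31 A.2.4 round: fills rand[] and advances V, given DT and scratch I. */
static void x931_round(encrypt_block_fn enc, const uint8_t DT[BLK],
		       uint8_t I[BLK], uint8_t V[BLK], uint8_t rand[BLK])
{
	uint8_t tmp[BLK];
	int i;

	enc(DT, I);			/* stage 0: I  = E(DT)    */

	for (i = 0; i < BLK; i++)	/* stage 1: R  = E(I ^ V) */
		tmp[i] = I[i] ^ V[i];
	enc(tmp, rand);

	for (i = 0; i < BLK; i++)	/* stage 2: V' = E(R ^ I) */
		tmp[i] = rand[i] ^ I[i];
	enc(tmp, V);
}
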
diff --git a/crypto/prng.h b/crypto/prng.h
new file mode 100644
index 000000000000..1ac9be5009b7
--- /dev/null
+++ b/crypto/prng.h
@@ -0,0 +1,27 @@
1/*
2 * PRNG: Pseudo Random Number Generator
3 *
4 * (C) Neil Horman <nhorman@tuxdriver.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 *
12 */
13
14#ifndef _PRNG_H_
15#define _PRNG_H_
16struct prng_context;
17
18int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
19struct prng_context *alloc_prng_context(void);
20int reset_prng_context(struct prng_context *ctx,
21 unsigned char *key, unsigned char *iv,
22 unsigned char *V,
23 unsigned char *DT);
24void free_prng_context(struct prng_context *ctx);
25
26#endif
27
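
The header above is the whole interface a caller sees. Below is a minimal sketch of an in-kernel consumer, assuming it is built next to crypto/prng.c so that "prng.h" is reachable; the module name and messages are illustrative, and get_prng_bytes() returns the number of bytes produced on success or a negative value on failure.

#include <linux/kernel.h>
#include <linux/module.h>
#include "prng.h"

static int __init prng_consumer_init(void)
{
	struct prng_context *ctx;
	char buf[64];
	int got;

	ctx = alloc_prng_context();
	if (!ctx)
		return -ENOMEM;

	got = get_prng_bytes(buf, sizeof(buf), ctx);
	if (got < 0)
		printk(KERN_ERR "prng_consumer: generation failed (%d)\n", got);
	else
		printk(KERN_INFO "prng_consumer: got %d pseudo-random bytes\n", got);

	free_prng_context(ctx);
	return got < 0 ? got : 0;
}

static void __exit prng_consumer_exit(void)
{
}

module_init(prng_consumer_init);
module_exit(prng_consumer_exit);
MODULE_LICENSE("GPL");
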
diff --git a/crypto/ripemd.h b/crypto/ripemd.h
new file mode 100644
index 000000000000..c57a2d4ce8d9
--- /dev/null
+++ b/crypto/ripemd.h
@@ -0,0 +1,43 @@
1/*
2 * Common values for RIPEMD algorithms
3 */
4
5#ifndef _CRYPTO_RMD_H
6#define _CRYPTO_RMD_H
7
8#define RMD128_DIGEST_SIZE 16
9#define RMD128_BLOCK_SIZE 64
10
11#define RMD160_DIGEST_SIZE 20
12#define RMD160_BLOCK_SIZE 64
13
14#define RMD256_DIGEST_SIZE 32
15#define RMD256_BLOCK_SIZE 64
16
17#define RMD320_DIGEST_SIZE 40
18#define RMD320_BLOCK_SIZE 64
19
20/* initial values */
21#define RMD_H0 0x67452301UL
22#define RMD_H1 0xefcdab89UL
23#define RMD_H2 0x98badcfeUL
24#define RMD_H3 0x10325476UL
25#define RMD_H4 0xc3d2e1f0UL
26#define RMD_H5 0x76543210UL
27#define RMD_H6 0xfedcba98UL
28#define RMD_H7 0x89abcdefUL
29#define RMD_H8 0x01234567UL
30#define RMD_H9 0x3c2d1e0fUL
31
32/* constants */
33#define RMD_K1 0x00000000UL
34#define RMD_K2 0x5a827999UL
35#define RMD_K3 0x6ed9eba1UL
36#define RMD_K4 0x8f1bbcdcUL
37#define RMD_K5 0xa953fd4eUL
38#define RMD_K6 0x50a28be6UL
39#define RMD_K7 0x5c4dd124UL
40#define RMD_K8 0x6d703ef3UL
41#define RMD_K9 0x7a6d76e9UL
42
43#endif
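
A quick sanity check on the round constants above: as documented for RIPEMD-160, the left-lane constants are floor(2^30 * sqrt(p)) and the right-lane constants floor(2^30 * cbrt(p)) for p = 2, 3, 5, 7, with RMD_K1 (zero) reused for the remaining round on each side. The standalone userspace program below verifies this; it is a checking aid only, not part of the patch (build with -lm).

#include <math.h>
#include <stdio.h>

int main(void)
{
	static const double p[] = { 2.0, 3.0, 5.0, 7.0 };
	/* Left lane: RMD_K2..RMD_K5.  Right lane: RMD_K6..RMD_K9. */
	static const unsigned long sqrt_k[] = { 0x5a827999, 0x6ed9eba1,
						0x8f1bbcdc, 0xa953fd4e };
	static const unsigned long cbrt_k[] = { 0x50a28be6, 0x5c4dd124,
						0x6d703ef3, 0x7a6d76e9 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long s = (unsigned long)floor(ldexp(sqrt(p[i]), 30));
		unsigned long c = (unsigned long)floor(ldexp(cbrt(p[i]), 30));

		printf("p=%.0f sqrt:%#010lx %s cbrt:%#010lx %s\n", p[i],
		       s, s == sqrt_k[i] ? "ok" : "mismatch",
		       c, c == cbrt_k[i] ? "ok" : "mismatch");
	}
	return 0;
}
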
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
new file mode 100644
index 000000000000..5de6fa2a76fb
--- /dev/null
+++ b/crypto/rmd128.c
@@ -0,0 +1,325 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd128_ctx {
27 u64 byte_count;
28 u32 state[4];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define KK1 RMD_K6
37#define KK2 RMD_K7
38#define KK3 RMD_K8
39#define KK4 RMD_K1
40
41#define F1(x, y, z) (x ^ y ^ z) /* XOR */
42#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
43#define F3(x, y, z) ((x | ~y) ^ z)
44#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
45
46#define ROUND(a, b, c, d, f, k, x, s) { \
47 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
48 (a) = rol32((a), (s)); \
49}
50
51static void rmd128_transform(u32 *state, const __le32 *in)
52{
53 u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
54
55 /* Initialize left lane */
56 aa = state[0];
57 bb = state[1];
58 cc = state[2];
59 dd = state[3];
60
61 /* Initialize right lane */
62 aaa = state[0];
63 bbb = state[1];
64 ccc = state[2];
65 ddd = state[3];
66
67 /* round 1: left lane */
68 ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
69 ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
70 ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
71 ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
72 ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
73 ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
74 ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
75 ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
76 ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
77 ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
78 ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
79 ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
80 ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
81 ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
82 ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
83 ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
84
85 /* round 2: left lane */
86 ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
87 ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
88 ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
89 ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
90 ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
91 ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
92 ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
93 ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
94 ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
95 ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
96 ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
97 ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
98 ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
99 ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
100 ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
101 ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
102
103 /* round 3: left lane */
104 ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
105 ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
106 ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
107 ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
108 ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
109 ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
110 ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
111 ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
112 ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
113 ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
114 ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
115 ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
116 ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
117 ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
118 ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
119 ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
120
121 /* round 4: left lane */
122 ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
123 ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
124 ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
125 ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
126 ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
127 ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
128 ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
129 ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
130 ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
131 ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
132 ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
133 ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
134 ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
135 ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
136 ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
137 ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
138
139 /* round 1: right lane */
140 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
141 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
142 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
143 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
144 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
145 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
146 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
147 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
148 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
149 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
150 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
151 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
152 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
153 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
154 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
155 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
156
157 /* round 2: right lane */
158 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
159 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
160 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
161 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
162 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
163 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
164 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
165 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
166 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
167 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
168 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
169 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
170 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
171 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
172 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
173 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
174
175 /* round 3: right lane */
176 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
177 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
178 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
179 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
180 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
181 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
182 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
183 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
184 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
185 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
186 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
187 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
188 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
189 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
190 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
191 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
192
193 /* round 4: right lane */
194 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
195 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
196 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
197 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
198 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
199 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
200 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
201 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
202 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
203 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
204 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
205 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
206 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
207 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
208 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
209 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
210
211 /* combine results */
212 ddd += cc + state[1]; /* final result for state[0] */
213 state[1] = state[2] + dd + aaa;
214 state[2] = state[3] + aa + bbb;
215 state[3] = state[0] + bb + ccc;
216 state[0] = ddd;
217
218 return;
219}
220
221static void rmd128_init(struct crypto_tfm *tfm)
222{
223 struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
224
225 rctx->byte_count = 0;
226
227 rctx->state[0] = RMD_H0;
228 rctx->state[1] = RMD_H1;
229 rctx->state[2] = RMD_H2;
230 rctx->state[3] = RMD_H3;
231
232 memset(rctx->buffer, 0, sizeof(rctx->buffer));
233}
234
235static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
236 unsigned int len)
237{
238 struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
239 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
240
241 rctx->byte_count += len;
242
243 /* Enough space in buffer? If so copy and we're done */
244 if (avail > len) {
245 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
246 data, len);
247 return;
248 }
249
250 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
251 data, avail);
252
253 rmd128_transform(rctx->state, rctx->buffer);
254 data += avail;
255 len -= avail;
256
257 while (len >= sizeof(rctx->buffer)) {
258 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
259 rmd128_transform(rctx->state, rctx->buffer);
260 data += sizeof(rctx->buffer);
261 len -= sizeof(rctx->buffer);
262 }
263
264 memcpy(rctx->buffer, data, len);
265}
266
267/* Add padding and return the message digest. */
268static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
269{
270 struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
271 u32 i, index, padlen;
272 __le64 bits;
273 __le32 *dst = (__le32 *)out;
274 static const u8 padding[64] = { 0x80, };
275
276 bits = cpu_to_le64(rctx->byte_count << 3);
277
278 /* Pad out to 56 mod 64 */
279 index = rctx->byte_count & 0x3f;
280 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
281 rmd128_update(tfm, padding, padlen);
282
283 /* Append length */
284 rmd128_update(tfm, (const u8 *)&bits, sizeof(bits));
285
286 /* Store state in digest */
287 for (i = 0; i < 4; i++)
288 dst[i] = cpu_to_le32p(&rctx->state[i]);
289
290 /* Wipe context */
291 memset(rctx, 0, sizeof(*rctx));
292}
293
294static struct crypto_alg alg = {
295 .cra_name = "rmd128",
296 .cra_driver_name = "rmd128",
297 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
298 .cra_blocksize = RMD128_BLOCK_SIZE,
299 .cra_ctxsize = sizeof(struct rmd128_ctx),
300 .cra_module = THIS_MODULE,
301 .cra_list = LIST_HEAD_INIT(alg.cra_list),
302 .cra_u = { .digest = {
303 .dia_digestsize = RMD128_DIGEST_SIZE,
304 .dia_init = rmd128_init,
305 .dia_update = rmd128_update,
306 .dia_final = rmd128_final } }
307};
308
309static int __init rmd128_mod_init(void)
310{
311 return crypto_register_alg(&alg);
312}
313
314static void __exit rmd128_mod_fini(void)
315{
316 crypto_unregister_alg(&alg);
317}
318
319module_init(rmd128_mod_init);
320module_exit(rmd128_mod_fini);
321
322MODULE_LICENSE("GPL");
323MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
324
325MODULE_ALIAS("rmd128");
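
The padlen computation in rmd128_final() (and in the rmd160/256/320 variants below) is the usual Merkle-Damgard padding rule: append one 0x80 byte and enough zero bytes so that, once the 8-byte bit-length field is added, the data is a whole number of 64-byte blocks. Here is a standalone restatement with a few worked values; the rmd_padlen name is ours, not part of the patch.

#include <stdio.h>

/* Bytes of 0x80/0x00 padding needed before the 8-byte length field. */
static unsigned int rmd_padlen(unsigned long long byte_count)
{
	unsigned int index = byte_count & 0x3f;	/* bytes already in the block */

	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}

int main(void)
{
	/* 3-byte message:  3 + 53 + 8 = 64, one final block. */
	printf("padlen(3)  = %u\n", rmd_padlen(3));	/* 53 */
	/* 60-byte message: 60 + 60 + 8 = 128, length spills to a new block. */
	printf("padlen(60) = %u\n", rmd_padlen(60));	/* 60 */
	/* 64-byte message: a block boundary still gets a full padding block. */
	printf("padlen(64) = %u\n", rmd_padlen(64));	/* 56 */
	return 0;
}
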
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
new file mode 100644
index 000000000000..f001ec775e1f
--- /dev/null
+++ b/crypto/rmd160.c
@@ -0,0 +1,369 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd160_ctx {
27 u64 byte_count;
28 u32 state[5];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define K5 RMD_K5
37#define KK1 RMD_K6
38#define KK2 RMD_K7
39#define KK3 RMD_K8
40#define KK4 RMD_K9
41#define KK5 RMD_K1
42
43#define F1(x, y, z) (x ^ y ^ z) /* XOR */
44#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
45#define F3(x, y, z) ((x | ~y) ^ z)
46#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
47#define F5(x, y, z) (x ^ (y | ~z))
48
49#define ROUND(a, b, c, d, e, f, k, x, s) { \
50 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
51 (a) = rol32((a), (s)) + (e); \
52 (c) = rol32((c), 10); \
53}
54
55static void rmd160_transform(u32 *state, const __le32 *in)
56{
57 u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
58
59 /* Initialize left lane */
60 aa = state[0];
61 bb = state[1];
62 cc = state[2];
63 dd = state[3];
64 ee = state[4];
65
66 /* Initialize right lane */
67 aaa = state[0];
68 bbb = state[1];
69 ccc = state[2];
70 ddd = state[3];
71 eee = state[4];
72
73 /* round 1: left lane */
74 ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
75 ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
76 ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
77 ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
78 ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
79 ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
80 ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
81 ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
82 ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
83 ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
84 ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
85 ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
86 ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
87 ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
88 ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
89 ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
90
91	/* round 2: left lane */
92 ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
93 ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
94 ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
95 ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
96 ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
97 ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
98 ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
99 ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
100 ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
101 ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
102 ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
103 ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
104 ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
105 ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
106 ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
107 ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
108
109	/* round 3: left lane */
110 ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
111 ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
112 ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
113 ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
114 ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
115 ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
116 ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
117 ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
118 ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
119 ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
120 ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
121 ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
122 ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
123 ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
124 ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
125 ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
126
127	/* round 4: left lane */
128 ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
129 ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
130 ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
131 ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
132 ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
133 ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
134 ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
135 ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
136 ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
137 ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
138 ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
139 ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
140 ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
141 ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
142 ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
143 ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
144
145	/* round 5: left lane */
146 ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
147 ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
148 ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
149 ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
150 ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
151 ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
152 ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
153 ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
154 ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
155 ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
156 ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
157 ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
158 ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
159 ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
160 ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
161 ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
162
163 /* round 1: right lane */
164 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
165 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
166 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
167 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
168 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
169 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
170 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
171 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
172 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
173 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
174 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
175 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
176 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
177 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
178 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
179 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
180
181 /* round 2: right lane */
182 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
183 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
184 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
185 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
186 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
187 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
188 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
189 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
190 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
191 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
192 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
193 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
194 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
195 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
196 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
197 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
198
199 /* round 3: right lane */
200 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
201 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
202 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
203 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
204 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
205 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
206 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
207 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
208 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
209 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
210 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
211 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
212 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
213 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
214 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
215 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
216
217 /* round 4: right lane */
218 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
219 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
220 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
221 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
222 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
223 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
224 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
225 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
226 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
227 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
228 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
229 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
230 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
231 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
232 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
233 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
234
235 /* round 5: right lane */
236 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
237 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
238 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
239 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
240 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
241 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
242 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
243 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
244 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
245 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
246 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
247 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
248 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
249 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
250 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
251 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
252
253 /* combine results */
254 ddd += cc + state[1]; /* final result for state[0] */
255 state[1] = state[2] + dd + eee;
256 state[2] = state[3] + ee + aaa;
257 state[3] = state[4] + aa + bbb;
258 state[4] = state[0] + bb + ccc;
259 state[0] = ddd;
260
261 return;
262}
263
264static void rmd160_init(struct crypto_tfm *tfm)
265{
266 struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
267
268 rctx->byte_count = 0;
269
270 rctx->state[0] = RMD_H0;
271 rctx->state[1] = RMD_H1;
272 rctx->state[2] = RMD_H2;
273 rctx->state[3] = RMD_H3;
274 rctx->state[4] = RMD_H4;
275
276 memset(rctx->buffer, 0, sizeof(rctx->buffer));
277}
278
279static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
280 unsigned int len)
281{
282 struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
283 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
284
285 rctx->byte_count += len;
286
287 /* Enough space in buffer? If so copy and we're done */
288 if (avail > len) {
289 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
290 data, len);
291 return;
292 }
293
294 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
295 data, avail);
296
297 rmd160_transform(rctx->state, rctx->buffer);
298 data += avail;
299 len -= avail;
300
301 while (len >= sizeof(rctx->buffer)) {
302 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
303 rmd160_transform(rctx->state, rctx->buffer);
304 data += sizeof(rctx->buffer);
305 len -= sizeof(rctx->buffer);
306 }
307
308 memcpy(rctx->buffer, data, len);
309}
310
311/* Add padding and return the message digest. */
312static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
313{
314 struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
315 u32 i, index, padlen;
316 __le64 bits;
317 __le32 *dst = (__le32 *)out;
318 static const u8 padding[64] = { 0x80, };
319
320 bits = cpu_to_le64(rctx->byte_count << 3);
321
322 /* Pad out to 56 mod 64 */
323 index = rctx->byte_count & 0x3f;
324 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
325 rmd160_update(tfm, padding, padlen);
326
327 /* Append length */
328 rmd160_update(tfm, (const u8 *)&bits, sizeof(bits));
329
330 /* Store state in digest */
331 for (i = 0; i < 5; i++)
332 dst[i] = cpu_to_le32p(&rctx->state[i]);
333
334 /* Wipe context */
335 memset(rctx, 0, sizeof(*rctx));
336}
337
338static struct crypto_alg alg = {
339 .cra_name = "rmd160",
340 .cra_driver_name = "rmd160",
341 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
342 .cra_blocksize = RMD160_BLOCK_SIZE,
343 .cra_ctxsize = sizeof(struct rmd160_ctx),
344 .cra_module = THIS_MODULE,
345 .cra_list = LIST_HEAD_INIT(alg.cra_list),
346 .cra_u = { .digest = {
347 .dia_digestsize = RMD160_DIGEST_SIZE,
348 .dia_init = rmd160_init,
349 .dia_update = rmd160_update,
350 .dia_final = rmd160_final } }
351};
352
353static int __init rmd160_mod_init(void)
354{
355 return crypto_register_alg(&alg);
356}
357
358static void __exit rmd160_mod_fini(void)
359{
360 crypto_unregister_alg(&alg);
361}
362
363module_init(rmd160_mod_init);
364module_exit(rmd160_mod_fini);
365
366MODULE_LICENSE("GPL");
367MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
368
369MODULE_ALIAS("rmd160");
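
Once registered, rmd160 is reached through the kernel's generic hash interface rather than by calling the dia_* hooks directly. The sketch below shows roughly what a synchronous in-kernel caller looks like with the hash API of this kernel generation; the helper names (crypto_alloc_hash, crypto_hash_digest, struct hash_desc) are given from memory and should be checked against linux/crypto.h. For reference, the published RIPEMD-160 test vector for the empty message is 9c1185a5c5e9fc54612808977ee8f548b2258d31.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Digest "msg" with rmd160; out must hold RMD160_DIGEST_SIZE (20) bytes.
 * Sketch only - verify the API names against this kernel's crypto headers. */
static int rmd160_digest_buf(const u8 *msg, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_hash("rmd160", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, (u8 *)msg, len);

	err = crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(tfm);
	return err;
}
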
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
new file mode 100644
index 000000000000..e3de5b4cb47f
--- /dev/null
+++ b/crypto/rmd256.c
@@ -0,0 +1,344 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd256_ctx {
27 u64 byte_count;
28 u32 state[8];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define KK1 RMD_K6
37#define KK2 RMD_K7
38#define KK3 RMD_K8
39#define KK4 RMD_K1
40
41#define F1(x, y, z) (x ^ y ^ z) /* XOR */
42#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
43#define F3(x, y, z) ((x | ~y) ^ z)
44#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
45
46#define ROUND(a, b, c, d, f, k, x, s) { \
47 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
48 (a) = rol32((a), (s)); \
49}
50
51static void rmd256_transform(u32 *state, const __le32 *in)
52{
53 u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
54
55 /* Initialize left lane */
56 aa = state[0];
57 bb = state[1];
58 cc = state[2];
59 dd = state[3];
60
61 /* Initialize right lane */
62 aaa = state[4];
63 bbb = state[5];
64 ccc = state[6];
65 ddd = state[7];
66
67 /* round 1: left lane */
68 ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
69 ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
70 ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
71 ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
72 ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
73 ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
74 ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
75 ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
76 ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
77 ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
78 ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
79 ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
80 ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
81 ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
82 ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
83 ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
84
85 /* round 1: right lane */
86 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
87 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
88 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
89 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
90 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
91 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
92 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
93 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
94 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
95 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
96 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
97 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
98 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
99 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
100 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
101 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
102
103 /* Swap contents of "a" registers */
104 tmp = aa; aa = aaa; aaa = tmp;
105
106 /* round 2: left lane */
107 ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
108 ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
109 ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
110 ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
111 ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
112 ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
113 ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
114 ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
115 ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
116 ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
117 ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
118 ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
119 ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
120 ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
121 ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
122 ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
123
124 /* round 2: right lane */
125 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
126 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
127 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
128 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
129 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
130 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
131 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
132 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
133 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
134 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
135 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
136 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
137 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
138 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
139 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
140 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
141
142 /* Swap contents of "b" registers */
143 tmp = bb; bb = bbb; bbb = tmp;
144
145 /* round 3: left lane */
146 ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
147 ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
148 ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
149 ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
150 ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
151 ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
152 ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
153 ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
154 ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
155 ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
156 ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
157 ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
158 ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
159 ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
160 ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
161 ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
162
163 /* round 3: right lane */
164 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
165 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
166 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
167 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
168 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
169 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
170 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
171 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
172 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
173 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
174 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
175 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
176 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
177 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
178 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
179 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
180
181 /* Swap contents of "c" registers */
182 tmp = cc; cc = ccc; ccc = tmp;
183
184 /* round 4: left lane */
185 ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
186 ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
187 ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
188 ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
189 ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
190 ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
191 ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
192 ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
193 ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
194 ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
195 ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
196 ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
197 ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
198 ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
199 ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
200 ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
201
202 /* round 4: right lane */
203 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
204 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
205 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
206 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
207 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
208 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
209 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
210 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
211 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
212 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
213 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
214 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
215 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
216 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
217 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
218 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
219
220 /* Swap contents of "d" registers */
221 tmp = dd; dd = ddd; ddd = tmp;
222
223 /* combine results */
224 state[0] += aa;
225 state[1] += bb;
226 state[2] += cc;
227 state[3] += dd;
228 state[4] += aaa;
229 state[5] += bbb;
230 state[6] += ccc;
231 state[7] += ddd;
232
233 return;
234}
235
236static void rmd256_init(struct crypto_tfm *tfm)
237{
238 struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
239
240 rctx->byte_count = 0;
241
242 rctx->state[0] = RMD_H0;
243 rctx->state[1] = RMD_H1;
244 rctx->state[2] = RMD_H2;
245 rctx->state[3] = RMD_H3;
246 rctx->state[4] = RMD_H5;
247 rctx->state[5] = RMD_H6;
248 rctx->state[6] = RMD_H7;
249 rctx->state[7] = RMD_H8;
250
251 memset(rctx->buffer, 0, sizeof(rctx->buffer));
252}
253
254static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
255 unsigned int len)
256{
257 struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
258 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
259
260 rctx->byte_count += len;
261
262 /* Enough space in buffer? If so copy and we're done */
263 if (avail > len) {
264 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
265 data, len);
266 return;
267 }
268
269 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
270 data, avail);
271
272 rmd256_transform(rctx->state, rctx->buffer);
273 data += avail;
274 len -= avail;
275
276 while (len >= sizeof(rctx->buffer)) {
277 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
278 rmd256_transform(rctx->state, rctx->buffer);
279 data += sizeof(rctx->buffer);
280 len -= sizeof(rctx->buffer);
281 }
282
283 memcpy(rctx->buffer, data, len);
284}
285
286/* Add padding and return the message digest. */
287static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
288{
289 struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
290 u32 i, index, padlen;
291 __le64 bits;
292 __le32 *dst = (__le32 *)out;
293 static const u8 padding[64] = { 0x80, };
294
295 bits = cpu_to_le64(rctx->byte_count << 3);
296
297 /* Pad out to 56 mod 64 */
298 index = rctx->byte_count & 0x3f;
299 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
300 rmd256_update(tfm, padding, padlen);
301
302 /* Append length */
303 rmd256_update(tfm, (const u8 *)&bits, sizeof(bits));
304
305 /* Store state in digest */
306 for (i = 0; i < 8; i++)
307 dst[i] = cpu_to_le32p(&rctx->state[i]);
308
309 /* Wipe context */
310 memset(rctx, 0, sizeof(*rctx));
311}
312
313static struct crypto_alg alg = {
314 .cra_name = "rmd256",
315 .cra_driver_name = "rmd256",
316 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
317 .cra_blocksize = RMD256_BLOCK_SIZE,
318 .cra_ctxsize = sizeof(struct rmd256_ctx),
319 .cra_module = THIS_MODULE,
320 .cra_list = LIST_HEAD_INIT(alg.cra_list),
321 .cra_u = { .digest = {
322 .dia_digestsize = RMD256_DIGEST_SIZE,
323 .dia_init = rmd256_init,
324 .dia_update = rmd256_update,
325 .dia_final = rmd256_final } }
326};
327
328static int __init rmd256_mod_init(void)
329{
330 return crypto_register_alg(&alg);
331}
332
333static void __exit rmd256_mod_fini(void)
334{
335 crypto_unregister_alg(&alg);
336}
337
338module_init(rmd256_mod_init);
339module_exit(rmd256_mod_fini);
340
341MODULE_LICENSE("GPL");
342MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
343
344MODULE_ALIAS("rmd256");
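
Unlike RMD-128/160, the 256- and 320-bit variants keep the two lanes in separate halves of the state and fold them together only through the register swap after each left/right round pair; without the swaps, rmd256_transform() would essentially be RMD-128 computed twice with different initial values. The open-coded tmp swaps could equally be written with a tiny helper, shown here purely as a style alternative and not as part of the patch.

static inline void rmd_swap(u32 *x, u32 *y)
{
	u32 tmp = *x;

	*x = *y;
	*y = tmp;
}

/* e.g.  tmp = aa; aa = aaa; aaa = tmp;  becomes  rmd_swap(&aa, &aaa); */
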
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
new file mode 100644
index 000000000000..b143d66e42c8
--- /dev/null
+++ b/crypto/rmd320.c
@@ -0,0 +1,393 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd320_ctx {
27 u64 byte_count;
28 u32 state[10];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define K5 RMD_K5
37#define KK1 RMD_K6
38#define KK2 RMD_K7
39#define KK3 RMD_K8
40#define KK4 RMD_K9
41#define KK5 RMD_K1
42
43#define F1(x, y, z) (x ^ y ^ z) /* XOR */
44#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
45#define F3(x, y, z) ((x | ~y) ^ z)
46#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
47#define F5(x, y, z) (x ^ (y | ~z))
48
49#define ROUND(a, b, c, d, e, f, k, x, s) { \
50 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
51 (a) = rol32((a), (s)) + (e); \
52 (c) = rol32((c), 10); \
53}
54
55static void rmd320_transform(u32 *state, const __le32 *in)
56{
57 u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
58
59 /* Initialize left lane */
60 aa = state[0];
61 bb = state[1];
62 cc = state[2];
63 dd = state[3];
64 ee = state[4];
65
66 /* Initialize right lane */
67 aaa = state[5];
68 bbb = state[6];
69 ccc = state[7];
70 ddd = state[8];
71 eee = state[9];
72
73 /* round 1: left lane */
74 ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
75 ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
76 ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
77 ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
78 ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
79 ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
80 ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
81 ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
82 ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
83 ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
84 ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
85 ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
86 ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
87 ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
88 ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
89 ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
90
91 /* round 1: right lane */
92 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
93 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
94 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
95 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
96 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
97 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
98 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
99 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
100 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
101 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
102 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
103 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
104 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
105 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
106 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
107 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
108
109 /* Swap contents of "a" registers */
110 tmp = aa; aa = aaa; aaa = tmp;
111
112	/* round 2: left lane */
113 ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
114 ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
115 ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
116 ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
117 ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
118 ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
119 ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
120 ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
121 ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
122 ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
123 ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
124 ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
125 ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
126 ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
127 ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
128 ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
129
130 /* round 2: right lane */
131 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
132 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
133 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
134 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
135 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
136 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
137 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
138 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
139 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
140 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
141 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
142 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
143 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
144 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
145 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
146 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
147
148 /* Swap contents of "b" registers */
149 tmp = bb; bb = bbb; bbb = tmp;
150
151	/* round 3: left lane */
152 ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
153 ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
154 ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
155 ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
156 ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
157 ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
158 ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
159 ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
160 ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
161 ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
162 ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
163 ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
164 ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
165 ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
166 ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
167 ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
168
169 /* round 3: right lane */
170 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
171 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
172 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
173 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
174 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
175 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
176 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
177 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
178 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
179 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
180 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
181 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
182 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
183 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
184 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
185 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
186
187 /* Swap contents of "c" registers */
188 tmp = cc; cc = ccc; ccc = tmp;
189
190	/* round 4: left lane */
191 ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
192 ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
193 ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
194 ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
195 ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
196 ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
197 ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
198 ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
199 ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
200 ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
201 ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
202 ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
203 ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
204 ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
205 ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
206 ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
207
208 /* round 4: right lane */
209 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
210 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
211 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
212 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
213 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
214 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
215 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
216 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
217 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
218 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
219 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
220 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
221 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
222 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
223 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
224 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
225
226 /* Swap contents of "d" registers */
227 tmp = dd; dd = ddd; ddd = tmp;
228
229	/* round 5: left lane */
230 ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
231 ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
232 ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
233 ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
234 ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
235 ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
236 ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
237 ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
238 ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
239 ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
240 ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
241 ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
242 ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
243 ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
244 ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
245 ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
246
247 /* round 5: right lane */
248 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
249 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
250 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
251 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
252 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
253 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
254 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
255 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
256 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
257 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
258 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
259 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
260 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
261 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
262 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
263 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
264
265 /* Swap contents of "e" registers */
266 tmp = ee; ee = eee; eee = tmp;
267
268 /* combine results */
269 state[0] += aa;
270 state[1] += bb;
271 state[2] += cc;
272 state[3] += dd;
273 state[4] += ee;
274 state[5] += aaa;
275 state[6] += bbb;
276 state[7] += ccc;
277 state[8] += ddd;
278 state[9] += eee;
279
280 return;
281}
282
283static void rmd320_init(struct crypto_tfm *tfm)
284{
285 struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
286
287 rctx->byte_count = 0;
288
289 rctx->state[0] = RMD_H0;
290 rctx->state[1] = RMD_H1;
291 rctx->state[2] = RMD_H2;
292 rctx->state[3] = RMD_H3;
293 rctx->state[4] = RMD_H4;
294 rctx->state[5] = RMD_H5;
295 rctx->state[6] = RMD_H6;
296 rctx->state[7] = RMD_H7;
297 rctx->state[8] = RMD_H8;
298 rctx->state[9] = RMD_H9;
299
300 memset(rctx->buffer, 0, sizeof(rctx->buffer));
301}
302
303static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
304 unsigned int len)
305{
306 struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
307 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
308
309 rctx->byte_count += len;
310
311 /* Enough space in buffer? If so copy and we're done */
312 if (avail > len) {
313 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
314 data, len);
315 return;
316 }
317
318 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
319 data, avail);
320
321 rmd320_transform(rctx->state, rctx->buffer);
322 data += avail;
323 len -= avail;
324
325 while (len >= sizeof(rctx->buffer)) {
326 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
327 rmd320_transform(rctx->state, rctx->buffer);
328 data += sizeof(rctx->buffer);
329 len -= sizeof(rctx->buffer);
330 }
331
332 memcpy(rctx->buffer, data, len);
333}
334
335/* Add padding and return the message digest. */
336static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
337{
338 struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
339 u32 i, index, padlen;
340 __le64 bits;
341 __le32 *dst = (__le32 *)out;
342 static const u8 padding[64] = { 0x80, };
343
344 bits = cpu_to_le64(rctx->byte_count << 3);
345
346 /* Pad out to 56 mod 64 */
347 index = rctx->byte_count & 0x3f;
348 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
349 rmd320_update(tfm, padding, padlen);
350
351 /* Append length */
352 rmd320_update(tfm, (const u8 *)&bits, sizeof(bits));
353
354 /* Store state in digest */
355 for (i = 0; i < 10; i++)
356 dst[i] = cpu_to_le32p(&rctx->state[i]);
357
358 /* Wipe context */
359 memset(rctx, 0, sizeof(*rctx));
360}
361
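As a quick sanity check on the padding rule in rmd320_final() above, the following standalone sketch (not part of the patch; rmd_pad_len() is a hypothetical helper) reproduces the padlen arithmetic and shows that the padded message always ends on a 64-byte block boundary once the 8-byte length is appended:

#include <linux/types.h>

/* Hypothetical helper, illustration only: how many padding bytes
 * rmd320_final() feeds to rmd320_update() before the 8-byte length. */
static u32 rmd_pad_len(u64 byte_count)
{
	u32 index = byte_count & 0x3f;

	/* byte_count = 3  ("abc"): index = 3,  padlen = 53, 3 + 53 + 8 = 64   */
	/* byte_count = 56:         index = 56, padlen = 64, 56 + 64 + 8 = 128 */
	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}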
362static struct crypto_alg alg = {
363 .cra_name = "rmd320",
364 .cra_driver_name = "rmd320",
365 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
366 .cra_blocksize = RMD320_BLOCK_SIZE,
367 .cra_ctxsize = sizeof(struct rmd320_ctx),
368 .cra_module = THIS_MODULE,
369 .cra_list = LIST_HEAD_INIT(alg.cra_list),
370 .cra_u = { .digest = {
371 .dia_digestsize = RMD320_DIGEST_SIZE,
372 .dia_init = rmd320_init,
373 .dia_update = rmd320_update,
374 .dia_final = rmd320_final } }
375};
376
377static int __init rmd320_mod_init(void)
378{
379 return crypto_register_alg(&alg);
380}
381
382static void __exit rmd320_mod_fini(void)
383{
384 crypto_unregister_alg(&alg);
385}
386
387module_init(rmd320_mod_init);
388module_exit(rmd320_mod_fini);
389
390MODULE_LICENSE("GPL");
391MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
392
393MODULE_ALIAS("rmd320");
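Once registered, the "rmd320" digest is reachable through the same synchronous hash API that tcrypt used before this series (the crypto_hash_* calls removed further down). A minimal sketch of an in-kernel caller, assuming error handling is trimmed for brevity; rmd320_digest_example() is a hypothetical name, not part of the patch:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int rmd320_digest_example(u8 *data, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("rmd320", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, data, len);
	/* out must have room for RMD320_DIGEST_SIZE (40) bytes */
	ret = crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(tfm);
	return ret;
}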
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 6beabc5abd07..59821a22d752 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -13,15 +13,9 @@
13 * Software Foundation; either version 2 of the License, or (at your option) 13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 * 2007-11-13 Added GCM tests
17 * 2007-11-13 Added AEAD support
18 * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
19 * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
20 * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
21 * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
22 *
23 */ 16 */
24 17
18#include <crypto/hash.h>
25#include <linux/err.h> 19#include <linux/err.h>
26#include <linux/init.h> 20#include <linux/init.h>
27#include <linux/module.h> 21#include <linux/module.h>
@@ -30,7 +24,6 @@
30#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
31#include <linux/string.h> 25#include <linux/string.h>
32#include <linux/crypto.h> 26#include <linux/crypto.h>
33#include <linux/highmem.h>
34#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
35#include <linux/jiffies.h> 28#include <linux/jiffies.h>
36#include <linux/timex.h> 29#include <linux/timex.h>
@@ -38,7 +31,7 @@
38#include "tcrypt.h" 31#include "tcrypt.h"
39 32
40/* 33/*
41 * Need to kmalloc() memory for testing kmap(). 34 * Need to kmalloc() memory for testing.
42 */ 35 */
43#define TVMEMSIZE 16384 36#define TVMEMSIZE 16384
44#define XBUFSIZE 32768 37#define XBUFSIZE 32768
@@ -46,7 +39,7 @@
46/* 39/*
47 * Indexes into the xbuf to simulate cross-page access. 40 * Indexes into the xbuf to simulate cross-page access.
48 */ 41 */
49#define IDX1 37 42#define IDX1 32
50#define IDX2 32400 43#define IDX2 32400
51#define IDX3 1 44#define IDX3 1
52#define IDX4 8193 45#define IDX4 8193
@@ -83,7 +76,8 @@ static char *check[] = {
83 "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", 76 "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
84 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 77 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
85 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", 78 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
86 "camellia", "seed", "salsa20", "lzo", "cts", NULL 79 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
80 "lzo", "cts", NULL
87}; 81};
88 82
89static void hexdump(unsigned char *buf, unsigned int len) 83static void hexdump(unsigned char *buf, unsigned int len)
@@ -110,22 +104,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
110 unsigned int i, j, k, temp; 104 unsigned int i, j, k, temp;
111 struct scatterlist sg[8]; 105 struct scatterlist sg[8];
112 char result[64]; 106 char result[64];
113 struct crypto_hash *tfm; 107 struct crypto_ahash *tfm;
114 struct hash_desc desc; 108 struct ahash_request *req;
109 struct tcrypt_result tresult;
115 int ret; 110 int ret;
116 void *hash_buff; 111 void *hash_buff;
117 112
118 printk("\ntesting %s\n", algo); 113 printk("\ntesting %s\n", algo);
119 114
120 tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); 115 init_completion(&tresult.completion);
116
117 tfm = crypto_alloc_ahash(algo, 0, 0);
121 if (IS_ERR(tfm)) { 118 if (IS_ERR(tfm)) {
122 printk("failed to load transform for %s: %ld\n", algo, 119 printk("failed to load transform for %s: %ld\n", algo,
123 PTR_ERR(tfm)); 120 PTR_ERR(tfm));
124 return; 121 return;
125 } 122 }
126 123
127 desc.tfm = tfm; 124 req = ahash_request_alloc(tfm, GFP_KERNEL);
128 desc.flags = 0; 125 if (!req) {
126 printk(KERN_ERR "failed to allocate request for %s\n", algo);
127 goto out_noreq;
128 }
129 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
130 tcrypt_complete, &tresult);
129 131
130 for (i = 0; i < tcount; i++) { 132 for (i = 0; i < tcount; i++) {
131 printk("test %u:\n", i + 1); 133 printk("test %u:\n", i + 1);
@@ -139,8 +141,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
139 sg_init_one(&sg[0], hash_buff, template[i].psize); 141 sg_init_one(&sg[0], hash_buff, template[i].psize);
140 142
141 if (template[i].ksize) { 143 if (template[i].ksize) {
142 ret = crypto_hash_setkey(tfm, template[i].key, 144 crypto_ahash_clear_flags(tfm, ~0);
143 template[i].ksize); 145 ret = crypto_ahash_setkey(tfm, template[i].key,
146 template[i].ksize);
144 if (ret) { 147 if (ret) {
145 printk("setkey() failed ret=%d\n", ret); 148 printk("setkey() failed ret=%d\n", ret);
146 kfree(hash_buff); 149 kfree(hash_buff);
@@ -148,17 +151,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
148 } 151 }
149 } 152 }
150 153
151 ret = crypto_hash_digest(&desc, sg, template[i].psize, result); 154 ahash_request_set_crypt(req, sg, result, template[i].psize);
152 if (ret) { 155 ret = crypto_ahash_digest(req);
156 switch (ret) {
157 case 0:
158 break;
159 case -EINPROGRESS:
160 case -EBUSY:
161 ret = wait_for_completion_interruptible(
162 &tresult.completion);
163 if (!ret && !(ret = tresult.err)) {
164 INIT_COMPLETION(tresult.completion);
165 break;
166 }
167 /* fall through */
168 default:
153 printk("digest () failed ret=%d\n", ret); 169 printk("digest () failed ret=%d\n", ret);
154 kfree(hash_buff); 170 kfree(hash_buff);
155 goto out; 171 goto out;
156 } 172 }
157 173
158 hexdump(result, crypto_hash_digestsize(tfm)); 174 hexdump(result, crypto_ahash_digestsize(tfm));
159 printk("%s\n", 175 printk("%s\n",
160 memcmp(result, template[i].digest, 176 memcmp(result, template[i].digest,
161 crypto_hash_digestsize(tfm)) ? 177 crypto_ahash_digestsize(tfm)) ?
162 "fail" : "pass"); 178 "fail" : "pass");
163 kfree(hash_buff); 179 kfree(hash_buff);
164 } 180 }
@@ -187,8 +203,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
187 } 203 }
188 204
189 if (template[i].ksize) { 205 if (template[i].ksize) {
190 ret = crypto_hash_setkey(tfm, template[i].key, 206 crypto_ahash_clear_flags(tfm, ~0);
191 template[i].ksize); 207 ret = crypto_ahash_setkey(tfm, template[i].key,
208 template[i].ksize);
192 209
193 if (ret) { 210 if (ret) {
194 printk("setkey() failed ret=%d\n", ret); 211 printk("setkey() failed ret=%d\n", ret);
@@ -196,29 +213,44 @@ static void test_hash(char *algo, struct hash_testvec *template,
196 } 213 }
197 } 214 }
198 215
199 ret = crypto_hash_digest(&desc, sg, template[i].psize, 216 ahash_request_set_crypt(req, sg, result,
200 result); 217 template[i].psize);
201 if (ret) { 218 ret = crypto_ahash_digest(req);
219 switch (ret) {
220 case 0:
221 break;
222 case -EINPROGRESS:
223 case -EBUSY:
224 ret = wait_for_completion_interruptible(
225 &tresult.completion);
226 if (!ret && !(ret = tresult.err)) {
227 INIT_COMPLETION(tresult.completion);
228 break;
229 }
230 /* fall through */
231 default:
202 printk("digest () failed ret=%d\n", ret); 232 printk("digest () failed ret=%d\n", ret);
203 goto out; 233 goto out;
204 } 234 }
205 235
206 hexdump(result, crypto_hash_digestsize(tfm)); 236 hexdump(result, crypto_ahash_digestsize(tfm));
207 printk("%s\n", 237 printk("%s\n",
208 memcmp(result, template[i].digest, 238 memcmp(result, template[i].digest,
209 crypto_hash_digestsize(tfm)) ? 239 crypto_ahash_digestsize(tfm)) ?
210 "fail" : "pass"); 240 "fail" : "pass");
211 } 241 }
212 } 242 }
213 243
214out: 244out:
215 crypto_free_hash(tfm); 245 ahash_request_free(req);
246out_noreq:
247 crypto_free_ahash(tfm);
216} 248}
217 249
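The conversion above follows the usual ahash submission pattern: issue the request, and if the backend went asynchronous (-EINPROGRESS or -EBUSY), sleep on the completion that the request callback signals. A condensed sketch of that pattern, reusing tcrypt's existing struct tcrypt_result and tcrypt_complete() helpers (do_one_ahash_digest() is a hypothetical name, not part of the patch):

static int do_one_ahash_digest(struct ahash_request *req,
			       struct tcrypt_result *tr)
{
	int ret = crypto_ahash_digest(req);

	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* wait for the callback to report the real status */
		ret = wait_for_completion_interruptible(&tr->completion);
		if (!ret)
			ret = tr->err;
		INIT_COMPLETION(tr->completion);	/* re-arm for the next test */
	}

	return ret;
}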
218static void test_aead(char *algo, int enc, struct aead_testvec *template, 250static void test_aead(char *algo, int enc, struct aead_testvec *template,
219 unsigned int tcount) 251 unsigned int tcount)
220{ 252{
221 unsigned int ret, i, j, k, temp; 253 unsigned int ret, i, j, k, n, temp;
222 char *q; 254 char *q;
223 struct crypto_aead *tfm; 255 struct crypto_aead *tfm;
224 char *key; 256 char *key;
@@ -344,13 +376,12 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
344 goto next_one; 376 goto next_one;
345 } 377 }
346 378
347 q = kmap(sg_page(&sg[0])) + sg[0].offset; 379 q = input;
348 hexdump(q, template[i].rlen); 380 hexdump(q, template[i].rlen);
349 381
350 printk(KERN_INFO "enc/dec: %s\n", 382 printk(KERN_INFO "enc/dec: %s\n",
351 memcmp(q, template[i].result, 383 memcmp(q, template[i].result,
352 template[i].rlen) ? "fail" : "pass"); 384 template[i].rlen) ? "fail" : "pass");
353 kunmap(sg_page(&sg[0]));
354next_one: 385next_one:
355 if (!template[i].key) 386 if (!template[i].key)
356 kfree(key); 387 kfree(key);
@@ -360,7 +391,6 @@ next_one:
360 } 391 }
361 392
362 printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e); 393 printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
363 memset(xbuf, 0, XBUFSIZE);
364 memset(axbuf, 0, XBUFSIZE); 394 memset(axbuf, 0, XBUFSIZE);
365 395
366 for (i = 0, j = 0; i < tcount; i++) { 396 for (i = 0, j = 0; i < tcount; i++) {
@@ -388,6 +418,7 @@ next_one:
388 goto out; 418 goto out;
389 } 419 }
390 420
421 memset(xbuf, 0, XBUFSIZE);
391 sg_init_table(sg, template[i].np); 422 sg_init_table(sg, template[i].np);
392 for (k = 0, temp = 0; k < template[i].np; k++) { 423 for (k = 0, temp = 0; k < template[i].np; k++) {
393 memcpy(&xbuf[IDX[k]], 424 memcpy(&xbuf[IDX[k]],
@@ -450,7 +481,7 @@ next_one:
450 481
451 for (k = 0, temp = 0; k < template[i].np; k++) { 482 for (k = 0, temp = 0; k < template[i].np; k++) {
452 printk(KERN_INFO "page %u\n", k); 483 printk(KERN_INFO "page %u\n", k);
453 q = kmap(sg_page(&sg[k])) + sg[k].offset; 484 q = &axbuf[IDX[k]];
454 hexdump(q, template[i].tap[k]); 485 hexdump(q, template[i].tap[k]);
455 printk(KERN_INFO "%s\n", 486 printk(KERN_INFO "%s\n",
456 memcmp(q, template[i].result + temp, 487 memcmp(q, template[i].result + temp,
@@ -459,8 +490,15 @@ next_one:
459 0 : authsize)) ? 490 0 : authsize)) ?
460 "fail" : "pass"); 491 "fail" : "pass");
461 492
493 for (n = 0; q[template[i].tap[k] + n]; n++)
494 ;
495 if (n) {
496 printk("Result buffer corruption %u "
497 "bytes:\n", n);
498 hexdump(&q[template[i].tap[k]], n);
499 }
500
462 temp += template[i].tap[k]; 501 temp += template[i].tap[k];
463 kunmap(sg_page(&sg[k]));
464 } 502 }
465 } 503 }
466 } 504 }
@@ -473,7 +511,7 @@ out:
473static void test_cipher(char *algo, int enc, 511static void test_cipher(char *algo, int enc,
474 struct cipher_testvec *template, unsigned int tcount) 512 struct cipher_testvec *template, unsigned int tcount)
475{ 513{
476 unsigned int ret, i, j, k, temp; 514 unsigned int ret, i, j, k, n, temp;
477 char *q; 515 char *q;
478 struct crypto_ablkcipher *tfm; 516 struct crypto_ablkcipher *tfm;
479 struct ablkcipher_request *req; 517 struct ablkcipher_request *req;
@@ -569,29 +607,21 @@ static void test_cipher(char *algo, int enc,
569 goto out; 607 goto out;
570 } 608 }
571 609
572 q = kmap(sg_page(&sg[0])) + sg[0].offset; 610 q = data;
573 hexdump(q, template[i].rlen); 611 hexdump(q, template[i].rlen);
574 612
575 printk("%s\n", 613 printk("%s\n",
576 memcmp(q, template[i].result, 614 memcmp(q, template[i].result,
577 template[i].rlen) ? "fail" : "pass"); 615 template[i].rlen) ? "fail" : "pass");
578 kunmap(sg_page(&sg[0]));
579 } 616 }
580 kfree(data); 617 kfree(data);
581 } 618 }
582 619
583 printk("\ntesting %s %s across pages (chunking)\n", algo, e); 620 printk("\ntesting %s %s across pages (chunking)\n", algo, e);
584 memset(xbuf, 0, XBUFSIZE);
585 621
586 j = 0; 622 j = 0;
587 for (i = 0; i < tcount; i++) { 623 for (i = 0; i < tcount; i++) {
588 624
589 data = kzalloc(template[i].ilen, GFP_KERNEL);
590 if (!data)
591 continue;
592
593 memcpy(data, template[i].input, template[i].ilen);
594
595 if (template[i].iv) 625 if (template[i].iv)
596 memcpy(iv, template[i].iv, MAX_IVLEN); 626 memcpy(iv, template[i].iv, MAX_IVLEN);
597 else 627 else
@@ -602,6 +632,7 @@ static void test_cipher(char *algo, int enc,
602 printk("test %u (%d bit key):\n", 632 printk("test %u (%d bit key):\n",
603 j, template[i].klen * 8); 633 j, template[i].klen * 8);
604 634
635 memset(xbuf, 0, XBUFSIZE);
605 crypto_ablkcipher_clear_flags(tfm, ~0); 636 crypto_ablkcipher_clear_flags(tfm, ~0);
606 if (template[i].wk) 637 if (template[i].wk)
607 crypto_ablkcipher_set_flags( 638 crypto_ablkcipher_set_flags(
@@ -613,10 +644,8 @@ static void test_cipher(char *algo, int enc,
613 printk("setkey() failed flags=%x\n", 644 printk("setkey() failed flags=%x\n",
614 crypto_ablkcipher_get_flags(tfm)); 645 crypto_ablkcipher_get_flags(tfm));
615 646
616 if (!template[i].fail) { 647 if (!template[i].fail)
617 kfree(data);
618 goto out; 648 goto out;
619 }
620 } 649 }
621 650
622 temp = 0; 651 temp = 0;
@@ -657,14 +686,21 @@ static void test_cipher(char *algo, int enc,
657 temp = 0; 686 temp = 0;
658 for (k = 0; k < template[i].np; k++) { 687 for (k = 0; k < template[i].np; k++) {
659 printk("page %u\n", k); 688 printk("page %u\n", k);
660 q = kmap(sg_page(&sg[k])) + sg[k].offset; 689 q = &xbuf[IDX[k]];
661 hexdump(q, template[i].tap[k]); 690 hexdump(q, template[i].tap[k]);
662 printk("%s\n", 691 printk("%s\n",
663 memcmp(q, template[i].result + temp, 692 memcmp(q, template[i].result + temp,
664 template[i].tap[k]) ? "fail" : 693 template[i].tap[k]) ? "fail" :
665 "pass"); 694 "pass");
695
696 for (n = 0; q[template[i].tap[k] + n]; n++)
697 ;
698 if (n) {
699 printk("Result buffer corruption %u "
700 "bytes:\n", n);
701 hexdump(&q[template[i].tap[k]], n);
702 }
666 temp += template[i].tap[k]; 703 temp += template[i].tap[k];
667 kunmap(sg_page(&sg[k]));
668 } 704 }
669 } 705 }
670 } 706 }
@@ -1180,6 +1216,14 @@ static void do_test(void)
1180 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, 1216 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1181 DES3_EDE_DEC_TEST_VECTORS); 1217 DES3_EDE_DEC_TEST_VECTORS);
1182 1218
1219 test_cipher("cbc(des3_ede)", ENCRYPT,
1220 des3_ede_cbc_enc_tv_template,
1221 DES3_EDE_CBC_ENC_TEST_VECTORS);
1222
1223 test_cipher("cbc(des3_ede)", DECRYPT,
1224 des3_ede_cbc_dec_tv_template,
1225 DES3_EDE_CBC_DEC_TEST_VECTORS);
1226
1183 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); 1227 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
1184 1228
1185 test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); 1229 test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
@@ -1390,6 +1434,14 @@ static void do_test(void)
1390 DES3_EDE_ENC_TEST_VECTORS); 1434 DES3_EDE_ENC_TEST_VECTORS);
1391 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, 1435 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1392 DES3_EDE_DEC_TEST_VECTORS); 1436 DES3_EDE_DEC_TEST_VECTORS);
1437
1438 test_cipher("cbc(des3_ede)", ENCRYPT,
1439 des3_ede_cbc_enc_tv_template,
1440 DES3_EDE_CBC_ENC_TEST_VECTORS);
1441
1442 test_cipher("cbc(des3_ede)", DECRYPT,
1443 des3_ede_cbc_dec_tv_template,
1444 DES3_EDE_CBC_DEC_TEST_VECTORS);
1393 break; 1445 break;
1394 1446
1395 case 5: 1447 case 5:
@@ -1558,7 +1610,7 @@ static void do_test(void)
1558 case 29: 1610 case 29:
1559 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); 1611 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
1560 break; 1612 break;
1561 1613
1562 case 30: 1614 case 30:
1563 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, 1615 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
1564 XETA_ENC_TEST_VECTORS); 1616 XETA_ENC_TEST_VECTORS);
@@ -1623,6 +1675,22 @@ static void do_test(void)
1623 CTS_MODE_DEC_TEST_VECTORS); 1675 CTS_MODE_DEC_TEST_VECTORS);
1624 break; 1676 break;
1625 1677
1678 case 39:
1679 test_hash("rmd128", rmd128_tv_template, RMD128_TEST_VECTORS);
1680 break;
1681
1682 case 40:
1683 test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS);
1684 break;
1685
1686 case 41:
1687 test_hash("rmd256", rmd256_tv_template, RMD256_TEST_VECTORS);
1688 break;
1689
1690 case 42:
1691 test_hash("rmd320", rmd320_tv_template, RMD320_TEST_VECTORS);
1692 break;
1693
1626 case 100: 1694 case 100:
1627 test_hash("hmac(md5)", hmac_md5_tv_template, 1695 test_hash("hmac(md5)", hmac_md5_tv_template,
1628 HMAC_MD5_TEST_VECTORS); 1696 HMAC_MD5_TEST_VECTORS);
@@ -1658,6 +1726,16 @@ static void do_test(void)
1658 XCBC_AES_TEST_VECTORS); 1726 XCBC_AES_TEST_VECTORS);
1659 break; 1727 break;
1660 1728
1729 case 107:
1730 test_hash("hmac(rmd128)", hmac_rmd128_tv_template,
1731 HMAC_RMD128_TEST_VECTORS);
1732 break;
1733
1734 case 108:
1735 test_hash("hmac(rmd160)", hmac_rmd160_tv_template,
1736 HMAC_RMD160_TEST_VECTORS);
1737 break;
1738
1661 case 200: 1739 case 200:
1662 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1740 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
1663 speed_template_16_24_32); 1741 speed_template_16_24_32);
@@ -1796,6 +1874,22 @@ static void do_test(void)
1796 test_hash_speed("sha224", sec, generic_hash_speed_template); 1874 test_hash_speed("sha224", sec, generic_hash_speed_template);
1797 if (mode > 300 && mode < 400) break; 1875 if (mode > 300 && mode < 400) break;
1798 1876
1877 case 314:
1878 test_hash_speed("rmd128", sec, generic_hash_speed_template);
1879 if (mode > 300 && mode < 400) break;
1880
1881 case 315:
1882 test_hash_speed("rmd160", sec, generic_hash_speed_template);
1883 if (mode > 300 && mode < 400) break;
1884
1885 case 316:
1886 test_hash_speed("rmd256", sec, generic_hash_speed_template);
1887 if (mode > 300 && mode < 400) break;
1888
1889 case 317:
1890 test_hash_speed("rmd320", sec, generic_hash_speed_template);
1891 if (mode > 300 && mode < 400) break;
1892
1799 case 399: 1893 case 399:
1800 break; 1894 break;
1801 1895
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 47bc0ecb8978..801e0c288862 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -13,12 +13,6 @@
13 * Software Foundation; either version 2 of the License, or (at your option) 13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 * 2007-11-13 Added GCM tests
17 * 2007-11-13 Added AEAD support
18 * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
19 * 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
20 * 2003-09-14 Changes by Kartikey Mahendra Bhatt
21 *
22 */ 16 */
23#ifndef _CRYPTO_TCRYPT_H 17#ifndef _CRYPTO_TCRYPT_H
24#define _CRYPTO_TCRYPT_H 18#define _CRYPTO_TCRYPT_H
@@ -168,6 +162,271 @@ static struct hash_testvec md5_tv_template[] = {
168 .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55" 162 .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55"
169 "\xac\x49\xda\x2e\x21\x07\xb6\x7a", 163 "\xac\x49\xda\x2e\x21\x07\xb6\x7a",
170 } 164 }
165
166};
167
168/*
169 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
170 */
171#define RMD128_TEST_VECTORS 10
172
173static struct hash_testvec rmd128_tv_template[] = {
174 {
175 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
176 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
177 }, {
178 .plaintext = "a",
179 .psize = 1,
180 .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
181 "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
182 }, {
183 .plaintext = "abc",
184 .psize = 3,
185 .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
186 "\x84\x63\x6b\x0f\x69\x14\x4c\x77",
187 }, {
188 .plaintext = "message digest",
189 .psize = 14,
190 .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
191 "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
192 }, {
193 .plaintext = "abcdefghijklmnopqrstuvwxyz",
194 .psize = 26,
195 .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
196 "\x10\x71\x49\x22\xb3\x71\x83\x4e",
197 }, {
198 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
199 "fghijklmnopqrstuvwxyz0123456789",
200 .psize = 62,
201 .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
202 "\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
203 }, {
204 .plaintext = "1234567890123456789012345678901234567890"
205 "1234567890123456789012345678901234567890",
206 .psize = 80,
207 .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
208 "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
209 }, {
210 .plaintext = "abcdbcdecdefdefgefghfghighij"
211 "hijkijkljklmklmnlmnomnopnopq",
212 .psize = 56,
213 .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
214 "\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
215 .np = 2,
216 .tap = { 28, 28 },
217 }, {
218 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
219 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
220 "lmnopqrsmnopqrstnopqrstu",
221 .psize = 112,
222 .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
223 "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
224 }, {
225 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
226 .psize = 32,
227 .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
228 "\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
229 }
230};
231
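The .np/.tap fields in these templates tell test_hash() to split the plaintext across several scatterlist segments of the given sizes, exercising cross-page hashing. A hypothetical extra entry (illustration only, not an additional ISO vector) that re-feeds the 62-byte vector above in three chunks would look like this; the digest is unchanged because chunking must not affect the result:

	{	/* hypothetical chunked variant of the 62-byte vector above */
		.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
			     "fghijklmnopqrstuvwxyz0123456789",
		.psize	= 62,
		.digest	= "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
			  "\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
		.np	= 3,
		.tap	= { 20, 20, 22 },	/* 20 + 20 + 22 == 62 */
	},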
232/*
233 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
234 */
235#define RMD160_TEST_VECTORS 10
236
237static struct hash_testvec rmd160_tv_template[] = {
238 {
239 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
240 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
241 }, {
242 .plaintext = "a",
243 .psize = 1,
244 .digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae"
245 "\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe",
246 }, {
247 .plaintext = "abc",
248 .psize = 3,
249 .digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04"
250 "\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc",
251 }, {
252 .plaintext = "message digest",
253 .psize = 14,
254 .digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8"
255 "\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36",
256 }, {
257 .plaintext = "abcdefghijklmnopqrstuvwxyz",
258 .psize = 26,
259 .digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb"
260 "\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc",
261 }, {
262 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
263 "fghijklmnopqrstuvwxyz0123456789",
264 .psize = 62,
265 .digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed"
266 "\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89",
267 }, {
268 .plaintext = "1234567890123456789012345678901234567890"
269 "1234567890123456789012345678901234567890",
270 .psize = 80,
271 .digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb"
272 "\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb",
273 }, {
274 .plaintext = "abcdbcdecdefdefgefghfghighij"
275 "hijkijkljklmklmnlmnomnopnopq",
276 .psize = 56,
277 .digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
278 "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
279 .np = 2,
280 .tap = { 28, 28 },
281 }, {
282 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
283 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
284 "lmnopqrsmnopqrstnopqrstu",
285 .psize = 112,
286 .digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91"
287 "\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45",
288 }, {
289 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
290 .psize = 32,
291 .digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d"
292 "\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f",
293 }
294};
295
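These vectors can also be cross-checked outside the kernel; assuming an OpenSSL build that still provides the one-shot RIPEMD160() helper from <openssl/ripemd.h>, a small userspace program should reproduce the "abc" digest above:

#include <openssl/ripemd.h>
#include <stdio.h>

int main(void)
{
	unsigned char md[RIPEMD160_DIGEST_LENGTH];
	int i;

	RIPEMD160((const unsigned char *)"abc", 3, md);
	for (i = 0; i < RIPEMD160_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);	/* expect 8eb208f7...f15a0bfc */
	printf("\n");
	return 0;
}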
296/*
297 * RIPEMD-256 test vectors
298 */
299#define RMD256_TEST_VECTORS 8
300
301static struct hash_testvec rmd256_tv_template[] = {
302 {
303 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
304 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
305 "\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
306 "\x80\xae\x01\x68\xe3\xc5\x52\x2d",
307 }, {
308 .plaintext = "a",
309 .psize = 1,
310 .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
311 "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
312 "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
313 "\xcd\x88\x3a\x91\x34\x69\x29\x25",
314 }, {
315 .plaintext = "abc",
316 .psize = 3,
317 .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
318 "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
319 "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
320 "\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
321 }, {
322 .plaintext = "message digest",
323 .psize = 14,
324 .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
325 "\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
326 "\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
327 "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
328 }, {
329 .plaintext = "abcdefghijklmnopqrstuvwxyz",
330 .psize = 26,
331 .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
332 "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
333 "\x78\x96\x11\x8a\x51\x97\x96\x87"
334 "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
335 }, {
336 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
337 "fghijklmnopqrstuvwxyz0123456789",
338 .psize = 62,
339 .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
340 "\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
341 "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
342 "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
343 }, {
344 .plaintext = "1234567890123456789012345678901234567890"
345 "1234567890123456789012345678901234567890",
346 .psize = 80,
347 .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
348 "\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
349 "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
350 "\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
351 }, {
352 .plaintext = "abcdbcdecdefdefgefghfghighij"
353 "hijkijkljklmklmnlmnomnopnopq",
354 .psize = 56,
355 .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
356 "\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
357 "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
358 "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
359 .np = 2,
360 .tap = { 28, 28 },
361 }
362};
363
364/*
365 * RIPEMD-320 test vectors
366 */
367#define RMD320_TEST_VECTORS 8
368
369static struct hash_testvec rmd320_tv_template[] = {
370 {
371 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
372 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
373 "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
374 "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
375 }, {
376 .plaintext = "a",
377 .psize = 1,
378 .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
379 "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
380 "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
381 "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
382 }, {
383 .plaintext = "abc",
384 .psize = 3,
385 .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
386 "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
387 "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
388 "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
389 }, {
390 .plaintext = "message digest",
391 .psize = 14,
392 .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
393 "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
394 "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
395 "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
396 }, {
397 .plaintext = "abcdefghijklmnopqrstuvwxyz",
398 .psize = 26,
399 .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
400 "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
401 "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
402 "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
403 }, {
404 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
405 "fghijklmnopqrstuvwxyz0123456789",
406 .psize = 62,
407 .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
408 "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
409 "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
410 "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
411 }, {
412 .plaintext = "1234567890123456789012345678901234567890"
413 "1234567890123456789012345678901234567890",
414 .psize = 80,
415 .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
416 "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
417 "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
418 "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
419 }, {
420 .plaintext = "abcdbcdecdefdefgefghfghighij"
421 "hijkijkljklmklmnlmnomnopnopq",
422 .psize = 56,
423 .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
424 "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
425 "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
426 "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
427 .np = 2,
428 .tap = { 28, 28 },
429 }
171}; 430};
172 431
173/* 432/*
@@ -817,6 +1076,168 @@ static struct hash_testvec hmac_md5_tv_template[] =
817}; 1076};
818 1077
819/* 1078/*
1079 * HMAC-RIPEMD128 test vectors from RFC2286
1080 */
1081#define HMAC_RMD128_TEST_VECTORS 7
1082
1083static struct hash_testvec hmac_rmd128_tv_template[] = {
1084 {
1085 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1086 .ksize = 16,
1087 .plaintext = "Hi There",
1088 .psize = 8,
1089 .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
1090 "\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
1091 }, {
1092 .key = "Jefe",
1093 .ksize = 4,
1094 .plaintext = "what do ya want for nothing?",
1095 .psize = 28,
1096 .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
1097 "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
1098 .np = 2,
1099 .tap = { 14, 14 },
1100 }, {
1101 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1102 .ksize = 16,
1103 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1104 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1105 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1106 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1107 .psize = 50,
1108 .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
1109 "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
1110 }, {
1111 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1112 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1113 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1114 .ksize = 25,
1115 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1116 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1117 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1118 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1119 .psize = 50,
1120 .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
1121 "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
1122 }, {
1123 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1124 .ksize = 16,
1125 .plaintext = "Test With Truncation",
1126 .psize = 20,
1127 .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
1128 "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
1129 }, {
1130 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1131 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1132 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1133 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1134 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1135 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1136 "\xaa\xaa",
1137 .ksize = 80,
1138 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1139 .psize = 54,
1140 .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
1141 "\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
1142 }, {
1143 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1144 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1145 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1146 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1147 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1148 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1149 "\xaa\xaa",
1150 .ksize = 80,
1151 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1152 "Block-Size Data",
1153 .psize = 73,
1154 .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
1155 "\x06\x90\xc2\x37\x63\x5f\x30\xc5",
1156 },
1157};
1158
1159/*
1160 * HMAC-RIPEMD160 test vectors from RFC2286
1161 */
1162#define HMAC_RMD160_TEST_VECTORS 7
1163
1164static struct hash_testvec hmac_rmd160_tv_template[] = {
1165 {
1166 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1167 .ksize = 20,
1168 .plaintext = "Hi There",
1169 .psize = 8,
1170 .digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e"
1171 "\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68",
1172 }, {
1173 .key = "Jefe",
1174 .ksize = 4,
1175 .plaintext = "what do ya want for nothing?",
1176 .psize = 28,
1177 .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
1178 "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
1179 .np = 2,
1180 .tap = { 14, 14 },
1181 }, {
1182 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1183 .ksize = 20,
1184 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1185 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1186 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1187 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1188 .psize = 50,
1189 .digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4"
1190 "\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1",
1191 }, {
1192 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1193 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1194 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1195 .ksize = 25,
1196 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1197 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1198 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1199 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1200 .psize = 50,
1201 .digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1"
1202 "\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4",
1203 }, {
1204 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1205 .ksize = 20,
1206 .plaintext = "Test With Truncation",
1207 .psize = 20,
1208 .digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a"
1209 "\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39",
1210 }, {
1211 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1212 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1213 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1214 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1215 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1216 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1217 "\xaa\xaa",
1218 .ksize = 80,
1219 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1220 .psize = 54,
1221 .digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd"
1222 "\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b",
1223 }, {
1224 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1225 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1226 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1227 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1228 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1229 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1230 "\xaa\xaa",
1231 .ksize = 80,
1232 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1233 "Block-Size Data",
1234 .psize = 73,
1235 .digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f"
1236 "\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a",
1237 },
1238};
1239
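Exercising these HMAC vectors differs from the plain digests only in the extra setkey step. A minimal sketch against the synchronous hash API used elsewhere in this patch (hmac_rmd160_example() is a hypothetical name; the RFC 2286 "Jefe" vector above is the expected output):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int hmac_rmd160_example(u8 *out)
{
	static char msg[] = "what do ya want for nothing?";
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("hmac(rmd160)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_setkey(tfm, (const u8 *)"Jefe", 4);
	if (!ret) {
		sg_init_one(&sg, msg, sizeof(msg) - 1);
		/* out receives the 20-byte HMAC-RIPEMD160 value */
		ret = crypto_hash_digest(&desc, &sg, sizeof(msg) - 1, out);
	}

	crypto_free_hash(tfm);
	return ret;
}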
1240/*
820 * HMAC-SHA1 test vectors from RFC2202 1241 * HMAC-SHA1 test vectors from RFC2202
821 */ 1242 */
822#define HMAC_SHA1_TEST_VECTORS 7 1243#define HMAC_SHA1_TEST_VECTORS 7
@@ -1442,6 +1863,8 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
1442#define DES_CBC_DEC_TEST_VECTORS 4 1863#define DES_CBC_DEC_TEST_VECTORS 4
1443#define DES3_EDE_ENC_TEST_VECTORS 3 1864#define DES3_EDE_ENC_TEST_VECTORS 3
1444#define DES3_EDE_DEC_TEST_VECTORS 3 1865#define DES3_EDE_DEC_TEST_VECTORS 3
1866#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
1867#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
1445 1868
1446static struct cipher_testvec des_enc_tv_template[] = { 1869static struct cipher_testvec des_enc_tv_template[] = {
1447 { /* From Applied Cryptography */ 1870 { /* From Applied Cryptography */
@@ -1680,9 +2103,6 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
1680 }, 2103 },
1681}; 2104};
1682 2105
1683/*
1684 * We really need some more test vectors, especially for DES3 CBC.
1685 */
1686static struct cipher_testvec des3_ede_enc_tv_template[] = { 2106static struct cipher_testvec des3_ede_enc_tv_template[] = {
1687 { /* These are from openssl */ 2107 { /* These are from openssl */
1688 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" 2108 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
@@ -1745,6 +2165,94 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
1745 }, 2165 },
1746}; 2166};
1747 2167
2168static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
2169 { /* Generated from openssl */
2170 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2171 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2172 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2173 .klen = 24,
2174 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2175 .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2176 "\x53\x20\x63\x65\x65\x72\x73\x74"
2177 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2178 "\x20\x79\x65\x53\x72\x63\x74\x65"
2179 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2180 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2181 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2182 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2183 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2184 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2185 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2186 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2187 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2188 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2189 "\x63\x65\x65\x72\x73\x74\x54\x20"
2190 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2191 .ilen = 128,
2192 .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2193 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2194 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2195 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2196 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2197 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2198 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2199 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2200 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2201 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2202 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2203 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2204 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2205 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2206 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2207 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2208 .rlen = 128,
2209 },
2210};
2211
2212static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
2213 { /* Generated from openssl */
2214 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2215 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2216 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2217 .klen = 24,
2218 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2219 .input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2220 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2221 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2222 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2223 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2224 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2225 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2226 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2227 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2228 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2229 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2230 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2231 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2232 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2233 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2234 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2235 .ilen = 128,
2236 .result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2237 "\x53\x20\x63\x65\x65\x72\x73\x74"
2238 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2239 "\x20\x79\x65\x53\x72\x63\x74\x65"
2240 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2241 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2242 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2243 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2244 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2245 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2246 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2247 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2248 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2249 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2250 "\x63\x65\x65\x72\x73\x74\x54\x20"
2251 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2252 .rlen = 128,
2253 },
2254};
2255
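The comment says these CBC vectors were generated from openssl; assuming a userspace OpenSSL that still exposes the classic low-level DES interface in <openssl/des.h>, a vector like the one above could be regenerated roughly as follows (des3_cbc_regen() is a hypothetical helper; key, iv and in would be the byte strings from the template):

#include <openssl/des.h>
#include <string.h>

static void des3_cbc_regen(const unsigned char key[24],
			   const unsigned char iv[8],
			   const unsigned char *in, long len,
			   unsigned char *out)
{
	DES_key_schedule ks1, ks2, ks3;
	DES_cblock ivec;

	/* split the 24-byte 3DES key into its three single-DES keys */
	DES_set_key_unchecked((const_DES_cblock *)&key[0],  &ks1);
	DES_set_key_unchecked((const_DES_cblock *)&key[8],  &ks2);
	DES_set_key_unchecked((const_DES_cblock *)&key[16], &ks3);

	memcpy(ivec, iv, sizeof(ivec));	/* the call advances the IV in place */
	DES_ede3_cbc_encrypt(in, out, len, &ks1, &ks2, &ks3, &ivec,
			     DES_ENCRYPT);
}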
1748/* 2256/*
1749 * Blowfish test vectors. 2257 * Blowfish test vectors.
1750 */ 2258 */