author		Linus Torvalds <torvalds@linux-foundation.org>	2008-07-14 16:40:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-14 16:40:42 -0400
commit		3b23e665b68387f5ee7b21f7b75ceea4d9acae4a (patch)
tree		f68ddc11e1a3bb068f6d3d16c15da5e91df4dd84
parent		6c118e43dc513a7118b49b9ff953fe61e14515dc (diff)
parent		090657e423f45a77151943f50165ae9565bfbf33 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (50 commits)
  crypto: ixp4xx - Select CRYPTO_AUTHENC
  crypto: s390 - Respect STFL bit
  crypto: talitos - Add support for sha256 and md5 variants
  crypto: hash - Move ahash functions into crypto/hash.h
  crypto: crc32c - Add ahash implementation
  crypto: hash - Added scatter list walking helper
  crypto: prng - Deterministic CPRNG
  crypto: hash - Removed vestigial ahash fields
  crypto: hash - Fixed digest size check
  crypto: rmd - sparse annotations
  crypto: rmd128 - sparse annotations
  crypto: camellia - Use kernel-provided bitops, unaligned access helpers
  crypto: talitos - Use proper form for algorithm driver names
  crypto: talitos - Add support for 3des
  crypto: padlock - Make module loading quieter when hardware isn't available
  crypto: tcrpyt - Remove unnecessary kmap/kunmap calls
  crypto: ixp4xx - Hardware crypto support for IXP4xx CPUs
  crypto: talitos - Freescale integrated security engine (SEC) driver
  [CRYPTO] tcrypt: Add self test for des3_ebe cipher operating in cbc mode
  [CRYPTO] rmd: Use pointer form of endian swapping operations
  ...
-rw-r--r--  arch/s390/crypto/crypt_s390.h   |    4
-rw-r--r--  crypto/Kconfig                  |   63
-rw-r--r--  crypto/Makefile                 |    7
-rw-r--r--  crypto/ahash.c                  |  194
-rw-r--r--  crypto/api.c                    |    8
-rw-r--r--  crypto/camellia.c               |   84
-rw-r--r--  crypto/crc32c.c                 |  128
-rw-r--r--  crypto/cryptd.c                 |  253
-rw-r--r--  crypto/digest.c                 |   83
-rw-r--r--  crypto/hash.c                   |  102
-rw-r--r--  crypto/hmac.c                   |   16
-rw-r--r--  crypto/internal.h               |    1
-rw-r--r--  crypto/prng.c                   |  410
-rw-r--r--  crypto/prng.h                   |   27
-rw-r--r--  crypto/ripemd.h                 |   43
-rw-r--r--  crypto/rmd128.c                 |  325
-rw-r--r--  crypto/rmd160.c                 |  369
-rw-r--r--  crypto/rmd256.c                 |  344
-rw-r--r--  crypto/rmd320.c                 |  393
-rw-r--r--  crypto/tcrypt.c                 |  188
-rw-r--r--  crypto/tcrypt.h                 |  526
-rw-r--r--  drivers/crypto/Kconfig          |   26
-rw-r--r--  drivers/crypto/Makefile         |    2
-rw-r--r--  drivers/crypto/hifn_795x.c      |  367
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c  | 1506
-rw-r--r--  drivers/crypto/padlock-aes.c    |    4
-rw-r--r--  drivers/crypto/padlock-sha.c    |    4
-rw-r--r--  drivers/crypto/talitos.c        | 1597
-rw-r--r--  drivers/crypto/talitos.h        |  199
-rw-r--r--  include/crypto/hash.h           |  154
-rw-r--r--  include/crypto/internal/hash.h  |   78
-rw-r--r--  include/linux/crypto.h          |   48
32 files changed, 7227 insertions(+), 326 deletions(-)
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 9992f95ef992..0ef9829f2ad6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -296,6 +296,10 @@ static inline int crypt_s390_func_available(int func)
 	unsigned char status[16];
 	int ret;
 
+	/* check if CPACF facility (bit 17) is available */
+	if (!(stfl() & 1ULL << (31 - 17)))
+		return 0;
+
 	switch (func & CRYPT_S390_OP_MASK) {
 	case CRYPT_S390_KM:
 		ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
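The STFL facility word numbers bits from the most significant end, which is why CPACF (facility bit 17) is tested as 1ULL << (31 - 17) above. A minimal sketch of the same test, with an illustrative helper name that is not part of the patch:

	/* Illustrative only: test an MSB-first facility bit of a 32-bit word. */
	static inline int facility_bit_set(unsigned int facility_word, int bit)
	{
		/* bit 0 is the most significant bit, so bit n maps to 1 << (31 - n) */
		return (facility_word >> (31 - bit)) & 1;
	}

	/* The added check is then equivalent to:
	 *	if (!facility_bit_set(stfl(), 17))
	 *		return 0;
	 */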
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 864456c140fe..ea503572fcbe 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -65,6 +65,7 @@ config CRYPTO_NULL
 config CRYPTO_CRYPTD
 	tristate "Software async crypto daemon"
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
 	select CRYPTO_MANAGER
 	help
 	  This is a generic software asynchronous crypto daemon that
@@ -212,7 +213,7 @@ comment "Digest"
 
 config CRYPTO_CRC32C
 	tristate "CRC32c CRC algorithm"
-	select CRYPTO_ALGAPI
+	select CRYPTO_HASH
 	select LIBCRC32C
 	help
 	  Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
 	  should not be used for other purposes because of the weakness
 	  of the algorithm.
 
+config CRYPTO_RMD128
+	tristate "RIPEMD-128 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-128 (ISO/IEC 10118-3:2004).
+
+	  RIPEMD-128 is a 128-bit cryptographic hash function. It should only
+	  be used as a secure replacement for RIPEMD. For other use cases,
+	  RIPEMD-160 should be used.
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD160
+	tristate "RIPEMD-160 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-160 (ISO/IEC 10118-3:2004).
+
+	  RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
+	  to be used as a secure replacement for the 128-bit hash functions
+	  MD4, MD5 and its predecessor RIPEMD (not to be confused with
+	  RIPEMD-128).
+
+	  Its speed is comparable to SHA1 and there are no known attacks
+	  against RIPEMD-160.
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD256
+	tristate "RIPEMD-256 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-256 is an optional extension of RIPEMD-128 with a 256-bit
+	  hash. It is intended for applications that require a longer hash
+	  result, without needing a larger security level (than RIPEMD-128).
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
+config CRYPTO_RMD320
+	tristate "RIPEMD-320 digest algorithm"
+	select CRYPTO_ALGAPI
+	help
+	  RIPEMD-320 is an optional extension of RIPEMD-160 with a 320-bit
+	  hash. It is intended for applications that require a longer hash
+	  result, without needing a larger security level (than RIPEMD-160).
+
+	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
+	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
 	select CRYPTO_ALGAPI
@@ -614,6 +666,15 @@ config CRYPTO_LZO
 	help
 	  This is the LZO algorithm.
 
+comment "Random Number Generation"
+
+config CRYPTO_PRNG
+	tristate "Pseudo Random Number Generation for Cryptographic modules"
+	help
+	  This option enables the generic pseudo random number generator
+	  for cryptographic modules. Uses the algorithm specified in
+	  ANSI X9.31 A.2.4.
+
 source "drivers/crypto/Kconfig"
 
 endif	# if CRYPTO
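The X9.31 A.2.4 generator selected by CRYPTO_PRNG is implemented in crypto/prng.c later in this merge. One round can be sketched as below, where enc() stands for AES encryption under the generator's fixed key (a placeholder, not a kernel API), DT is the counter block, V the secret seed vector and R the output block:

	/* Sketch of one ANSI X9.31 A.2.4 round; enc() is a stand-in for the
	 * blkcipher encryption call made in crypto/prng.c. */
	static void x931_round(const unsigned char DT[16], unsigned char V[16],
			       unsigned char R[16])
	{
		unsigned char I[16], tmp[16];
		int i;

		enc(DT, I);			/* stage 0: I = E_K(DT) */
		for (i = 0; i < 16; i++)
			tmp[i] = I[i] ^ V[i];
		enc(tmp, R);			/* stage 1: R = E_K(I ^ V), the output */
		for (i = 0; i < 16; i++)
			tmp[i] = R[i] ^ I[i];
		enc(tmp, V);			/* stage 2: V = E_K(R ^ I), the next seed */
	}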
diff --git a/crypto/Makefile b/crypto/Makefile
index ca024418f4fb..ef61b3b64660 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 
 crypto_hash-objs := hash.o
+crypto_hash-objs += ahash.o
 obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
+obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
+obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
+obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
+obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
-
+obj-$(CONFIG_CRYPTO_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 
 #
diff --git a/crypto/ahash.c b/crypto/ahash.c
new file mode 100644
index 000000000000..27128f2c687a
--- /dev/null
+++ b/crypto/ahash.c
@@ -0,0 +1,194 @@
+/*
+ * Asynchronous Cryptographic Hash operations.
+ *
+ * This is the asynchronous version of hash.c with notification of
+ * completion via a callback.
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+static int hash_walk_next(struct crypto_hash_walk *walk)
+{
+	unsigned int alignmask = walk->alignmask;
+	unsigned int offset = walk->offset;
+	unsigned int nbytes = min(walk->entrylen,
+				  ((unsigned int)(PAGE_SIZE)) - offset);
+
+	walk->data = crypto_kmap(walk->pg, 0);
+	walk->data += offset;
+
+	if (offset & alignmask)
+		nbytes = alignmask + 1 - (offset & alignmask);
+
+	walk->entrylen -= nbytes;
+	return nbytes;
+}
+
+static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+{
+	struct scatterlist *sg;
+
+	sg = walk->sg;
+	walk->pg = sg_page(sg);
+	walk->offset = sg->offset;
+	walk->entrylen = sg->length;
+
+	if (walk->entrylen > walk->total)
+		walk->entrylen = walk->total;
+	walk->total -= walk->entrylen;
+
+	return hash_walk_next(walk);
+}
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+{
+	unsigned int alignmask = walk->alignmask;
+	unsigned int nbytes = walk->entrylen;
+
+	walk->data -= walk->offset;
+
+	if (nbytes && walk->offset & alignmask && !err) {
+		walk->offset += alignmask - 1;
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		walk->data += walk->offset;
+
+		nbytes = min(nbytes,
+			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
+		walk->entrylen -= nbytes;
+
+		return nbytes;
+	}
+
+	crypto_kunmap(walk->data, 0);
+	crypto_yield(walk->flags);
+
+	if (err)
+		return err;
+
+	walk->offset = 0;
+
+	if (nbytes)
+		return hash_walk_next(walk);
+
+	if (!walk->total)
+		return 0;
+
+	walk->sg = scatterwalk_sg_next(walk->sg);
+
+	return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
+
+int crypto_hash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk)
+{
+	walk->total = req->nbytes;
+
+	if (!walk->total)
+		return 0;
+
+	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
+	walk->sg = req->src;
+	walk->flags = req->base.flags;
+
+	return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
+
+static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
+				  unsigned int keylen)
+{
+	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+	int ret;
+	u8 *buffer, *alignbuffer;
+	unsigned long absize;
+
+	absize = keylen + alignmask;
+	buffer = kmalloc(absize, GFP_ATOMIC);
+	if (!buffer)
+		return -ENOMEM;
+
+	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+	memcpy(alignbuffer, key, keylen);
+	ret = ahash->setkey(tfm, alignbuffer, keylen);
+	memset(alignbuffer, 0, keylen);
+	kfree(buffer);
+	return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+	if ((unsigned long)key & alignmask)
+		return ahash_setkey_unaligned(tfm, key, keylen);
+
+	return ahash->setkey(tfm, key, keylen);
+}
+
+static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
+					 u32 mask)
+{
+	return alg->cra_ctxsize;
+}
+
+static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+	struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
+	struct ahash_tfm *crt = &tfm->crt_ahash;
+
+	if (alg->digestsize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	crt->init = alg->init;
+	crt->update = alg->update;
+	crt->final = alg->final;
+	crt->digest = alg->digest;
+	crt->setkey = ahash_setkey;
+	crt->digestsize = alg->digestsize;
+
+	return 0;
+}
+
+static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_printf(m, "type         : ahash\n");
+	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
+					     "yes" : "no");
+	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
+	seq_printf(m, "digestsize   : %u\n", alg->cra_hash.digestsize);
+}
+
+const struct crypto_type crypto_ahash_type = {
+	.ctxsize = crypto_ahash_ctxsize,
+	.init = crypto_init_ahash_ops,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_ahash_show,
+#endif
+};
+EXPORT_SYMBOL_GPL(crypto_ahash_type);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
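The walker exported above is meant to be driven from an ahash update or digest callback. A minimal sketch of that loop, modelled directly on the crc32c_update() added later in this merge (example_core_update() is a hypothetical per-algorithm helper):

	static int example_update(struct ahash_request *req)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		/* Map each scatterlist chunk in turn and feed it to the core. */
		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
		     nbytes = crypto_hash_walk_done(&walk, 0))
			example_core_update(ahash_request_ctx(req),
					    walk.data, nbytes);

		return 0;
	}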
diff --git a/crypto/api.c b/crypto/api.c
index 0a0f41ef255f..d06e33270abe 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
 		return crypto_init_cipher_ops(tfm);
 
 	case CRYPTO_ALG_TYPE_DIGEST:
-		return crypto_init_digest_ops(tfm);
-
+		if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
+		    CRYPTO_ALG_TYPE_HASH_MASK)
+			return crypto_init_digest_ops_async(tfm);
+		else
+			return crypto_init_digest_ops(tfm);
+
 	case CRYPTO_ALG_TYPE_COMPRESS:
 		return crypto_init_compress_ops(tfm);
 
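With this dispatch, an old CRYPTO_ALG_TYPE_DIGEST algorithm also becomes reachable through the asynchronous interface. A sketch of a one-shot async digest, assuming the crypto_alloc_ahash()/ahash_request_*() helpers this merge adds in include/crypto/hash.h; the completion wrapper is generic boilerplate, not from the patch:

	#include <crypto/hash.h>
	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	struct example_wait {
		struct completion done;
		int err;
	};

	static void example_done(struct crypto_async_request *req, int err)
	{
		struct example_wait *w = req->data;

		if (err == -EINPROGRESS)
			return;
		w->err = err;
		complete(&w->done);
	}

	static int example_sha1(const u8 *data, unsigned int len, u8 *out)
	{
		struct example_wait wait = { .err = 0 };
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		init_completion(&wait.done);

		tfm = crypto_alloc_ahash("sha1", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   example_done, &wait);
		ahash_request_set_crypt(req, &sg, out, len);

		err = crypto_ahash_digest(req);
		if (err == -EINPROGRESS || err == -EBUSY) {
			wait_for_completion(&wait.done);
			err = wait.err;
		}

		ahash_request_free(req);
	out_tfm:
		crypto_free_ahash(tfm);
		return err;
	}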
diff --git a/crypto/camellia.c b/crypto/camellia.c
index 493fee7e0a8b..b1cc4de6493c 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -35,6 +35,8 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
+#include <asm/unaligned.h>
 
 static const u32 camellia_sp1110[256] = {
 	0x70707000,0x82828200,0x2c2c2c00,0xececec00,
@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
 /*
  * macros
  */
-#define GETU32(v, pt) \
-	do { \
-		/* latest breed of gcc is clever enough to use move */ \
-		memcpy(&(v), (pt), 4); \
-		(v) = be32_to_cpu(v); \
-	} while(0)
-
-/* rotation right shift 1byte */
-#define ROR8(x) (((x) >> 8) + ((x) << 24))
-/* rotation left shift 1bit */
-#define ROL1(x) (((x) << 1) + ((x) >> 31))
-/* rotation left shift 1byte */
-#define ROL8(x) (((x) << 8) + ((x) >> 24))
-
 #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
 	do { \
 		w0 = ll; \
@@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = {
 		    ^ camellia_sp3033[(u8)(il >> 8)] \
 		    ^ camellia_sp4404[(u8)(il )]; \
 		yl ^= yr; \
-		yr = ROR8(yr); \
+		yr = ror32(yr, 8); \
 		yr ^= yl; \
 	} while(0)
 
@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[7] ^= subL[1]; subR[7] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[9];
 	dw = subL[1] & subL[9],
-	    subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
+	    subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
 	/* round 8 */
 	subL[11] ^= subL[1]; subR[11] ^= subR[1];
 	/* round 10 */
@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[15] ^= subL[1]; subR[15] ^= subR[1];
 	subL[1] ^= subR[1] & ~subR[17];
 	dw = subL[1] & subL[17],
-	    subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
+	    subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
 	/* round 14 */
 	subL[19] ^= subL[1]; subR[19] ^= subR[1];
 	/* round 16 */
@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	} else {
 		subL[1] ^= subR[1] & ~subR[25];
 		dw = subL[1] & subL[25],
-		    subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
+		    subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
 		/* round 20 */
 		subL[27] ^= subL[1]; subR[27] ^= subR[1];
 		/* round 22 */
@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		subL[26] ^= kw4l; subR[26] ^= kw4r;
 		kw4l ^= kw4r & ~subR[24];
 		dw = kw4l & subL[24],
-		    kw4r ^= ROL1(dw); /* modified for FL(kl5) */
+		    kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
 	}
 	/* round 17 */
 	subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[18] ^= kw4l; subR[18] ^= kw4r;
 	kw4l ^= kw4r & ~subR[16];
 	dw = kw4l & subL[16],
-	    kw4r ^= ROL1(dw); /* modified for FL(kl3) */
+	    kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
 	/* round 11 */
 	subL[14] ^= kw4l; subR[14] ^= kw4r;
 	/* round 9 */
@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	subL[10] ^= kw4l; subR[10] ^= kw4r;
 	kw4l ^= kw4r & ~subR[8];
 	dw = kw4l & subL[8],
-	    kw4r ^= ROL1(dw); /* modified for FL(kl1) */
+	    kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
 	/* round 5 */
 	subL[6] ^= kw4l; subR[6] ^= kw4r;
 	/* round 3 */
@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(6) = subR[5] ^ subR[7];
 	tl = subL[10] ^ (subR[10] & ~subR[8]);
 	dw = tl & subL[8],  /* FL(kl1) */
-	    tr = subR[10] ^ ROL1(dw);
+	    tr = subR[10] ^ rol32(dw, 1);
 	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
 	SUBKEY_R(7) = subR[6] ^ tr;
 	SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(9) = subR[9];
 	tl = subL[7] ^ (subR[7] & ~subR[9]);
 	dw = tl & subL[9],  /* FLinv(kl2) */
-	    tr = subR[7] ^ ROL1(dw);
+	    tr = subR[7] ^ rol32(dw, 1);
 	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
 	SUBKEY_R(10) = tr ^ subR[11];
 	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(14) = subR[13] ^ subR[15];
 	tl = subL[18] ^ (subR[18] & ~subR[16]);
 	dw = tl & subL[16], /* FL(kl3) */
-	    tr = subR[18] ^ ROL1(dw);
+	    tr = subR[18] ^ rol32(dw, 1);
 	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
 	SUBKEY_R(15) = subR[14] ^ tr;
 	SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	SUBKEY_R(17) = subR[17];
 	tl = subL[15] ^ (subR[15] & ~subR[17]);
 	dw = tl & subL[17], /* FLinv(kl4) */
-	    tr = subR[15] ^ ROL1(dw);
+	    tr = subR[15] ^ rol32(dw, 1);
 	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
 	SUBKEY_R(18) = tr ^ subR[19];
 	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	} else {
 		tl = subL[26] ^ (subR[26] & ~subR[24]);
 		dw = tl & subL[24], /* FL(kl5) */
-		    tr = subR[26] ^ ROL1(dw);
+		    tr = subR[26] ^ rol32(dw, 1);
 		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
 		SUBKEY_R(23) = subR[22] ^ tr;
 		SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 		SUBKEY_R(25) = subR[25];
 		tl = subL[23] ^ (subR[23] & ~subR[25]);
 		dw = tl & subL[25], /* FLinv(kl6) */
-		    tr = subR[23] ^ ROL1(dw);
+		    tr = subR[23] ^ rol32(dw, 1);
 		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
 		SUBKEY_R(26) = tr ^ subR[27];
 		SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
 	/* apply the inverse of the last half of P-function */
 	i = 2;
 	do {
-		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
+		dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
 		SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
-		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
+		dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
 		SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
-		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
+		dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
 		SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
-		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
+		dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
 		SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
-		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
+		dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
 		SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
-		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
+		dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
 		SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
 		i += 8;
 	} while (i < max);
@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
 	/**
 	 * k == kll || klr || krl || krr (|| is concatenation)
 	 */
-	GETU32(kll, key     );
-	GETU32(klr, key +  4);
-	GETU32(krl, key +  8);
-	GETU32(krr, key + 12);
+	kll = get_unaligned_be32(key);
+	klr = get_unaligned_be32(key + 4);
+	krl = get_unaligned_be32(key + 8);
+	krr = get_unaligned_be32(key + 12);
 
 	/* generate KL dependent subkeys */
 	/* kw1 */
@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
 	 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
 	 * (|| is concatenation)
 	 */
-	GETU32(kll,  key     );
-	GETU32(klr,  key +  4);
-	GETU32(krl,  key +  8);
-	GETU32(krr,  key + 12);
-	GETU32(krll, key + 16);
-	GETU32(krlr, key + 20);
-	GETU32(krrl, key + 24);
-	GETU32(krrr, key + 28);
+	kll = get_unaligned_be32(key);
+	klr = get_unaligned_be32(key + 4);
+	krl = get_unaligned_be32(key + 8);
+	krr = get_unaligned_be32(key + 12);
+	krll = get_unaligned_be32(key + 16);
+	krlr = get_unaligned_be32(key + 20);
+	krrl = get_unaligned_be32(key + 24);
+	krrr = get_unaligned_be32(key + 28);
 
 	/* generate KL dependent subkeys */
 	/* kw1 */
@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
 		t0 &= ll; \
 		t2 |= rr; \
 		rl ^= t2; \
-		lr ^= ROL1(t0); \
+		lr ^= rol32(t0, 1); \
 		t3 = krl; \
 		t1 = klr; \
 		t3 &= rl; \
 		t1 |= lr; \
 		ll ^= t1; \
-		rr ^= ROL1(t3); \
+		rr ^= rol32(t3, 1); \
 	} while(0)
 
 #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
@@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
 		il ^= kl; \
 		ir ^= il ^ kr; \
 		yl ^= ir; \
-		yr ^= ROR8(il) ^ ir; \
+		yr ^= ror32(il, 8) ^ ir; \
 	} while(0)
 
 /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
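The deleted macros map one-to-one onto the generic helpers: ROL1(x) is rol32(x, 1), ROL8(x) is rol32(x, 8), ROR8(x) is ror32(x, 8), and GETU32(v, p) becomes v = get_unaligned_be32(p). A self-contained userspace check of the rotation equivalences; the local rol32/ror32 definitions mirror <linux/bitops.h>:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t rol32(uint32_t w, unsigned int s) { return (w << s) | (w >> (32 - s)); }
	static uint32_t ror32(uint32_t w, unsigned int s) { return (w >> s) | (w << (32 - s)); }

	int main(void)
	{
		uint32_t x = 0x12345678;

		/* + and | coincide here because the shifted halves never overlap */
		assert(rol32(x, 1) == (((x) << 1) + ((x) >> 31)));	/* old ROL1 */
		assert(rol32(x, 8) == (((x) << 8) + ((x) >> 24)));	/* old ROL8 */
		assert(ror32(x, 8) == (((x) >> 8) + ((x) << 24)));	/* old ROR8 */
		return 0;
	}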
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 0dcf64a74e68..a882d9e4e63e 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -5,20 +5,23 @@
  *
  * This module file is a wrapper to invoke the lib/crc32c routines.
  *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
+
+#include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/crc32c.h>
 #include <linux/kernel.h>
 
-#define CHKSUM_BLOCK_SIZE	32
+#define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
 
 struct chksum_ctx {
@@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
 	*(__le32 *)out = ~cpu_to_le32(mctx->crc);
 }
 
-static int crc32c_cra_init(struct crypto_tfm *tfm)
+static int crc32c_cra_init_old(struct crypto_tfm *tfm)
 {
 	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
 
@@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static struct crypto_alg alg = {
+static struct crypto_alg old_alg = {
 	.cra_name	=	"crc32c",
 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize	=	CHKSUM_BLOCK_SIZE,
 	.cra_ctxsize	=	sizeof(struct chksum_ctx),
 	.cra_module	=	THIS_MODULE,
-	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
-	.cra_init	=	crc32c_cra_init,
+	.cra_list	=	LIST_HEAD_INIT(old_alg.cra_list),
+	.cra_init	=	crc32c_cra_init_old,
 	.cra_u		=	{
 	.digest = {
 	.dia_digestsize= CHKSUM_DIGEST_SIZE,
@@ -98,14 +101,125 @@ static struct crypto_alg alg = {
 	}
 };
 
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
+			 unsigned int keylen)
+{
+	u32 *mctx = crypto_ahash_ctx(hash);
+
+	if (keylen != sizeof(u32)) {
+		crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	*mctx = le32_to_cpup((__le32 *)key);
+	return 0;
+}
+
+static int crc32c_init(struct ahash_request *req)
+{
+	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	u32 *crcp = ahash_request_ctx(req);
+
+	*crcp = *mctx;
+	return 0;
+}
+
+static int crc32c_update(struct ahash_request *req)
+{
+	struct crypto_hash_walk walk;
+	u32 *crcp = ahash_request_ctx(req);
+	u32 crc = *crcp;
+	int nbytes;
+
+	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
+	     nbytes = crypto_hash_walk_done(&walk, 0))
+		crc = crc32c(crc, walk.data, nbytes);
+
+	*crcp = crc;
+	return 0;
+}
+
+static int crc32c_final(struct ahash_request *req)
+{
+	u32 *crcp = ahash_request_ctx(req);
+
+	*(__le32 *)req->result = ~cpu_to_le32p(crcp);
+	return 0;
+}
+
+static int crc32c_digest(struct ahash_request *req)
+{
+	struct crypto_hash_walk walk;
+	u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	u32 crc = *mctx;
+	int nbytes;
+
+	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
+	     nbytes = crypto_hash_walk_done(&walk, 0))
+		crc = crc32c(crc, walk.data, nbytes);
+
+	*(__le32 *)req->result = ~cpu_to_le32(crc);
+	return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = ~0;
+
+	tfm->crt_ahash.reqsize = sizeof(u32);
+
+	return 0;
+}
+
+static struct crypto_alg alg = {
+	.cra_name		=	"crc32c",
+	.cra_driver_name	=	"crc32c-generic",
+	.cra_priority		=	100,
+	.cra_flags		=	CRYPTO_ALG_TYPE_AHASH,
+	.cra_blocksize		=	CHKSUM_BLOCK_SIZE,
+	.cra_alignmask		=	3,
+	.cra_ctxsize		=	sizeof(u32),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(alg.cra_list),
+	.cra_init		=	crc32c_cra_init,
+	.cra_type		=	&crypto_ahash_type,
+	.cra_u			=	{
+		.ahash = {
+			.digestsize	=	CHKSUM_DIGEST_SIZE,
+			.setkey		=	crc32c_setkey,
+			.init		=	crc32c_init,
+			.update		=	crc32c_update,
+			.final		=	crc32c_final,
+			.digest		=	crc32c_digest,
+		}
+	}
+};
+
 static int __init crc32c_mod_init(void)
 {
-	return crypto_register_alg(&alg);
+	int err;
+
+	err = crypto_register_alg(&old_alg);
+	if (err)
+		return err;
+
+	err = crypto_register_alg(&alg);
+	if (err)
+		crypto_unregister_alg(&old_alg);
+
+	return err;
 }
 
 static void __exit crc32c_mod_fini(void)
 {
 	crypto_unregister_alg(&alg);
+	crypto_unregister_alg(&old_alg);
 }
 
 module_init(crc32c_mod_init);
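The setkey comment above fixes the seed convention: the stored key is the raw accumulator, while finalisation XORs the result with ~0. To resume from a previously finalised crc32c value, a caller therefore has to undo that inversion before seeding. A sketch, assuming the crypto_ahash_setkey() helper from the new include/crypto/hash.h:

	/* Sketch: re-seed a crc32c ahash tfm from a previously finalised
	 * digest. The key bytes are little-endian, per crc32c_setkey(). */
	static int crc32c_reseed(struct crypto_ahash *tfm, u32 finalised)
	{
		__le32 seed = cpu_to_le32(finalised ^ ~0u);

		return crypto_ahash_setkey(tfm, (u8 *)&seed, sizeof(seed));
	}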
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index b150de562057..d29e06b350ff 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -11,6 +11,7 @@
  */
 
 #include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
 };
 
+struct cryptd_hash_ctx {
+	struct crypto_hash *child;
+};
+
+struct cryptd_hash_request_ctx {
+	crypto_completion_t complete;
+};
 
 static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
 {
@@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 
 	rctx = ablkcipher_request_ctx(req);
 
-	if (unlikely(err == -EINPROGRESS)) {
-		rctx->complete(&req->base, err);
-		return;
-	}
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
 
 	desc.tfm = child;
 	desc.info = req->info;
@@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 
 	req->base.complete = rctx->complete;
 
+out:
 	local_bh_disable();
-	req->base.complete(&req->base, err);
+	rctx->complete(&req->base, err);
 	local_bh_enable();
 }
 
@@ -261,6 +268,240 @@ out_put_alg:
 	return inst;
 }
 
+static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct crypto_spawn *spawn = &ictx->spawn;
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_hash *cipher;
+
+	cipher = crypto_spawn_hash(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	tfm->crt_ahash.reqsize =
+		sizeof(struct cryptd_hash_request_ctx);
+	return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct cryptd_state *state = cryptd_get_state(tfm);
+	int active;
+
+	mutex_lock(&state->mutex);
+	active = ahash_tfm_in_queue(&state->queue,
+				    __crypto_ahash_cast(tfm));
+	mutex_unlock(&state->mutex);
+
+	BUG_ON(active);
+
+	crypto_free_hash(ctx->child);
+}
+
+static int cryptd_hash_setkey(struct crypto_ahash *parent,
+			      const u8 *key, unsigned int keylen)
+{
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
+	struct crypto_hash *child = ctx->child;
+	int err;
+
+	crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
+			      CRYPTO_TFM_REQ_MASK);
+	err = crypto_hash_setkey(child, key, keylen);
+	crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
+			       CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static int cryptd_hash_enqueue(struct ahash_request *req,
+			       crypto_completion_t complete)
+{
+	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cryptd_state *state =
+		cryptd_get_state(crypto_ahash_tfm(tfm));
+	int err;
+
+	rctx->complete = req->base.complete;
+	req->base.complete = complete;
+
+	spin_lock_bh(&state->lock);
+	err = ahash_enqueue_request(&state->queue, req);
+	spin_unlock_bh(&state->lock);
+
+	wake_up_process(state->task);
+	return err;
+}
+
+static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->init(&desc);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_init_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_init);
+}
+
+static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->update(&desc,
+					     req->src,
+					     req->nbytes);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_update_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_update);
+}
+
+static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->final(&desc, req->result);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_final_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_final);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+	struct crypto_hash *child = ctx->child;
+	struct ahash_request *req = ahash_request_cast(req_async);
+	struct cryptd_hash_request_ctx *rctx;
+	struct hash_desc desc;
+
+	rctx = ahash_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	desc.tfm = child;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_crt(child)->digest(&desc,
+					     req->src,
+					     req->nbytes,
+					     req->result);
+
+	req->base.complete = rctx->complete;
+
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static int cryptd_hash_digest_enqueue(struct ahash_request *req)
+{
+	return cryptd_hash_enqueue(req, cryptd_hash_digest);
+}
+
+static struct crypto_instance *cryptd_alloc_hash(
+	struct rtattr **tb, struct cryptd_state *state)
+{
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
+				  CRYPTO_ALG_TYPE_HASH_MASK);
+	if (IS_ERR(alg))
+		return ERR_PTR(PTR_ERR(alg));
+
+	inst = cryptd_alloc_instance(alg, state);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_type = &crypto_ahash_type;
+
+	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
+	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+	inst->alg.cra_init = cryptd_hash_init_tfm;
+	inst->alg.cra_exit = cryptd_hash_exit_tfm;
+
+	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
+	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
+	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
+	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
+	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return inst;
+}
+
 static struct cryptd_state state;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
@@ -274,6 +515,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
 		return cryptd_alloc_blkcipher(tb, &state);
+	case CRYPTO_ALG_TYPE_DIGEST:
+		return cryptd_alloc_hash(tb, &state);
 	}
 
 	return ERR_PTR(-EINVAL);
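The new CRYPTO_ALG_TYPE_DIGEST case makes synchronous digests instantiable under the cryptd template, so a name such as "cryptd(md5)" should now resolve to an asynchronous hash whose work runs in the cryptd thread. A sketch of requesting one; this is the standard template syntax, nothing cryptd-specific:

	/* Sketch: wrap a sync digest in cryptd and get an async tfm back. */
	static struct crypto_ahash *example_cryptd_md5(void)
	{
		/* Requests on this tfm are queued to the cryptd thread and
		 * complete through the request's callback. */
		return crypto_alloc_ahash("cryptd(md5)", 0, 0);
	}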
diff --git a/crypto/digest.c b/crypto/digest.c
index b526cc348b79..ac0919460d14 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -12,6 +12,7 @@
  *
  */
 
+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
@@ -141,7 +142,7 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
 	struct hash_tfm *ops = &tfm->crt_hash;
 	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
 
-	if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
+	if (dalg->dia_digestsize > PAGE_SIZE / 8)
 		return -EINVAL;
 
 	ops->init = init;
@@ -157,3 +158,83 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
 void crypto_exit_digest_ops(struct crypto_tfm *tfm)
 {
 }
+
+static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
+				 unsigned int keylen)
+{
+	crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
+	return -ENOSYS;
+}
+
+static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
+			       unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
+	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
+
+	crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
+	return dalg->dia_setkey(tfm, key, keylen);
+}
+
+static int digest_async_init(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
+
+	dalg->dia_init(tfm);
+	return 0;
+}
+
+static int digest_async_update(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	update(&desc, req->src, req->nbytes);
+	return 0;
+}
+
+static int digest_async_final(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	final(&desc, req->result);
+	return 0;
+}
+
+static int digest_async_digest(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	return digest(&desc, req->src, req->nbytes, req->result);
+}
+
+int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
+{
+	struct ahash_tfm *crt = &tfm->crt_ahash;
+	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
+
+	if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
+		return -EINVAL;
+
+	crt->init = digest_async_init;
+	crt->update = digest_async_update;
+	crt->final = digest_async_final;
+	crt->digest = digest_async_digest;
+	crt->setkey = dalg->dia_setkey ? digest_async_setkey :
+					 digest_async_nosetkey;
+	crt->digestsize = dalg->dia_digestsize;
+
+	return 0;
+}
diff --git a/crypto/hash.c b/crypto/hash.c
index 7dcff671c19b..cb86b19fd105 100644
--- a/crypto/hash.c
+++ b/crypto/hash.c
@@ -9,6 +9,7 @@
  * any later version.
  */
 
+#include <crypto/internal/hash.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -59,24 +60,107 @@ static int hash_setkey(struct crypto_hash *crt, const u8 *key,
 	return alg->setkey(crt, key, keylen);
 }
 
-static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
+			     unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
+	struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+
+	return alg->setkey(tfm_hash, key, keylen);
+}
+
+static int hash_async_init(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	return alg->init(&desc);
+}
+
+static int hash_async_update(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	return alg->update(&desc, req->src, req->nbytes);
+}
+
+static int hash_async_final(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	return alg->final(&desc, req->result);
+}
+
+static int hash_async_digest(struct ahash_request *req)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+	struct hash_desc desc = {
+		.tfm = __crypto_hash_cast(tfm),
+		.flags = req->base.flags,
+	};
+
+	return alg->digest(&desc, req->src, req->nbytes, req->result);
+}
+
+static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
+{
+	struct ahash_tfm *crt = &tfm->crt_ahash;
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+
+	crt->init = hash_async_init;
+	crt->update = hash_async_update;
+	crt->final = hash_async_final;
+	crt->digest = hash_async_digest;
+	crt->setkey = hash_async_setkey;
+	crt->digestsize = alg->digestsize;
+
+	return 0;
+}
+
+static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
 {
 	struct hash_tfm *crt = &tfm->crt_hash;
 	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
 
-	if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
-		return -EINVAL;
-
 	crt->init = alg->init;
 	crt->update = alg->update;
 	crt->final = alg->final;
 	crt->digest = alg->digest;
 	crt->setkey = hash_setkey;
 	crt->digestsize = alg->digestsize;
 
 	return 0;
 }
 
+static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+	struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
+
+	if (alg->digestsize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
+		return crypto_init_hash_ops_async(tfm);
+	else
+		return crypto_init_hash_ops_sync(tfm);
+}
+
 static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
 	__attribute__ ((unused));
 static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 14c6351e639d..7ff2d6a8c7d0 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -226,6 +226,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
 	int err;
+	int ds;
 
 	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
 	if (err)
@@ -236,6 +237,13 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
+	inst = ERR_PTR(-EINVAL);
+	ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	     CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
+				    alg->cra_digest.dia_digestsize;
+	if (ds > alg->cra_blocksize)
+		goto out_put_alg;
+
 	inst = crypto_alloc_instance("hmac", alg);
 	if (IS_ERR(inst))
 		goto out_put_alg;
@@ -246,14 +254,10 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
 	inst->alg.cra_alignmask = alg->cra_alignmask;
 	inst->alg.cra_type = &crypto_hash_type;
 
-	inst->alg.cra_hash.digestsize =
-		(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-		CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
-		alg->cra_digest.dia_digestsize;
+	inst->alg.cra_hash.digestsize = ds;
 
 	inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
-				ALIGN(inst->alg.cra_blocksize * 2 +
-				      inst->alg.cra_hash.digestsize,
+				ALIGN(inst->alg.cra_blocksize * 2 + ds,
 				      sizeof(void *));
 
 	inst->alg.cra_init = hmac_init_tfm;
diff --git a/crypto/internal.h b/crypto/internal.h
index 32f4c2145603..683fcb2d91f4 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
 struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
 
 int crypto_init_digest_ops(struct crypto_tfm *tfm);
+int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
 int crypto_init_cipher_ops(struct crypto_tfm *tfm);
 int crypto_init_compress_ops(struct crypto_tfm *tfm);
 
diff --git a/crypto/prng.c b/crypto/prng.c
new file mode 100644
index 000000000000..24e4f3282c56
--- /dev/null
+++ b/crypto/prng.c
@@ -0,0 +1,410 @@
1/*
2 * PRNG: Pseudo Random Number Generator
3 * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
4 * AES 128 cipher in RFC3686 ctr mode
5 *
6 * (C) Neil Horman <nhorman@tuxdriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * any later version.
12 *
13 *
14 */
15
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/slab.h>
21#include <linux/fs.h>
22#include <linux/scatterlist.h>
23#include <linux/string.h>
24#include <linux/crypto.h>
25#include <linux/highmem.h>
26#include <linux/moduleparam.h>
27#include <linux/jiffies.h>
28#include <linux/timex.h>
29#include <linux/interrupt.h>
30#include <linux/miscdevice.h>
31#include "prng.h"
32
33#define TEST_PRNG_ON_START 0
34
35#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
36#define DEFAULT_PRNG_KSZ 20
37#define DEFAULT_PRNG_IV "defaultv"
38#define DEFAULT_PRNG_IVSZ 8
39#define DEFAULT_BLK_SZ 16
40#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
41
42/*
43 * Flags for the prng_context flags field
44 */
45
46#define PRNG_FIXED_SIZE 0x1
47#define PRNG_NEED_RESET 0x2
48
49/*
50 * Note: DT is our counter value
51 * I is our intermediate value
52 * V is our seed vector
53 * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
54 * for implementation details
55 */
56
57
58struct prng_context {
59 char *prng_key;
60 char *prng_iv;
61 spinlock_t prng_lock;
62 unsigned char rand_data[DEFAULT_BLK_SZ];
63 unsigned char last_rand_data[DEFAULT_BLK_SZ];
64 unsigned char DT[DEFAULT_BLK_SZ];
65 unsigned char I[DEFAULT_BLK_SZ];
66 unsigned char V[DEFAULT_BLK_SZ];
67 u32 rand_data_valid;
68 struct crypto_blkcipher *tfm;
69 u32 flags;
70};
71
72static int dbg;
73
74static void hexdump(char *note, unsigned char *buf, unsigned int len)
75{
76 if (dbg) {
77 printk(KERN_CRIT "%s", note);
78 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
79 16, 1,
80 buf, len, false);
81 }
82}
83
84#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0)
85
86static void xor_vectors(unsigned char *in1, unsigned char *in2,
87 unsigned char *out, unsigned int size)
88{
89 int i;
90
91 for (i=0;i<size;i++)
92 out[i] = in1[i] ^ in2[i];
93
94}
95/*
96 * Returns DEFAULT_BLK_SZ bytes of random data per call
97 * returns 0 if generation succeded, <0 if something went wrong
98 */
99static int _get_more_prng_bytes(struct prng_context *ctx)
100{
101 int i;
102 struct blkcipher_desc desc;
103 struct scatterlist sg_in, sg_out;
104 int ret;
105 unsigned char tmp[DEFAULT_BLK_SZ];
106
107 desc.tfm = ctx->tfm;
108 desc.flags = 0;
109
110
111 dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
112
113 hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
114 hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
115 hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
116
117 /*
118 * This algorithm is a 3 stage state machine
119 */
120	for (i = 0; i < 3; i++) {
121
122 desc.tfm = ctx->tfm;
123 desc.flags = 0;
124 switch (i) {
125 case 0:
126 /*
127 * Start by encrypting the counter value
128 * This gives us an intermediate value I
129 */
130 memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
131 sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
132 hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
133 break;
134 case 1:
135
136 /*
137			 * Next, xor I with our secret vector V and
138			 * encrypt that result to obtain the pseudo
139			 * random data which we output
140 */
141 xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
142 sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
143 hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
144 break;
145 case 2:
146 /*
147			 * First check that we didn't produce the same
148			 * random data as we did on the last call
149 */
150 if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
151 printk(KERN_ERR "ctx %p Failed repetition check!\n",
152 ctx);
153 ctx->flags |= PRNG_NEED_RESET;
154 return -1;
155 }
156 memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
157
158 /*
159 * Lastly xor the random data with I
160 * and encrypt that to obtain a new secret vector V
161 */
162 xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
163 sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
164 hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
165 break;
166 }
167
168 /* Initialize our input buffer */
169 sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
170
171 /* do the encryption */
172 ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
173
174 /* And check the result */
175 if (ret) {
176			dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n", ctx);
177 ctx->rand_data_valid = DEFAULT_BLK_SZ;
178 return -1;
179 }
180
181 }
182
183 /*
184 * Now update our DT value
185 */
186	for (i = DEFAULT_BLK_SZ - 1; i > 0; i--) {
187 ctx->DT[i] = ctx->DT[i-1];
188 }
189 ctx->DT[0] += 1;
190
191	dbgprint("Returning new block for context %p\n", ctx);
192 ctx->rand_data_valid = 0;
193
194 hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
195 hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
196 hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
197 hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
198
199 return 0;
200}
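Taken together, the three stages implement one ANSI X9.31 A.2.4 step: I = E_K(DT), R = E_K(I xor V), V' = E_K(R xor I), where E_K here is the rfc3686(ctr(aes)) transform keyed at reset time. The stand-alone C sketch below restates that step outside the kernel; encrypt_block() is a hypothetical placeholder for the keyed block cipher and is not part of this patch:

	#define BLK 16

	/* placeholder cipher stub so the sketch is self-contained; a real
	 * X9.31 generator would use AES under a secret key here */
	static void encrypt_block(unsigned char *out, const unsigned char *in)
	{
		int i;

		for (i = 0; i < BLK; i++)
			out[i] = in[i] ^ (unsigned char)(0xA5 + i);
	}

	/* one X9.31 step: reads the counter DT, updates the seed V,
	 * and writes one block of output into R */
	static void x931_step(const unsigned char *DT, unsigned char *V,
			      unsigned char *R)
	{
		unsigned char I[BLK], tmp[BLK];
		int i;

		encrypt_block(I, DT);		/* stage 0: I = E_K(DT) */
		for (i = 0; i < BLK; i++)
			tmp[i] = I[i] ^ V[i];
		encrypt_block(R, tmp);		/* stage 1: R = E_K(I ^ V) */
		for (i = 0; i < BLK; i++)
			tmp[i] = R[i] ^ I[i];
		encrypt_block(V, tmp);		/* stage 2: V = E_K(R ^ I) */
	}

The stage 2 repetition check above (comparing rand_data against last_rand_data) is the continuous self-test required of such generators; the sketch omits it for brevity.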
201
202/* Our exported functions */
203int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
204{
205 unsigned long flags;
206 unsigned char *ptr = buf;
207 unsigned int byte_count = (unsigned int)nbytes;
208 int err;
209
210
211 if (nbytes < 0)
212 return -EINVAL;
213
214 spin_lock_irqsave(&ctx->prng_lock, flags);
215
216 err = -EFAULT;
217 if (ctx->flags & PRNG_NEED_RESET)
218 goto done;
219
220 /*
221 * If the FIXED_SIZE flag is on, only return whole blocks of
222 * pseudo random data
223 */
224 err = -EINVAL;
225 if (ctx->flags & PRNG_FIXED_SIZE) {
226 if (nbytes < DEFAULT_BLK_SZ)
227 goto done;
228 byte_count = DEFAULT_BLK_SZ;
229 }
230
231 err = byte_count;
232
233	dbgprint(KERN_CRIT "getting %d random bytes for context %p\n", byte_count, ctx);
234
235
236remainder:
237 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
238 if (_get_more_prng_bytes(ctx) < 0) {
239 memset(buf, 0, nbytes);
240 err = -EFAULT;
241 goto done;
242 }
243 }
244
245 /*
246 * Copy up to the next whole block size
247 */
248 if (byte_count < DEFAULT_BLK_SZ) {
249		for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
250 *ptr = ctx->rand_data[ctx->rand_data_valid];
251 ptr++;
252 byte_count--;
253 if (byte_count == 0)
254 goto done;
255 }
256 }
257
258 /*
259 * Now copy whole blocks
260 */
261	for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
262 if (_get_more_prng_bytes(ctx) < 0) {
263 memset(buf, 0, nbytes);
264			err = -EFAULT;
265 goto done;
266 }
267 memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
268 ctx->rand_data_valid += DEFAULT_BLK_SZ;
269 ptr += DEFAULT_BLK_SZ;
270 }
271
272 /*
273 * Now copy any extra partial data
274 */
275 if (byte_count)
276 goto remainder;
277
278done:
279 spin_unlock_irqrestore(&ctx->prng_lock, flags);
280	dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", err, ctx);
281 return err;
282}
283EXPORT_SYMBOL_GPL(get_prng_bytes);
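Walking through the code above: a 20-byte request on a freshly reset context (rand_data_valid == DEFAULT_BLK_SZ) generates three blocks. The check at the remainder label produces one block, but since 20 >= DEFAULT_BLK_SZ the whole-block loop immediately generates and copies a second, marking it consumed (rand_data_valid goes back to 16). The leftover byte_count of 4 jumps back to remainder, which generates a third block and hands out its first 4 bytes, leaving rand_data_valid at 4 so the remaining 12 bytes can satisfy the start of the next request. On success the return value is the byte count, here 20.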
284
285struct prng_context *alloc_prng_context(void)
286{
287	struct prng_context *ctx = kzalloc(sizeof(struct prng_context), GFP_KERNEL);
288	if (!ctx)
289		return NULL;
290	spin_lock_init(&ctx->prng_lock);
291	if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
292		kfree(ctx);
293		ctx = NULL;
294	}
295
296	dbgprint(KERN_CRIT "returning context %p\n", ctx);
297 return ctx;
298}
299
300EXPORT_SYMBOL_GPL(alloc_prng_context);
301
302void free_prng_context(struct prng_context *ctx)
303{
304 crypto_free_blkcipher(ctx->tfm);
305 kfree(ctx);
306}
307EXPORT_SYMBOL_GPL(free_prng_context);
308
309int reset_prng_context(struct prng_context *ctx,
310 unsigned char *key, unsigned char *iv,
311 unsigned char *V, unsigned char *DT)
312{
313 int ret;
314 int iv_len;
315 int rc = -EFAULT;
316
317 spin_lock(&ctx->prng_lock);
318 ctx->flags |= PRNG_NEED_RESET;
319
320	/* the context borrows the caller's key/iv storage; no copy is made */
321	if (key)
322		ctx->prng_key = (char *)key;
323	else
324		ctx->prng_key = DEFAULT_PRNG_KEY;
325	if (iv)
326		ctx->prng_iv = (char *)iv;
327	else
328		ctx->prng_iv = DEFAULT_PRNG_IV;
329
330	if (V)
331		memcpy(ctx->V, V, DEFAULT_BLK_SZ);
332	else
333		memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
334
335	if (DT)
336		memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
337	else
338		memset(ctx->DT, 0, DEFAULT_BLK_SZ);
339
340	memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
341	memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
342
343 if (ctx->tfm)
344 crypto_free_blkcipher(ctx->tfm);
345
346	ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))", 0, 0);
347	if (IS_ERR(ctx->tfm)) {
348		dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n", ctx);
349		goto out;
350	}
351
352 ctx->rand_data_valid = DEFAULT_BLK_SZ;
353
354 ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
355 if (ret) {
356 dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
357 crypto_blkcipher_get_flags(ctx->tfm));
358 crypto_free_blkcipher(ctx->tfm);
359 goto out;
360 }
361
362 iv_len = crypto_blkcipher_ivsize(ctx->tfm);
363 if (iv_len) {
364 crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
365 }
366 rc = 0;
367 ctx->flags &= ~PRNG_NEED_RESET;
368out:
369 spin_unlock(&ctx->prng_lock);
370
371 return rc;
372
373}
374EXPORT_SYMBOL_GPL(reset_prng_context);
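A minimal in-kernel consumer of this API might look like the sketch below; the function name and the caller-owned dt/v buffers are illustrative, not part of the patch, and passing NULL for key and iv selects the built-in defaults:

	#include <linux/errno.h>
	#include "prng.h"

	/* hypothetical caller: seed with caller-owned DT/V vectors,
	 * draw 64 bytes, then tear the context down */
	static int prng_example(unsigned char *dt, unsigned char *v)
	{
		struct prng_context *ctx;
		char out[64];
		int n;

		ctx = alloc_prng_context();	/* resets with default key/iv/V/DT */
		if (!ctx)
			return -ENOMEM;
		if (reset_prng_context(ctx, NULL, NULL, v, dt)) {
			free_prng_context(ctx);
			return -EFAULT;
		}
		n = get_prng_bytes(out, sizeof(out), ctx);
		free_prng_context(ctx);
		return n < 0 ? n : 0;
	}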
375
376/* Module initialization */
377static int __init prng_mod_init(void)
378{
379
380#if TEST_PRNG_ON_START
381 int i;
382 unsigned char tmpbuf[DEFAULT_BLK_SZ];
383
384 struct prng_context *ctx = alloc_prng_context();
385 if (ctx == NULL)
386		return -ENOMEM;
387	for (i = 0; i < 16; i++) {
388 if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
389 free_prng_context(ctx);
390 return -EFAULT;
391 }
392 }
393 free_prng_context(ctx);
394#endif
395
396 return 0;
397}
398
399static void __exit prng_mod_fini(void)
400{
401 return;
402}
403
404MODULE_LICENSE("GPL");
405MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
406MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
407module_param(dbg, int, 0);
408MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
409module_init(prng_mod_init);
410module_exit(prng_mod_fini);
diff --git a/crypto/prng.h b/crypto/prng.h
new file mode 100644
index 000000000000..1ac9be5009b7
--- /dev/null
+++ b/crypto/prng.h
@@ -0,0 +1,27 @@
1/*
2 * PRNG: Pseudo Random Number Generator
3 *
4 * (C) Neil Horman <nhorman@tuxdriver.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 *
12 */
13
14#ifndef _PRNG_H_
15#define _PRNG_H_
16struct prng_context;
17
18int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
19struct prng_context *alloc_prng_context(void);
20int reset_prng_context(struct prng_context *ctx,
21 unsigned char *key, unsigned char *iv,
22 unsigned char *V,
23 unsigned char *DT);
24void free_prng_context(struct prng_context *ctx);
25
26#endif
27
diff --git a/crypto/ripemd.h b/crypto/ripemd.h
new file mode 100644
index 000000000000..c57a2d4ce8d9
--- /dev/null
+++ b/crypto/ripemd.h
@@ -0,0 +1,43 @@
1/*
2 * Common values for RIPEMD algorithms
3 */
4
5#ifndef _CRYPTO_RMD_H
6#define _CRYPTO_RMD_H
7
8#define RMD128_DIGEST_SIZE 16
9#define RMD128_BLOCK_SIZE 64
10
11#define RMD160_DIGEST_SIZE 20
12#define RMD160_BLOCK_SIZE 64
13
14#define RMD256_DIGEST_SIZE 32
15#define RMD256_BLOCK_SIZE 64
16
17#define RMD320_DIGEST_SIZE 40
18#define RMD320_BLOCK_SIZE 64
19
20/* initial values */
21#define RMD_H0 0x67452301UL
22#define RMD_H1 0xefcdab89UL
23#define RMD_H2 0x98badcfeUL
24#define RMD_H3 0x10325476UL
25#define RMD_H4 0xc3d2e1f0UL
26#define RMD_H5 0x76543210UL
27#define RMD_H6 0xfedcba98UL
28#define RMD_H7 0x89abcdefUL
29#define RMD_H8 0x01234567UL
30#define RMD_H9 0x3c2d1e0fUL
31
32/* constants */
33#define RMD_K1 0x00000000UL
34#define RMD_K2 0x5a827999UL
35#define RMD_K3 0x6ed9eba1UL
36#define RMD_K4 0x8f1bbcdcUL
37#define RMD_K5 0xa953fd4eUL
38#define RMD_K6 0x50a28be6UL
39#define RMD_K7 0x5c4dd124UL
40#define RMD_K8 0x6d703ef3UL
41#define RMD_K9 0x7a6d76e9UL
42
43#endif
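These constants are not arbitrary: RMD_K1 is simply zero for the unkeyed rounds, while per the RIPEMD specification the left-lane constants RMD_K2..RMD_K5 are the integer parts of 2^30*sqrt(p) and the right-lane constants RMD_K6..RMD_K9 the integer parts of 2^30*cbrt(p), for p in {2, 3, 5, 7}. A throwaway user-space check (not part of the patch):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		const double p[] = { 2, 3, 5, 7 };
		int i;

		/* prints 5a827999 6ed9eba1 8f1bbcdc a953fd4e in the first
		 * column and 50a28be6 5c4dd124 6d703ef3 7a6d76e9 in the
		 * second, matching the #defines above */
		for (i = 0; i < 4; i++)
			printf("%08lx %08lx\n",
			       (unsigned long)ldexp(sqrt(p[i]), 30),
			       (unsigned long)ldexp(cbrt(p[i]), 30));
		return 0;
	}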
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
new file mode 100644
index 000000000000..5de6fa2a76fb
--- /dev/null
+++ b/crypto/rmd128.c
@@ -0,0 +1,325 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd128_ctx {
27 u64 byte_count;
28 u32 state[4];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define KK1 RMD_K6
37#define KK2 RMD_K7
38#define KK3 RMD_K8
39#define KK4 RMD_K1
40
41#define F1(x, y, z) (x ^ y ^ z) /* XOR */
42#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
43#define F3(x, y, z) ((x | ~y) ^ z)
44#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
45
46#define ROUND(a, b, c, d, f, k, x, s) { \
47 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
48 (a) = rol32((a), (s)); \
49}
50
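Each ROUND() invocation is one step of the compression function; for example, ROUND(aa, bb, cc, dd, F1, K1, in[0], 11) expands, after argument substitution, to:

	aa += F1(bb, cc, dd) + le32_to_cpup(&in[0]) + K1;
	aa = rol32(aa, 11);

Rotating the accumulator role through aa/dd/cc/bb across consecutive calls is what gives each of the four state words its turn as the target.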
51static void rmd128_transform(u32 *state, const __le32 *in)
52{
53 u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
54
55 /* Initialize left lane */
56 aa = state[0];
57 bb = state[1];
58 cc = state[2];
59 dd = state[3];
60
61 /* Initialize right lane */
62 aaa = state[0];
63 bbb = state[1];
64 ccc = state[2];
65 ddd = state[3];
66
67 /* round 1: left lane */
68 ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
69 ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
70 ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
71 ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
72 ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
73 ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
74 ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
75 ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
76 ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
77 ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
78 ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
79 ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
80 ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
81 ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
82 ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
83 ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
84
85 /* round 2: left lane */
86 ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
87 ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
88 ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
89 ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
90 ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
91 ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
92 ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
93 ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
94 ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
95 ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
96 ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
97 ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
98 ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
99 ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
100 ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
101 ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
102
103 /* round 3: left lane */
104 ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
105 ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
106 ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
107 ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
108 ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
109 ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
110 ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
111 ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
112 ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
113 ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
114 ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
115 ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
116 ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
117 ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
118 ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
119 ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
120
121 /* round 4: left lane */
122 ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
123 ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
124 ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
125 ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
126 ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
127 ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
128 ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
129 ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
130 ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
131 ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
132 ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
133 ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
134 ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
135 ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
136 ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
137 ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
138
139 /* round 1: right lane */
140 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
141 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
142 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
143 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
144 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
145 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
146 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
147 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
148 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
149 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
150 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
151 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
152 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
153 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
154 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
155 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
156
157 /* round 2: right lane */
158 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
159 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
160 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
161 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
162 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
163 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
164 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
165 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
166 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
167 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
168 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
169 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
170 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
171 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
172 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
173 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
174
175 /* round 3: right lane */
176 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
177 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
178 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
179 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
180 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
181 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
182 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
183 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
184 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
185 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
186 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
187 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
188 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
189 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
190 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
191 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
192
193 /* round 4: right lane */
194 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
195 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
196 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
197 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
198 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
199 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
200 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
201 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
202 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
203 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
204 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
205 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
206 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
207 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
208 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
209 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
210
211 /* combine results */
212 ddd += cc + state[1]; /* final result for state[0] */
213 state[1] = state[2] + dd + aaa;
214 state[2] = state[3] + aa + bbb;
215 state[3] = state[0] + bb + ccc;
216 state[0] = ddd;
217
218 return;
219}
220
221static void rmd128_init(struct crypto_tfm *tfm)
222{
223 struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
224
225 rctx->byte_count = 0;
226
227 rctx->state[0] = RMD_H0;
228 rctx->state[1] = RMD_H1;
229 rctx->state[2] = RMD_H2;
230 rctx->state[3] = RMD_H3;
231
232 memset(rctx->buffer, 0, sizeof(rctx->buffer));
233}
234
235static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
236 unsigned int len)
237{
238 struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
239 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
240
241 rctx->byte_count += len;
242
243 /* Enough space in buffer? If so copy and we're done */
244 if (avail > len) {
245 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
246 data, len);
247 return;
248 }
249
250 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
251 data, avail);
252
253 rmd128_transform(rctx->state, rctx->buffer);
254 data += avail;
255 len -= avail;
256
257 while (len >= sizeof(rctx->buffer)) {
258 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
259 rmd128_transform(rctx->state, rctx->buffer);
260 data += sizeof(rctx->buffer);
261 len -= sizeof(rctx->buffer);
262 }
263
264 memcpy(rctx->buffer, data, len);
265}
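As a concrete trace of the buffering: after a 10-byte update, byte_count & 0x3f is 10, so a following 70-byte update sees avail == 54, fills the buffer's remaining 54 bytes, runs one transform, and, with only 16 bytes left (less than a full 64-byte block), stashes them at the start of the buffer for a later call.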
266
267/* Add padding and return the message digest. */
268static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
269{
270 struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
271 u32 i, index, padlen;
272 __le64 bits;
273 __le32 *dst = (__le32 *)out;
274 static const u8 padding[64] = { 0x80, };
275
276 bits = cpu_to_le64(rctx->byte_count << 3);
277
278 /* Pad out to 56 mod 64 */
279 index = rctx->byte_count & 0x3f;
280 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
281 rmd128_update(tfm, padding, padlen);
282
283 /* Append length */
284 rmd128_update(tfm, (const u8 *)&bits, sizeof(bits));
285
286 /* Store state in digest */
287 for (i = 0; i < 4; i++)
288 dst[i] = cpu_to_le32p(&rctx->state[i]);
289
290 /* Wipe context */
291 memset(rctx, 0, sizeof(*rctx));
292}
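Worked through: a 3-byte message has index == 3, so padlen == 53; 3 + 53 = 56 bytes, and the 8-byte little-endian bit count completes one 64-byte block. A 60-byte message takes the other branch, padlen == (64+56) - 60 == 60, and the padded message plus length spills into a second block (60 + 60 + 8 == 128). Note that the bit count is captured before the padding updates run, since rmd128_update keeps growing byte_count.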
293
294static struct crypto_alg alg = {
295 .cra_name = "rmd128",
296 .cra_driver_name = "rmd128",
297 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
298 .cra_blocksize = RMD128_BLOCK_SIZE,
299 .cra_ctxsize = sizeof(struct rmd128_ctx),
300 .cra_module = THIS_MODULE,
301 .cra_list = LIST_HEAD_INIT(alg.cra_list),
302 .cra_u = { .digest = {
303 .dia_digestsize = RMD128_DIGEST_SIZE,
304 .dia_init = rmd128_init,
305 .dia_update = rmd128_update,
306 .dia_final = rmd128_final } }
307};
308
309static int __init rmd128_mod_init(void)
310{
311 return crypto_register_alg(&alg);
312}
313
314static void __exit rmd128_mod_fini(void)
315{
316 crypto_unregister_alg(&alg);
317}
318
319module_init(rmd128_mod_init);
320module_exit(rmd128_mod_fini);
321
322MODULE_LICENSE("GPL");
323MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
324
325MODULE_ALIAS("rmd128");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
new file mode 100644
index 000000000000..f001ec775e1f
--- /dev/null
+++ b/crypto/rmd160.c
@@ -0,0 +1,369 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd160_ctx {
27 u64 byte_count;
28 u32 state[5];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define K5 RMD_K5
37#define KK1 RMD_K6
38#define KK2 RMD_K7
39#define KK3 RMD_K8
40#define KK4 RMD_K9
41#define KK5 RMD_K1
42
43#define F1(x, y, z) (x ^ y ^ z) /* XOR */
44#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
45#define F3(x, y, z) ((x | ~y) ^ z)
46#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
47#define F5(x, y, z) (x ^ (y | ~z))
48
49#define ROUND(a, b, c, d, e, f, k, x, s) { \
50 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
51 (a) = rol32((a), (s)) + (e); \
52 (c) = rol32((c), 10); \
53}
54
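This five-word variant differs from the RIPEMD-128 macro in two ways, both visible in the expansion of, say, ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11):

	aa += F1(bb, cc, dd) + le32_to_cpup(&in[0]) + K1;
	aa = rol32(aa, 11) + ee;
	cc = rol32(cc, 10);

the fifth state word is added in after the data-dependent rotate, and a fixed 10-bit rotate of the third word is applied on every step.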
55static void rmd160_transform(u32 *state, const __le32 *in)
56{
57 u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
58
59 /* Initialize left lane */
60 aa = state[0];
61 bb = state[1];
62 cc = state[2];
63 dd = state[3];
64 ee = state[4];
65
66 /* Initialize right lane */
67 aaa = state[0];
68 bbb = state[1];
69 ccc = state[2];
70 ddd = state[3];
71 eee = state[4];
72
73 /* round 1: left lane */
74 ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
75 ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
76 ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
77 ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
78 ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
79 ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
80 ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
81 ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
82 ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
83 ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
84 ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
85 ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
86 ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
87 ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
88 ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
89 ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
90
91	/* round 2: left lane */
92 ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
93 ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
94 ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
95 ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
96 ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
97 ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
98 ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
99 ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
100 ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
101 ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
102 ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
103 ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
104 ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
105 ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
106 ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
107 ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
108
109	/* round 3: left lane */
110 ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
111 ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
112 ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
113 ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
114 ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
115 ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
116 ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
117 ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
118 ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
119 ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
120 ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
121 ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
122 ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
123 ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
124 ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
125 ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
126
127	/* round 4: left lane */
128 ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
129 ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
130 ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
131 ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
132 ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
133 ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
134 ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
135 ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
136 ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
137 ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
138 ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
139 ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
140 ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
141 ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
142 ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
143 ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
144
145	/* round 5: left lane */
146 ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
147 ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
148 ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
149 ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
150 ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
151 ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
152 ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
153 ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
154 ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
155 ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
156 ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
157 ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
158 ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
159 ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
160 ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
161 ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
162
163 /* round 1: right lane */
164 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
165 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
166 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
167 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
168 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
169 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
170 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
171 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
172 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
173 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
174 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
175 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
176 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
177 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
178 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
179 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
180
181 /* round 2: right lane */
182 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
183 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
184 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
185 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
186 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
187 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
188 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
189 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
190 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
191 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
192 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
193 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
194 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
195 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
196 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
197 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
198
199 /* round 3: right lane */
200 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
201 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
202 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
203 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
204 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
205 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
206 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
207 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
208 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
209 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
210 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
211 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
212 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
213 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
214 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
215 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
216
217 /* round 4: right lane */
218 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
219 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
220 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
221 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
222 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
223 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
224 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
225 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
226 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
227 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
228 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
229 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
230 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
231 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
232 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
233 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
234
235 /* round 5: right lane */
236 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
237 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
238 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
239 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
240 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
241 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
242 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
243 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
244 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
245 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
246 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
247 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
248 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
249 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
250 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
251 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
252
253 /* combine results */
254 ddd += cc + state[1]; /* final result for state[0] */
255 state[1] = state[2] + dd + eee;
256 state[2] = state[3] + ee + aaa;
257 state[3] = state[4] + aa + bbb;
258 state[4] = state[0] + bb + ccc;
259 state[0] = ddd;
260
261 return;
262}
263
264static void rmd160_init(struct crypto_tfm *tfm)
265{
266 struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
267
268 rctx->byte_count = 0;
269
270 rctx->state[0] = RMD_H0;
271 rctx->state[1] = RMD_H1;
272 rctx->state[2] = RMD_H2;
273 rctx->state[3] = RMD_H3;
274 rctx->state[4] = RMD_H4;
275
276 memset(rctx->buffer, 0, sizeof(rctx->buffer));
277}
278
279static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
280 unsigned int len)
281{
282 struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
283 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
284
285 rctx->byte_count += len;
286
287 /* Enough space in buffer? If so copy and we're done */
288 if (avail > len) {
289 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
290 data, len);
291 return;
292 }
293
294 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
295 data, avail);
296
297 rmd160_transform(rctx->state, rctx->buffer);
298 data += avail;
299 len -= avail;
300
301 while (len >= sizeof(rctx->buffer)) {
302 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
303 rmd160_transform(rctx->state, rctx->buffer);
304 data += sizeof(rctx->buffer);
305 len -= sizeof(rctx->buffer);
306 }
307
308 memcpy(rctx->buffer, data, len);
309}
310
311/* Add padding and return the message digest. */
312static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
313{
314 struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
315 u32 i, index, padlen;
316 __le64 bits;
317 __le32 *dst = (__le32 *)out;
318 static const u8 padding[64] = { 0x80, };
319
320 bits = cpu_to_le64(rctx->byte_count << 3);
321
322 /* Pad out to 56 mod 64 */
323 index = rctx->byte_count & 0x3f;
324 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
325 rmd160_update(tfm, padding, padlen);
326
327 /* Append length */
328 rmd160_update(tfm, (const u8 *)&bits, sizeof(bits));
329
330 /* Store state in digest */
331 for (i = 0; i < 5; i++)
332 dst[i] = cpu_to_le32p(&rctx->state[i]);
333
334 /* Wipe context */
335 memset(rctx, 0, sizeof(*rctx));
336}
337
338static struct crypto_alg alg = {
339 .cra_name = "rmd160",
340 .cra_driver_name = "rmd160",
341 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
342 .cra_blocksize = RMD160_BLOCK_SIZE,
343 .cra_ctxsize = sizeof(struct rmd160_ctx),
344 .cra_module = THIS_MODULE,
345 .cra_list = LIST_HEAD_INIT(alg.cra_list),
346 .cra_u = { .digest = {
347 .dia_digestsize = RMD160_DIGEST_SIZE,
348 .dia_init = rmd160_init,
349 .dia_update = rmd160_update,
350 .dia_final = rmd160_final } }
351};
352
353static int __init rmd160_mod_init(void)
354{
355 return crypto_register_alg(&alg);
356}
357
358static void __exit rmd160_mod_fini(void)
359{
360 crypto_unregister_alg(&alg);
361}
362
363module_init(rmd160_mod_init);
364module_exit(rmd160_mod_fini);
365
366MODULE_LICENSE("GPL");
367MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
368
369MODULE_ALIAS("rmd160");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
new file mode 100644
index 000000000000..e3de5b4cb47f
--- /dev/null
+++ b/crypto/rmd256.c
@@ -0,0 +1,344 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd256_ctx {
27 u64 byte_count;
28 u32 state[8];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define KK1 RMD_K6
37#define KK2 RMD_K7
38#define KK3 RMD_K8
39#define KK4 RMD_K1
40
41#define F1(x, y, z) (x ^ y ^ z) /* XOR */
42#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
43#define F3(x, y, z) ((x | ~y) ^ z)
44#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
45
46#define ROUND(a, b, c, d, f, k, x, s) { \
47 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
48 (a) = rol32((a), (s)); \
49}
50
51static void rmd256_transform(u32 *state, const __le32 *in)
52{
53 u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
54
55 /* Initialize left lane */
56 aa = state[0];
57 bb = state[1];
58 cc = state[2];
59 dd = state[3];
60
61 /* Initialize right lane */
62 aaa = state[4];
63 bbb = state[5];
64 ccc = state[6];
65 ddd = state[7];
66
67 /* round 1: left lane */
68 ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
69 ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
70 ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
71 ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
72 ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
73 ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
74 ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
75 ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
76 ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
77 ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
78 ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
79 ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
80 ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
81 ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
82 ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
83 ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
84
85 /* round 1: right lane */
86 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
87 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
88 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
89 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
90 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
91 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
92 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
93 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
94 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
95 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
96 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
97 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
98 ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
99 ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
100 ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
101 ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
102
103 /* Swap contents of "a" registers */
104 tmp = aa; aa = aaa; aaa = tmp;
105
106 /* round 2: left lane */
107 ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
108 ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
109 ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
110 ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
111 ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
112 ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
113 ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
114 ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
115 ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
116 ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
117 ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
118 ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
119 ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
120 ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
121 ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
122 ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
123
124 /* round 2: right lane */
125 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
126 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
127 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
128 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
129 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
130 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
131 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
132 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
133 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
134 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
135 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
136 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
137 ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
138 ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
139 ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
140 ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
141
142 /* Swap contents of "b" registers */
143 tmp = bb; bb = bbb; bbb = tmp;
144
145 /* round 3: left lane */
146 ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
147 ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
148 ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
149 ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
150 ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
151 ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
152 ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
153 ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
154 ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
155 ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
156 ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
157 ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
158 ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
159 ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
160 ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
161 ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
162
163 /* round 3: right lane */
164 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
165 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
166 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
167 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
168 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
169 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
170 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
171 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
172 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
173 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
174 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
175 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
176 ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
177 ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
178 ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
179 ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
180
181 /* Swap contents of "c" registers */
182 tmp = cc; cc = ccc; ccc = tmp;
183
184 /* round 4: left lane */
185 ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
186 ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
187 ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
188 ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
189 ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
190 ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
191 ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
192 ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
193 ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
194 ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
195 ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
196 ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
197 ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
198 ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
199 ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
200 ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
201
202 /* round 4: right lane */
203 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
204 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
205 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
206 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
207 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
208 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
209 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
210 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
211 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
212 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
213 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
214 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
215 ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
216 ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
217 ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
218 ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
219
220 /* Swap contents of "d" registers */
221 tmp = dd; dd = ddd; ddd = tmp;
222
223 /* combine results */
224 state[0] += aa;
225 state[1] += bb;
226 state[2] += cc;
227 state[3] += dd;
228 state[4] += aaa;
229 state[5] += bbb;
230 state[6] += ccc;
231 state[7] += ddd;
232
233 return;
234}
235
236static void rmd256_init(struct crypto_tfm *tfm)
237{
238 struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
239
240 rctx->byte_count = 0;
241
242 rctx->state[0] = RMD_H0;
243 rctx->state[1] = RMD_H1;
244 rctx->state[2] = RMD_H2;
245 rctx->state[3] = RMD_H3;
246 rctx->state[4] = RMD_H5;
247 rctx->state[5] = RMD_H6;
248 rctx->state[6] = RMD_H7;
249 rctx->state[7] = RMD_H8;
250
251 memset(rctx->buffer, 0, sizeof(rctx->buffer));
252}
253
254static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
255 unsigned int len)
256{
257 struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
258 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
259
260 rctx->byte_count += len;
261
262 /* Enough space in buffer? If so copy and we're done */
263 if (avail > len) {
264 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
265 data, len);
266 return;
267 }
268
269 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
270 data, avail);
271
272 rmd256_transform(rctx->state, rctx->buffer);
273 data += avail;
274 len -= avail;
275
276 while (len >= sizeof(rctx->buffer)) {
277 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
278 rmd256_transform(rctx->state, rctx->buffer);
279 data += sizeof(rctx->buffer);
280 len -= sizeof(rctx->buffer);
281 }
282
283 memcpy(rctx->buffer, data, len);
284}
285
286/* Add padding and return the message digest. */
287static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
288{
289 struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
290 u32 i, index, padlen;
291 __le64 bits;
292 __le32 *dst = (__le32 *)out;
293 static const u8 padding[64] = { 0x80, };
294
295 bits = cpu_to_le64(rctx->byte_count << 3);
296
297 /* Pad out to 56 mod 64 */
298 index = rctx->byte_count & 0x3f;
299 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
300 rmd256_update(tfm, padding, padlen);
301
302 /* Append length */
303 rmd256_update(tfm, (const u8 *)&bits, sizeof(bits));
304
305 /* Store state in digest */
306 for (i = 0; i < 8; i++)
307 dst[i] = cpu_to_le32p(&rctx->state[i]);
308
309 /* Wipe context */
310 memset(rctx, 0, sizeof(*rctx));
311}
312
313static struct crypto_alg alg = {
314 .cra_name = "rmd256",
315 .cra_driver_name = "rmd256",
316 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
317 .cra_blocksize = RMD256_BLOCK_SIZE,
318 .cra_ctxsize = sizeof(struct rmd256_ctx),
319 .cra_module = THIS_MODULE,
320 .cra_list = LIST_HEAD_INIT(alg.cra_list),
321 .cra_u = { .digest = {
322 .dia_digestsize = RMD256_DIGEST_SIZE,
323 .dia_init = rmd256_init,
324 .dia_update = rmd256_update,
325 .dia_final = rmd256_final } }
326};
327
328static int __init rmd256_mod_init(void)
329{
330 return crypto_register_alg(&alg);
331}
332
333static void __exit rmd256_mod_fini(void)
334{
335 crypto_unregister_alg(&alg);
336}
337
338module_init(rmd256_mod_init);
339module_exit(rmd256_mod_fini);
340
341MODULE_LICENSE("GPL");
342MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
343
344MODULE_ALIAS("rmd256");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
new file mode 100644
index 000000000000..b143d66e42c8
--- /dev/null
+++ b/crypto/rmd320.c
@@ -0,0 +1,393 @@
1/*
2 * Cryptographic API.
3 *
4 * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest.
5 *
6 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
7 *
8 * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/crypto.h>
20#include <linux/cryptohash.h>
21#include <linux/types.h>
22#include <asm/byteorder.h>
23
24#include "ripemd.h"
25
26struct rmd320_ctx {
27 u64 byte_count;
28 u32 state[10];
29 __le32 buffer[16];
30};
31
32#define K1 RMD_K1
33#define K2 RMD_K2
34#define K3 RMD_K3
35#define K4 RMD_K4
36#define K5 RMD_K5
37#define KK1 RMD_K6
38#define KK2 RMD_K7
39#define KK3 RMD_K8
40#define KK4 RMD_K9
41#define KK5 RMD_K1
42
43#define F1(x, y, z) (x ^ y ^ z) /* XOR */
44#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
45#define F3(x, y, z) ((x | ~y) ^ z)
46#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
47#define F5(x, y, z) (x ^ (y | ~z))
48
49#define ROUND(a, b, c, d, e, f, k, x, s) { \
50 (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
51 (a) = rol32((a), (s)) + (e); \
52 (c) = rol32((c), 10); \
53}
54
55static void rmd320_transform(u32 *state, const __le32 *in)
56{
57 u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
58
59 /* Initialize left lane */
60 aa = state[0];
61 bb = state[1];
62 cc = state[2];
63 dd = state[3];
64 ee = state[4];
65
66 /* Initialize right lane */
67 aaa = state[5];
68 bbb = state[6];
69 ccc = state[7];
70 ddd = state[8];
71 eee = state[9];
72
73 /* round 1: left lane */
74 ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
75 ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
76 ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
77 ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
78 ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
79 ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
80 ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
81 ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
82 ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
83 ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
84 ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
85 ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
86 ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
87 ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
88 ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
89 ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
90
91 /* round 1: right lane */
92 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
93 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
94 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
95 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
96 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
97 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
98 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
99 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
100 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
101 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
102 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
103 ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
104 ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
105 ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
106 ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
107 ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
108
109 /* Swap contents of "a" registers */
110 tmp = aa; aa = aaa; aaa = tmp;
111
112	/* round 2: left lane */
113 ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
114 ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
115 ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
116 ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
117 ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
118 ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
119 ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
120 ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
121 ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
122 ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
123 ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
124 ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
125 ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
126 ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
127 ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
128 ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
129
130 /* round 2: right lane */
131 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
132 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
133 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
134 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
135 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
136 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
137 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
138 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
139 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
140 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
141 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
142 ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
143 ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
144 ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
145 ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
146 ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
147
148 /* Swap contents of "b" registers */
149 tmp = bb; bb = bbb; bbb = tmp;
150
151	/* round 3: left lane */
152 ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
153 ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
154 ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
155 ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
156 ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
157 ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
158 ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
159 ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
160 ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
161 ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
162 ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
163 ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
164 ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
165 ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
166 ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
167 ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
168
169 /* round 3: right lane */
170 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
171 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
172 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
173 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
174 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
175 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
176 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
177 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
178 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
179 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
180 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
181 ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
182 ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
183 ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
184 ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
185 ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
186
187 /* Swap contents of "c" registers */
188 tmp = cc; cc = ccc; ccc = tmp;
189
190	/* round 4: left lane */
191 ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
192 ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
193 ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
194 ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
195 ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
196 ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
197 ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
198 ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
199 ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
200 ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
201 ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
202 ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
203 ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
204 ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
205 ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
206 ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
207
208 /* round 4: right lane */
209 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
210 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
211 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
212 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
213 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
214 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
215 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
216 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
217 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
218 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
219 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
220 ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
221 ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
222 ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
223 ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
224 ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
225
226 /* Swap contents of "d" registers */
227 tmp = dd; dd = ddd; ddd = tmp;
228
229	/* round 5: left lane */
230 ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
231 ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
232 ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
233 ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
234 ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
235 ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
236 ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
237 ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
238 ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
239 ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
240 ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
241 ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
242 ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
243 ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
244 ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
245 ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
246
247 /* round 5: right lane */
248 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
249 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
250 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
251 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
252 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
253 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
254 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
255 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
256 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
257 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
258 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
259 ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
260 ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
261 ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
262 ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
263 ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
264
265 /* Swap contents of "e" registers */
266 tmp = ee; ee = eee; eee = tmp;
267
268 /* combine results */
269 state[0] += aa;
270 state[1] += bb;
271 state[2] += cc;
272 state[3] += dd;
273 state[4] += ee;
274 state[5] += aaa;
275 state[6] += bbb;
276 state[7] += ccc;
277 state[8] += ddd;
278 state[9] += eee;
279
280 return;
281}
282
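
For reference, both lanes are driven by the same ROUND macro, defined near the top of rmd320.c and not visible in this hunk. A sketch of its expected shape, matching the sibling rmd160.c implementation (treat this as illustrative, not a verbatim copy of the patch):

	#define ROUND(a, b, c, d, e, f, k, x, s) { \
		(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
		(a) = rol32((a), (s)) + (e); \
		(c) = rol32((c), 10); \
	}
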
283static void rmd320_init(struct crypto_tfm *tfm)
284{
285 struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
286
287 rctx->byte_count = 0;
288
289 rctx->state[0] = RMD_H0;
290 rctx->state[1] = RMD_H1;
291 rctx->state[2] = RMD_H2;
292 rctx->state[3] = RMD_H3;
293 rctx->state[4] = RMD_H4;
294 rctx->state[5] = RMD_H5;
295 rctx->state[6] = RMD_H6;
296 rctx->state[7] = RMD_H7;
297 rctx->state[8] = RMD_H8;
298 rctx->state[9] = RMD_H9;
299
300 memset(rctx->buffer, 0, sizeof(rctx->buffer));
301}
302
303static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
304 unsigned int len)
305{
306 struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
307 const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
308
309 rctx->byte_count += len;
310
311 /* Enough space in buffer? If so copy and we're done */
312 if (avail > len) {
313 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
314 data, len);
315 return;
316 }
317
318 memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
319 data, avail);
320
321 rmd320_transform(rctx->state, rctx->buffer);
322 data += avail;
323 len -= avail;
324
325 while (len >= sizeof(rctx->buffer)) {
326 memcpy(rctx->buffer, data, sizeof(rctx->buffer));
327 rmd320_transform(rctx->state, rctx->buffer);
328 data += sizeof(rctx->buffer);
329 len -= sizeof(rctx->buffer);
330 }
331
332 memcpy(rctx->buffer, data, len);
333}
334
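
rmd320_update() is the standard block-buffering pattern: avail counts the free bytes left in the 64-byte block buffer, short inputs are merely appended, and once a block fills it is hashed in place, with any further whole blocks hashed directly from the caller's data. A worked trace, assuming a fresh context (byte_count = 0, so avail = 64) and a 100-byte update:

	/* rmd320_update(tfm, data, 100) on an empty buffer:
	 *   avail (64) > len (100) is false, so the first 64 bytes
	 *   complete a block and rmd320_transform() runs once;
	 *   the while loop sees len = 36 < 64 and does nothing;
	 *   the trailing 36 bytes stay buffered for the next update
	 *   or for the final padding. */
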
335/* Add padding and return the message digest. */
336static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
337{
338 struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
339 u32 i, index, padlen;
340 __le64 bits;
341 __le32 *dst = (__le32 *)out;
342 static const u8 padding[64] = { 0x80, };
343
344 bits = cpu_to_le64(rctx->byte_count << 3);
345
346 /* Pad out to 56 mod 64 */
347 index = rctx->byte_count & 0x3f;
348 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
349 rmd320_update(tfm, padding, padlen);
350
351 /* Append length */
352 rmd320_update(tfm, (const u8 *)&bits, sizeof(bits));
353
354 /* Store state in digest */
355 for (i = 0; i < 10; i++)
356 dst[i] = cpu_to_le32p(&rctx->state[i]);
357
358 /* Wipe context */
359 memset(rctx, 0, sizeof(*rctx));
360}
361
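
The finalization applies MD4-family padding: a single 0x80 byte, zeros until the length is 56 mod 64, then the message bit length as a little-endian 64-bit word, so the padded stream is always a whole number of 64-byte blocks. For the 14-byte "message digest" test vector, for example: index = 14, padlen = 56 - 14 = 42, and 14 + 42 + 8 = 64, exactly one block.
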
362static struct crypto_alg alg = {
363 .cra_name = "rmd320",
364 .cra_driver_name = "rmd320",
365 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
366 .cra_blocksize = RMD320_BLOCK_SIZE,
367 .cra_ctxsize = sizeof(struct rmd320_ctx),
368 .cra_module = THIS_MODULE,
369 .cra_list = LIST_HEAD_INIT(alg.cra_list),
370 .cra_u = { .digest = {
371 .dia_digestsize = RMD320_DIGEST_SIZE,
372 .dia_init = rmd320_init,
373 .dia_update = rmd320_update,
374 .dia_final = rmd320_final } }
375};
376
377static int __init rmd320_mod_init(void)
378{
379 return crypto_register_alg(&alg);
380}
381
382static void __exit rmd320_mod_fini(void)
383{
384 crypto_unregister_alg(&alg);
385}
386
387module_init(rmd320_mod_init);
388module_exit(rmd320_mod_fini);
389
390MODULE_LICENSE("GPL");
391MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
392
393MODULE_ALIAS("rmd320");
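
Once registered, "rmd320" is reachable through the kernel's generic hash API like any other digest. A minimal in-kernel usage sketch against the synchronous crypto_hash interface of this kernel generation (error handling trimmed; the function name is illustrative):

	static int rmd320_digest_example(const u8 *data, unsigned int len,
					 u8 *out /* RMD320_DIGEST_SIZE bytes */)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_hash("rmd320", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		sg_init_one(&sg, data, len);
		desc.tfm = tfm;
		desc.flags = 0;
		err = crypto_hash_digest(&desc, &sg, len, out);

		crypto_free_hash(tfm);
		return err;
	}
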
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e47f6e02133c..59821a22d752 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -13,15 +13,9 @@
13 * Software Foundation; either version 2 of the License, or (at your option) 13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 * 2007-11-13 Added GCM tests
17 * 2007-11-13 Added AEAD support
18 * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
19 * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
20 * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
21 * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
22 *
23 */ 16 */
24 17
18#include <crypto/hash.h>
25#include <linux/err.h> 19#include <linux/err.h>
26#include <linux/init.h> 20#include <linux/init.h>
27#include <linux/module.h> 21#include <linux/module.h>
@@ -30,7 +24,6 @@
30#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
31#include <linux/string.h> 25#include <linux/string.h>
32#include <linux/crypto.h> 26#include <linux/crypto.h>
33#include <linux/highmem.h>
34#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
35#include <linux/jiffies.h> 28#include <linux/jiffies.h>
36#include <linux/timex.h> 29#include <linux/timex.h>
@@ -38,7 +31,7 @@
38#include "tcrypt.h" 31#include "tcrypt.h"
39 32
40/* 33/*
41 * Need to kmalloc() memory for testing kmap(). 34 * Need to kmalloc() memory for testing.
42 */ 35 */
43#define TVMEMSIZE 16384 36#define TVMEMSIZE 16384
44#define XBUFSIZE 32768 37#define XBUFSIZE 32768
@@ -46,7 +39,7 @@
46/* 39/*
47 * Indexes into the xbuf to simulate cross-page access. 40 * Indexes into the xbuf to simulate cross-page access.
48 */ 41 */
49#define IDX1 37 42#define IDX1 32
50#define IDX2 32400 43#define IDX2 32400
51#define IDX3 1 44#define IDX3 1
52#define IDX4 8193 45#define IDX4 8193
@@ -83,7 +76,8 @@ static char *check[] = {
83 "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", 76 "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
84 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 77 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
85 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", 78 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
86 "camellia", "seed", "salsa20", "lzo", "cts", NULL 79 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
80 "lzo", "cts", NULL
87}; 81};
88 82
89static void hexdump(unsigned char *buf, unsigned int len) 83static void hexdump(unsigned char *buf, unsigned int len)
@@ -110,22 +104,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
110 unsigned int i, j, k, temp; 104 unsigned int i, j, k, temp;
111 struct scatterlist sg[8]; 105 struct scatterlist sg[8];
112 char result[64]; 106 char result[64];
113 struct crypto_hash *tfm; 107 struct crypto_ahash *tfm;
114 struct hash_desc desc; 108 struct ahash_request *req;
109 struct tcrypt_result tresult;
115 int ret; 110 int ret;
116 void *hash_buff; 111 void *hash_buff;
117 112
118 printk("\ntesting %s\n", algo); 113 printk("\ntesting %s\n", algo);
119 114
120 tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); 115 init_completion(&tresult.completion);
116
117 tfm = crypto_alloc_ahash(algo, 0, 0);
121 if (IS_ERR(tfm)) { 118 if (IS_ERR(tfm)) {
122 printk("failed to load transform for %s: %ld\n", algo, 119 printk("failed to load transform for %s: %ld\n", algo,
123 PTR_ERR(tfm)); 120 PTR_ERR(tfm));
124 return; 121 return;
125 } 122 }
126 123
127 desc.tfm = tfm; 124 req = ahash_request_alloc(tfm, GFP_KERNEL);
128 desc.flags = 0; 125 if (!req) {
126 printk(KERN_ERR "failed to allocate request for %s\n", algo);
127 goto out_noreq;
128 }
129 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
130 tcrypt_complete, &tresult);
129 131
130 for (i = 0; i < tcount; i++) { 132 for (i = 0; i < tcount; i++) {
131 printk("test %u:\n", i + 1); 133 printk("test %u:\n", i + 1);
@@ -139,8 +141,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
139 sg_init_one(&sg[0], hash_buff, template[i].psize); 141 sg_init_one(&sg[0], hash_buff, template[i].psize);
140 142
141 if (template[i].ksize) { 143 if (template[i].ksize) {
142 ret = crypto_hash_setkey(tfm, template[i].key, 144 crypto_ahash_clear_flags(tfm, ~0);
143 template[i].ksize); 145 ret = crypto_ahash_setkey(tfm, template[i].key,
146 template[i].ksize);
144 if (ret) { 147 if (ret) {
145 printk("setkey() failed ret=%d\n", ret); 148 printk("setkey() failed ret=%d\n", ret);
146 kfree(hash_buff); 149 kfree(hash_buff);
@@ -148,17 +151,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
148 } 151 }
149 } 152 }
150 153
151 ret = crypto_hash_digest(&desc, sg, template[i].psize, result); 154 ahash_request_set_crypt(req, sg, result, template[i].psize);
152 if (ret) { 155 ret = crypto_ahash_digest(req);
156 switch (ret) {
157 case 0:
158 break;
159 case -EINPROGRESS:
160 case -EBUSY:
161 ret = wait_for_completion_interruptible(
162 &tresult.completion);
163 if (!ret && !(ret = tresult.err)) {
164 INIT_COMPLETION(tresult.completion);
165 break;
166 }
167 /* fall through */
168 default:
153 printk("digest () failed ret=%d\n", ret); 169 printk("digest () failed ret=%d\n", ret);
154 kfree(hash_buff); 170 kfree(hash_buff);
155 goto out; 171 goto out;
156 } 172 }
157 173
158 hexdump(result, crypto_hash_digestsize(tfm)); 174 hexdump(result, crypto_ahash_digestsize(tfm));
159 printk("%s\n", 175 printk("%s\n",
160 memcmp(result, template[i].digest, 176 memcmp(result, template[i].digest,
161 crypto_hash_digestsize(tfm)) ? 177 crypto_ahash_digestsize(tfm)) ?
162 "fail" : "pass"); 178 "fail" : "pass");
163 kfree(hash_buff); 179 kfree(hash_buff);
164 } 180 }
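
The switch above is the idiomatic ahash calling convention: crypto_ahash_digest() may complete synchronously (returning 0), or return -EINPROGRESS/-EBUSY and finish later from the driver's completion path, in which case the test sleeps on a completion. The plumbing is declared earlier in tcrypt.c; roughly, it looks like this sketch:

	struct tcrypt_result {
		struct completion completion;
		int err;
	};

	static void tcrypt_complete(struct crypto_async_request *req, int err)
	{
		struct tcrypt_result *res = req->data;

		if (err == -EINPROGRESS)
			return;	/* moved off the backlog; keep waiting */

		res->err = err;
		complete(&res->completion);
	}
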
@@ -187,8 +203,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
187 } 203 }
188 204
189 if (template[i].ksize) { 205 if (template[i].ksize) {
190 ret = crypto_hash_setkey(tfm, template[i].key, 206 crypto_ahash_clear_flags(tfm, ~0);
191 template[i].ksize); 207 ret = crypto_ahash_setkey(tfm, template[i].key,
208 template[i].ksize);
192 209
193 if (ret) { 210 if (ret) {
194 printk("setkey() failed ret=%d\n", ret); 211 printk("setkey() failed ret=%d\n", ret);
@@ -196,29 +213,44 @@ static void test_hash(char *algo, struct hash_testvec *template,
196 } 213 }
197 } 214 }
198 215
199 ret = crypto_hash_digest(&desc, sg, template[i].psize, 216 ahash_request_set_crypt(req, sg, result,
200 result); 217 template[i].psize);
201 if (ret) { 218 ret = crypto_ahash_digest(req);
219 switch (ret) {
220 case 0:
221 break;
222 case -EINPROGRESS:
223 case -EBUSY:
224 ret = wait_for_completion_interruptible(
225 &tresult.completion);
226 if (!ret && !(ret = tresult.err)) {
227 INIT_COMPLETION(tresult.completion);
228 break;
229 }
230 /* fall through */
231 default:
202 printk("digest () failed ret=%d\n", ret); 232 printk("digest () failed ret=%d\n", ret);
203 goto out; 233 goto out;
204 } 234 }
205 235
206 hexdump(result, crypto_hash_digestsize(tfm)); 236 hexdump(result, crypto_ahash_digestsize(tfm));
207 printk("%s\n", 237 printk("%s\n",
208 memcmp(result, template[i].digest, 238 memcmp(result, template[i].digest,
209 crypto_hash_digestsize(tfm)) ? 239 crypto_ahash_digestsize(tfm)) ?
210 "fail" : "pass"); 240 "fail" : "pass");
211 } 241 }
212 } 242 }
213 243
214out: 244out:
215 crypto_free_hash(tfm); 245 ahash_request_free(req);
246out_noreq:
247 crypto_free_ahash(tfm);
216} 248}
217 249
218static void test_aead(char *algo, int enc, struct aead_testvec *template, 250static void test_aead(char *algo, int enc, struct aead_testvec *template,
219 unsigned int tcount) 251 unsigned int tcount)
220{ 252{
221 unsigned int ret, i, j, k, temp; 253 unsigned int ret, i, j, k, n, temp;
222 char *q; 254 char *q;
223 struct crypto_aead *tfm; 255 struct crypto_aead *tfm;
224 char *key; 256 char *key;
@@ -344,13 +376,12 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
344 goto next_one; 376 goto next_one;
345 } 377 }
346 378
347 q = kmap(sg_page(&sg[0])) + sg[0].offset; 379 q = input;
348 hexdump(q, template[i].rlen); 380 hexdump(q, template[i].rlen);
349 381
350 printk(KERN_INFO "enc/dec: %s\n", 382 printk(KERN_INFO "enc/dec: %s\n",
351 memcmp(q, template[i].result, 383 memcmp(q, template[i].result,
352 template[i].rlen) ? "fail" : "pass"); 384 template[i].rlen) ? "fail" : "pass");
353 kunmap(sg_page(&sg[0]));
354next_one: 385next_one:
355 if (!template[i].key) 386 if (!template[i].key)
356 kfree(key); 387 kfree(key);
@@ -360,7 +391,6 @@ next_one:
360 } 391 }
361 392
362 printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e); 393 printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
363 memset(xbuf, 0, XBUFSIZE);
364 memset(axbuf, 0, XBUFSIZE); 394 memset(axbuf, 0, XBUFSIZE);
365 395
366 for (i = 0, j = 0; i < tcount; i++) { 396 for (i = 0, j = 0; i < tcount; i++) {
@@ -388,6 +418,7 @@ next_one:
388 goto out; 418 goto out;
389 } 419 }
390 420
421 memset(xbuf, 0, XBUFSIZE);
391 sg_init_table(sg, template[i].np); 422 sg_init_table(sg, template[i].np);
392 for (k = 0, temp = 0; k < template[i].np; k++) { 423 for (k = 0, temp = 0; k < template[i].np; k++) {
393 memcpy(&xbuf[IDX[k]], 424 memcpy(&xbuf[IDX[k]],
@@ -450,7 +481,7 @@ next_one:
450 481
451 for (k = 0, temp = 0; k < template[i].np; k++) { 482 for (k = 0, temp = 0; k < template[i].np; k++) {
452 printk(KERN_INFO "page %u\n", k); 483 printk(KERN_INFO "page %u\n", k);
453 q = kmap(sg_page(&sg[k])) + sg[k].offset; 484 q = &axbuf[IDX[k]];
454 hexdump(q, template[i].tap[k]); 485 hexdump(q, template[i].tap[k]);
455 printk(KERN_INFO "%s\n", 486 printk(KERN_INFO "%s\n",
456 memcmp(q, template[i].result + temp, 487 memcmp(q, template[i].result + temp,
@@ -459,8 +490,15 @@ next_one:
459 0 : authsize)) ? 490 0 : authsize)) ?
460 "fail" : "pass"); 491 "fail" : "pass");
461 492
493 for (n = 0; q[template[i].tap[k] + n]; n++)
494 ;
495 if (n) {
496 printk("Result buffer corruption %u "
497 "bytes:\n", n);
498 hexdump(&q[template[i].tap[k]], n);
499 }
500
462 temp += template[i].tap[k]; 501 temp += template[i].tap[k];
463 kunmap(sg_page(&sg[k]));
464 } 502 }
465 } 503 }
466 } 504 }
@@ -473,7 +511,7 @@ out:
473static void test_cipher(char *algo, int enc, 511static void test_cipher(char *algo, int enc,
474 struct cipher_testvec *template, unsigned int tcount) 512 struct cipher_testvec *template, unsigned int tcount)
475{ 513{
476 unsigned int ret, i, j, k, temp; 514 unsigned int ret, i, j, k, n, temp;
477 char *q; 515 char *q;
478 struct crypto_ablkcipher *tfm; 516 struct crypto_ablkcipher *tfm;
479 struct ablkcipher_request *req; 517 struct ablkcipher_request *req;
@@ -569,19 +607,17 @@ static void test_cipher(char *algo, int enc,
569 goto out; 607 goto out;
570 } 608 }
571 609
572 q = kmap(sg_page(&sg[0])) + sg[0].offset; 610 q = data;
573 hexdump(q, template[i].rlen); 611 hexdump(q, template[i].rlen);
574 612
575 printk("%s\n", 613 printk("%s\n",
576 memcmp(q, template[i].result, 614 memcmp(q, template[i].result,
577 template[i].rlen) ? "fail" : "pass"); 615 template[i].rlen) ? "fail" : "pass");
578 kunmap(sg_page(&sg[0]));
579 } 616 }
580 kfree(data); 617 kfree(data);
581 } 618 }
582 619
583 printk("\ntesting %s %s across pages (chunking)\n", algo, e); 620 printk("\ntesting %s %s across pages (chunking)\n", algo, e);
584 memset(xbuf, 0, XBUFSIZE);
585 621
586 j = 0; 622 j = 0;
587 for (i = 0; i < tcount; i++) { 623 for (i = 0; i < tcount; i++) {
@@ -596,6 +632,7 @@ static void test_cipher(char *algo, int enc,
596 printk("test %u (%d bit key):\n", 632 printk("test %u (%d bit key):\n",
597 j, template[i].klen * 8); 633 j, template[i].klen * 8);
598 634
635 memset(xbuf, 0, XBUFSIZE);
599 crypto_ablkcipher_clear_flags(tfm, ~0); 636 crypto_ablkcipher_clear_flags(tfm, ~0);
600 if (template[i].wk) 637 if (template[i].wk)
601 crypto_ablkcipher_set_flags( 638 crypto_ablkcipher_set_flags(
@@ -649,14 +686,21 @@ static void test_cipher(char *algo, int enc,
649 temp = 0; 686 temp = 0;
650 for (k = 0; k < template[i].np; k++) { 687 for (k = 0; k < template[i].np; k++) {
651 printk("page %u\n", k); 688 printk("page %u\n", k);
652 q = kmap(sg_page(&sg[k])) + sg[k].offset; 689 q = &xbuf[IDX[k]];
653 hexdump(q, template[i].tap[k]); 690 hexdump(q, template[i].tap[k]);
654 printk("%s\n", 691 printk("%s\n",
655 memcmp(q, template[i].result + temp, 692 memcmp(q, template[i].result + temp,
656 template[i].tap[k]) ? "fail" : 693 template[i].tap[k]) ? "fail" :
657 "pass"); 694 "pass");
695
696 for (n = 0; q[template[i].tap[k] + n]; n++)
697 ;
698 if (n) {
699 printk("Result buffer corruption %u "
700 "bytes:\n", n);
701 hexdump(&q[template[i].tap[k]], n);
702 }
658 temp += template[i].tap[k]; 703 temp += template[i].tap[k];
659 kunmap(sg_page(&sg[k]));
660 } 704 }
661 } 705 }
662 } 706 }
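
The scan added after each chunk comparison walks past the expected tap[k] bytes looking for nonzero data. Because xbuf is now re-zeroed before every chunked test (the memset moved into the loop above), the check rests on a simple invariant:

	/* after memset(xbuf, 0, XBUFSIZE) and the cipher operation:
	 *   q[0 .. tap[k]-1]  -> the expected chunk of output
	 *   q[tap[k] .. ]     -> must still be zero; any nonzero byte
	 *                        is an out-of-bounds write by the driver */

so a stray write beyond the handed-over region is reported as result buffer corruption instead of passing silently.
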
@@ -1172,6 +1216,14 @@ static void do_test(void)
1172 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, 1216 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1173 DES3_EDE_DEC_TEST_VECTORS); 1217 DES3_EDE_DEC_TEST_VECTORS);
1174 1218
1219 test_cipher("cbc(des3_ede)", ENCRYPT,
1220 des3_ede_cbc_enc_tv_template,
1221 DES3_EDE_CBC_ENC_TEST_VECTORS);
1222
1223 test_cipher("cbc(des3_ede)", DECRYPT,
1224 des3_ede_cbc_dec_tv_template,
1225 DES3_EDE_CBC_DEC_TEST_VECTORS);
1226
1175 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); 1227 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
1176 1228
1177 test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); 1229 test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
@@ -1382,6 +1434,14 @@ static void do_test(void)
1382 DES3_EDE_ENC_TEST_VECTORS); 1434 DES3_EDE_ENC_TEST_VECTORS);
1383 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, 1435 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1384 DES3_EDE_DEC_TEST_VECTORS); 1436 DES3_EDE_DEC_TEST_VECTORS);
1437
1438 test_cipher("cbc(des3_ede)", ENCRYPT,
1439 des3_ede_cbc_enc_tv_template,
1440 DES3_EDE_CBC_ENC_TEST_VECTORS);
1441
1442 test_cipher("cbc(des3_ede)", DECRYPT,
1443 des3_ede_cbc_dec_tv_template,
1444 DES3_EDE_CBC_DEC_TEST_VECTORS);
1385 break; 1445 break;
1386 1446
1387 case 5: 1447 case 5:
@@ -1550,7 +1610,7 @@ static void do_test(void)
1550 case 29: 1610 case 29:
1551 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); 1611 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
1552 break; 1612 break;
1553 1613
1554 case 30: 1614 case 30:
1555 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, 1615 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
1556 XETA_ENC_TEST_VECTORS); 1616 XETA_ENC_TEST_VECTORS);
@@ -1615,6 +1675,22 @@ static void do_test(void)
1615 CTS_MODE_DEC_TEST_VECTORS); 1675 CTS_MODE_DEC_TEST_VECTORS);
1616 break; 1676 break;
1617 1677
1678 case 39:
1679 test_hash("rmd128", rmd128_tv_template, RMD128_TEST_VECTORS);
1680 break;
1681
1682 case 40:
1683 test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS);
1684 break;
1685
1686 case 41:
1687 test_hash("rmd256", rmd256_tv_template, RMD256_TEST_VECTORS);
1688 break;
1689
1690 case 42:
1691 test_hash("rmd320", rmd320_tv_template, RMD320_TEST_VECTORS);
1692 break;
1693
1618 case 100: 1694 case 100:
1619 test_hash("hmac(md5)", hmac_md5_tv_template, 1695 test_hash("hmac(md5)", hmac_md5_tv_template,
1620 HMAC_MD5_TEST_VECTORS); 1696 HMAC_MD5_TEST_VECTORS);
@@ -1650,6 +1726,16 @@ static void do_test(void)
1650 XCBC_AES_TEST_VECTORS); 1726 XCBC_AES_TEST_VECTORS);
1651 break; 1727 break;
1652 1728
1729 case 107:
1730 test_hash("hmac(rmd128)", hmac_rmd128_tv_template,
1731 HMAC_RMD128_TEST_VECTORS);
1732 break;
1733
1734 case 108:
1735 test_hash("hmac(rmd160)", hmac_rmd160_tv_template,
1736 HMAC_RMD160_TEST_VECTORS);
1737 break;
1738
1653 case 200: 1739 case 200:
1654 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1740 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
1655 speed_template_16_24_32); 1741 speed_template_16_24_32);
@@ -1788,6 +1874,22 @@ static void do_test(void)
1788 test_hash_speed("sha224", sec, generic_hash_speed_template); 1874 test_hash_speed("sha224", sec, generic_hash_speed_template);
1789 if (mode > 300 && mode < 400) break; 1875 if (mode > 300 && mode < 400) break;
1790 1876
1877 case 314:
1878 test_hash_speed("rmd128", sec, generic_hash_speed_template);
1879 if (mode > 300 && mode < 400) break;
1880
1881 case 315:
1882 test_hash_speed("rmd160", sec, generic_hash_speed_template);
1883 if (mode > 300 && mode < 400) break;
1884
1885 case 316:
1886 test_hash_speed("rmd256", sec, generic_hash_speed_template);
1887 if (mode > 300 && mode < 400) break;
1888
1889 case 317:
1890 test_hash_speed("rmd320", sec, generic_hash_speed_template);
1891 if (mode > 300 && mode < 400) break;
1892
1791 case 399: 1893 case 399:
1792 break; 1894 break;
1793 1895
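
These new speed-test cases slot into tcrypt's existing fall-through chain: modes 300-399 run individual hash benchmarks, and each case ends with the range check rather than a plain break so that mode 300 alone runs the whole suite. Assuming tcrypt's usual mode/sec module parameters, a single benchmark would be invoked as, e.g., modprobe tcrypt mode=315 sec=1 to time rmd160.
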
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 47bc0ecb8978..801e0c288862 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -13,12 +13,6 @@
13 * Software Foundation; either version 2 of the License, or (at your option) 13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 * 2007-11-13 Added GCM tests
17 * 2007-11-13 Added AEAD support
18 * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
19 * 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
20 * 2003-09-14 Changes by Kartikey Mahendra Bhatt
21 *
22 */ 16 */
23#ifndef _CRYPTO_TCRYPT_H 17#ifndef _CRYPTO_TCRYPT_H
24#define _CRYPTO_TCRYPT_H 18#define _CRYPTO_TCRYPT_H
@@ -168,6 +162,271 @@ static struct hash_testvec md5_tv_template[] = {
168 .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55" 162 .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55"
169 "\xac\x49\xda\x2e\x21\x07\xb6\x7a", 163 "\xac\x49\xda\x2e\x21\x07\xb6\x7a",
170 } 164 }
165
166};
167
168/*
169 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
170 */
171#define RMD128_TEST_VECTORS 10
172
173static struct hash_testvec rmd128_tv_template[] = {
174 {
175 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
176 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
177 }, {
178 .plaintext = "a",
179 .psize = 1,
180 .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
181 "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
182 }, {
183 .plaintext = "abc",
184 .psize = 3,
185 .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
186 "\x84\x63\x6b\x0f\x69\x14\x4c\x77",
187 }, {
188 .plaintext = "message digest",
189 .psize = 14,
190 .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
191 "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
192 }, {
193 .plaintext = "abcdefghijklmnopqrstuvwxyz",
194 .psize = 26,
195 .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
196 "\x10\x71\x49\x22\xb3\x71\x83\x4e",
197 }, {
198 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
199 "fghijklmnopqrstuvwxyz0123456789",
200 .psize = 62,
201 .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
202 "\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
203 }, {
204 .plaintext = "1234567890123456789012345678901234567890"
205 "1234567890123456789012345678901234567890",
206 .psize = 80,
207 .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
208 "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
209 }, {
210 .plaintext = "abcdbcdecdefdefgefghfghighij"
211 "hijkijkljklmklmnlmnomnopnopq",
212 .psize = 56,
213 .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
214 "\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
215 .np = 2,
216 .tap = { 28, 28 },
217 }, {
218 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
219 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
220 "lmnopqrsmnopqrstnopqrstu",
221 .psize = 112,
222 .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
223 "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
224 }, {
225 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
226 .psize = 32,
227 .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
228 "\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
229 }
230};
231
232/*
233 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
234 */
235#define RMD160_TEST_VECTORS 10
236
237static struct hash_testvec rmd160_tv_template[] = {
238 {
239 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
240 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
241 }, {
242 .plaintext = "a",
243 .psize = 1,
244 .digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae"
245 "\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe",
246 }, {
247 .plaintext = "abc",
248 .psize = 3,
249 .digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04"
250 "\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc",
251 }, {
252 .plaintext = "message digest",
253 .psize = 14,
254 .digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8"
255 "\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36",
256 }, {
257 .plaintext = "abcdefghijklmnopqrstuvwxyz",
258 .psize = 26,
259 .digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb"
260 "\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc",
261 }, {
262 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
263 "fghijklmnopqrstuvwxyz0123456789",
264 .psize = 62,
265 .digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed"
266 "\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89",
267 }, {
268 .plaintext = "1234567890123456789012345678901234567890"
269 "1234567890123456789012345678901234567890",
270 .psize = 80,
271 .digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb"
272 "\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb",
273 }, {
274 .plaintext = "abcdbcdecdefdefgefghfghighij"
275 "hijkijkljklmklmnlmnomnopnopq",
276 .psize = 56,
277 .digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
278 "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
279 .np = 2,
280 .tap = { 28, 28 },
281 }, {
282 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
283 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
284 "lmnopqrsmnopqrstnopqrstu",
285 .psize = 112,
286 .digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91"
287 "\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45",
288 }, {
289 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
290 .psize = 32,
291 .digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d"
292 "\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f",
293 }
294};
295
296/*
297 * RIPEMD-256 test vectors
298 */
299#define RMD256_TEST_VECTORS 8
300
301static struct hash_testvec rmd256_tv_template[] = {
302 {
303 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
304 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
305 "\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
306 "\x80\xae\x01\x68\xe3\xc5\x52\x2d",
307 }, {
308 .plaintext = "a",
309 .psize = 1,
310 .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
311 "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
312 "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
313 "\xcd\x88\x3a\x91\x34\x69\x29\x25",
314 }, {
315 .plaintext = "abc",
316 .psize = 3,
317 .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
318 "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
319 "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
320 "\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
321 }, {
322 .plaintext = "message digest",
323 .psize = 14,
324 .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
325 "\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
326 "\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
327 "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
328 }, {
329 .plaintext = "abcdefghijklmnopqrstuvwxyz",
330 .psize = 26,
331 .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
332 "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
333 "\x78\x96\x11\x8a\x51\x97\x96\x87"
334 "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
335 }, {
336 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
337 "fghijklmnopqrstuvwxyz0123456789",
338 .psize = 62,
339 .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
340 "\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
341 "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
342 "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
343 }, {
344 .plaintext = "1234567890123456789012345678901234567890"
345 "1234567890123456789012345678901234567890",
346 .psize = 80,
347 .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
348 "\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
349 "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
350 "\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
351 }, {
352 .plaintext = "abcdbcdecdefdefgefghfghighij"
353 "hijkijkljklmklmnlmnomnopnopq",
354 .psize = 56,
355 .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
356 "\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
357 "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
358 "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
359 .np = 2,
360 .tap = { 28, 28 },
361 }
362};
363
364/*
365 * RIPEMD-320 test vectors
366 */
367#define RMD320_TEST_VECTORS 8
368
369static struct hash_testvec rmd320_tv_template[] = {
370 {
371 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
372 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
373 "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
374 "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
375 }, {
376 .plaintext = "a",
377 .psize = 1,
378 .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
379 "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
380 "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
381 "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
382 }, {
383 .plaintext = "abc",
384 .psize = 3,
385 .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
386 "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
387 "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
388 "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
389 }, {
390 .plaintext = "message digest",
391 .psize = 14,
392 .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
393 "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
394 "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
395 "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
396 }, {
397 .plaintext = "abcdefghijklmnopqrstuvwxyz",
398 .psize = 26,
399 .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
400 "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
401 "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
402 "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
403 }, {
404 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
405 "fghijklmnopqrstuvwxyz0123456789",
406 .psize = 62,
407 .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
408 "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
409 "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
410 "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
411 }, {
412 .plaintext = "1234567890123456789012345678901234567890"
413 "1234567890123456789012345678901234567890",
414 .psize = 80,
415 .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
416 "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
417 "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
418 "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
419 }, {
420 .plaintext = "abcdbcdecdefdefgefghfghighij"
421 "hijkijkljklmklmnlmnomnopnopq",
422 .psize = 56,
423 .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
424 "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
425 "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
426 "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
427 .np = 2,
428 .tap = { 28, 28 },
429 }
171}; 430};
172 431
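
Vectors that set .np and .tap are deliberately fed to the algorithm as .np scatterlist segments of .tap[i] bytes each, staged at the page-crossing IDX[] offsets, so the chunked code path gets exercised alongside the linear one. The mapping mirrors the chunking loop in tcrypt.c; a condensed sketch:

	sg_init_table(sg, template[i].np);
	for (k = 0, temp = 0; k < template[i].np; k++) {
		memcpy(&xbuf[IDX[k]], template[i].plaintext + temp,
		       template[i].tap[k]);
		sg_set_buf(&sg[k], &xbuf[IDX[k]], template[i].tap[k]);
		temp += template[i].tap[k];
	}
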
173/* 432/*
@@ -817,6 +1076,168 @@ static struct hash_testvec hmac_md5_tv_template[] =
817}; 1076};
818 1077
819/* 1078/*
1079 * HMAC-RIPEMD128 test vectors from RFC2286
1080 */
1081#define HMAC_RMD128_TEST_VECTORS 7
1082
1083static struct hash_testvec hmac_rmd128_tv_template[] = {
1084 {
1085 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1086 .ksize = 16,
1087 .plaintext = "Hi There",
1088 .psize = 8,
1089 .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
1090 "\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
1091 }, {
1092 .key = "Jefe",
1093 .ksize = 4,
1094 .plaintext = "what do ya want for nothing?",
1095 .psize = 28,
1096 .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
1097 "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
1098 .np = 2,
1099 .tap = { 14, 14 },
1100 }, {
1101 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1102 .ksize = 16,
1103 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1104 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1105 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1106 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1107 .psize = 50,
1108 .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
1109 "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
1110 }, {
1111 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1112 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1113 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1114 .ksize = 25,
1115 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1116 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1117 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1118 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1119 .psize = 50,
1120 .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
1121 "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
1122 }, {
1123 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1124 .ksize = 16,
1125 .plaintext = "Test With Truncation",
1126 .psize = 20,
1127 .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
1128 "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
1129 }, {
1130 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1131 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1132 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1133 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1134 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1135 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1136 "\xaa\xaa",
1137 .ksize = 80,
1138 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1139 .psize = 54,
1140 .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
1141 "\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
1142 }, {
1143 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1144 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1145 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1146 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1147 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1148 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1149 "\xaa\xaa",
1150 .ksize = 80,
1151 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1152 "Block-Size Data",
1153 .psize = 73,
1154 .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
1155 "\x06\x90\xc2\x37\x63\x5f\x30\xc5",
1156 },
1157};
1158
1159/*
1160 * HMAC-RIPEMD160 test vectors from RFC2286
1161 */
1162#define HMAC_RMD160_TEST_VECTORS 7
1163
1164static struct hash_testvec hmac_rmd160_tv_template[] = {
1165 {
1166 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1167 .ksize = 20,
1168 .plaintext = "Hi There",
1169 .psize = 8,
1170 .digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e"
1171 "\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68",
1172 }, {
1173 .key = "Jefe",
1174 .ksize = 4,
1175 .plaintext = "what do ya want for nothing?",
1176 .psize = 28,
1177 .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
1178 "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
1179 .np = 2,
1180 .tap = { 14, 14 },
1181 }, {
1182 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1183 .ksize = 20,
1184 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1185 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1186 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1187 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1188 .psize = 50,
1189 .digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4"
1190 "\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1",
1191 }, {
1192 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1193 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1194 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1195 .ksize = 25,
1196 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1197 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1198 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1199 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1200 .psize = 50,
1201 .digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1"
1202 "\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4",
1203 }, {
1204 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1205 .ksize = 20,
1206 .plaintext = "Test With Truncation",
1207 .psize = 20,
1208 .digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a"
1209 "\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39",
1210 }, {
1211 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1212 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1213 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1214 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1215 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1216 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1217 "\xaa\xaa",
1218 .ksize = 80,
1219 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1220 .psize = 54,
1221 .digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd"
1222 "\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b",
1223 }, {
1224 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1225 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1226 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1227 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1228 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1229 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1230 "\xaa\xaa",
1231 .ksize = 80,
1232 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1233 "Block-Size Data",
1234 .psize = 73,
1235 .digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f"
1236 "\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a",
1237 },
1238};
1239
1240/*
820 * HMAC-SHA1 test vectors from RFC2202 1241 * HMAC-SHA1 test vectors from RFC2202
821 */ 1242 */
822#define HMAC_SHA1_TEST_VECTORS 7 1243#define HMAC_SHA1_TEST_VECTORS 7
@@ -1442,6 +1863,8 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
1442#define DES_CBC_DEC_TEST_VECTORS 4 1863#define DES_CBC_DEC_TEST_VECTORS 4
1443#define DES3_EDE_ENC_TEST_VECTORS 3 1864#define DES3_EDE_ENC_TEST_VECTORS 3
1444#define DES3_EDE_DEC_TEST_VECTORS 3 1865#define DES3_EDE_DEC_TEST_VECTORS 3
1866#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
1867#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
1445 1868
1446static struct cipher_testvec des_enc_tv_template[] = { 1869static struct cipher_testvec des_enc_tv_template[] = {
1447 { /* From Applied Cryptography */ 1870 { /* From Applied Cryptography */
@@ -1680,9 +2103,6 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
1680 }, 2103 },
1681}; 2104};
1682 2105
1683/*
1684 * We really need some more test vectors, especially for DES3 CBC.
1685 */
1686static struct cipher_testvec des3_ede_enc_tv_template[] = { 2106static struct cipher_testvec des3_ede_enc_tv_template[] = {
1687 { /* These are from openssl */ 2107 { /* These are from openssl */
1688 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" 2108 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
@@ -1745,6 +2165,94 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
1745 }, 2165 },
1746}; 2166};
1747 2167
2168static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
2169 { /* Generated from openssl */
2170 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2171 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2172 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2173 .klen = 24,
2174 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2175 .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2176 "\x53\x20\x63\x65\x65\x72\x73\x74"
2177 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2178 "\x20\x79\x65\x53\x72\x63\x74\x65"
2179 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2180 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2181 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2182 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2183 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2184 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2185 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2186 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2187 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2188 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2189 "\x63\x65\x65\x72\x73\x74\x54\x20"
2190 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2191 .ilen = 128,
2192 .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2193 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2194 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2195 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2196 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2197 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2198 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2199 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2200 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2201 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2202 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2203 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2204 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2205 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2206 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2207 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2208 .rlen = 128,
2209 },
2210};
2211
2212static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
2213 { /* Generated from openssl */
2214 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2215 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2216 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2217 .klen = 24,
2218 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2219 .input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2220 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2221 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2222 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2223 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2224 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2225 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2226 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2227 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2228 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2229 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2230 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2231 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2232 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2233 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2234 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2235 .ilen = 128,
2236 .result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2237 "\x53\x20\x63\x65\x65\x72\x73\x74"
2238 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2239 "\x20\x79\x65\x53\x72\x63\x74\x65"
2240 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2241 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2242 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2243 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2244 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2245 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2246 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2247 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2248 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2249 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2250 "\x63\x65\x65\x72\x73\x74\x54\x20"
2251 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2252 .rlen = 128,
2253 },
2254};
2255
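
The new CBC decryption vector is simply the encryption vector with .input and .result exchanged, which is exactly what CBC guarantees: C[i] = E(K, P[i] ^ C[i-1]) with C[-1] = IV, so decrypting the same ciphertext under the same key and IV must reproduce the plaintext. A generic sketch of the encrypt-side chaining (the encrypt_block callback stands in for the keyed DES3-EDE primitive and is illustrative only):

	/* CBC-encrypt nblocks 8-byte blocks in place; iv carries C[i-1]. */
	static void cbc_encrypt_sketch(u8 *buf, unsigned int nblocks, u8 iv[8],
				       void (*encrypt_block)(u8 block[8]))
	{
		unsigned int i, j;

		for (i = 0; i < nblocks; i++) {
			u8 *blk = &buf[8 * i];

			for (j = 0; j < 8; j++)
				blk[j] ^= iv[j];	/* P[i] ^ C[i-1] */
			encrypt_block(blk);		/* C[i] = E(K, ...) */
			memcpy(iv, blk, 8);		/* chain forward */
		}
	}
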
1748/* 2256/*
1749 * Blowfish test vectors. 2257 * Blowfish test vectors.
1750 */ 2258 */
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43b71b69daa5..e522144cba3a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -174,4 +174,30 @@ config CRYPTO_DEV_HIFN_795X_RNG
174 Select this option if you want to enable the random number generator 174 Select this option if you want to enable the random number generator
175 on the HIFN 795x crypto adapters. 175 on the HIFN 795x crypto adapters.
176 176
177config CRYPTO_DEV_TALITOS
178 tristate "Talitos Freescale Security Engine (SEC)"
179 select CRYPTO_ALGAPI
180 select CRYPTO_AUTHENC
181 select HW_RANDOM
182 depends on FSL_SOC
183 help
184 Say 'Y' here to use the Freescale Security Engine (SEC)
185 to offload cryptographic algorithm computation.
186
187 The Freescale SEC is present on PowerQUICC 'E' processors, such
188 as the MPC8349E and MPC8548E.
189
190 To compile this driver as a module, choose M here: the module
191 will be called talitos.
192
193config CRYPTO_DEV_IXP4XX
194 tristate "Driver for IXP4xx crypto hardware acceleration"
195 depends on ARCH_IXP4XX
196 select CRYPTO_DES
197 select CRYPTO_ALGAPI
198 select CRYPTO_AUTHENC
199 select CRYPTO_BLKCIPHER
200 help
201 Driver for the IXP4xx NPE crypto engine.
202
177endif # CRYPTO_HW 203endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c0327f0dadc5..73557b2968d3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 81f3f950cd7d..4d22b21bd3e3 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -29,7 +29,6 @@
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/scatterlist.h> 30#include <linux/scatterlist.h>
31#include <linux/highmem.h> 31#include <linux/highmem.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h> 32#include <linux/crypto.h>
34#include <linux/hw_random.h> 33#include <linux/hw_random.h>
35#include <linux/ktime.h> 34#include <linux/ktime.h>
@@ -369,7 +368,9 @@ static atomic_t hifn_dev_number;
369#define HIFN_D_DST_RSIZE 80*4 368#define HIFN_D_DST_RSIZE 80*4
370#define HIFN_D_RES_RSIZE 24*4 369#define HIFN_D_RES_RSIZE 24*4
371 370
372#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5 371#define HIFN_D_DST_DALIGN 4
372
373#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1
373 374
374#define AES_MIN_KEY_SIZE 16 375#define AES_MIN_KEY_SIZE 16
375#define AES_MAX_KEY_SIZE 32 376#define AES_MAX_KEY_SIZE 32
@@ -535,10 +536,10 @@ struct hifn_crypt_command
535 */ 536 */
536struct hifn_mac_command 537struct hifn_mac_command
537{ 538{
538 volatile u16 masks; 539 volatile __le16 masks;
539 volatile u16 header_skip; 540 volatile __le16 header_skip;
540 volatile u16 source_count; 541 volatile __le16 source_count;
541 volatile u16 reserved; 542 volatile __le16 reserved;
542}; 543};
543 544
544#define HIFN_MAC_CMD_ALG_MASK 0x0001 545#define HIFN_MAC_CMD_ALG_MASK 0x0001
@@ -564,10 +565,10 @@ struct hifn_mac_command
564 565
565struct hifn_comp_command 566struct hifn_comp_command
566{ 567{
567 volatile u16 masks; 568 volatile __le16 masks;
568 volatile u16 header_skip; 569 volatile __le16 header_skip;
569 volatile u16 source_count; 570 volatile __le16 source_count;
570 volatile u16 reserved; 571 volatile __le16 reserved;
571}; 572};
572 573
573#define HIFN_COMP_CMD_SRCLEN_M 0xc000 574#define HIFN_COMP_CMD_SRCLEN_M 0xc000
@@ -583,10 +584,10 @@ struct hifn_comp_command
583 584
584struct hifn_base_result 585struct hifn_base_result
585{ 586{
586 volatile u16 flags; 587 volatile __le16 flags;
587 volatile u16 session; 588 volatile __le16 session;
588 volatile u16 src_cnt; /* 15:0 of source count */ 589 volatile __le16 src_cnt; /* 15:0 of source count */
589 volatile u16 dst_cnt; /* 15:0 of dest count */ 590 volatile __le16 dst_cnt; /* 15:0 of dest count */
590}; 591};
591 592
592#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ 593#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
@@ -597,8 +598,8 @@ struct hifn_base_result
597 598
598struct hifn_comp_result 599struct hifn_comp_result
599{ 600{
600 volatile u16 flags; 601 volatile __le16 flags;
601 volatile u16 crc; 602 volatile __le16 crc;
602}; 603};
603 604
604#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */ 605#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
@@ -609,8 +610,8 @@ struct hifn_comp_result
609 610
610struct hifn_mac_result 611struct hifn_mac_result
611{ 612{
612 volatile u16 flags; 613 volatile __le16 flags;
613 volatile u16 reserved; 614 volatile __le16 reserved;
614 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ 615 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
615}; 616};
616 617
@@ -619,8 +620,8 @@ struct hifn_mac_result
619 620
620struct hifn_crypt_result 621struct hifn_crypt_result
621{ 622{
622 volatile u16 flags; 623 volatile __le16 flags;
623 volatile u16 reserved; 624 volatile __le16 reserved;
624}; 625};
625 626
626#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */ 627#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
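
The u16 -> __le16 changes in these descriptor and result structures are sparse endianness annotations: the fields live in little-endian device/DMA memory, so CPU-side code must go through the byteorder helpers, and sparse (make C=1) now warns when it does not. A one-line hedged example of a correctly annotated access:

	/* res points at a struct hifn_base_result filled in by the device */
	u16 flags = le16_to_cpu(res->flags);

	if (flags & HIFN_BASE_RES_DSTOVERRUN)
		/* handle the destination overrun */;
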
@@ -686,12 +687,12 @@ static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
686 687
687static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) 688static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
688{ 689{
689 writel(val, dev->bar[0] + reg); 690 writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
690} 691}
691 692
692static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val) 693static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
693{ 694{
694 writel(val, dev->bar[1] + reg); 695 writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
695} 696}
696 697
697static void hifn_wait_puc(struct hifn_device *dev) 698static void hifn_wait_puc(struct hifn_device *dev)
@@ -894,7 +895,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
894 char *offtbl = NULL; 895 char *offtbl = NULL;
895 int i; 896 int i;
896 897
897 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { 898 for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
898 if (pci2id[i].pci_vendor == dev->pdev->vendor && 899 if (pci2id[i].pci_vendor == dev->pdev->vendor &&
899 pci2id[i].pci_prod == dev->pdev->device) { 900 pci2id[i].pci_prod == dev->pdev->device) {
900 offtbl = pci2id[i].card_id; 901 offtbl = pci2id[i].card_id;
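
ARRAY_SIZE() from <linux/kernel.h> replaces the open-coded sizeof division. The result is identical here, but in kernels of this vintage the macro expands to roughly

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

where __must_be_array() breaks the build if arr is a pointer rather than a true array, a mistake the open-coded form accepts silently.
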
@@ -1037,14 +1038,14 @@ static void hifn_init_registers(struct hifn_device *dev)
1037 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); 1038 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1038 1039
1039 /* write all 4 ring address registers */ 1040 /* write all 4 ring address registers */
1040 hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr + 1041 hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
1041 offsetof(struct hifn_dma, cmdr[0]))); 1042 offsetof(struct hifn_dma, cmdr[0]));
1042 hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr + 1043 hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
1043 offsetof(struct hifn_dma, srcr[0]))); 1044 offsetof(struct hifn_dma, srcr[0]));
1044 hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr + 1045 hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
1045 offsetof(struct hifn_dma, dstr[0]))); 1046 offsetof(struct hifn_dma, dstr[0]));
1046 hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr + 1047 hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
1047 offsetof(struct hifn_dma, resr[0]))); 1048 offsetof(struct hifn_dma, resr[0]));
1048 1049
1049 mdelay(2); 1050 mdelay(2);
1050#if 0 1051#if 0
@@ -1166,109 +1167,15 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
1166 return cmd_len; 1167 return cmd_len;
1167} 1168}
1168 1169
1169static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, 1170static int hifn_setup_cmd_desc(struct hifn_device *dev,
1170 unsigned int offset, unsigned int size) 1171 struct hifn_context *ctx, void *priv, unsigned int nbytes)
1171{
1172 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1173 int idx;
1174 dma_addr_t addr;
1175
1176 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1177
1178 idx = dma->srci;
1179
1180 dma->srcr[idx].p = __cpu_to_le32(addr);
1181 dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
1182 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
1183
1184 if (++idx == HIFN_D_SRC_RSIZE) {
1185 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1186 HIFN_D_JUMP |
1187 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1188 idx = 0;
1189 }
1190
1191 dma->srci = idx;
1192 dma->srcu++;
1193
1194 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1195 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1196 dev->flags |= HIFN_FLAG_SRC_BUSY;
1197 }
1198
1199 return size;
1200}
1201
1202static void hifn_setup_res_desc(struct hifn_device *dev)
1203{
1204 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1205
1206 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1207 HIFN_D_VALID | HIFN_D_LAST);
1208 /*
1209 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1210 * HIFN_D_LAST | HIFN_D_NOINVALID);
1211 */
1212
1213 if (++dma->resi == HIFN_D_RES_RSIZE) {
1214 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1215 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1216 dma->resi = 0;
1217 }
1218
1219 dma->resu++;
1220
1221 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1222 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1223 dev->flags |= HIFN_FLAG_RES_BUSY;
1224 }
1225}
1226
1227static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1228 unsigned offset, unsigned size)
1229{
1230 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1231 int idx;
1232 dma_addr_t addr;
1233
1234 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1235
1236 idx = dma->dsti;
1237 dma->dstr[idx].p = __cpu_to_le32(addr);
1238 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1239 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
1240
1241 if (++idx == HIFN_D_DST_RSIZE) {
1242 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1243 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1244 HIFN_D_LAST | HIFN_D_NOINVALID);
1245 idx = 0;
1246 }
1247 dma->dsti = idx;
1248 dma->dstu++;
1249
1250 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1251 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1252 dev->flags |= HIFN_FLAG_DST_BUSY;
1253 }
1254}
1255
1256static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1257 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1258 struct hifn_context *ctx)
1259{ 1172{
1260 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; 1173 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1261 int cmd_len, sa_idx; 1174 int cmd_len, sa_idx;
1262 u8 *buf, *buf_pos; 1175 u8 *buf, *buf_pos;
1263 u16 mask; 1176 u16 mask;
1264 1177
1265 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n", 1178 sa_idx = dma->cmdi;
1266 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1267
1268 sa_idx = dma->resi;
1269
1270 hifn_setup_src_desc(dev, spage, soff, nbytes);
1271
1272 buf_pos = buf = dma->command_bufs[dma->cmdi]; 1179 buf_pos = buf = dma->command_bufs[dma->cmdi];
1273 1180
1274 mask = 0; 1181 mask = 0;
@@ -1370,16 +1277,113 @@ static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned
1370 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 1277 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1371 dev->flags |= HIFN_FLAG_CMD_BUSY; 1278 dev->flags |= HIFN_FLAG_CMD_BUSY;
1372 } 1279 }
1373
1374 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1375 hifn_setup_res_desc(dev);
1376
1377 return 0; 1280 return 0;
1378 1281
1379err_out: 1282err_out:
1380 return -EINVAL; 1283 return -EINVAL;
1381} 1284}
1382 1285
1286static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
1287 unsigned int offset, unsigned int size)
1288{
1289 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1290 int idx;
1291 dma_addr_t addr;
1292
1293 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1294
1295 idx = dma->srci;
1296
1297 dma->srcr[idx].p = __cpu_to_le32(addr);
1298 dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1299 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1300
1301 if (++idx == HIFN_D_SRC_RSIZE) {
1302 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1303 HIFN_D_JUMP |
1304 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1305 idx = 0;
1306 }
1307
1308 dma->srci = idx;
1309 dma->srcu++;
1310
1311 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1312 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1313 dev->flags |= HIFN_FLAG_SRC_BUSY;
1314 }
1315
1316 return size;
1317}
1318
1319static void hifn_setup_res_desc(struct hifn_device *dev)
1320{
1321 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1322
1323 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1324 HIFN_D_VALID | HIFN_D_LAST);
1325 /*
1326 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1327 * HIFN_D_LAST);
1328 */
1329
1330 if (++dma->resi == HIFN_D_RES_RSIZE) {
1331 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1332 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1333 dma->resi = 0;
1334 }
1335
1336 dma->resu++;
1337
1338 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1339 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1340 dev->flags |= HIFN_FLAG_RES_BUSY;
1341 }
1342}
1343
1344static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1345 unsigned offset, unsigned size)
1346{
1347 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1348 int idx;
1349 dma_addr_t addr;
1350
1351 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1352
1353 idx = dma->dsti;
1354 dma->dstr[idx].p = __cpu_to_le32(addr);
1355 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1356 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1357
1358 if (++idx == HIFN_D_DST_RSIZE) {
1359 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1360 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1361 HIFN_D_LAST);
1362 idx = 0;
1363 }
1364 dma->dsti = idx;
1365 dma->dstu++;
1366
1367 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1368 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1369 dev->flags |= HIFN_FLAG_DST_BUSY;
1370 }
1371}
1372
1373static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1374 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1375 struct hifn_context *ctx)
1376{
1377 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
1378 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1379
1380 hifn_setup_src_desc(dev, spage, soff, nbytes);
1381 hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
1382 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1383 hifn_setup_res_desc(dev);
1384 return 0;
1385}
1386
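/*
 * Illustrative sketch (not part of the patch): after the reordering above,
 * hifn_setup_dma() queues the descriptors in the order the engine walks
 * them: source, command, destination, result.  A hypothetical caller for
 * an in-place operation on a single page would look like:
 *
 *	static int hifn_crypt_inplace(struct hifn_device *dev,
 *				      struct page *page, unsigned int off,
 *				      unsigned int nbytes,
 *				      struct hifn_context *ctx)
 *	{
 *		return hifn_setup_dma(dev, page, off, page, off,
 *				      nbytes, NULL, ctx);
 *	}
 */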
1383static int ablkcipher_walk_init(struct ablkcipher_walk *w, 1387static int ablkcipher_walk_init(struct ablkcipher_walk *w,
1384 int num, gfp_t gfp_flags) 1388 int num, gfp_t gfp_flags)
1385{ 1389{
@@ -1431,7 +1435,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
1431 return -EINVAL; 1435 return -EINVAL;
1432 1436
1433 while (size) { 1437 while (size) {
1434 copy = min(drest, src->length); 1438 copy = min(drest, min(size, src->length));
1435 1439
1436 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1); 1440 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
1437 memcpy(daddr, saddr + src->offset, copy); 1441 memcpy(daddr, saddr + src->offset, copy);
@@ -1458,10 +1462,6 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
1458static int ablkcipher_walk(struct ablkcipher_request *req, 1462static int ablkcipher_walk(struct ablkcipher_request *req,
1459 struct ablkcipher_walk *w) 1463 struct ablkcipher_walk *w)
1460{ 1464{
1461 unsigned blocksize =
1462 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1463 unsigned alignmask =
1464 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1465 struct scatterlist *src, *dst, *t; 1465 struct scatterlist *src, *dst, *t;
1466 void *daddr; 1466 void *daddr;
1467 unsigned int nbytes = req->nbytes, offset, copy, diff; 1467 unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1477,16 +1477,14 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1477 dst = &req->dst[idx]; 1477 dst = &req->dst[idx];
1478 1478
1479 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, " 1479 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
1480 "blocksize: %u, nbytes: %u.\n", 1480 "nbytes: %u.\n",
1481 __func__, src->length, dst->length, src->offset, 1481 __func__, src->length, dst->length, src->offset,
1482 dst->offset, offset, blocksize, nbytes); 1482 dst->offset, offset, nbytes);
1483 1483
1484 if (src->length & (blocksize - 1) || 1484 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1485 src->offset & (alignmask - 1) || 1485 !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
1486 dst->length & (blocksize - 1) || 1486 offset) {
1487 dst->offset & (alignmask - 1) || 1487 unsigned slen = min(src->length - offset, nbytes);
1488 offset) {
1489 unsigned slen = src->length - offset;
1490 unsigned dlen = PAGE_SIZE; 1488 unsigned dlen = PAGE_SIZE;
1491 1489
1492 t = &w->cache[idx]; 1490 t = &w->cache[idx];
@@ -1498,8 +1496,8 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1498 1496
1499 idx += err; 1497 idx += err;
1500 1498
1501 copy = slen & ~(blocksize - 1); 1499 copy = slen & ~(HIFN_D_DST_DALIGN - 1);
1502 diff = slen & (blocksize - 1); 1500 diff = slen & (HIFN_D_DST_DALIGN - 1);
1503 1501
1504 if (dlen < nbytes) { 1502 if (dlen < nbytes) {
1505 /* 1503 /*
@@ -1507,7 +1505,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1507 * to put there additional blocksized chunk, 1505 * to put there additional blocksized chunk,
1508 * so we mark that page as containing only 1506 * so we mark that page as containing only
1509 * blocksize aligned chunks: 1507 * blocksize aligned chunks:
1510 * t->length = (slen & ~(blocksize - 1)); 1508 * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
1511 * and increase number of bytes to be processed 1509 * and increase number of bytes to be processed
1512 * in next chunk: 1510 * in next chunk:
1513 * nbytes += diff; 1511 * nbytes += diff;
@@ -1544,7 +1542,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
1544 1542
1545 kunmap_atomic(daddr, KM_SOFTIRQ0); 1543 kunmap_atomic(daddr, KM_SOFTIRQ0);
1546 } else { 1544 } else {
1547 nbytes -= src->length; 1545 nbytes -= min(src->length, nbytes);
1548 idx++; 1546 idx++;
1549 } 1547 }
1550 1548
@@ -1563,14 +1561,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1563 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); 1561 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1564 struct hifn_device *dev = ctx->dev; 1562 struct hifn_device *dev = ctx->dev;
1565 struct page *spage, *dpage; 1563 struct page *spage, *dpage;
1566 unsigned long soff, doff, flags; 1564 unsigned long soff, doff, dlen, flags;
1567 unsigned int nbytes = req->nbytes, idx = 0, len; 1565 unsigned int nbytes = req->nbytes, idx = 0, len;
1568 int err = -EINVAL, sg_num; 1566 int err = -EINVAL, sg_num;
1569 struct scatterlist *src, *dst, *t; 1567 struct scatterlist *src, *dst, *t;
1570 unsigned blocksize =
1571 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1572 unsigned alignmask =
1573 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1574 1568
1575 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB) 1569 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
1576 goto err_out_exit; 1570 goto err_out_exit;
@@ -1578,17 +1572,14 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1578 ctx->walk.flags = 0; 1572 ctx->walk.flags = 0;
1579 1573
1580 while (nbytes) { 1574 while (nbytes) {
1581 src = &req->src[idx];
1582 dst = &req->dst[idx]; 1575 dst = &req->dst[idx];
1576 dlen = min(dst->length, nbytes);
1583 1577
1584 if (src->length & (blocksize - 1) || 1578 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1585 src->offset & (alignmask - 1) || 1579 !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
1586 dst->length & (blocksize - 1) ||
1587 dst->offset & (alignmask - 1)) {
1588 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; 1580 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
1589 }
1590 1581
1591 nbytes -= src->length; 1582 nbytes -= dlen;
1592 idx++; 1583 idx++;
1593 } 1584 }
1594 1585
@@ -1602,7 +1593,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1602 idx = 0; 1593 idx = 0;
1603 1594
1604 sg_num = ablkcipher_walk(req, &ctx->walk); 1595 sg_num = ablkcipher_walk(req, &ctx->walk);
1605 1596 if (sg_num < 0) {
1597 err = sg_num;
1598 goto err_out_exit;
1599 }
1606 atomic_set(&ctx->sg_num, sg_num); 1600 atomic_set(&ctx->sg_num, sg_num);
1607 1601
1608 spin_lock_irqsave(&dev->lock, flags); 1602 spin_lock_irqsave(&dev->lock, flags);
@@ -1640,7 +1634,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1640 if (err) 1634 if (err)
1641 goto err_out; 1635 goto err_out;
1642 1636
1643 nbytes -= len; 1637 nbytes -= min(len, nbytes);
1644 } 1638 }
1645 1639
1646 dev->active = HIFN_DEFAULT_ACTIVE_NUM; 1640 dev->active = HIFN_DEFAULT_ACTIVE_NUM;
@@ -1651,7 +1645,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1651err_out: 1645err_out:
1652 spin_unlock_irqrestore(&dev->lock, flags); 1646 spin_unlock_irqrestore(&dev->lock, flags);
1653err_out_exit: 1647err_out_exit:
1654 if (err && printk_ratelimit()) 1648 if (err)
1655 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " 1649 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
1656 "type: %u, err: %d.\n", 1650 "type: %u, err: %d.\n",
1657 dev->name, ctx->iv, ctx->ivsize, 1651 dev->name, ctx->iv, ctx->ivsize,
@@ -1745,8 +1739,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
1745 return -EINVAL; 1739 return -EINVAL;
1746 1740
1747 while (size) { 1741 while (size) {
1748 1742 copy = min(srest, min(dst->length, size));
1749 copy = min(dst->length, srest);
1750 1743
1751 daddr = kmap_atomic(sg_page(dst), KM_IRQ0); 1744 daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
1752 memcpy(daddr + dst->offset + offset, saddr, copy); 1745 memcpy(daddr + dst->offset + offset, saddr, copy);
@@ -1803,7 +1796,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
1803 sg_page(dst), dst->length, nbytes); 1796 sg_page(dst), dst->length, nbytes);
1804 1797
1805 if (!t->length) { 1798 if (!t->length) {
1806 nbytes -= dst->length; 1799 nbytes -= min(dst->length, nbytes);
1807 idx++; 1800 idx++;
1808 continue; 1801 continue;
1809 } 1802 }
@@ -2202,9 +2195,9 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
2202 return err; 2195 return err;
2203 2196
2204 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) 2197 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
2205 err = hifn_process_queue(dev); 2198 hifn_process_queue(dev);
2206 2199
2207 return err; 2200 return -EINPROGRESS;
2208} 2201}
2209 2202
2210/* 2203/*
@@ -2364,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2364 * 3DES ECB, CBC, CFB and OFB modes. 2357 * 3DES ECB, CBC, CFB and OFB modes.
2365 */ 2358 */
2366 { 2359 {
2367 .name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2360 .name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
2368 .ablkcipher = { 2361 .ablkcipher = {
2369 .min_keysize = HIFN_3DES_KEY_LENGTH, 2362 .min_keysize = HIFN_3DES_KEY_LENGTH,
2370 .max_keysize = HIFN_3DES_KEY_LENGTH, 2363 .max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2374,7 +2367,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2374 }, 2367 },
2375 }, 2368 },
2376 { 2369 {
2377 .name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2370 .name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
2378 .ablkcipher = { 2371 .ablkcipher = {
2379 .min_keysize = HIFN_3DES_KEY_LENGTH, 2372 .min_keysize = HIFN_3DES_KEY_LENGTH,
2380 .max_keysize = HIFN_3DES_KEY_LENGTH, 2373 .max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2384,8 +2377,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2384 }, 2377 },
2385 }, 2378 },
2386 { 2379 {
2387 .name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2380 .name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
2388 .ablkcipher = { 2381 .ablkcipher = {
2382 .ivsize = HIFN_IV_LENGTH,
2389 .min_keysize = HIFN_3DES_KEY_LENGTH, 2383 .min_keysize = HIFN_3DES_KEY_LENGTH,
2390 .max_keysize = HIFN_3DES_KEY_LENGTH, 2384 .max_keysize = HIFN_3DES_KEY_LENGTH,
2391 .setkey = hifn_setkey, 2385 .setkey = hifn_setkey,
@@ -2394,7 +2388,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2394 }, 2388 },
2395 }, 2389 },
2396 { 2390 {
2397 .name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, 2391 .name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
2398 .ablkcipher = { 2392 .ablkcipher = {
2399 .min_keysize = HIFN_3DES_KEY_LENGTH, 2393 .min_keysize = HIFN_3DES_KEY_LENGTH,
2400 .max_keysize = HIFN_3DES_KEY_LENGTH, 2394 .max_keysize = HIFN_3DES_KEY_LENGTH,
@@ -2408,7 +2402,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2408 * DES ECB, CBC, CFB and OFB modes. 2402 * DES ECB, CBC, CFB and OFB modes.
2409 */ 2403 */
2410 { 2404 {
2411 .name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8, 2405 .name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
2412 .ablkcipher = { 2406 .ablkcipher = {
2413 .min_keysize = HIFN_DES_KEY_LENGTH, 2407 .min_keysize = HIFN_DES_KEY_LENGTH,
2414 .max_keysize = HIFN_DES_KEY_LENGTH, 2408 .max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2418,7 +2412,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2418 }, 2412 },
2419 }, 2413 },
2420 { 2414 {
2421 .name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8, 2415 .name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
2422 .ablkcipher = { 2416 .ablkcipher = {
2423 .min_keysize = HIFN_DES_KEY_LENGTH, 2417 .min_keysize = HIFN_DES_KEY_LENGTH,
2424 .max_keysize = HIFN_DES_KEY_LENGTH, 2418 .max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2428,8 +2422,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2428 }, 2422 },
2429 }, 2423 },
2430 { 2424 {
2431 .name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8, 2425 .name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
2432 .ablkcipher = { 2426 .ablkcipher = {
2427 .ivsize = HIFN_IV_LENGTH,
2433 .min_keysize = HIFN_DES_KEY_LENGTH, 2428 .min_keysize = HIFN_DES_KEY_LENGTH,
2434 .max_keysize = HIFN_DES_KEY_LENGTH, 2429 .max_keysize = HIFN_DES_KEY_LENGTH,
2435 .setkey = hifn_setkey, 2430 .setkey = hifn_setkey,
@@ -2438,7 +2433,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2438 }, 2433 },
2439 }, 2434 },
2440 { 2435 {
2441 .name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8, 2436 .name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
2442 .ablkcipher = { 2437 .ablkcipher = {
2443 .min_keysize = HIFN_DES_KEY_LENGTH, 2438 .min_keysize = HIFN_DES_KEY_LENGTH,
2444 .max_keysize = HIFN_DES_KEY_LENGTH, 2439 .max_keysize = HIFN_DES_KEY_LENGTH,
@@ -2452,7 +2447,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2452 * AES ECB, CBC, CFB and OFB modes. 2447 * AES ECB, CBC, CFB and OFB modes.
2453 */ 2448 */
2454 { 2449 {
2455 .name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16, 2450 .name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
2456 .ablkcipher = { 2451 .ablkcipher = {
2457 .min_keysize = AES_MIN_KEY_SIZE, 2452 .min_keysize = AES_MIN_KEY_SIZE,
2458 .max_keysize = AES_MAX_KEY_SIZE, 2453 .max_keysize = AES_MAX_KEY_SIZE,
@@ -2462,8 +2457,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2462 }, 2457 },
2463 }, 2458 },
2464 { 2459 {
2465 .name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16, 2460 .name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
2466 .ablkcipher = { 2461 .ablkcipher = {
2462 .ivsize = HIFN_AES_IV_LENGTH,
2467 .min_keysize = AES_MIN_KEY_SIZE, 2463 .min_keysize = AES_MIN_KEY_SIZE,
2468 .max_keysize = AES_MAX_KEY_SIZE, 2464 .max_keysize = AES_MAX_KEY_SIZE,
2469 .setkey = hifn_setkey, 2465 .setkey = hifn_setkey,
@@ -2472,7 +2468,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2472 }, 2468 },
2473 }, 2469 },
2474 { 2470 {
2475 .name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16, 2471 .name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
2476 .ablkcipher = { 2472 .ablkcipher = {
2477 .min_keysize = AES_MIN_KEY_SIZE, 2473 .min_keysize = AES_MIN_KEY_SIZE,
2478 .max_keysize = AES_MAX_KEY_SIZE, 2474 .max_keysize = AES_MAX_KEY_SIZE,
@@ -2482,7 +2478,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2482 }, 2478 },
2483 }, 2479 },
2484 { 2480 {
2485 .name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16, 2481 .name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
2486 .ablkcipher = { 2482 .ablkcipher = {
2487 .min_keysize = AES_MIN_KEY_SIZE, 2483 .min_keysize = AES_MIN_KEY_SIZE,
2488 .max_keysize = AES_MAX_KEY_SIZE, 2484 .max_keysize = AES_MAX_KEY_SIZE,
@@ -2514,15 +2510,14 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
2514 return -ENOMEM; 2510 return -ENOMEM;
2515 2511
2516 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name); 2512 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
2517 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name); 2513 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
2514 t->drv_name, dev->name);
2518 2515
2519 alg->alg.cra_priority = 300; 2516 alg->alg.cra_priority = 300;
2520 alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 2517 alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
2521 alg->alg.cra_blocksize = t->bsize; 2518 alg->alg.cra_blocksize = t->bsize;
2522 alg->alg.cra_ctxsize = sizeof(struct hifn_context); 2519 alg->alg.cra_ctxsize = sizeof(struct hifn_context);
2523 alg->alg.cra_alignmask = 15; 2520 alg->alg.cra_alignmask = 0;
2524 if (t->bsize == 8)
2525 alg->alg.cra_alignmask = 3;
2526 alg->alg.cra_type = &crypto_ablkcipher_type; 2521 alg->alg.cra_type = &crypto_ablkcipher_type;
2527 alg->alg.cra_module = THIS_MODULE; 2522 alg->alg.cra_module = THIS_MODULE;
2528 alg->alg.cra_u.ablkcipher = t->ablkcipher; 2523 alg->alg.cra_u.ablkcipher = t->ablkcipher;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
new file mode 100644
index 000000000000..42a107fe9233
--- /dev/null
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -0,0 +1,1506 @@
1/*
2 * Intel IXP4xx NPE-C crypto driver
3 *
4 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/platform_device.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmapool.h>
15#include <linux/crypto.h>
16#include <linux/kernel.h>
17#include <linux/rtnetlink.h>
18#include <linux/interrupt.h>
19#include <linux/spinlock.h>
20
21#include <crypto/ctr.h>
22#include <crypto/des.h>
23#include <crypto/aes.h>
24#include <crypto/sha.h>
25#include <crypto/algapi.h>
26#include <crypto/aead.h>
27#include <crypto/authenc.h>
28#include <crypto/scatterwalk.h>
29
30#include <asm/arch/npe.h>
31#include <asm/arch/qmgr.h>
32
33#define MAX_KEYLEN 32
34
35/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
36#define NPE_CTX_LEN 80
37#define AES_BLOCK128 16
38
39#define NPE_OP_HASH_VERIFY 0x01
40#define NPE_OP_CCM_ENABLE 0x04
41#define NPE_OP_CRYPT_ENABLE 0x08
42#define NPE_OP_HASH_ENABLE 0x10
43#define NPE_OP_NOT_IN_PLACE 0x20
44#define NPE_OP_HMAC_DISABLE 0x40
45#define NPE_OP_CRYPT_ENCRYPT 0x80
46
47#define NPE_OP_CCM_GEN_MIC 0xcc
48#define NPE_OP_HASH_GEN_ICV 0x50
49#define NPE_OP_ENC_GEN_KEY 0xc9
50
51#define MOD_ECB 0x0000
52#define MOD_CTR 0x1000
53#define MOD_CBC_ENC 0x2000
54#define MOD_CBC_DEC 0x3000
55#define MOD_CCM_ENC 0x4000
56#define MOD_CCM_DEC 0x5000
57
58#define KEYLEN_128 4
59#define KEYLEN_192 6
60#define KEYLEN_256 8
61
62#define CIPH_DECR 0x0000
63#define CIPH_ENCR 0x0400
64
65#define MOD_DES 0x0000
66#define MOD_TDEA2 0x0100
67#define MOD_3DES 0x0200
68#define MOD_AES 0x0800
69#define MOD_AES128 (0x0800 | KEYLEN_128)
70#define MOD_AES192 (0x0900 | KEYLEN_192)
71#define MOD_AES256 (0x0a00 | KEYLEN_256)
72
73#define MAX_IVLEN 16
74#define NPE_ID 2 /* NPE C */
75#define NPE_QLEN 16
 76/* Extra descriptors for key-setup ("register") requests,
 77 * used when the first NPE_QLEN crypt_ctl are busy */
78#define NPE_QLEN_TOTAL 64
79
80#define SEND_QID 29
81#define RECV_QID 30
82
83#define CTL_FLAG_UNUSED 0x0000
84#define CTL_FLAG_USED 0x1000
85#define CTL_FLAG_PERFORM_ABLK 0x0001
86#define CTL_FLAG_GEN_ICV 0x0002
87#define CTL_FLAG_GEN_REVAES 0x0004
88#define CTL_FLAG_PERFORM_AEAD 0x0008
89#define CTL_FLAG_MASK 0x000f
90
91#define HMAC_IPAD_VALUE 0x36
92#define HMAC_OPAD_VALUE 0x5C
93#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
94
95#define MD5_DIGEST_SIZE 16
96
97struct buffer_desc {
98 u32 phys_next;
99 u16 buf_len;
100 u16 pkt_len;
101 u32 phys_addr;
102 u32 __reserved[4];
103 struct buffer_desc *next;
104};
105
106struct crypt_ctl {
107 u8 mode; /* NPE_OP_* operation mode */
108 u8 init_len;
109 u16 reserved;
110 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
111 u32 icv_rev_aes; /* icv or rev aes */
112 u32 src_buf;
113 u32 dst_buf;
114 u16 auth_offs; /* Authentication start offset */
115 u16 auth_len; /* Authentication data length */
 116	u16 crypt_offs;		/* Encryption/decryption start offset */
 117	u16 crypt_len;		/* Encryption/decryption data length */
118 u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
119 u32 crypto_ctx; /* NPE Crypto Param structure address */
120
121 /* Used by Host: 4*4 bytes*/
122 unsigned ctl_flags;
123 union {
124 struct ablkcipher_request *ablk_req;
125 struct aead_request *aead_req;
126 struct crypto_tfm *tfm;
127 } data;
128 struct buffer_desc *regist_buf;
129 u8 *regist_ptr;
130};
131
132struct ablk_ctx {
133 struct buffer_desc *src;
134 struct buffer_desc *dst;
135 unsigned src_nents;
136 unsigned dst_nents;
137};
138
139struct aead_ctx {
140 struct buffer_desc *buffer;
141 unsigned short assoc_nents;
142 unsigned short src_nents;
143 struct scatterlist ivlist;
144 /* used when the hmac is not on one sg entry */
145 u8 *hmac_virt;
146 int encrypt;
147};
148
149struct ix_hash_algo {
150 u32 cfgword;
151 unsigned char *icv;
152};
153
154struct ix_sa_dir {
155 unsigned char *npe_ctx;
156 dma_addr_t npe_ctx_phys;
157 int npe_ctx_idx;
158 u8 npe_mode;
159};
160
161struct ixp_ctx {
162 struct ix_sa_dir encrypt;
163 struct ix_sa_dir decrypt;
164 int authkey_len;
165 u8 authkey[MAX_KEYLEN];
166 int enckey_len;
167 u8 enckey[MAX_KEYLEN];
168 u8 salt[MAX_IVLEN];
169 u8 nonce[CTR_RFC3686_NONCE_SIZE];
170 unsigned salted;
171 atomic_t configuring;
172 struct completion completion;
173};
174
175struct ixp_alg {
176 struct crypto_alg crypto;
177 const struct ix_hash_algo *hash;
178 u32 cfg_enc;
179 u32 cfg_dec;
180
181 int registered;
182};
183
184static const struct ix_hash_algo hash_alg_md5 = {
185 .cfgword = 0xAA010004,
186 .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
187 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
188};
189static const struct ix_hash_algo hash_alg_sha1 = {
190 .cfgword = 0x00000005,
191 .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
192 "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
193};
194
195static struct npe *npe_c;
196static struct dma_pool *buffer_pool = NULL;
197static struct dma_pool *ctx_pool = NULL;
198
199static struct crypt_ctl *crypt_virt = NULL;
200static dma_addr_t crypt_phys;
201
202static int support_aes = 1;
203
204static void dev_release(struct device *dev)
205{
206 return;
207}
208
209#define DRIVER_NAME "ixp4xx_crypto"
210static struct platform_device pseudo_dev = {
211 .name = DRIVER_NAME,
212 .id = 0,
213 .num_resources = 0,
214 .dev = {
215 .coherent_dma_mask = DMA_32BIT_MASK,
216 .release = dev_release,
217 }
218};
219
220static struct device *dev = &pseudo_dev.dev;
221
222static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
223{
224 return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
225}
226
227static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
228{
229 return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
230}
231
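/*
 * Worked example (not part of the driver): struct crypt_ctl is exactly
 * 64 bytes (see the BUILD_BUG_ON in setup_crypt_desc()), so the two helpers
 * above are plain pointer arithmetic over the coherent ring.  With
 * crypt_phys == 0x1000, descriptor #3 sits at physical
 * 0x1000 + 3 * 64 == 0x10c0, and the mapping round-trips:
 *
 *	struct crypt_ctl *desc = &crypt_virt[3];
 *	BUG_ON(crypt_phys2virt(crypt_virt2phys(desc)) != desc);
 */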
232static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
233{
 234	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
235}
236
237static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
238{
 239	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
240}
241
242static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
243{
244 return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
245}
246
247static int setup_crypt_desc(void)
248{
249 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
 250	crypt_virt = dma_alloc_coherent(dev,
 251			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
 252			&crypt_phys, GFP_KERNEL);
 253	if (!crypt_virt)
 254		return -ENOMEM;
 255	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
256 return 0;
257}
258
259static spinlock_t desc_lock;
260static struct crypt_ctl *get_crypt_desc(void)
261{
262 int i;
263 static int idx = 0;
264 unsigned long flags;
265
266 spin_lock_irqsave(&desc_lock, flags);
267
268 if (unlikely(!crypt_virt))
269 setup_crypt_desc();
270 if (unlikely(!crypt_virt)) {
271 spin_unlock_irqrestore(&desc_lock, flags);
272 return NULL;
273 }
274 i = idx;
275 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
276 if (++idx >= NPE_QLEN)
277 idx = 0;
278 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
279 spin_unlock_irqrestore(&desc_lock, flags);
 280		return crypt_virt + i;
281 } else {
282 spin_unlock_irqrestore(&desc_lock, flags);
283 return NULL;
284 }
285}
286
287static spinlock_t emerg_lock;
288static struct crypt_ctl *get_crypt_desc_emerg(void)
289{
290 int i;
291 static int idx = NPE_QLEN;
292 struct crypt_ctl *desc;
293 unsigned long flags;
294
295 desc = get_crypt_desc();
296 if (desc)
297 return desc;
298 if (unlikely(!crypt_virt))
299 return NULL;
300
301 spin_lock_irqsave(&emerg_lock, flags);
302 i = idx;
303 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
304 if (++idx >= NPE_QLEN_TOTAL)
305 idx = NPE_QLEN;
306 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
307 spin_unlock_irqrestore(&emerg_lock, flags);
 308		return crypt_virt + i;
309 } else {
310 spin_unlock_irqrestore(&emerg_lock, flags);
311 return NULL;
312 }
313}
314
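/*
 * Illustrative note (not part of the driver): the descriptor ring is split
 * in two regions.  get_crypt_desc() allocates from slots 0..NPE_QLEN-1 for
 * regular requests, while get_crypt_desc_emerg() falls back to the reserved
 * slots NPE_QLEN..NPE_QLEN_TOTAL-1, so key setup (register_chain_var() and
 * gen_rev_aes_key() below) cannot be starved by a full request queue:
 *
 *	struct crypt_ctl *crypt = get_crypt_desc_emerg();
 *	if (!crypt)
 *		return -EAGAIN;
 */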
315static void free_buf_chain(struct buffer_desc *buf, u32 phys)
316{
317 while (buf) {
318 struct buffer_desc *buf1;
319 u32 phys1;
320
321 buf1 = buf->next;
322 phys1 = buf->phys_next;
323 dma_pool_free(buffer_pool, buf, phys);
324 buf = buf1;
325 phys = phys1;
326 }
327}
328
329static struct tasklet_struct crypto_done_tasklet;
330
331static void finish_scattered_hmac(struct crypt_ctl *crypt)
332{
333 struct aead_request *req = crypt->data.aead_req;
334 struct aead_ctx *req_ctx = aead_request_ctx(req);
335 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
336 int authsize = crypto_aead_authsize(tfm);
337 int decryptlen = req->cryptlen - authsize;
338
339 if (req_ctx->encrypt) {
340 scatterwalk_map_and_copy(req_ctx->hmac_virt,
341 req->src, decryptlen, authsize, 1);
342 }
343 dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
344}
345
346static void one_packet(dma_addr_t phys)
347{
348 struct crypt_ctl *crypt;
349 struct ixp_ctx *ctx;
350 int failed;
351 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
352
353 failed = phys & 0x1 ? -EBADMSG : 0;
354 phys &= ~0x3;
355 crypt = crypt_phys2virt(phys);
356
357 switch (crypt->ctl_flags & CTL_FLAG_MASK) {
358 case CTL_FLAG_PERFORM_AEAD: {
359 struct aead_request *req = crypt->data.aead_req;
360 struct aead_ctx *req_ctx = aead_request_ctx(req);
361 dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
362 DMA_TO_DEVICE);
363 dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
364 dma_unmap_sg(dev, req->src, req_ctx->src_nents,
365 DMA_BIDIRECTIONAL);
366
367 free_buf_chain(req_ctx->buffer, crypt->src_buf);
368 if (req_ctx->hmac_virt) {
369 finish_scattered_hmac(crypt);
370 }
371 req->base.complete(&req->base, failed);
372 break;
373 }
374 case CTL_FLAG_PERFORM_ABLK: {
375 struct ablkcipher_request *req = crypt->data.ablk_req;
376 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
377 int nents;
378 if (req_ctx->dst) {
379 nents = req_ctx->dst_nents;
380 dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
381 free_buf_chain(req_ctx->dst, crypt->dst_buf);
382 src_direction = DMA_TO_DEVICE;
383 }
384 nents = req_ctx->src_nents;
385 dma_unmap_sg(dev, req->src, nents, src_direction);
386 free_buf_chain(req_ctx->src, crypt->src_buf);
387 req->base.complete(&req->base, failed);
388 break;
389 }
390 case CTL_FLAG_GEN_ICV:
391 ctx = crypto_tfm_ctx(crypt->data.tfm);
392 dma_pool_free(ctx_pool, crypt->regist_ptr,
393 crypt->regist_buf->phys_addr);
394 dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
395 if (atomic_dec_and_test(&ctx->configuring))
396 complete(&ctx->completion);
397 break;
398 case CTL_FLAG_GEN_REVAES:
399 ctx = crypto_tfm_ctx(crypt->data.tfm);
400 *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
401 if (atomic_dec_and_test(&ctx->configuring))
402 complete(&ctx->completion);
403 break;
404 default:
405 BUG();
406 }
407 crypt->ctl_flags = CTL_FLAG_UNUSED;
408}
409
410static void irqhandler(void *_unused)
411{
412 tasklet_schedule(&crypto_done_tasklet);
413}
414
415static void crypto_done_action(unsigned long arg)
416{
417 int i;
418
 419	for (i = 0; i < 4; i++) {
420 dma_addr_t phys = qmgr_get_entry(RECV_QID);
421 if (!phys)
422 return;
423 one_packet(phys);
424 }
425 tasklet_schedule(&crypto_done_tasklet);
426}
427
428static int init_ixp_crypto(void)
429{
430 int ret = -ENODEV;
431
 432	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
 433				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
434 printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
435 return ret;
436 }
437 npe_c = npe_request(NPE_ID);
438 if (!npe_c)
439 return ret;
440
441 if (!npe_running(npe_c)) {
442 npe_load_firmware(npe_c, npe_name(npe_c), dev);
443 }
444
 445	/* buffer_pool will sometimes also be used to store the hmac,
 446	 * so make sure it is large enough
 447	 */
448 BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
449 buffer_pool = dma_pool_create("buffer", dev,
450 sizeof(struct buffer_desc), 32, 0);
451 ret = -ENOMEM;
452 if (!buffer_pool) {
453 goto err;
454 }
455 ctx_pool = dma_pool_create("context", dev,
456 NPE_CTX_LEN, 16, 0);
457 if (!ctx_pool) {
458 goto err;
459 }
460 ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
461 if (ret)
462 goto err;
463 ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
464 if (ret) {
465 qmgr_release_queue(SEND_QID);
466 goto err;
467 }
468 qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
469 tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
470
471 qmgr_enable_irq(RECV_QID);
472 return 0;
473err:
474 if (ctx_pool)
475 dma_pool_destroy(ctx_pool);
476 if (buffer_pool)
477 dma_pool_destroy(buffer_pool);
478 npe_release(npe_c);
479 return ret;
480}
481
482static void release_ixp_crypto(void)
483{
484 qmgr_disable_irq(RECV_QID);
485 tasklet_kill(&crypto_done_tasklet);
486
487 qmgr_release_queue(SEND_QID);
488 qmgr_release_queue(RECV_QID);
489
490 dma_pool_destroy(ctx_pool);
491 dma_pool_destroy(buffer_pool);
492
493 npe_release(npe_c);
494
495 if (crypt_virt) {
496 dma_free_coherent(dev,
 497			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
498 crypt_virt, crypt_phys);
499 }
500 return;
501}
502
503static void reset_sa_dir(struct ix_sa_dir *dir)
504{
505 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
506 dir->npe_ctx_idx = 0;
507 dir->npe_mode = 0;
508}
509
510static int init_sa_dir(struct ix_sa_dir *dir)
511{
512 dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
513 if (!dir->npe_ctx) {
514 return -ENOMEM;
515 }
516 reset_sa_dir(dir);
517 return 0;
518}
519
520static void free_sa_dir(struct ix_sa_dir *dir)
521{
522 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
523 dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
524}
525
526static int init_tfm(struct crypto_tfm *tfm)
527{
528 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
529 int ret;
530
531 atomic_set(&ctx->configuring, 0);
532 ret = init_sa_dir(&ctx->encrypt);
533 if (ret)
534 return ret;
535 ret = init_sa_dir(&ctx->decrypt);
536 if (ret) {
537 free_sa_dir(&ctx->encrypt);
538 }
539 return ret;
540}
541
542static int init_tfm_ablk(struct crypto_tfm *tfm)
543{
544 tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
545 return init_tfm(tfm);
546}
547
548static int init_tfm_aead(struct crypto_tfm *tfm)
549{
550 tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
551 return init_tfm(tfm);
552}
553
554static void exit_tfm(struct crypto_tfm *tfm)
555{
556 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
557 free_sa_dir(&ctx->encrypt);
558 free_sa_dir(&ctx->decrypt);
559}
560
561static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
562 int init_len, u32 ctx_addr, const u8 *key, int key_len)
563{
564 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
565 struct crypt_ctl *crypt;
566 struct buffer_desc *buf;
567 int i;
568 u8 *pad;
569 u32 pad_phys, buf_phys;
570
571 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
572 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
573 if (!pad)
574 return -ENOMEM;
575 buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
576 if (!buf) {
577 dma_pool_free(ctx_pool, pad, pad_phys);
578 return -ENOMEM;
579 }
580 crypt = get_crypt_desc_emerg();
581 if (!crypt) {
582 dma_pool_free(ctx_pool, pad, pad_phys);
583 dma_pool_free(buffer_pool, buf, buf_phys);
584 return -EAGAIN;
585 }
586
587 memcpy(pad, key, key_len);
588 memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
589 for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
590 pad[i] ^= xpad;
591 }
592
593 crypt->data.tfm = tfm;
594 crypt->regist_ptr = pad;
595 crypt->regist_buf = buf;
596
597 crypt->auth_offs = 0;
598 crypt->auth_len = HMAC_PAD_BLOCKLEN;
599 crypt->crypto_ctx = ctx_addr;
600 crypt->src_buf = buf_phys;
601 crypt->icv_rev_aes = target;
602 crypt->mode = NPE_OP_HASH_GEN_ICV;
603 crypt->init_len = init_len;
604 crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
605
 606	buf->next = NULL;
607 buf->buf_len = HMAC_PAD_BLOCKLEN;
608 buf->pkt_len = 0;
609 buf->phys_addr = pad_phys;
610
611 atomic_inc(&ctx->configuring);
612 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
613 BUG_ON(qmgr_stat_overflow(SEND_QID));
614 return 0;
615}
616
617static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
618 const u8 *key, int key_len, unsigned digest_len)
619{
620 u32 itarget, otarget, npe_ctx_addr;
621 unsigned char *cinfo;
622 int init_len, ret = 0;
623 u32 cfgword;
624 struct ix_sa_dir *dir;
625 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
626 const struct ix_hash_algo *algo;
627
628 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
629 cinfo = dir->npe_ctx + dir->npe_ctx_idx;
630 algo = ix_hash(tfm);
631
632 /* write cfg word to cryptinfo */
 633	cfgword = algo->cfgword | (authsize << 6); /* (authsize / 4) << 8 */
634 *(u32*)cinfo = cpu_to_be32(cfgword);
635 cinfo += sizeof(cfgword);
636
637 /* write ICV to cryptinfo */
638 memcpy(cinfo, algo->icv, digest_len);
639 cinfo += digest_len;
640
641 itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
642 + sizeof(algo->cfgword);
643 otarget = itarget + digest_len;
644 init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
645 npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
646
647 dir->npe_ctx_idx += init_len;
648 dir->npe_mode |= NPE_OP_HASH_ENABLE;
649
650 if (!encrypt)
651 dir->npe_mode |= NPE_OP_HASH_VERIFY;
652
653 ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
654 init_len, npe_ctx_addr, key, key_len);
655 if (ret)
656 return ret;
657 return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
658 init_len, npe_ctx_addr, key, key_len);
659}
660
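/*
 * Worked example (not part of the driver): the cfgword above packs the
 * truncated digest length, counted in 32-bit words, at bit 8; for any
 * authsize that is a multiple of four, (authsize / 4) << 8 == authsize << 6.
 * For HMAC-SHA1 with a 12-byte ICV:
 *
 *	(12 / 4) << 8 == 3 << 8 == 0x300 == 12 << 6
 */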
661static int gen_rev_aes_key(struct crypto_tfm *tfm)
662{
663 struct crypt_ctl *crypt;
664 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
665 struct ix_sa_dir *dir = &ctx->decrypt;
666
667 crypt = get_crypt_desc_emerg();
668 if (!crypt) {
669 return -EAGAIN;
670 }
671 *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
672
673 crypt->data.tfm = tfm;
674 crypt->crypt_offs = 0;
675 crypt->crypt_len = AES_BLOCK128;
676 crypt->src_buf = 0;
677 crypt->crypto_ctx = dir->npe_ctx_phys;
678 crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
679 crypt->mode = NPE_OP_ENC_GEN_KEY;
680 crypt->init_len = dir->npe_ctx_idx;
681 crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
682
683 atomic_inc(&ctx->configuring);
684 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
685 BUG_ON(qmgr_stat_overflow(SEND_QID));
686 return 0;
687}
688
689static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
690 const u8 *key, int key_len)
691{
692 u8 *cinfo;
693 u32 cipher_cfg;
694 u32 keylen_cfg = 0;
695 struct ix_sa_dir *dir;
696 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
697 u32 *flags = &tfm->crt_flags;
698
699 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
700 cinfo = dir->npe_ctx;
701
702 if (encrypt) {
703 cipher_cfg = cipher_cfg_enc(tfm);
704 dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
705 } else {
706 cipher_cfg = cipher_cfg_dec(tfm);
707 }
708 if (cipher_cfg & MOD_AES) {
709 switch (key_len) {
710 case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
711 case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
712 case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
713 default:
714 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
715 return -EINVAL;
716 }
717 cipher_cfg |= keylen_cfg;
718 } else if (cipher_cfg & MOD_3DES) {
719 const u32 *K = (const u32 *)key;
720 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
721 !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
722 {
723 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
724 return -EINVAL;
725 }
726 } else {
727 u32 tmp[DES_EXPKEY_WORDS];
728 if (des_ekey(tmp, key) == 0) {
729 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
730 }
731 }
732 /* write cfg word to cryptinfo */
733 *(u32*)cinfo = cpu_to_be32(cipher_cfg);
734 cinfo += sizeof(cipher_cfg);
735
736 /* write cipher key to cryptinfo */
737 memcpy(cinfo, key, key_len);
738 /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
739 if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
 740		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
741 key_len = DES3_EDE_KEY_SIZE;
742 }
743 dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
744 dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
745 if ((cipher_cfg & MOD_AES) && !encrypt) {
746 return gen_rev_aes_key(tfm);
747 }
748 return 0;
749}
750
751static int count_sg(struct scatterlist *sg, int nbytes)
752{
753 int i;
754 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
755 nbytes -= sg->length;
756 return i;
757}
758
759static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
760 unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
761{
762 int nents = 0;
763
764 while (nbytes > 0) {
765 struct buffer_desc *next_buf;
766 u32 next_buf_phys;
767 unsigned len = min(nbytes, sg_dma_len(sg));
768
769 nents++;
770 nbytes -= len;
771 if (!buf->phys_addr) {
772 buf->phys_addr = sg_dma_address(sg);
773 buf->buf_len = len;
774 buf->next = NULL;
775 buf->phys_next = 0;
776 goto next;
777 }
 778		/* Two physically contiguous chunks may be covered by the
 779		 * previous buffer descriptor, whose length is simply extended
 780		 */
781 if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
782 buf->buf_len += len;
783 goto next;
784 }
785 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
786 if (!next_buf)
787 return NULL;
788 buf->next = next_buf;
789 buf->phys_next = next_buf_phys;
790
791 buf = next_buf;
792 buf->next = NULL;
793 buf->phys_next = 0;
794 buf->phys_addr = sg_dma_address(sg);
795 buf->buf_len = len;
796next:
797 if (nbytes > 0) {
798 sg = sg_next(sg);
799 }
800 }
801 return buf;
802}
803
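/*
 * Worked example (not part of the driver): chainup_buffers() merges
 * physically contiguous scatterlist chunks into one NPE buffer descriptor.
 * Two chunks at 0x2000 (len 0x100) and 0x2100 (len 0x100) collapse into a
 * single descriptor:
 *
 *	buf->phys_addr == 0x2000
 *	buf->buf_len   == 0x200
 *
 * Only a chunk that is not adjacent to the previous one allocates a fresh
 * descriptor from buffer_pool.
 */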
804static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
805 unsigned int key_len)
806{
807 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
808 u32 *flags = &tfm->base.crt_flags;
809 int ret;
810
811 init_completion(&ctx->completion);
812 atomic_inc(&ctx->configuring);
813
814 reset_sa_dir(&ctx->encrypt);
815 reset_sa_dir(&ctx->decrypt);
816
817 ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
818 ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
819
820 ret = setup_cipher(&tfm->base, 0, key, key_len);
821 if (ret)
822 goto out;
823 ret = setup_cipher(&tfm->base, 1, key, key_len);
824 if (ret)
825 goto out;
826
827 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
828 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
829 ret = -EINVAL;
830 } else {
831 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
832 }
833 }
834out:
835 if (!atomic_dec_and_test(&ctx->configuring))
836 wait_for_completion(&ctx->completion);
837 return ret;
838}
839
840static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
841 unsigned int key_len)
842{
843 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
844
 845	/* the nonce is stored in the last bytes of the key */
846 if (key_len < CTR_RFC3686_NONCE_SIZE)
847 return -EINVAL;
848
849 memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
850 CTR_RFC3686_NONCE_SIZE);
851
852 key_len -= CTR_RFC3686_NONCE_SIZE;
853 return ablk_setkey(tfm, key, key_len);
854}
855
856static int ablk_perform(struct ablkcipher_request *req, int encrypt)
857{
858 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
859 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
860 unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
861 int ret = -ENOMEM;
862 struct ix_sa_dir *dir;
863 struct crypt_ctl *crypt;
864 unsigned int nbytes = req->nbytes, nents;
865 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
866 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
867 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
868 GFP_KERNEL : GFP_ATOMIC;
869
870 if (qmgr_stat_full(SEND_QID))
871 return -EAGAIN;
872 if (atomic_read(&ctx->configuring))
873 return -EAGAIN;
874
875 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
876
877 crypt = get_crypt_desc();
878 if (!crypt)
879 return ret;
880
881 crypt->data.ablk_req = req;
882 crypt->crypto_ctx = dir->npe_ctx_phys;
883 crypt->mode = dir->npe_mode;
884 crypt->init_len = dir->npe_ctx_idx;
885
886 crypt->crypt_offs = 0;
887 crypt->crypt_len = nbytes;
888
889 BUG_ON(ivsize && !req->info);
890 memcpy(crypt->iv, req->info, ivsize);
891 if (req->src != req->dst) {
892 crypt->mode |= NPE_OP_NOT_IN_PLACE;
893 nents = count_sg(req->dst, nbytes);
894 /* This was never tested by Intel
895 * for more than one dst buffer, I think. */
896 BUG_ON(nents != 1);
897 req_ctx->dst_nents = nents;
898 dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
 899		req_ctx->dst = dma_pool_alloc(buffer_pool, flags, &crypt->dst_buf);
900 if (!req_ctx->dst)
901 goto unmap_sg_dest;
902 req_ctx->dst->phys_addr = 0;
903 if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
904 goto free_buf_dest;
905 src_direction = DMA_TO_DEVICE;
906 } else {
907 req_ctx->dst = NULL;
908 req_ctx->dst_nents = 0;
909 }
910 nents = count_sg(req->src, nbytes);
911 req_ctx->src_nents = nents;
912 dma_map_sg(dev, req->src, nents, src_direction);
913
914 req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
915 if (!req_ctx->src)
916 goto unmap_sg_src;
917 req_ctx->src->phys_addr = 0;
918 if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
919 goto free_buf_src;
920
921 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
922 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
923 BUG_ON(qmgr_stat_overflow(SEND_QID));
924 return -EINPROGRESS;
925
926free_buf_src:
927 free_buf_chain(req_ctx->src, crypt->src_buf);
928unmap_sg_src:
929 dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
930free_buf_dest:
931 if (req->src != req->dst) {
932 free_buf_chain(req_ctx->dst, crypt->dst_buf);
933unmap_sg_dest:
 934	dma_unmap_sg(dev, req->dst, req_ctx->dst_nents,
 935			DMA_FROM_DEVICE);
936 }
937 crypt->ctl_flags = CTL_FLAG_UNUSED;
938 return ret;
939}
940
941static int ablk_encrypt(struct ablkcipher_request *req)
942{
943 return ablk_perform(req, 1);
944}
945
946static int ablk_decrypt(struct ablkcipher_request *req)
947{
948 return ablk_perform(req, 0);
949}
950
951static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
952{
953 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
954 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
955 u8 iv[CTR_RFC3686_BLOCK_SIZE];
956 u8 *info = req->info;
957 int ret;
958
959 /* set up counter block */
960 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
961 memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
962
963 /* initialize counter portion of counter block */
964 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
965 cpu_to_be32(1);
966
967 req->info = iv;
968 ret = ablk_perform(req, 1);
969 req->info = info;
970 return ret;
971}
972
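/*
 * Illustrative note (not part of the driver): the counter block assembled
 * above follows RFC 3686: the 4-byte nonce saved at setkey time, the
 * caller's 8-byte IV, and a 4-byte big-endian counter starting at 1:
 *
 *	byte   0            4                     12            16
 *	       [ nonce (4)  ][ req->info, IV (8)  ][ 00 00 00 01 ]
 */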
973static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
974 unsigned int nbytes)
975{
976 int offset = 0;
977
978 if (!nbytes)
979 return 0;
980
981 for (;;) {
982 if (start < offset + sg->length)
983 break;
984
985 offset += sg->length;
986 sg = sg_next(sg);
987 }
988 return (start + nbytes > offset + sg->length);
989}
990
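/*
 * Illustrative note (not part of the driver): hmac_inconsistent() returns
 * nonzero when the nbytes of ICV starting at offset 'start' cross a
 * scatterlist entry boundary.  aead_perform() below then bounces the ICV
 * through a dma_pool buffer (hmac_virt) instead of pointing the NPE at a
 * non-contiguous region:
 *
 *	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize)))
 *		... allocate req_ctx->hmac_virt from buffer_pool ...
 */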
991static int aead_perform(struct aead_request *req, int encrypt,
992 int cryptoffset, int eff_cryptlen, u8 *iv)
993{
994 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
995 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
996 unsigned ivsize = crypto_aead_ivsize(tfm);
997 unsigned authsize = crypto_aead_authsize(tfm);
998 int ret = -ENOMEM;
999 struct ix_sa_dir *dir;
1000 struct crypt_ctl *crypt;
1001 unsigned int cryptlen, nents;
1002 struct buffer_desc *buf;
1003 struct aead_ctx *req_ctx = aead_request_ctx(req);
1004 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1005 GFP_KERNEL : GFP_ATOMIC;
1006
1007 if (qmgr_stat_full(SEND_QID))
1008 return -EAGAIN;
1009 if (atomic_read(&ctx->configuring))
1010 return -EAGAIN;
1011
1012 if (encrypt) {
1013 dir = &ctx->encrypt;
1014 cryptlen = req->cryptlen;
1015 } else {
1016 dir = &ctx->decrypt;
1017 /* req->cryptlen includes the authsize when decrypting */
 1018		cryptlen = req->cryptlen - authsize;
1019 eff_cryptlen -= authsize;
1020 }
1021 crypt = get_crypt_desc();
1022 if (!crypt)
1023 return ret;
1024
1025 crypt->data.aead_req = req;
1026 crypt->crypto_ctx = dir->npe_ctx_phys;
1027 crypt->mode = dir->npe_mode;
1028 crypt->init_len = dir->npe_ctx_idx;
1029
1030 crypt->crypt_offs = cryptoffset;
1031 crypt->crypt_len = eff_cryptlen;
1032
1033 crypt->auth_offs = 0;
1034 crypt->auth_len = req->assoclen + ivsize + cryptlen;
1035 BUG_ON(ivsize && !req->iv);
1036 memcpy(crypt->iv, req->iv, ivsize);
1037
1038 if (req->src != req->dst) {
 1039		BUG(); /* -ENOTSUP because of my laziness */
1040 }
1041
1042 req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
1043 if (!req_ctx->buffer)
1044 goto out;
1045 req_ctx->buffer->phys_addr = 0;
1046 /* ASSOC data */
1047 nents = count_sg(req->assoc, req->assoclen);
1048 req_ctx->assoc_nents = nents;
1049 dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
 1050	buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer, flags);
1051 if (!buf)
1052 goto unmap_sg_assoc;
1053 /* IV */
1054 sg_init_table(&req_ctx->ivlist, 1);
1055 sg_set_buf(&req_ctx->ivlist, iv, ivsize);
1056 dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
1057 buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
1058 if (!buf)
1059 goto unmap_sg_iv;
1060 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
 1061		/* The hmac bytes are scattered across sg entries,
 1062		 * so we copy them into a contiguous bounce buffer */
1063 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1064 &crypt->icv_rev_aes);
1065 if (unlikely(!req_ctx->hmac_virt))
1066 goto unmap_sg_iv;
1067 if (!encrypt) {
1068 scatterwalk_map_and_copy(req_ctx->hmac_virt,
1069 req->src, cryptlen, authsize, 0);
1070 }
1071 req_ctx->encrypt = encrypt;
1072 } else {
1073 req_ctx->hmac_virt = NULL;
1074 }
1075 /* Crypt */
1076 nents = count_sg(req->src, cryptlen + authsize);
1077 req_ctx->src_nents = nents;
1078 dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
1079 buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
1080 if (!buf)
1081 goto unmap_sg_src;
1082 if (!req_ctx->hmac_virt) {
1083 crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
1084 }
1085 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1086 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1087 BUG_ON(qmgr_stat_overflow(SEND_QID));
1088 return -EINPROGRESS;
1089unmap_sg_src:
1090 dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
1091 if (req_ctx->hmac_virt) {
1092 dma_pool_free(buffer_pool, req_ctx->hmac_virt,
1093 crypt->icv_rev_aes);
1094 }
1095unmap_sg_iv:
1096 dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
1097unmap_sg_assoc:
1098 dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
1099 free_buf_chain(req_ctx->buffer, crypt->src_buf);
1100out:
1101 crypt->ctl_flags = CTL_FLAG_UNUSED;
1102 return ret;
1103}
1104
1105static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1106{
1107 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1108 u32 *flags = &tfm->base.crt_flags;
1109 unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
1110 int ret;
1111
1112 if (!ctx->enckey_len && !ctx->authkey_len)
1113 return 0;
1114 init_completion(&ctx->completion);
1115 atomic_inc(&ctx->configuring);
1116
1117 reset_sa_dir(&ctx->encrypt);
1118 reset_sa_dir(&ctx->decrypt);
1119
1120 ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1121 if (ret)
1122 goto out;
1123 ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1124 if (ret)
1125 goto out;
1126 ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1127 ctx->authkey_len, digest_len);
1128 if (ret)
1129 goto out;
1130 ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
1131 ctx->authkey_len, digest_len);
1132 if (ret)
1133 goto out;
1134
1135 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
1136 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
1137 ret = -EINVAL;
1138 goto out;
1139 } else {
1140 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
1141 }
1142 }
1143out:
1144 if (!atomic_dec_and_test(&ctx->configuring))
1145 wait_for_completion(&ctx->completion);
1146 return ret;
1147}
1148
1149static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1150{
1151 int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
1152
 1153	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
1154 return -EINVAL;
1155 return aead_setup(tfm, authsize);
1156}
1157
1158static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1159 unsigned int keylen)
1160{
1161 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1162 struct rtattr *rta = (struct rtattr *)key;
1163 struct crypto_authenc_key_param *param;
1164
1165 if (!RTA_OK(rta, keylen))
1166 goto badkey;
1167 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1168 goto badkey;
1169 if (RTA_PAYLOAD(rta) < sizeof(*param))
1170 goto badkey;
1171
1172 param = RTA_DATA(rta);
1173 ctx->enckey_len = be32_to_cpu(param->enckeylen);
1174
1175 key += RTA_ALIGN(rta->rta_len);
1176 keylen -= RTA_ALIGN(rta->rta_len);
1177
1178 if (keylen < ctx->enckey_len)
1179 goto badkey;
1180
1181 ctx->authkey_len = keylen - ctx->enckey_len;
1182 memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
1183 memcpy(ctx->authkey, key, ctx->authkey_len);
1184
1185 return aead_setup(tfm, crypto_aead_authsize(tfm));
1186badkey:
1187 ctx->enckey_len = 0;
1188 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1189 return -EINVAL;
1190}
1191
1192static int aead_encrypt(struct aead_request *req)
1193{
1194 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1195 return aead_perform(req, 1, req->assoclen + ivsize,
1196 req->cryptlen, req->iv);
1197}
1198
1199static int aead_decrypt(struct aead_request *req)
1200{
1201 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1202 return aead_perform(req, 0, req->assoclen + ivsize,
1203 req->cryptlen, req->iv);
1204}
1205
1206static int aead_givencrypt(struct aead_givcrypt_request *req)
1207{
1208 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
1209 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1210 unsigned len, ivsize = crypto_aead_ivsize(tfm);
1211 __be64 seq;
1212
1213 /* copied from eseqiv.c */
1214 if (!ctx->salted) {
1215 get_random_bytes(ctx->salt, ivsize);
1216 ctx->salted = 1;
1217 }
1218 memcpy(req->areq.iv, ctx->salt, ivsize);
1219 len = ivsize;
1220 if (ivsize > sizeof(u64)) {
1221 memset(req->giv, 0, ivsize - sizeof(u64));
1222 len = sizeof(u64);
1223 }
1224 seq = cpu_to_be64(req->seq);
1225 memcpy(req->giv + ivsize - len, &seq, len);
1226 return aead_perform(&req->areq, 1, req->areq.assoclen,
 1227			req->areq.cryptlen + ivsize, req->giv);
1228}
1229
1230static struct ixp_alg ixp4xx_algos[] = {
1231{
1232 .crypto = {
1233 .cra_name = "cbc(des)",
1234 .cra_blocksize = DES_BLOCK_SIZE,
1235 .cra_u = { .ablkcipher = {
1236 .min_keysize = DES_KEY_SIZE,
1237 .max_keysize = DES_KEY_SIZE,
1238 .ivsize = DES_BLOCK_SIZE,
1239 .geniv = "eseqiv",
1240 }
1241 }
1242 },
1243 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1244 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1245
1246}, {
1247 .crypto = {
1248 .cra_name = "ecb(des)",
1249 .cra_blocksize = DES_BLOCK_SIZE,
1250 .cra_u = { .ablkcipher = {
1251 .min_keysize = DES_KEY_SIZE,
1252 .max_keysize = DES_KEY_SIZE,
1253 }
1254 }
1255 },
1256 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1257 .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1258}, {
1259 .crypto = {
1260 .cra_name = "cbc(des3_ede)",
1261 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1262 .cra_u = { .ablkcipher = {
1263 .min_keysize = DES3_EDE_KEY_SIZE,
1264 .max_keysize = DES3_EDE_KEY_SIZE,
1265 .ivsize = DES3_EDE_BLOCK_SIZE,
1266 .geniv = "eseqiv",
1267 }
1268 }
1269 },
1270 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1271 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1272}, {
1273 .crypto = {
1274 .cra_name = "ecb(des3_ede)",
1275 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1276 .cra_u = { .ablkcipher = {
1277 .min_keysize = DES3_EDE_KEY_SIZE,
1278 .max_keysize = DES3_EDE_KEY_SIZE,
1279 }
1280 }
1281 },
1282 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1283 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1284}, {
1285 .crypto = {
1286 .cra_name = "cbc(aes)",
1287 .cra_blocksize = AES_BLOCK_SIZE,
1288 .cra_u = { .ablkcipher = {
1289 .min_keysize = AES_MIN_KEY_SIZE,
1290 .max_keysize = AES_MAX_KEY_SIZE,
1291 .ivsize = AES_BLOCK_SIZE,
1292 .geniv = "eseqiv",
1293 }
1294 }
1295 },
1296 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1297 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1298}, {
1299 .crypto = {
1300 .cra_name = "ecb(aes)",
1301 .cra_blocksize = AES_BLOCK_SIZE,
1302 .cra_u = { .ablkcipher = {
1303 .min_keysize = AES_MIN_KEY_SIZE,
1304 .max_keysize = AES_MAX_KEY_SIZE,
1305 }
1306 }
1307 },
1308 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1309 .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1310}, {
1311 .crypto = {
1312 .cra_name = "ctr(aes)",
1313 .cra_blocksize = AES_BLOCK_SIZE,
1314 .cra_u = { .ablkcipher = {
1315 .min_keysize = AES_MIN_KEY_SIZE,
1316 .max_keysize = AES_MAX_KEY_SIZE,
1317 .ivsize = AES_BLOCK_SIZE,
1318 .geniv = "eseqiv",
1319 }
1320 }
1321 },
1322 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1323 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1324}, {
1325 .crypto = {
1326 .cra_name = "rfc3686(ctr(aes))",
1327 .cra_blocksize = AES_BLOCK_SIZE,
1328 .cra_u = { .ablkcipher = {
1329 .min_keysize = AES_MIN_KEY_SIZE,
1330 .max_keysize = AES_MAX_KEY_SIZE,
1331 .ivsize = AES_BLOCK_SIZE,
1332 .geniv = "eseqiv",
1333 .setkey = ablk_rfc3686_setkey,
1334 .encrypt = ablk_rfc3686_crypt,
1335 .decrypt = ablk_rfc3686_crypt }
1336 }
1337 },
1338 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1339 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1340}, {
1341 .crypto = {
1342 .cra_name = "authenc(hmac(md5),cbc(des))",
1343 .cra_blocksize = DES_BLOCK_SIZE,
1344 .cra_u = { .aead = {
1345 .ivsize = DES_BLOCK_SIZE,
1346 .maxauthsize = MD5_DIGEST_SIZE,
1347 }
1348 }
1349 },
1350 .hash = &hash_alg_md5,
1351 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1352 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1353}, {
1354 .crypto = {
1355 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1356 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1357 .cra_u = { .aead = {
1358 .ivsize = DES3_EDE_BLOCK_SIZE,
1359 .maxauthsize = MD5_DIGEST_SIZE,
1360 }
1361 }
1362 },
1363 .hash = &hash_alg_md5,
1364 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1365 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1366}, {
1367 .crypto = {
1368 .cra_name = "authenc(hmac(sha1),cbc(des))",
1369 .cra_blocksize = DES_BLOCK_SIZE,
1370 .cra_u = { .aead = {
1371 .ivsize = DES_BLOCK_SIZE,
1372 .maxauthsize = SHA1_DIGEST_SIZE,
1373 }
1374 }
1375 },
1376 .hash = &hash_alg_sha1,
1377 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1378 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1379}, {
1380 .crypto = {
1381 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1382 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1383 .cra_u = { .aead = {
1384 .ivsize = DES3_EDE_BLOCK_SIZE,
1385 .maxauthsize = SHA1_DIGEST_SIZE,
1386 }
1387 }
1388 },
1389 .hash = &hash_alg_sha1,
1390 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1391 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1392}, {
1393 .crypto = {
1394 .cra_name = "authenc(hmac(md5),cbc(aes))",
1395 .cra_blocksize = AES_BLOCK_SIZE,
1396 .cra_u = { .aead = {
1397 .ivsize = AES_BLOCK_SIZE,
1398 .maxauthsize = MD5_DIGEST_SIZE,
1399 }
1400 }
1401 },
1402 .hash = &hash_alg_md5,
1403 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1404 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1405}, {
1406 .crypto = {
1407 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1408 .cra_blocksize = AES_BLOCK_SIZE,
1409 .cra_u = { .aead = {
1410 .ivsize = AES_BLOCK_SIZE,
1411 .maxauthsize = SHA1_DIGEST_SIZE,
1412 }
1413 }
1414 },
1415 .hash = &hash_alg_sha1,
1416 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1417 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1418} };
1419
1420#define IXP_POSTFIX "-ixp4xx"
1421static int __init ixp_module_init(void)
1422{
1423 int num = ARRAY_SIZE(ixp4xx_algos);
 1424	int i, err;
1425
1426 if (platform_device_register(&pseudo_dev))
1427 return -ENODEV;
1428
1429 spin_lock_init(&desc_lock);
1430 spin_lock_init(&emerg_lock);
1431
1432 err = init_ixp_crypto();
1433 if (err) {
1434 platform_device_unregister(&pseudo_dev);
1435 return err;
1436 }
 1437	for (i = 0; i < num; i++) {
1438 struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
1439
1440 if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
1441 "%s"IXP_POSTFIX, cra->cra_name) >=
1442 CRYPTO_MAX_ALG_NAME)
1443 {
1444 continue;
1445 }
1446 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1447 continue;
1448 }
1449 if (!ixp4xx_algos[i].hash) {
1450 /* block ciphers */
1451 cra->cra_type = &crypto_ablkcipher_type;
1452 cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1453 CRYPTO_ALG_ASYNC;
1454 if (!cra->cra_ablkcipher.setkey)
1455 cra->cra_ablkcipher.setkey = ablk_setkey;
1456 if (!cra->cra_ablkcipher.encrypt)
1457 cra->cra_ablkcipher.encrypt = ablk_encrypt;
1458 if (!cra->cra_ablkcipher.decrypt)
1459 cra->cra_ablkcipher.decrypt = ablk_decrypt;
1460 cra->cra_init = init_tfm_ablk;
1461 } else {
1462 /* authenc */
1463 cra->cra_type = &crypto_aead_type;
1464 cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
1465 CRYPTO_ALG_ASYNC;
1466 cra->cra_aead.setkey = aead_setkey;
1467 cra->cra_aead.setauthsize = aead_setauthsize;
1468 cra->cra_aead.encrypt = aead_encrypt;
1469 cra->cra_aead.decrypt = aead_decrypt;
1470 cra->cra_aead.givencrypt = aead_givencrypt;
1471 cra->cra_init = init_tfm_aead;
1472 }
1473 cra->cra_ctxsize = sizeof(struct ixp_ctx);
1474 cra->cra_module = THIS_MODULE;
1475 cra->cra_alignmask = 3;
1476 cra->cra_priority = 300;
1477 cra->cra_exit = exit_tfm;
1478 if (crypto_register_alg(cra))
1479 printk(KERN_ERR "Failed to register '%s'\n",
1480 cra->cra_name);
1481 else
1482 ixp4xx_algos[i].registered = 1;
1483 }
1484 return 0;
1485}
1486
1487static void __exit ixp_module_exit(void)
1488{
1489 int num = ARRAY_SIZE(ixp4xx_algos);
1490 int i;
1491
1492	for (i = 0; i < num; i++) {
1493 if (ixp4xx_algos[i].registered)
1494 crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1495 }
1496 release_ixp_crypto();
1497 platform_device_unregister(&pseudo_dev);
1498}
1499
1500module_init(ixp_module_init);
1501module_exit(ixp_module_exit);
1502
1503MODULE_LICENSE("GPL");
1504MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1505MODULE_DESCRIPTION("IXP4xx hardware crypto");
1506
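A note on the registration loop above: ixp_module_init() composes each driver name by appending IXP_POSTFIX to cra_name, and skips any entry whose composed name would not fit, using snprintf()'s return value (the length it wanted to write) to detect truncation. A minimal userspace sketch of that idiom, assuming only that CRYPTO_MAX_ALG_NAME is 64 as in linux/crypto.h:

	#include <stdio.h>

	#define CRYPTO_MAX_ALG_NAME	64	/* as in linux/crypto.h */
	#define IXP_POSTFIX		"-ixp4xx"

	/* returns 0 on success, -1 if the composed name would be truncated */
	static int make_driver_name(char *dst, const char *cra_name)
	{
		/* snprintf returns the length it *wanted* to write; a value
		 * >= the buffer size means the name was cut short */
		if (snprintf(dst, CRYPTO_MAX_ALG_NAME, "%s" IXP_POSTFIX,
			     cra_name) >= CRYPTO_MAX_ALG_NAME)
			return -1;
		return 0;
	}

	int main(void)
	{
		char name[CRYPTO_MAX_ALG_NAME];

		if (!make_driver_name(name, "authenc(hmac(sha1),cbc(aes))"))
			printf("%s\n", name);	/* ...-ixp4xx */
		return 0;
	}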
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index bb30eb9b93ef..54a2a166e566 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -385,12 +385,12 @@ static int __init padlock_init(void)
385 int ret; 385 int ret;
386 386
387 if (!cpu_has_xcrypt) { 387 if (!cpu_has_xcrypt) {
388 printk(KERN_ERR PFX "VIA PadLock not detected.\n"); 388 printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
389 return -ENODEV; 389 return -ENODEV;
390 } 390 }
391 391
392 if (!cpu_has_xcrypt_enabled) { 392 if (!cpu_has_xcrypt_enabled) {
393 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); 393 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
394 return -ENODEV; 394 return -ENODEV;
395 } 395 }
396 396
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index c666b4e0933e..40d5680fa013 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -254,12 +254,12 @@ static int __init padlock_init(void)
254 int rc = -ENODEV; 254 int rc = -ENODEV;
255 255
256 if (!cpu_has_phe) { 256 if (!cpu_has_phe) {
257 printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n"); 257 printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
258 return -ENODEV; 258 return -ENODEV;
259 } 259 }
260 260
261 if (!cpu_has_phe_enabled) { 261 if (!cpu_has_phe_enabled) {
262 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); 262 printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
263 return -ENODEV; 263 return -ENODEV;
264 } 264 }
265 265
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
new file mode 100644
index 000000000000..b11943dadefd
--- /dev/null
+++ b/drivers/crypto/talitos.c
@@ -0,0 +1,1597 @@
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_platform.h>
36#include <linux/dma-mapping.h>
37#include <linux/io.h>
38#include <linux/spinlock.h>
39#include <linux/rtnetlink.h>
40
41#include <crypto/algapi.h>
42#include <crypto/aes.h>
43#include <crypto/des.h>
44#include <crypto/sha.h>
45#include <crypto/aead.h>
46#include <crypto/authenc.h>
47
48#include "talitos.h"
49
50#define TALITOS_TIMEOUT 100000
51#define TALITOS_MAX_DATA_LEN 65535
52
53#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
54#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
55#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
56
57/* descriptor pointer entry */
58struct talitos_ptr {
59 __be16 len; /* length */
60 u8 j_extent; /* jump to sg link table and/or extent */
61 u8 eptr; /* extended address */
62 __be32 ptr; /* address */
63};
64
65/* descriptor */
66struct talitos_desc {
67 __be32 hdr; /* header high bits */
68 __be32 hdr_lo; /* header low bits */
69 struct talitos_ptr ptr[7]; /* ptr/len pair array */
70};
71
72/**
73 * talitos_request - descriptor submission request
74 * @desc: descriptor pointer (kernel virtual)
75 * @dma_desc: descriptor's physical bus address
76 * @callback: whom to call when descriptor processing is done
77 * @context: caller context (optional)
78 */
79struct talitos_request {
80 struct talitos_desc *desc;
81 dma_addr_t dma_desc;
82 void (*callback) (struct device *dev, struct talitos_desc *desc,
83 void *context, int error);
84 void *context;
85};
86
87struct talitos_private {
88 struct device *dev;
89 struct of_device *ofdev;
90 void __iomem *reg;
91 int irq;
92
93 /* SEC version geometry (from device tree node) */
94 unsigned int num_channels;
95 unsigned int chfifo_len;
96 unsigned int exec_units;
97 unsigned int desc_types;
98
99	/* next channel to be assigned the next incoming descriptor */
100 atomic_t last_chan;
101
102 /* per-channel request fifo */
103 struct talitos_request **fifo;
104
105 /*
106 * length of the request fifo
107 * fifo_len is chfifo_len rounded up to next power of 2
108 * so we can use bitwise ops to wrap
109 */
110 unsigned int fifo_len;
111
112 /* per-channel index to next free descriptor request */
113 int *head;
114
115 /* per-channel index to next in-progress/done descriptor request */
116 int *tail;
117
118 /* per-channel request submission (head) and release (tail) locks */
119 spinlock_t *head_lock;
120 spinlock_t *tail_lock;
121
122 /* request callback tasklet */
123 struct tasklet_struct done_task;
124 struct tasklet_struct error_task;
125
126 /* list of registered algorithms */
127 struct list_head alg_list;
128
129 /* hwrng device */
130 struct hwrng rng;
131};
132
133/*
134 * map virtual single (contiguous) pointer to h/w descriptor pointer
135 */
136static void map_single_talitos_ptr(struct device *dev,
137 struct talitos_ptr *talitos_ptr,
138 unsigned short len, void *data,
139 unsigned char extent,
140 enum dma_data_direction dir)
141{
142 talitos_ptr->len = cpu_to_be16(len);
143 talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
144 talitos_ptr->j_extent = extent;
145}
146
147/*
148 * unmap bus single (contiguous) h/w descriptor pointer
149 */
150static void unmap_single_talitos_ptr(struct device *dev,
151 struct talitos_ptr *talitos_ptr,
152 enum dma_data_direction dir)
153{
154 dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
155 be16_to_cpu(talitos_ptr->len), dir);
156}
157
158static int reset_channel(struct device *dev, int ch)
159{
160 struct talitos_private *priv = dev_get_drvdata(dev);
161 unsigned int timeout = TALITOS_TIMEOUT;
162
163 setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
164
165 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
166 && --timeout)
167 cpu_relax();
168
169 if (timeout == 0) {
170 dev_err(dev, "failed to reset channel %d\n", ch);
171 return -EIO;
172 }
173
174 /* set done writeback and IRQ */
175 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
176 TALITOS_CCCR_LO_CDIE);
177
178 return 0;
179}
180
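reset_channel() above and reset_device() below share one idiom: set a self-clearing control bit, then busy-wait with cpu_relax() until the hardware clears it or a fixed budget runs out. A hedged userspace sketch of that bounded poll, with a volatile flag standing in for the memory-mapped register (everything here is illustrative):

	#include <stdio.h>

	#define TIMEOUT_BUDGET	100000		/* cf. TALITOS_TIMEOUT */

	/* stand-in for an MMIO register whose RESET bit self-clears */
	static volatile unsigned int fake_cccr = 0x1;

	static int wait_for_reset(void)
	{
		unsigned int timeout = TIMEOUT_BUDGET;

		while ((fake_cccr & 0x1) && --timeout)
			;	/* the driver calls cpu_relax() here */

		return timeout ? 0 : -1;	/* -EIO in the driver */
	}

	int main(void)
	{
		fake_cccr = 0;	/* simulate the hardware finishing */
		printf("%s\n", wait_for_reset() ? "timed out" : "reset done");
		return 0;
	}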
181static int reset_device(struct device *dev)
182{
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185
186 setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
187
188 while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
189 && --timeout)
190 cpu_relax();
191
192 if (timeout == 0) {
193 dev_err(dev, "failed to reset device\n");
194 return -EIO;
195 }
196
197 return 0;
198}
199
200/*
201 * Reset and initialize the device
202 */
203static int init_device(struct device *dev)
204{
205 struct talitos_private *priv = dev_get_drvdata(dev);
206 int ch, err;
207
208 /*
209 * Master reset
210 * errata documentation: warning: certain SEC interrupts
211 * are not fully cleared by writing the MCR:SWR bit,
212 * set bit twice to completely reset
213 */
214 err = reset_device(dev);
215 if (err)
216 return err;
217
218 err = reset_device(dev);
219 if (err)
220 return err;
221
222 /* reset channels */
223 for (ch = 0; ch < priv->num_channels; ch++) {
224 err = reset_channel(dev, ch);
225 if (err)
226 return err;
227 }
228
229 /* enable channel done and error interrupts */
230 setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
231 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
232
233 return 0;
234}
235
236/**
237 * talitos_submit - submits a descriptor to the device for processing
238 * @dev: the SEC device to be used
239 * @desc: the descriptor to be processed by the device
240 * @callback: whom to call when processing is complete
241 * @context: a handle for use by caller (optional)
242 *
243 * desc must contain valid dma-mapped (bus physical) address pointers.
244 * callback must check err and feedback in descriptor header
245 * for device processing status.
246 */
247static int talitos_submit(struct device *dev, struct talitos_desc *desc,
248 void (*callback)(struct device *dev,
249 struct talitos_desc *desc,
250 void *context, int error),
251 void *context)
252{
253 struct talitos_private *priv = dev_get_drvdata(dev);
254 struct talitos_request *request;
255 unsigned long flags, ch;
256 int head;
257
258 /* select done notification */
259 desc->hdr |= DESC_HDR_DONE_NOTIFY;
260
261 /* emulate SEC's round-robin channel fifo polling scheme */
262 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
263
264 spin_lock_irqsave(&priv->head_lock[ch], flags);
265
266 head = priv->head[ch];
267 request = &priv->fifo[ch][head];
268
269 if (request->desc) {
270 /* request queue is full */
271 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
272 return -EAGAIN;
273 }
274
275 /* map descriptor and save caller data */
276 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
277 DMA_BIDIRECTIONAL);
278 request->callback = callback;
279 request->context = context;
280
281 /* increment fifo head */
282 priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
283
284 smp_wmb();
285 request->desc = desc;
286
287 /* GO! */
288 wmb();
289 out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
290
291 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
292
293 return -EINPROGRESS;
294}
295
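Two details of talitos_submit() deserve a note. First, ordering: request->desc is published only after the other bookkeeping stores (smp_wmb()), and the descriptor address is written to the fetch fifo only after everything else is visible (wmb()). Second, both the round-robin channel pick and the fifo head advance use a bitwise AND instead of a modulo, which is why the probe code insists num_channels is a power of two and rounds chfifo_len up with roundup_pow_of_two(). A small sketch of the wrap arithmetic, with illustrative sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_channels = 4;	/* must be a power of two */
		unsigned int fifo_len = 8;	/* chfifo_len rounded up */
		unsigned int last_chan = 0, head = 0;
		int i;

		for (i = 0; i < 10; i++) {
			/* cf. atomic_inc_return(&priv->last_chan) & mask */
			unsigned int ch = ++last_chan & (num_channels - 1);

			/* cf. (priv->head[ch] + 1) & (priv->fifo_len - 1) */
			head = (head + 1) & (fifo_len - 1);
			printf("req %d -> channel %u, head %u\n", i, ch, head);
		}
		return 0;
	}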
296/*
297 * process completed descriptors; propagate any error to the remaining callbacks
298 */
299static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
300{
301 struct talitos_private *priv = dev_get_drvdata(dev);
302 struct talitos_request *request, saved_req;
303 unsigned long flags;
304 int tail, status;
305
306 spin_lock_irqsave(&priv->tail_lock[ch], flags);
307
308 tail = priv->tail[ch];
309 while (priv->fifo[ch][tail].desc) {
310 request = &priv->fifo[ch][tail];
311
312 /* descriptors with their done bits set don't get the error */
313 rmb();
314 if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
315 status = 0;
316 else
317 if (!error)
318 break;
319 else
320 status = error;
321
322 dma_unmap_single(dev, request->dma_desc,
323 sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
324
325 /* copy entries so we can call callback outside lock */
326 saved_req.desc = request->desc;
327 saved_req.callback = request->callback;
328 saved_req.context = request->context;
329
330 /* release request entry in fifo */
331 smp_wmb();
332 request->desc = NULL;
333
334 /* increment fifo tail */
335 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
336
337 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
338 saved_req.callback(dev, saved_req.desc, saved_req.context,
339 status);
340 /* channel may resume processing in single desc error case */
341 if (error && !reset_ch && status == error)
342 return;
343 spin_lock_irqsave(&priv->tail_lock[ch], flags);
344 tail = priv->tail[ch];
345 }
346
347 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
348}
349
350/*
351 * process completed requests for channels that have done status
352 */
353static void talitos_done(unsigned long data)
354{
355 struct device *dev = (struct device *)data;
356 struct talitos_private *priv = dev_get_drvdata(dev);
357 int ch;
358
359 for (ch = 0; ch < priv->num_channels; ch++)
360 flush_channel(dev, ch, 0, 0);
361}
362
363/*
364 * locate current (offending) descriptor
365 */
366static struct talitos_desc *current_desc(struct device *dev, int ch)
367{
368 struct talitos_private *priv = dev_get_drvdata(dev);
369 int tail = priv->tail[ch];
370 dma_addr_t cur_desc;
371
372 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
373
374 while (priv->fifo[ch][tail].dma_desc != cur_desc) {
375 tail = (tail + 1) & (priv->fifo_len - 1);
376 if (tail == priv->tail[ch]) {
377 dev_err(dev, "couldn't locate current descriptor\n");
378 return NULL;
379 }
380 }
381
382 return priv->fifo[ch][tail].desc;
383}
384
385/*
386 * user diagnostics; report root cause of error based on execution unit status
387 */
388static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
389{
390 struct talitos_private *priv = dev_get_drvdata(dev);
391 int i;
392
393 switch (desc->hdr & DESC_HDR_SEL0_MASK) {
394 case DESC_HDR_SEL0_AFEU:
395 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
396 in_be32(priv->reg + TALITOS_AFEUISR),
397 in_be32(priv->reg + TALITOS_AFEUISR_LO));
398 break;
399 case DESC_HDR_SEL0_DEU:
400 dev_err(dev, "DEUISR 0x%08x_%08x\n",
401 in_be32(priv->reg + TALITOS_DEUISR),
402 in_be32(priv->reg + TALITOS_DEUISR_LO));
403 break;
404 case DESC_HDR_SEL0_MDEUA:
405 case DESC_HDR_SEL0_MDEUB:
406 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
407 in_be32(priv->reg + TALITOS_MDEUISR),
408 in_be32(priv->reg + TALITOS_MDEUISR_LO));
409 break;
410 case DESC_HDR_SEL0_RNG:
411 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
412 in_be32(priv->reg + TALITOS_RNGUISR),
413 in_be32(priv->reg + TALITOS_RNGUISR_LO));
414 break;
415 case DESC_HDR_SEL0_PKEU:
416 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
417 in_be32(priv->reg + TALITOS_PKEUISR),
418 in_be32(priv->reg + TALITOS_PKEUISR_LO));
419 break;
420 case DESC_HDR_SEL0_AESU:
421 dev_err(dev, "AESUISR 0x%08x_%08x\n",
422 in_be32(priv->reg + TALITOS_AESUISR),
423 in_be32(priv->reg + TALITOS_AESUISR_LO));
424 break;
425 case DESC_HDR_SEL0_CRCU:
426 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
427 in_be32(priv->reg + TALITOS_CRCUISR),
428 in_be32(priv->reg + TALITOS_CRCUISR_LO));
429 break;
430 case DESC_HDR_SEL0_KEU:
431 dev_err(dev, "KEUISR 0x%08x_%08x\n",
432 in_be32(priv->reg + TALITOS_KEUISR),
433 in_be32(priv->reg + TALITOS_KEUISR_LO));
434 break;
435 }
436
437 switch (desc->hdr & DESC_HDR_SEL1_MASK) {
438 case DESC_HDR_SEL1_MDEUA:
439 case DESC_HDR_SEL1_MDEUB:
440 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
441 in_be32(priv->reg + TALITOS_MDEUISR),
442 in_be32(priv->reg + TALITOS_MDEUISR_LO));
443 break;
444 case DESC_HDR_SEL1_CRCU:
445 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
446 in_be32(priv->reg + TALITOS_CRCUISR),
447 in_be32(priv->reg + TALITOS_CRCUISR_LO));
448 break;
449 }
450
451 for (i = 0; i < 8; i++)
452 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
453 in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
454 in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
455}
456
457/*
458 * recover from error interrupts
459 */
460static void talitos_error(unsigned long data)
461{
462 struct device *dev = (struct device *)data;
463 struct talitos_private *priv = dev_get_drvdata(dev);
464 unsigned int timeout = TALITOS_TIMEOUT;
465 int ch, error, reset_dev = 0, reset_ch = 0;
466 u32 isr, isr_lo, v, v_lo;
467
468 isr = in_be32(priv->reg + TALITOS_ISR);
469 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
470
471 for (ch = 0; ch < priv->num_channels; ch++) {
472 /* skip channels without errors */
473 if (!(isr & (1 << (ch * 2 + 1))))
474 continue;
475
476 error = -EINVAL;
477
478 v = in_be32(priv->reg + TALITOS_CCPSR(ch));
479 v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
480
481 if (v_lo & TALITOS_CCPSR_LO_DOF) {
482 dev_err(dev, "double fetch fifo overflow error\n");
483 error = -EAGAIN;
484 reset_ch = 1;
485 }
486 if (v_lo & TALITOS_CCPSR_LO_SOF) {
487 /* h/w dropped descriptor */
488 dev_err(dev, "single fetch fifo overflow error\n");
489 error = -EAGAIN;
490 }
491 if (v_lo & TALITOS_CCPSR_LO_MDTE)
492 dev_err(dev, "master data transfer error\n");
493 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
494 dev_err(dev, "s/g data length zero error\n");
495 if (v_lo & TALITOS_CCPSR_LO_FPZ)
496 dev_err(dev, "fetch pointer zero error\n");
497 if (v_lo & TALITOS_CCPSR_LO_IDH)
498 dev_err(dev, "illegal descriptor header error\n");
499 if (v_lo & TALITOS_CCPSR_LO_IEU)
500 dev_err(dev, "invalid execution unit error\n");
501 if (v_lo & TALITOS_CCPSR_LO_EU)
502 report_eu_error(dev, ch, current_desc(dev, ch));
503 if (v_lo & TALITOS_CCPSR_LO_GB)
504 dev_err(dev, "gather boundary error\n");
505 if (v_lo & TALITOS_CCPSR_LO_GRL)
506 dev_err(dev, "gather return/length error\n");
507 if (v_lo & TALITOS_CCPSR_LO_SB)
508 dev_err(dev, "scatter boundary error\n");
509 if (v_lo & TALITOS_CCPSR_LO_SRL)
510 dev_err(dev, "scatter return/length error\n");
511
512 flush_channel(dev, ch, error, reset_ch);
513
514 if (reset_ch) {
515 reset_channel(dev, ch);
516 } else {
517 setbits32(priv->reg + TALITOS_CCCR(ch),
518 TALITOS_CCCR_CONT);
519 setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
520 while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
521 TALITOS_CCCR_CONT) && --timeout)
522 cpu_relax();
523 if (timeout == 0) {
524 dev_err(dev, "failed to restart channel %d\n",
525 ch);
526 reset_dev = 1;
527 }
528 }
529 }
530 if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
531		dev_err(dev, "done overflow, internal timeout, or rngu error: "
532 "ISR 0x%08x_%08x\n", isr, isr_lo);
533
534 /* purge request queues */
535 for (ch = 0; ch < priv->num_channels; ch++)
536 flush_channel(dev, ch, -EIO, 1);
537
538 /* reset and reinitialize the device */
539 init_device(dev);
540 }
541}
542
543static irqreturn_t talitos_interrupt(int irq, void *data)
544{
545 struct device *dev = data;
546 struct talitos_private *priv = dev_get_drvdata(dev);
547 u32 isr, isr_lo;
548
549 isr = in_be32(priv->reg + TALITOS_ISR);
550 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
551
552 /* ack */
553 out_be32(priv->reg + TALITOS_ICR, isr);
554 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
555
556 if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
557 talitos_error((unsigned long)data);
558 else
559 if (likely(isr & TALITOS_ISR_CHDONE))
560 tasklet_schedule(&priv->done_task);
561
562 return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
563}
564
565/*
566 * hwrng
567 */
568static int talitos_rng_data_present(struct hwrng *rng, int wait)
569{
570 struct device *dev = (struct device *)rng->priv;
571 struct talitos_private *priv = dev_get_drvdata(dev);
572 u32 ofl;
573 int i;
574
575 for (i = 0; i < 20; i++) {
576 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
577 TALITOS_RNGUSR_LO_OFL;
578 if (ofl || !wait)
579 break;
580 udelay(10);
581 }
582
583 return !!ofl;
584}
585
586static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
587{
588 struct device *dev = (struct device *)rng->priv;
589 struct talitos_private *priv = dev_get_drvdata(dev);
590
591	/* rng fifo requires 64-bit accesses: read both words, keep the low one */
592 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
593 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
594
595 return sizeof(u32);
596}
597
598static int talitos_rng_init(struct hwrng *rng)
599{
600 struct device *dev = (struct device *)rng->priv;
601 struct talitos_private *priv = dev_get_drvdata(dev);
602 unsigned int timeout = TALITOS_TIMEOUT;
603
604 setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
605 while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
606 && --timeout)
607 cpu_relax();
608 if (timeout == 0) {
609 dev_err(dev, "failed to reset rng hw\n");
610 return -ENODEV;
611 }
612
613 /* start generating */
614 setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
615
616 return 0;
617}
618
619static int talitos_register_rng(struct device *dev)
620{
621 struct talitos_private *priv = dev_get_drvdata(dev);
622
623	priv->rng.name = dev_driver_string(dev);
624	priv->rng.init = talitos_rng_init;
625	priv->rng.data_present = talitos_rng_data_present;
626	priv->rng.data_read = talitos_rng_data_read;
627 priv->rng.priv = (unsigned long)dev;
628
629 return hwrng_register(&priv->rng);
630}
631
632static void talitos_unregister_rng(struct device *dev)
633{
634 struct talitos_private *priv = dev_get_drvdata(dev);
635
636 hwrng_unregister(&priv->rng);
637}
638
639/*
640 * crypto alg
641 */
642#define TALITOS_CRA_PRIORITY 3000
643#define TALITOS_MAX_KEY_SIZE 64
644#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
645
646#define MD5_DIGEST_SIZE 16
647
648struct talitos_ctx {
649 struct device *dev;
650 __be32 desc_hdr_template;
651 u8 key[TALITOS_MAX_KEY_SIZE];
652 u8 iv[TALITOS_MAX_IV_LENGTH];
653 unsigned int keylen;
654 unsigned int enckeylen;
655 unsigned int authkeylen;
656 unsigned int authsize;
657};
658
659static int aead_authenc_setauthsize(struct crypto_aead *authenc,
660 unsigned int authsize)
661{
662 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
663
664 ctx->authsize = authsize;
665
666 return 0;
667}
668
669static int aead_authenc_setkey(struct crypto_aead *authenc,
670 const u8 *key, unsigned int keylen)
671{
672 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
673 struct rtattr *rta = (void *)key;
674 struct crypto_authenc_key_param *param;
675 unsigned int authkeylen;
676 unsigned int enckeylen;
677
678 if (!RTA_OK(rta, keylen))
679 goto badkey;
680
681 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
682 goto badkey;
683
684 if (RTA_PAYLOAD(rta) < sizeof(*param))
685 goto badkey;
686
687 param = RTA_DATA(rta);
688 enckeylen = be32_to_cpu(param->enckeylen);
689
690 key += RTA_ALIGN(rta->rta_len);
691 keylen -= RTA_ALIGN(rta->rta_len);
692
693 if (keylen < enckeylen)
694 goto badkey;
695
696 authkeylen = keylen - enckeylen;
697
698 if (keylen > TALITOS_MAX_KEY_SIZE)
699 goto badkey;
700
701 memcpy(&ctx->key, key, keylen);
702
703 ctx->keylen = keylen;
704 ctx->enckeylen = enckeylen;
705 ctx->authkeylen = authkeylen;
706
707 return 0;
708
709badkey:
710 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
711 return -EINVAL;
712}
713
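The key blob parsed by aead_authenc_setkey() is not a raw key: it is an rtattr-framed parameter carrying the big-endian enckeylen, followed by the authentication key and then the encryption key. A hedged userspace sketch of how a caller would pack such a blob (struct layouts simplified from linux/rtnetlink.h and crypto/authenc.h; CRYPTO_AUTHENC_KEYA_PARAM is 1 there):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl() for the be32 field */

	#define RTA_ALIGNTO	4
	#define RTA_ALIGN(len)	(((len) + RTA_ALIGNTO - 1) & ~(RTA_ALIGNTO - 1))

	struct rtattr {			/* simplified */
		uint16_t rta_len;
		uint16_t rta_type;
	};

	struct crypto_authenc_key_param {
		uint32_t enckeylen;	/* __be32 in the kernel */
	};

	/* layout: [rtattr|param][authkey][enckey]; returns total length */
	static size_t pack_authenc_key(uint8_t *buf,
				       const uint8_t *authkey, size_t authkeylen,
				       const uint8_t *enckey, size_t enckeylen)
	{
		struct rtattr *rta = (struct rtattr *)buf;
		struct crypto_authenc_key_param *param = (void *)(rta + 1);
		size_t off;

		rta->rta_type = 1;	/* CRYPTO_AUTHENC_KEYA_PARAM */
		rta->rta_len = RTA_ALIGN(sizeof(*rta) + sizeof(*param));
		param->enckeylen = htonl(enckeylen);

		off = RTA_ALIGN(rta->rta_len);
		memcpy(buf + off, authkey, authkeylen);
		memcpy(buf + off + authkeylen, enckey, enckeylen);
		return off + authkeylen + enckeylen;
	}

	int main(void)
	{
		uint8_t blob[64], authkey[20] = { 0 }, enckey[16] = { 0 };
		size_t n = pack_authenc_key(blob, authkey, sizeof(authkey),
					    enckey, sizeof(enckey));

		printf("authenc key blob: %zu bytes\n", n);	/* 8+20+16 */
		return 0;
	}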
714/*
715 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
716 * @src_nents: number of segments in input scatterlist
717 * @dst_nents: number of segments in output scatterlist
718 * @dma_len: length of dma mapped link_tbl space
719 * @dma_link_tbl: bus physical address of link_tbl
720 * @desc: h/w descriptor
721 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
722 *
723 * if decrypting (with authcheck), or either one of src_nents or dst_nents
724 * is greater than 1, an integrity check value is concatenated to the end
725 * of link_tbl data
726 */
727struct ipsec_esp_edesc {
728 int src_nents;
729 int dst_nents;
730 int dma_len;
731 dma_addr_t dma_link_tbl;
732 struct talitos_desc desc;
733 struct talitos_ptr link_tbl[0];
734};
735
736static void ipsec_esp_unmap(struct device *dev,
737 struct ipsec_esp_edesc *edesc,
738 struct aead_request *areq)
739{
740 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
741 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
742 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
743 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
744
745 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
746
747 if (areq->src != areq->dst) {
748 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
749 DMA_TO_DEVICE);
750 dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
751 DMA_FROM_DEVICE);
752 } else {
753 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
754 DMA_BIDIRECTIONAL);
755 }
756
757 if (edesc->dma_len)
758 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
759 DMA_BIDIRECTIONAL);
760}
761
762/*
763 * ipsec_esp descriptor callbacks
764 */
765static void ipsec_esp_encrypt_done(struct device *dev,
766 struct talitos_desc *desc, void *context,
767 int err)
768{
769 struct aead_request *areq = context;
770 struct ipsec_esp_edesc *edesc =
771 container_of(desc, struct ipsec_esp_edesc, desc);
772 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
773 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
774 struct scatterlist *sg;
775 void *icvdata;
776
777 ipsec_esp_unmap(dev, edesc, areq);
778
779 /* copy the generated ICV to dst */
780 if (edesc->dma_len) {
781 icvdata = &edesc->link_tbl[edesc->src_nents +
782 edesc->dst_nents + 1];
783 sg = sg_last(areq->dst, edesc->dst_nents);
784 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
785 icvdata, ctx->authsize);
786 }
787
788 kfree(edesc);
789
790 aead_request_complete(areq, err);
791}
792
793static void ipsec_esp_decrypt_done(struct device *dev,
794 struct talitos_desc *desc, void *context,
795 int err)
796{
797 struct aead_request *req = context;
798 struct ipsec_esp_edesc *edesc =
799 container_of(desc, struct ipsec_esp_edesc, desc);
800 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
801 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
802 struct scatterlist *sg;
803 void *icvdata;
804
805 ipsec_esp_unmap(dev, edesc, req);
806
807 if (!err) {
808 /* auth check */
809 if (edesc->dma_len)
810 icvdata = &edesc->link_tbl[edesc->src_nents +
811 edesc->dst_nents + 1];
812 else
813 icvdata = &edesc->link_tbl[0];
814
815 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
816 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
817 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
818 }
819
820 kfree(edesc);
821
822 aead_request_complete(req, err);
823}
824
825/*
826 * convert scatterlist to SEC h/w link table format
827 * stop at cryptlen bytes
828 */
829static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
830 int cryptlen, struct talitos_ptr *link_tbl_ptr)
831{
832 int n_sg = sg_count;
833
834 while (n_sg--) {
835 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
836 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
837 link_tbl_ptr->j_extent = 0;
838 link_tbl_ptr++;
839 cryptlen -= sg_dma_len(sg);
840 sg = sg_next(sg);
841 }
842
843	/* adjust (decrease) the last (or last two) entries' len to cryptlen */
844	link_tbl_ptr--;
845	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
846 /* Empty this entry, and move to previous one */
847 cryptlen += be16_to_cpu(link_tbl_ptr->len);
848 link_tbl_ptr->len = 0;
849 sg_count--;
850 link_tbl_ptr--;
851 }
852 link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
853 + cryptlen);
854
855 /* tag end of link table */
856 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
857
858 return sg_count;
859}
860
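To make the trimming step concrete: suppose two DMA segments of 1500 bytes each but a cryptlen of 2800. After the walk, cryptlen sits at -200; the last entry's 1500 bytes exceed that 200-byte excess, so the while loop never runs and the entry is simply shortened to 1300. Only a trailing segment lying entirely past cryptlen would be zeroed and the table end moved back a slot. A minimal sketch of the same arithmetic on plain arrays (endianness elided):

	#include <stdio.h>

	int main(void)
	{
		unsigned short len[2] = { 1500, 1500 };	/* sg_dma_len()s */
		int cryptlen = 2800, i, n = 2;

		for (i = 0; i < n; i++)
			cryptlen -= len[i];	/* ends at -200 */

		i = n - 1;
		while (len[i] <= -cryptlen) {	/* entry wholly past the end? */
			cryptlen += len[i];
			len[i--] = 0;
		}
		len[i] += cryptlen;		/* 1500 - 200 = 1300 */

		printf("last entry trimmed to %d\n", len[i]);
		return 0;
	}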
861/*
862 * fill in and submit ipsec_esp descriptor
863 */
864static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
865 u8 *giv, u64 seq,
866 void (*callback) (struct device *dev,
867 struct talitos_desc *desc,
868 void *context, int error))
869{
870 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
871 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
872 struct device *dev = ctx->dev;
873 struct talitos_desc *desc = &edesc->desc;
874 unsigned int cryptlen = areq->cryptlen;
875 unsigned int authsize = ctx->authsize;
876 unsigned int ivsize;
877 int sg_count;
878
879 /* hmac key */
880 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
881 0, DMA_TO_DEVICE);
882 /* hmac data */
883 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
884 sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
885 DMA_TO_DEVICE);
886 /* cipher iv */
887 ivsize = crypto_aead_ivsize(aead);
888 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
889 DMA_TO_DEVICE);
890
891 /* cipher key */
892 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
893 (char *)&ctx->key + ctx->authkeylen, 0,
894 DMA_TO_DEVICE);
895
896 /*
897 * cipher in
898 * map and adjust cipher len to aead request cryptlen.
899	 * extent is bytes of HMAC appended to the ciphertext,
900 * typically 12 for ipsec
901 */
902 desc->ptr[4].len = cpu_to_be16(cryptlen);
903 desc->ptr[4].j_extent = authsize;
904
905 if (areq->src == areq->dst)
906 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
907 DMA_BIDIRECTIONAL);
908 else
909 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
910 DMA_TO_DEVICE);
911
912 if (sg_count == 1) {
913 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
914 } else {
915 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
916 &edesc->link_tbl[0]);
917 if (sg_count > 1) {
918 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
919 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
920 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
921 edesc->dma_len, DMA_BIDIRECTIONAL);
922 } else {
923 /* Only one segment now, so no link tbl needed */
924 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
925 }
926 }
927
928 /* cipher out */
929 desc->ptr[5].len = cpu_to_be16(cryptlen);
930 desc->ptr[5].j_extent = authsize;
931
932 if (areq->src != areq->dst) {
933 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
934 DMA_FROM_DEVICE);
935 }
936
937 if (sg_count == 1) {
938 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
939 } else {
940 struct talitos_ptr *link_tbl_ptr =
941 &edesc->link_tbl[edesc->src_nents];
942 struct scatterlist *sg;
943
944 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
945 edesc->dma_link_tbl +
946 edesc->src_nents);
947 if (areq->src == areq->dst) {
948 memcpy(link_tbl_ptr, &edesc->link_tbl[0],
949 edesc->src_nents * sizeof(struct talitos_ptr));
950 } else {
951 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
952 link_tbl_ptr);
953 }
954 link_tbl_ptr += sg_count - 1;
955
956 /* handle case where sg_last contains the ICV exclusively */
957 sg = sg_last(areq->dst, edesc->dst_nents);
958 if (sg->length == ctx->authsize)
959 link_tbl_ptr--;
960
961 link_tbl_ptr->j_extent = 0;
962 link_tbl_ptr++;
963 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
964 link_tbl_ptr->len = cpu_to_be16(authsize);
965
966 /* icv data follows link tables */
967 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
968 edesc->dma_link_tbl +
969 edesc->src_nents +
970 edesc->dst_nents + 1);
971
972 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
973 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
974 edesc->dma_len, DMA_BIDIRECTIONAL);
975 }
976
977 /* iv out */
978 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
979 DMA_FROM_DEVICE);
980
981 return talitos_submit(dev, desc, callback, areq);
982}
983
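For reference, ipsec_esp() assigns the seven descriptor pointer slots as follows: ptr[0] HMAC key, ptr[1] associated (HMAC) data, ptr[2] cipher IV in, ptr[3] cipher key, ptr[4] cipher input (with the auth tag length carried in the extent byte), ptr[5] cipher output, and ptr[6] IV out. Slots 4 and 5 switch to jump-style link tables whenever the data spans more than one scatterlist segment.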
984
985/*
986 * derive number of elements in scatterlist
987 */
988static int sg_count(struct scatterlist *sg_list, int nbytes)
989{
990 struct scatterlist *sg = sg_list;
991 int sg_nents = 0;
992
993 while (nbytes) {
994 sg_nents++;
995 nbytes -= sg->length;
996 sg = sg_next(sg);
997 }
998
999 return sg_nents;
1000}
1001
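Note that this walk assumes nbytes is fully covered by the list: nbytes may go negative on a partially consumed final segment (which still counts as an entry), but a list shorter than nbytes would send the loop off the end of the chain.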
1002/*
1003 * allocate and map the ipsec_esp extended descriptor
1004 */
1005static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1006 int icv_stashing)
1007{
1008 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1009 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1010 struct ipsec_esp_edesc *edesc;
1011 int src_nents, dst_nents, alloc_len, dma_len;
1012
1013 if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
1014 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
1015 return ERR_PTR(-EINVAL);
1016 }
1017
1018 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
1019 src_nents = (src_nents == 1) ? 0 : src_nents;
1020
1021 if (areq->dst == areq->src) {
1022 dst_nents = src_nents;
1023 } else {
1024 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
1025		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1026 }
1027
1028 /*
1029 * allocate space for base edesc plus the link tables,
1030 * allowing for a separate entry for the generated ICV (+ 1),
1031 * and the ICV data itself
1032 */
1033 alloc_len = sizeof(struct ipsec_esp_edesc);
1034 if (src_nents || dst_nents) {
1035 dma_len = (src_nents + dst_nents + 1) *
1036 sizeof(struct talitos_ptr) + ctx->authsize;
1037 alloc_len += dma_len;
1038 } else {
1039 dma_len = 0;
1040 alloc_len += icv_stashing ? ctx->authsize : 0;
1041 }
1042
1043 edesc = kmalloc(alloc_len, GFP_DMA);
1044 if (!edesc) {
1045 dev_err(ctx->dev, "could not allocate edescriptor\n");
1046 return ERR_PTR(-ENOMEM);
1047 }
1048
1049 edesc->src_nents = src_nents;
1050 edesc->dst_nents = dst_nents;
1051 edesc->dma_len = dma_len;
1052 edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
1053 edesc->dma_len, DMA_BIDIRECTIONAL);
1054
1055 return edesc;
1056}
1057
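As a worked example of the sizing above: with src_nents = 3, dst_nents = 2 and a 12-byte ICV, dma_len = (3 + 2 + 1) * sizeof(struct talitos_ptr) + 12 = 6 * 8 + 12 = 60 bytes, and alloc_len = sizeof(struct ipsec_esp_edesc) + 60. The '+ 1' reserves a separate link-table entry for the generated ICV, and the ICV bytes themselves are stashed after the tables.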
1058static int aead_authenc_encrypt(struct aead_request *req)
1059{
1060 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1061 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1062 struct ipsec_esp_edesc *edesc;
1063
1064 /* allocate extended descriptor */
1065 edesc = ipsec_esp_edesc_alloc(req, 0);
1066 if (IS_ERR(edesc))
1067 return PTR_ERR(edesc);
1068
1069 /* set encrypt */
1070 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1071
1072 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1073}
1074
1075static int aead_authenc_decrypt(struct aead_request *req)
1076{
1077 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1078 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1079 unsigned int authsize = ctx->authsize;
1080 struct ipsec_esp_edesc *edesc;
1081 struct scatterlist *sg;
1082 void *icvdata;
1083
1084 req->cryptlen -= authsize;
1085
1086 /* allocate extended descriptor */
1087 edesc = ipsec_esp_edesc_alloc(req, 1);
1088 if (IS_ERR(edesc))
1089 return PTR_ERR(edesc);
1090
1091 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1092 if (edesc->dma_len)
1093 icvdata = &edesc->link_tbl[edesc->src_nents +
1094 edesc->dst_nents + 1];
1095 else
1096 icvdata = &edesc->link_tbl[0];
1097
1098 sg = sg_last(req->src, edesc->src_nents ? : 1);
1099
1100 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1101 ctx->authsize);
1102
1103 /* decrypt */
1104 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1105
1106 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
1107}
1108
1109static int aead_authenc_givencrypt(
1110 struct aead_givcrypt_request *req)
1111{
1112 struct aead_request *areq = &req->areq;
1113 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1114 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1115 struct ipsec_esp_edesc *edesc;
1116
1117 /* allocate extended descriptor */
1118 edesc = ipsec_esp_edesc_alloc(areq, 0);
1119 if (IS_ERR(edesc))
1120 return PTR_ERR(edesc);
1121
1122 /* set encrypt */
1123 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1124
1125 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1126
1127 return ipsec_esp(edesc, areq, req->giv, req->seq,
1128 ipsec_esp_encrypt_done);
1129}
1130
1131struct talitos_alg_template {
1132 char name[CRYPTO_MAX_ALG_NAME];
1133 char driver_name[CRYPTO_MAX_ALG_NAME];
1134 unsigned int blocksize;
1135 struct aead_alg aead;
1136 struct device *dev;
1137 __be32 desc_hdr_template;
1138};
1139
1140static struct talitos_alg_template driver_algs[] = {
1141 /* single-pass ipsec_esp descriptor */
1142 {
1143 .name = "authenc(hmac(sha1),cbc(aes))",
1144 .driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1145 .blocksize = AES_BLOCK_SIZE,
1146 .aead = {
1147 .setkey = aead_authenc_setkey,
1148 .setauthsize = aead_authenc_setauthsize,
1149 .encrypt = aead_authenc_encrypt,
1150 .decrypt = aead_authenc_decrypt,
1151 .givencrypt = aead_authenc_givencrypt,
1152 .geniv = "<built-in>",
1153 .ivsize = AES_BLOCK_SIZE,
1154 .maxauthsize = SHA1_DIGEST_SIZE,
1155 },
1156 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1157 DESC_HDR_SEL0_AESU |
1158 DESC_HDR_MODE0_AESU_CBC |
1159 DESC_HDR_SEL1_MDEUA |
1160 DESC_HDR_MODE1_MDEU_INIT |
1161 DESC_HDR_MODE1_MDEU_PAD |
1162 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1163 },
1164 {
1165 .name = "authenc(hmac(sha1),cbc(des3_ede))",
1166 .driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1167 .blocksize = DES3_EDE_BLOCK_SIZE,
1168 .aead = {
1169 .setkey = aead_authenc_setkey,
1170 .setauthsize = aead_authenc_setauthsize,
1171 .encrypt = aead_authenc_encrypt,
1172 .decrypt = aead_authenc_decrypt,
1173 .givencrypt = aead_authenc_givencrypt,
1174 .geniv = "<built-in>",
1175 .ivsize = DES3_EDE_BLOCK_SIZE,
1176 .maxauthsize = SHA1_DIGEST_SIZE,
1177 },
1178 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1179 DESC_HDR_SEL0_DEU |
1180 DESC_HDR_MODE0_DEU_CBC |
1181 DESC_HDR_MODE0_DEU_3DES |
1182 DESC_HDR_SEL1_MDEUA |
1183 DESC_HDR_MODE1_MDEU_INIT |
1184 DESC_HDR_MODE1_MDEU_PAD |
1185 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1186 },
1187 {
1188 .name = "authenc(hmac(sha256),cbc(aes))",
1189 .driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1190 .blocksize = AES_BLOCK_SIZE,
1191 .aead = {
1192 .setkey = aead_authenc_setkey,
1193 .setauthsize = aead_authenc_setauthsize,
1194 .encrypt = aead_authenc_encrypt,
1195 .decrypt = aead_authenc_decrypt,
1196 .givencrypt = aead_authenc_givencrypt,
1197 .geniv = "<built-in>",
1198 .ivsize = AES_BLOCK_SIZE,
1199 .maxauthsize = SHA256_DIGEST_SIZE,
1200 },
1201 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1202 DESC_HDR_SEL0_AESU |
1203 DESC_HDR_MODE0_AESU_CBC |
1204 DESC_HDR_SEL1_MDEUA |
1205 DESC_HDR_MODE1_MDEU_INIT |
1206 DESC_HDR_MODE1_MDEU_PAD |
1207 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1208 },
1209 {
1210 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1211 .driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1212 .blocksize = DES3_EDE_BLOCK_SIZE,
1213 .aead = {
1214 .setkey = aead_authenc_setkey,
1215 .setauthsize = aead_authenc_setauthsize,
1216 .encrypt = aead_authenc_encrypt,
1217 .decrypt = aead_authenc_decrypt,
1218 .givencrypt = aead_authenc_givencrypt,
1219 .geniv = "<built-in>",
1220 .ivsize = DES3_EDE_BLOCK_SIZE,
1221 .maxauthsize = SHA256_DIGEST_SIZE,
1222 },
1223 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1224 DESC_HDR_SEL0_DEU |
1225 DESC_HDR_MODE0_DEU_CBC |
1226 DESC_HDR_MODE0_DEU_3DES |
1227 DESC_HDR_SEL1_MDEUA |
1228 DESC_HDR_MODE1_MDEU_INIT |
1229 DESC_HDR_MODE1_MDEU_PAD |
1230 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1231 },
1232 {
1233 .name = "authenc(hmac(md5),cbc(aes))",
1234 .driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1235 .blocksize = AES_BLOCK_SIZE,
1236 .aead = {
1237 .setkey = aead_authenc_setkey,
1238 .setauthsize = aead_authenc_setauthsize,
1239 .encrypt = aead_authenc_encrypt,
1240 .decrypt = aead_authenc_decrypt,
1241 .givencrypt = aead_authenc_givencrypt,
1242 .geniv = "<built-in>",
1243 .ivsize = AES_BLOCK_SIZE,
1244 .maxauthsize = MD5_DIGEST_SIZE,
1245 },
1246 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1247 DESC_HDR_SEL0_AESU |
1248 DESC_HDR_MODE0_AESU_CBC |
1249 DESC_HDR_SEL1_MDEUA |
1250 DESC_HDR_MODE1_MDEU_INIT |
1251 DESC_HDR_MODE1_MDEU_PAD |
1252 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1253 },
1254 {
1255 .name = "authenc(hmac(md5),cbc(des3_ede))",
1256 .driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1257 .blocksize = DES3_EDE_BLOCK_SIZE,
1258 .aead = {
1259 .setkey = aead_authenc_setkey,
1260 .setauthsize = aead_authenc_setauthsize,
1261 .encrypt = aead_authenc_encrypt,
1262 .decrypt = aead_authenc_decrypt,
1263 .givencrypt = aead_authenc_givencrypt,
1264 .geniv = "<built-in>",
1265 .ivsize = DES3_EDE_BLOCK_SIZE,
1266 .maxauthsize = MD5_DIGEST_SIZE,
1267 },
1268 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1269 DESC_HDR_SEL0_DEU |
1270 DESC_HDR_MODE0_DEU_CBC |
1271 DESC_HDR_MODE0_DEU_3DES |
1272 DESC_HDR_SEL1_MDEUA |
1273 DESC_HDR_MODE1_MDEU_INIT |
1274 DESC_HDR_MODE1_MDEU_PAD |
1275 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1276 }
1277};
1278
1279struct talitos_crypto_alg {
1280 struct list_head entry;
1281 struct device *dev;
1282 __be32 desc_hdr_template;
1283 struct crypto_alg crypto_alg;
1284};
1285
1286static int talitos_cra_init(struct crypto_tfm *tfm)
1287{
1288 struct crypto_alg *alg = tfm->__crt_alg;
1289 struct talitos_crypto_alg *talitos_alg =
1290 container_of(alg, struct talitos_crypto_alg, crypto_alg);
1291 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1292
1293 /* update context with ptr to dev */
1294 ctx->dev = talitos_alg->dev;
1295 /* copy descriptor header template value */
1296 ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1297
1298 /* random first IV */
1299 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
1300
1301 return 0;
1302}
1303
1304/*
1305 * given the alg's descriptor header template, determine whether the
1306 * descriptor type and the required primary/secondary execution units
1307 * match the hw capabilities described in the device tree node.
1308 */
1309static int hw_supports(struct device *dev, __be32 desc_hdr_template)
1310{
1311 struct talitos_private *priv = dev_get_drvdata(dev);
1312 int ret;
1313
1314 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
1315 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
1316
1317 if (SECONDARY_EU(desc_hdr_template))
1318 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
1319 & priv->exec_units);
1320
1321 return ret;
1322}
1323
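A small sketch of the capability test, using the header-field extractors from the top of the file (byte order elided; on real hardware the two masks come from the fsl,exec-units-mask and fsl,descriptor-types-mask device tree properties, so the values below are illustrative):

	#include <stdio.h>

	/* cf. DESC_TYPE/PRIMARY_EU/SECONDARY_EU in talitos.c */
	#define DESC_TYPE(hdr)		(((hdr) >> 3) & 0x1f)
	#define PRIMARY_EU(hdr)		(((hdr) >> 28) & 0xf)
	#define SECONDARY_EU(hdr)	(((hdr) >> 16) & 0xf)

	int main(void)
	{
		/* ipsec_esp (type 1), AESU primary (6), MDEU-A secondary (3) */
		unsigned int hdr = (1 << 3) | (6u << 28) | (3 << 16);
		unsigned int desc_types = 0xfe, exec_units = 0xfff;
		int ok;

		ok = ((1 << DESC_TYPE(hdr)) & desc_types) &&
		     ((1 << PRIMARY_EU(hdr)) & exec_units);
		if (SECONDARY_EU(hdr))
			ok = ok && ((1 << SECONDARY_EU(hdr)) & exec_units);

		printf("supported: %s\n", ok ? "yes" : "no");
		return 0;
	}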
1324static int __devexit talitos_remove(struct of_device *ofdev)
1325{
1326 struct device *dev = &ofdev->dev;
1327 struct talitos_private *priv = dev_get_drvdata(dev);
1328 struct talitos_crypto_alg *t_alg, *n;
1329 int i;
1330
1331 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1332 crypto_unregister_alg(&t_alg->crypto_alg);
1333 list_del(&t_alg->entry);
1334 kfree(t_alg);
1335 }
1336
1337 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1338 talitos_unregister_rng(dev);
1339
1340 kfree(priv->tail);
1341 kfree(priv->head);
1342
1343 if (priv->fifo)
1344 for (i = 0; i < priv->num_channels; i++)
1345 kfree(priv->fifo[i]);
1346
1347 kfree(priv->fifo);
1348 kfree(priv->head_lock);
1349 kfree(priv->tail_lock);
1350
1351 if (priv->irq != NO_IRQ) {
1352 free_irq(priv->irq, dev);
1353 irq_dispose_mapping(priv->irq);
1354 }
1355
1356 tasklet_kill(&priv->done_task);
1357 tasklet_kill(&priv->error_task);
1358
1359 iounmap(priv->reg);
1360
1361 dev_set_drvdata(dev, NULL);
1362
1363 kfree(priv);
1364
1365 return 0;
1366}
1367
1368static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1369 struct talitos_alg_template
1370 *template)
1371{
1372 struct talitos_crypto_alg *t_alg;
1373 struct crypto_alg *alg;
1374
1375 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
1376 if (!t_alg)
1377 return ERR_PTR(-ENOMEM);
1378
1379 alg = &t_alg->crypto_alg;
1380
1381 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1382 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1383 template->driver_name);
1384 alg->cra_module = THIS_MODULE;
1385 alg->cra_init = talitos_cra_init;
1386 alg->cra_priority = TALITOS_CRA_PRIORITY;
1387 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1388 alg->cra_blocksize = template->blocksize;
1389 alg->cra_alignmask = 0;
1390 alg->cra_type = &crypto_aead_type;
1391 alg->cra_ctxsize = sizeof(struct talitos_ctx);
1392 alg->cra_u.aead = template->aead;
1393
1394 t_alg->desc_hdr_template = template->desc_hdr_template;
1395 t_alg->dev = dev;
1396
1397 return t_alg;
1398}
1399
1400static int talitos_probe(struct of_device *ofdev,
1401 const struct of_device_id *match)
1402{
1403 struct device *dev = &ofdev->dev;
1404 struct device_node *np = ofdev->node;
1405 struct talitos_private *priv;
1406 const unsigned int *prop;
1407 int i, err;
1408
1409 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
1410 if (!priv)
1411 return -ENOMEM;
1412
1413 dev_set_drvdata(dev, priv);
1414
1415 priv->ofdev = ofdev;
1416
1417 tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
1418 tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
1419
1420 priv->irq = irq_of_parse_and_map(np, 0);
1421
1422 if (priv->irq == NO_IRQ) {
1423 dev_err(dev, "failed to map irq\n");
1424 err = -EINVAL;
1425 goto err_out;
1426 }
1427
1428 /* get the irq line */
1429 err = request_irq(priv->irq, talitos_interrupt, 0,
1430 dev_driver_string(dev), dev);
1431 if (err) {
1432 dev_err(dev, "failed to request irq %d\n", priv->irq);
1433 irq_dispose_mapping(priv->irq);
1434 priv->irq = NO_IRQ;
1435 goto err_out;
1436 }
1437
1438 priv->reg = of_iomap(np, 0);
1439 if (!priv->reg) {
1440 dev_err(dev, "failed to of_iomap\n");
1441 err = -ENOMEM;
1442 goto err_out;
1443 }
1444
1445 /* get SEC version capabilities from device tree */
1446 prop = of_get_property(np, "fsl,num-channels", NULL);
1447 if (prop)
1448 priv->num_channels = *prop;
1449
1450 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
1451 if (prop)
1452 priv->chfifo_len = *prop;
1453
1454 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
1455 if (prop)
1456 priv->exec_units = *prop;
1457
1458 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
1459 if (prop)
1460 priv->desc_types = *prop;
1461
1462 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
1463 !priv->exec_units || !priv->desc_types) {
1464 dev_err(dev, "invalid property data in device tree node\n");
1465 err = -EINVAL;
1466 goto err_out;
1467 }
1468
1469 of_node_put(np);
1470 np = NULL;
1471
1472 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1473 GFP_KERNEL);
1474 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1475 GFP_KERNEL);
1476 if (!priv->head_lock || !priv->tail_lock) {
1477 dev_err(dev, "failed to allocate fifo locks\n");
1478 err = -ENOMEM;
1479 goto err_out;
1480 }
1481
1482 for (i = 0; i < priv->num_channels; i++) {
1483 spin_lock_init(&priv->head_lock[i]);
1484 spin_lock_init(&priv->tail_lock[i]);
1485 }
1486
1487 priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1488 priv->num_channels, GFP_KERNEL);
1489 if (!priv->fifo) {
1490 dev_err(dev, "failed to allocate request fifo\n");
1491 err = -ENOMEM;
1492 goto err_out;
1493 }
1494
1495 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1496
1497 for (i = 0; i < priv->num_channels; i++) {
1498 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
1499 priv->fifo_len, GFP_KERNEL);
1500 if (!priv->fifo[i]) {
1501 dev_err(dev, "failed to allocate request fifo %d\n", i);
1502 err = -ENOMEM;
1503 goto err_out;
1504 }
1505 }
1506
1507 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1508 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1509 if (!priv->head || !priv->tail) {
1510 dev_err(dev, "failed to allocate request index space\n");
1511 err = -ENOMEM;
1512 goto err_out;
1513 }
1514
1515 /* reset and initialize the h/w */
1516 err = init_device(dev);
1517 if (err) {
1518 dev_err(dev, "failed to initialize device\n");
1519 goto err_out;
1520 }
1521
1522 /* register the RNG, if available */
1523 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
1524 err = talitos_register_rng(dev);
1525 if (err) {
1526 dev_err(dev, "failed to register hwrng: %d\n", err);
1527 goto err_out;
1528 } else
1529 dev_info(dev, "hwrng\n");
1530 }
1531
1532 /* register crypto algorithms the device supports */
1533 INIT_LIST_HEAD(&priv->alg_list);
1534
1535 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1536 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
1537 struct talitos_crypto_alg *t_alg;
1538
1539 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
1540 if (IS_ERR(t_alg)) {
1541 err = PTR_ERR(t_alg);
1542 goto err_out;
1543 }
1544
1545 err = crypto_register_alg(&t_alg->crypto_alg);
1546 if (err) {
1547 dev_err(dev, "%s alg registration failed\n",
1548 t_alg->crypto_alg.cra_driver_name);
1549 kfree(t_alg);
1550 } else {
1551 list_add_tail(&t_alg->entry, &priv->alg_list);
1552 dev_info(dev, "%s\n",
1553 t_alg->crypto_alg.cra_driver_name);
1554 }
1555 }
1556 }
1557
1558 return 0;
1559
1560err_out:
1561 talitos_remove(ofdev);
1562 if (np)
1563 of_node_put(np);
1564
1565 return err;
1566}
1567
1568static struct of_device_id talitos_match[] = {
1569 {
1570 .compatible = "fsl,sec2.0",
1571 },
1572 {},
1573};
1574MODULE_DEVICE_TABLE(of, talitos_match);
1575
1576static struct of_platform_driver talitos_driver = {
1577 .name = "talitos",
1578 .match_table = talitos_match,
1579 .probe = talitos_probe,
1580 .remove = __devexit_p(talitos_remove),
1581};
1582
1583static int __init talitos_init(void)
1584{
1585 return of_register_platform_driver(&talitos_driver);
1586}
1587module_init(talitos_init);
1588
1589static void __exit talitos_exit(void)
1590{
1591 of_unregister_platform_driver(&talitos_driver);
1592}
1593module_exit(talitos_exit);
1594
1595MODULE_LICENSE("GPL");
1596MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
1597MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
new file mode 100644
index 000000000000..c48a405abf70
--- /dev/null
+++ b/drivers/crypto/talitos.h
@@ -0,0 +1,199 @@
1/*
2 * Freescale SEC (talitos) device register and descriptor header defines
3 *
4 * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 */
30
31/*
32 * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
33 */
34
35/* global register offset addresses */
36#define TALITOS_MCR 0x1030 /* master control register */
37#define TALITOS_MCR_LO 0x1038
38#define TALITOS_MCR_SWR 0x1 /* s/w reset */
39#define TALITOS_IMR 0x1008 /* interrupt mask register */
40#define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */
41#define TALITOS_IMR_LO 0x100C
42#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
43#define TALITOS_ISR 0x1010 /* interrupt status register */
44#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
45#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
46#define TALITOS_ISR_LO 0x1014
47#define TALITOS_ICR 0x1018 /* interrupt clear register */
48#define TALITOS_ICR_LO 0x101C
49
50/* channel register address stride */
51#define TALITOS_CH_STRIDE 0x100
52
53/* channel configuration register */
54#define TALITOS_CCCR(ch)		((ch) * TALITOS_CH_STRIDE + 0x1108)
55#define TALITOS_CCCR_CONT		0x2    /* channel continue */
56#define TALITOS_CCCR_RESET		0x1    /* channel reset */
57#define TALITOS_CCCR_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x110c)
58#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
59#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
60#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
61
62/* CCPSR: channel pointer status register */
63#define TALITOS_CCPSR(ch)		((ch) * TALITOS_CH_STRIDE + 0x1110)
64#define TALITOS_CCPSR_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x1114)
65#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
66#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
67#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
68#define TALITOS_CCPSR_LO_SGDLZ 0x1000 /* s/g data len zero error */
69#define TALITOS_CCPSR_LO_FPZ 0x0800 /* fetch ptr zero error */
70#define TALITOS_CCPSR_LO_IDH 0x0400 /* illegal desc hdr error */
71#define TALITOS_CCPSR_LO_IEU 0x0200 /* invalid EU error */
72#define TALITOS_CCPSR_LO_EU 0x0100 /* EU error detected */
73#define TALITOS_CCPSR_LO_GB 0x0080 /* gather boundary error */
74#define TALITOS_CCPSR_LO_GRL 0x0040 /* gather return/length error */
75#define TALITOS_CCPSR_LO_SB 0x0020 /* scatter boundary error */
76#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
77
78/* channel fetch fifo register */
79#define TALITOS_FF(ch)			((ch) * TALITOS_CH_STRIDE + 0x1148)
80#define TALITOS_FF_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x114c)
81
82/* current descriptor pointer register */
83#define TALITOS_CDPR(ch)		((ch) * TALITOS_CH_STRIDE + 0x1140)
84#define TALITOS_CDPR_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x1144)
85
86/* descriptor buffer register */
87#define TALITOS_DESCBUF(ch)		((ch) * TALITOS_CH_STRIDE + 0x1180)
88#define TALITOS_DESCBUF_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x1184)
89
90/* gather link table */
91#define TALITOS_GATHER(ch)		((ch) * TALITOS_CH_STRIDE + 0x11c0)
92#define TALITOS_GATHER_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x11c4)
93
94/* scatter link table */
95#define TALITOS_SCATTER(ch)		((ch) * TALITOS_CH_STRIDE + 0x11e0)
96#define TALITOS_SCATTER_LO(ch)		((ch) * TALITOS_CH_STRIDE + 0x11e4)
97
98/* execution unit interrupt status registers */
99#define TALITOS_DEUISR 0x2030 /* DES unit */
100#define TALITOS_DEUISR_LO 0x2034
101#define TALITOS_AESUISR 0x4030 /* AES unit */
102#define TALITOS_AESUISR_LO 0x4034
103#define TALITOS_MDEUISR 0x6030 /* message digest unit */
104#define TALITOS_MDEUISR_LO 0x6034
105#define TALITOS_AFEUISR 0x8030 /* arc4 unit */
106#define TALITOS_AFEUISR_LO 0x8034
107#define TALITOS_RNGUISR 0xa030 /* random number unit */
108#define TALITOS_RNGUISR_LO 0xa034
109#define TALITOS_RNGUSR 0xa028 /* rng status */
110#define TALITOS_RNGUSR_LO 0xa02c
111#define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */
112#define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */
113#define TALITOS_RNGUDSR 0xa010 /* data size */
114#define TALITOS_RNGUDSR_LO 0xa014
115#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */
116#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */
117#define TALITOS_RNGURCR 0xa018 /* reset control */
118#define TALITOS_RNGURCR_LO 0xa01c
119#define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */
120#define TALITOS_PKEUISR 0xc030 /* public key unit */
121#define TALITOS_PKEUISR_LO 0xc034
122#define TALITOS_KEUISR 0xe030 /* kasumi unit */
123#define TALITOS_KEUISR_LO 0xe034
124#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
125#define TALITOS_CRCUISR_LO 0xf034
126
127/*
128 * talitos descriptor header (hdr) bits
129 */
130
131/* written back when done */
132#define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000)
133
134/* primary execution unit select */
135#define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000)
136#define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000)
137#define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000)
138#define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000)
139#define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000)
140#define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000)
141#define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000)
142#define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000)
143#define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000)
144#define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000)
145
146/* primary execution unit mode (MODE0) and derivatives */
147#define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000)
148#define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000)
149#define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000)
150#define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000)
151#define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000)
152#define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000)
153#define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000)
154#define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000)
155#define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000)
156#define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
157#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
158 DESC_HDR_MODE0_MDEU_HMAC)
159#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
160 DESC_HDR_MODE0_MDEU_HMAC)
161#define DESC_HDR_MODE0_MDEU_SHA1_HMAC (DESC_HDR_MODE0_MDEU_SHA1 | \
162 DESC_HDR_MODE0_MDEU_HMAC)
163
164/* secondary execution unit select (SEL1) */
165#define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000)
166#define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000)
167#define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000)
168#define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000)
169
170/* secondary execution unit mode (MODE1) and derivatives */
171#define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000)
172#define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800)
173#define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400)
174#define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200)
175#define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100)
176#define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
177#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
178 DESC_HDR_MODE1_MDEU_HMAC)
179#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
180 DESC_HDR_MODE1_MDEU_HMAC)
181#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \
182 DESC_HDR_MODE1_MDEU_HMAC)
183
184/* direction of overall data flow (DIR) */
185#define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002)
186
187/* request done notification (DN) */
188#define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001)
189
190/* descriptor types */
191#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3)
192#define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3)
193#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3)
194#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3)
195
196/* link table extent field bits */
197#define DESC_PTR_LNKTBL_JUMP 0x80
198#define DESC_PTR_LNKTBL_RETURN 0x02
199#define DESC_PTR_LNKTBL_NEXT 0x01
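The header bits above are OR'd into the single big-endian word the SEC fetches first for each descriptor. A minimal sketch (the particular combination is an assumption, not from the patch; talitos.c builds per-algorithm header templates the same way) for an AES-CBC encryption descriptor:

	/* Sketch only: select the AES unit, CBC mode, encrypt direction,
	 * the common non-snooping descriptor type, and request a done
	 * notification. On completion the SEC writes DESC_HDR_DONE back
	 * into this word.
	 */
	static const __be32 example_aes_cbc_encrypt_hdr =
		DESC_HDR_SEL0_AESU |
		DESC_HDR_MODE0_AESU_CBC |
		DESC_HDR_MODE0_ENCRYPT |
		DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
		DESC_HDR_DONE_NOTIFY;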
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
new file mode 100644
index 000000000000..d12498ec8a4e
--- /dev/null
+++ b/include/crypto/hash.h
@@ -0,0 +1,154 @@
1/*
2 * Hash: Hash algorithms under the crypto API
3 *
4 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#ifndef _CRYPTO_HASH_H
14#define _CRYPTO_HASH_H
15
16#include <linux/crypto.h>
17
18struct crypto_ahash {
19 struct crypto_tfm base;
20};
21
22static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
23{
24 return (struct crypto_ahash *)tfm;
25}
26
27static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name,
28 u32 type, u32 mask)
29{
30 type &= ~CRYPTO_ALG_TYPE_MASK;
31 mask &= ~CRYPTO_ALG_TYPE_MASK;
32 type |= CRYPTO_ALG_TYPE_AHASH;
33 mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
34
35 return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
36}
37
38static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
39{
40 return &tfm->base;
41}
42
43static inline void crypto_free_ahash(struct crypto_ahash *tfm)
44{
45 crypto_free_tfm(crypto_ahash_tfm(tfm));
46}
47
48static inline unsigned int crypto_ahash_alignmask(
49 struct crypto_ahash *tfm)
50{
51 return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
52}
53
54static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm)
55{
56 return &crypto_ahash_tfm(tfm)->crt_ahash;
57}
58
59static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
60{
61 return crypto_ahash_crt(tfm)->digestsize;
62}
63
64static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
65{
66 return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
67}
68
69static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
70{
71 crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
72}
73
74static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
75{
76 crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
77}
78
79static inline struct crypto_ahash *crypto_ahash_reqtfm(
80 struct ahash_request *req)
81{
82 return __crypto_ahash_cast(req->base.tfm);
83}
84
85static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
86{
87 return crypto_ahash_crt(tfm)->reqsize;
88}
89
90static inline int crypto_ahash_setkey(struct crypto_ahash *tfm,
91 const u8 *key, unsigned int keylen)
92{
93 struct ahash_tfm *crt = crypto_ahash_crt(tfm);
94
95 return crt->setkey(tfm, key, keylen);
96}
97
98static inline int crypto_ahash_digest(struct ahash_request *req)
99{
100 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
101 return crt->digest(req);
102}
103
104static inline void ahash_request_set_tfm(struct ahash_request *req,
105 struct crypto_ahash *tfm)
106{
107 req->base.tfm = crypto_ahash_tfm(tfm);
108}
109
110static inline struct ahash_request *ahash_request_alloc(
111 struct crypto_ahash *tfm, gfp_t gfp)
112{
113 struct ahash_request *req;
114
115 req = kmalloc(sizeof(struct ahash_request) +
116 crypto_ahash_reqsize(tfm), gfp);
117
118 if (likely(req))
119 ahash_request_set_tfm(req, tfm);
120
121 return req;
122}
123
124static inline void ahash_request_free(struct ahash_request *req)
125{
126 kfree(req);
127}
128
129static inline struct ahash_request *ahash_request_cast(
130 struct crypto_async_request *req)
131{
132 return container_of(req, struct ahash_request, base);
133}
134
135static inline void ahash_request_set_callback(struct ahash_request *req,
136 u32 flags,
137 crypto_completion_t complete,
138 void *data)
139{
140 req->base.complete = complete;
141 req->base.data = data;
142 req->base.flags = flags;
143}
144
145static inline void ahash_request_set_crypt(struct ahash_request *req,
146 struct scatterlist *src, u8 *result,
147 unsigned int nbytes)
148{
149 req->src = src;
150 req->nbytes = nbytes;
151 req->result = result;
152}
153
154#endif /* _CRYPTO_HASH_H */
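Usage sketch for the interface above (an assumed caller, not part of the patch): a one-shot digest of a scatterlist. The algorithm name "sha256" is an assumption, and the completion callback is left NULL for brevity; an asynchronous provider would instead return -EINPROGRESS from crypto_ahash_digest() and later invoke the completion set via ahash_request_set_callback().

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int example_one_shot_digest(struct scatterlist *sg,
					   unsigned int nbytes, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		int err;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* no completion callback: assumes a synchronous provider */
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, sg, out, nbytes);

		err = crypto_ahash_digest(req);	/* -EINPROGRESS if async */

		ahash_request_free(req);
	out_free_tfm:
		crypto_free_ahash(tfm);
		return err;
	}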
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
new file mode 100644
index 000000000000..917ae57bad4a
--- /dev/null
+++ b/include/crypto/internal/hash.h
@@ -0,0 +1,78 @@
1/*
2 * Hash algorithms.
3 *
4 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#ifndef _CRYPTO_INTERNAL_HASH_H
14#define _CRYPTO_INTERNAL_HASH_H
15
16#include <crypto/algapi.h>
17#include <crypto/hash.h>
18
19struct ahash_request;
20struct scatterlist;
21
22struct crypto_hash_walk {
23 char *data;
24
25 unsigned int offset;
26 unsigned int alignmask;
27
28 struct page *pg;
29 unsigned int entrylen;
30
31 unsigned int total;
32 struct scatterlist *sg;
33
34 unsigned int flags;
35};
36
37extern const struct crypto_type crypto_ahash_type;
38
39int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
40int crypto_hash_walk_first(struct ahash_request *req,
41 struct crypto_hash_walk *walk);
42
43static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
44{
45 return crypto_tfm_ctx(&tfm->base);
46}
47
48static inline struct ahash_alg *crypto_ahash_alg(
49 struct crypto_ahash *tfm)
50{
51 return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash;
52}
53
54static inline int ahash_enqueue_request(struct crypto_queue *queue,
55 struct ahash_request *request)
56{
57 return crypto_enqueue_request(queue, &request->base);
58}
59
60static inline struct ahash_request *ahash_dequeue_request(
61 struct crypto_queue *queue)
62{
63 return ahash_request_cast(crypto_dequeue_request(queue));
64}
65
66static inline void *ahash_request_ctx(struct ahash_request *req)
67{
68 return req->__ctx;
69}
70
71static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
72 struct crypto_ahash *tfm)
73{
74 return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
75}
76
77#endif /* _CRYPTO_INTERNAL_HASH_H */
78
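The walker above is driven in a loop: crypto_hash_walk_first() maps the first scatterlist segment and returns its length (the mapped pointer lands in walk.data), and each crypto_hash_walk_done() call retires the current segment and maps the next, returning 0 once the request's nbytes are consumed. A minimal sketch of a provider's update path under that pattern (example_consume() is hypothetical; the crc32c ahash conversion in this series drives the walker the same way):

	static int example_update(struct ahash_request *req)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
		     nbytes = crypto_hash_walk_done(&walk, 0))
			example_consume(walk.data, nbytes);

		return nbytes;	/* 0 on clean completion, negative on error */
	}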
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 425824bd49f3..c43dc47fdf75 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -30,15 +30,17 @@
30 */ 30 */
31#define CRYPTO_ALG_TYPE_MASK 0x0000000f 31#define CRYPTO_ALG_TYPE_MASK 0x0000000f
32#define CRYPTO_ALG_TYPE_CIPHER 0x00000001 32#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
33#define CRYPTO_ALG_TYPE_DIGEST 0x00000002 33#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
34#define CRYPTO_ALG_TYPE_HASH 0x00000003 34#define CRYPTO_ALG_TYPE_AEAD 0x00000003
35#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 35#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
36#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 36#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
37#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 37#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
38#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008 38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
39#define CRYPTO_ALG_TYPE_AEAD 0x00000009 39#define CRYPTO_ALG_TYPE_HASH 0x00000009
40#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
40 41
41#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 42#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
43#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
42#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c 44#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
43 45
44#define CRYPTO_ALG_LARVAL 0x00000010 46#define CRYPTO_ALG_LARVAL 0x00000010
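The renumbering of DIGEST and HASH next to the new AHASH value is what makes CRYPTO_ALG_TYPE_AHASH_MASK work: under the 0xc mask, 0x8, 0x9 and 0xa all collapse to 0x8, so a lookup made the way crypto_alloc_ahash() does (type AHASH, mask AHASH_MASK) can be satisfied by a digest, hash or ahash algorithm alike. An illustrative compile-time check (not in the patch):

	static inline void example_hash_type_grouping(void)
	{
		/* 0x8 (DIGEST), 0x9 (HASH) and 0xa (AHASH) share the
		 * 0xc-masked value 0x8 */
		BUILD_BUG_ON((CRYPTO_ALG_TYPE_DIGEST & CRYPTO_ALG_TYPE_AHASH_MASK) !=
			     (CRYPTO_ALG_TYPE_AHASH & CRYPTO_ALG_TYPE_AHASH_MASK));
		BUILD_BUG_ON((CRYPTO_ALG_TYPE_HASH & CRYPTO_ALG_TYPE_AHASH_MASK) !=
			     (CRYPTO_ALG_TYPE_AHASH & CRYPTO_ALG_TYPE_AHASH_MASK));
	}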
@@ -102,6 +104,7 @@ struct crypto_async_request;
102struct crypto_aead; 104struct crypto_aead;
103struct crypto_blkcipher; 105struct crypto_blkcipher;
104struct crypto_hash; 106struct crypto_hash;
107struct crypto_ahash;
105struct crypto_tfm; 108struct crypto_tfm;
106struct crypto_type; 109struct crypto_type;
107struct aead_givcrypt_request; 110struct aead_givcrypt_request;
@@ -131,6 +134,16 @@ struct ablkcipher_request {
131 void *__ctx[] CRYPTO_MINALIGN_ATTR; 134 void *__ctx[] CRYPTO_MINALIGN_ATTR;
132}; 135};
133 136
137struct ahash_request {
138 struct crypto_async_request base;
139
140 unsigned int nbytes;
141 struct scatterlist *src;
142 u8 *result;
143
144 void *__ctx[] CRYPTO_MINALIGN_ATTR;
145};
146
134/** 147/**
135 * struct aead_request - AEAD request 148 * struct aead_request - AEAD request
136 * @base: Common attributes for async crypto requests 149 * @base: Common attributes for async crypto requests
@@ -195,6 +208,17 @@ struct ablkcipher_alg {
195 unsigned int ivsize; 208 unsigned int ivsize;
196}; 209};
197 210
211struct ahash_alg {
212 int (*init)(struct ahash_request *req);
213 int (*update)(struct ahash_request *req);
214 int (*final)(struct ahash_request *req);
215 int (*digest)(struct ahash_request *req);
216 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
217 unsigned int keylen);
218
219 unsigned int digestsize;
220};
221
198struct aead_alg { 222struct aead_alg {
199 int (*setkey)(struct crypto_aead *tfm, const u8 *key, 223 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
200 unsigned int keylen); 224 unsigned int keylen);
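A registration sketch for the new hook table (hypothetical driver: every example_* name, the priority, the ASYNC flag and the SHA-256 parameters are assumptions; the crc32c ahash conversion in this series fills the same fields). The struct is registered with crypto_register_alg() as usual:

	static struct crypto_alg example_sha256_alg = {
		.cra_name	 = "sha256",
		.cra_driver_name = "sha256-example",
		.cra_priority	 = 300,
		.cra_flags	 = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
		.cra_blocksize	 = 64,			/* SHA-256 block size */
		.cra_type	 = &crypto_ahash_type,	/* crypto/internal/hash.h */
		.cra_module	 = THIS_MODULE,
		.cra_u.ahash	 = {
			.init	    = example_init,
			.update	    = example_update,
			.final	    = example_final,
			.digest	    = example_digest,
			/* .setkey omitted: SHA-256 is unkeyed */
			.digestsize = 32,		/* SHA-256 digest size */
		},
	};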
@@ -272,6 +296,7 @@ struct compress_alg {
272#define cra_cipher cra_u.cipher 296#define cra_cipher cra_u.cipher
273#define cra_digest cra_u.digest 297#define cra_digest cra_u.digest
274#define cra_hash cra_u.hash 298#define cra_hash cra_u.hash
299#define cra_ahash cra_u.ahash
275#define cra_compress cra_u.compress 300#define cra_compress cra_u.compress
276 301
277struct crypto_alg { 302struct crypto_alg {
@@ -298,6 +323,7 @@ struct crypto_alg {
298 struct cipher_alg cipher; 323 struct cipher_alg cipher;
299 struct digest_alg digest; 324 struct digest_alg digest;
300 struct hash_alg hash; 325 struct hash_alg hash;
326 struct ahash_alg ahash;
301 struct compress_alg compress; 327 struct compress_alg compress;
302 } cra_u; 328 } cra_u;
303 329
@@ -383,6 +409,18 @@ struct hash_tfm {
383 unsigned int digestsize; 409 unsigned int digestsize;
384}; 410};
385 411
412struct ahash_tfm {
413 int (*init)(struct ahash_request *req);
414 int (*update)(struct ahash_request *req);
415 int (*final)(struct ahash_request *req);
416 int (*digest)(struct ahash_request *req);
417 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
418 unsigned int keylen);
419
420 unsigned int digestsize;
421 unsigned int reqsize;
422};
423
386struct compress_tfm { 424struct compress_tfm {
387 int (*cot_compress)(struct crypto_tfm *tfm, 425 int (*cot_compress)(struct crypto_tfm *tfm,
388 const u8 *src, unsigned int slen, 426 const u8 *src, unsigned int slen,
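The extra reqsize field is the counterpart of ahash_request_alloc() in crypto/hash.h above: whatever a provider stores there at tfm init time is reserved after every struct ahash_request and reached with ahash_request_ctx(). A sketch under those assumptions (the crc32c conversion reserves a u32 this way; the names here are hypothetical):

	struct example_req_ctx {
		u32 partial[8];		/* assumed per-request running state */
	};

	static int example_cra_init(struct crypto_tfm *tfm)
	{
		/* room for our state after every request on this tfm */
		tfm->crt_ahash.reqsize = sizeof(struct example_req_ctx);
		return 0;
	}

	static int example_init(struct ahash_request *req)
	{
		struct example_req_ctx *ctx = ahash_request_ctx(req);

		memset(ctx, 0, sizeof(*ctx));
		return 0;
	}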
@@ -397,6 +435,7 @@ struct compress_tfm {
397#define crt_blkcipher crt_u.blkcipher 435#define crt_blkcipher crt_u.blkcipher
398#define crt_cipher crt_u.cipher 436#define crt_cipher crt_u.cipher
399#define crt_hash crt_u.hash 437#define crt_hash crt_u.hash
438#define crt_ahash crt_u.ahash
400#define crt_compress crt_u.compress 439#define crt_compress crt_u.compress
401 440
402struct crypto_tfm { 441struct crypto_tfm {
@@ -409,6 +448,7 @@ struct crypto_tfm {
409 struct blkcipher_tfm blkcipher; 448 struct blkcipher_tfm blkcipher;
410 struct cipher_tfm cipher; 449 struct cipher_tfm cipher;
411 struct hash_tfm hash; 450 struct hash_tfm hash;
451 struct ahash_tfm ahash;
412 struct compress_tfm compress; 452 struct compress_tfm compress;
413 } crt_u; 453 } crt_u;
414 454