Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                       |  40
-rw-r--r--  crypto/Makefile                      |   5
-rw-r--r--  crypto/ablkcipher.c                  | 281
-rw-r--r--  crypto/aead.c                        |   5
-rw-r--r--  crypto/aes_generic.c                 |   4
-rw-r--r--  crypto/ahash.c                       |   1
-rw-r--r--  crypto/algapi.c                      |   7
-rw-r--r--  crypto/algboss.c                     |   5
-rw-r--r--  crypto/ansi_cprng.c                  |  82
-rw-r--r--  crypto/anubis.c                      |  22
-rw-r--r--  crypto/api.c                         |  13
-rw-r--r--  crypto/async_tx/Kconfig              |   5
-rw-r--r--  crypto/async_tx/async_pq.c           |  75
-rw-r--r--  crypto/async_tx/async_raid6_recov.c  | 121
-rw-r--r--  crypto/async_tx/async_tx.c           |  46
-rw-r--r--  crypto/async_tx/async_xor.c          |  33
-rw-r--r--  crypto/async_tx/raid6test.c          |   8
-rw-r--r--  crypto/authenc.c                     |  63
-rw-r--r--  crypto/blowfish.c                    |  18
-rw-r--r--  crypto/camellia.c                    | 616
-rw-r--r--  crypto/cast5.c                       |  14
-rw-r--r--  crypto/cast6.c                       | 122
-rw-r--r--  crypto/cipher.c                      |   2
-rw-r--r--  crypto/compress.c                    |   4
-rw-r--r--  crypto/crc32c.c                      |   6
-rw-r--r--  crypto/cryptd.c                      |  11
-rw-r--r--  crypto/crypto_null.c                 |   8
-rw-r--r--  crypto/ctr.c                         |   2
-rw-r--r--  crypto/deflate.c                     |  20
-rw-r--r--  crypto/des_generic.c                 |   3
-rw-r--r--  crypto/digest.c                      | 240
-rw-r--r--  crypto/ecb.c                         |   2
-rw-r--r--  crypto/fcrypt.c                      |   6
-rw-r--r--  crypto/gcm.c                         | 394
-rw-r--r--  crypto/hash.c                        | 183
-rw-r--r--  crypto/hmac.c                        |   1
-rw-r--r--  crypto/internal.h                    |   2
-rw-r--r--  crypto/md5.c                         |  41
-rw-r--r--  crypto/pcrypt.c                      | 567
-rw-r--r--  crypto/proc.c                        |  19
-rw-r--r--  crypto/rng.c                         |   1
-rw-r--r--  crypto/scatterwalk.c                 |   2
-rw-r--r--  crypto/seqiv.c                       |   1
-rw-r--r--  crypto/shash.c                       |   2
-rw-r--r--  crypto/tcrypt.c                      | 345
-rw-r--r--  crypto/tcrypt.h                      |  29
-rw-r--r--  crypto/testmgr.c                     | 175
-rw-r--r--  crypto/testmgr.h                     |  79
-rw-r--r--  crypto/twofish_generic.c (renamed from crypto/twofish.c) |   1
-rw-r--r--  crypto/vmac.c                        |  75
-rw-r--r--  crypto/xor.c                         |   1
-rw-r--r--  crypto/xts.c                         |   2
52 files changed, 2657 insertions(+), 1153 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 26b5dd0cb564..1cd497d7a15a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -28,7 +28,7 @@ config CRYPTO_FIPS
28 This options enables the fips boot option which is 28 This options enables the fips boot option which is
29 required if you want to system to operate in a FIPS 200 29 required if you want to system to operate in a FIPS 200
30 certification. You should say no unless you know what 30 certification. You should say no unless you know what
31 this is. Note that CRYPTO_ANSI_CPRNG is requred if this 31 this is. Note that CRYPTO_ANSI_CPRNG is required if this
32 option is selected 32 option is selected
33 33
34config CRYPTO_ALGAPI 34config CRYPTO_ALGAPI
@@ -80,6 +80,11 @@ config CRYPTO_RNG2
80 80
81config CRYPTO_PCOMP 81config CRYPTO_PCOMP
82 tristate 82 tristate
83 select CRYPTO_PCOMP2
84 select CRYPTO_ALGAPI
85
86config CRYPTO_PCOMP2
87 tristate
83 select CRYPTO_ALGAPI2 88 select CRYPTO_ALGAPI2
84 89
85config CRYPTO_MANAGER 90config CRYPTO_MANAGER
@@ -94,7 +99,15 @@ config CRYPTO_MANAGER2
94 select CRYPTO_AEAD2 99 select CRYPTO_AEAD2
95 select CRYPTO_HASH2 100 select CRYPTO_HASH2
96 select CRYPTO_BLKCIPHER2 101 select CRYPTO_BLKCIPHER2
97 select CRYPTO_PCOMP 102 select CRYPTO_PCOMP2
103
104config CRYPTO_MANAGER_TESTS
105 bool "Run algolithms' self-tests"
106 default y
107 depends on CRYPTO_MANAGER2
108 help
109 Run cryptomanager's tests for the new crypto algorithms being
110 registered.
98 111
99config CRYPTO_GF128MUL 112config CRYPTO_GF128MUL
100 tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" 113 tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -114,6 +127,16 @@ config CRYPTO_NULL
114 help 127 help
115 These are 'Null' algorithms, used by IPsec, which do nothing. 128 These are 'Null' algorithms, used by IPsec, which do nothing.
116 129
130config CRYPTO_PCRYPT
131 tristate "Parallel crypto engine (EXPERIMENTAL)"
132 depends on SMP && EXPERIMENTAL
133 select PADATA
134 select CRYPTO_MANAGER
135 select CRYPTO_AEAD
136 help
137 This converts an arbitrary crypto algorithm into a parallel
138 algorithm that executes in kernel threads.
139
117config CRYPTO_WORKQUEUE 140config CRYPTO_WORKQUEUE
118 tristate 141 tristate
119 142
@@ -440,6 +463,15 @@ config CRYPTO_WP512
440 See also: 463 See also:
441 <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html> 464 <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
442 465
466config CRYPTO_GHASH_CLMUL_NI_INTEL
467 tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
468 depends on (X86 || UML_X86) && 64BIT
469 select CRYPTO_SHASH
470 select CRYPTO_CRYPTD
471 help
472 GHASH is message digest algorithm for GCM (Galois/Counter Mode).
473 The implementation is accelerated by CLMUL-NI of Intel.
474
443comment "Ciphers" 475comment "Ciphers"
444 476
445config CRYPTO_AES 477config CRYPTO_AES
@@ -807,8 +839,8 @@ config CRYPTO_ANSI_CPRNG
807 help 839 help
808 This option enables the generic pseudo random number generator 840 This option enables the generic pseudo random number generator
809 for cryptographic modules. Uses the Algorithm specified in 841 for cryptographic modules. Uses the Algorithm specified in
810 ANSI X9.31 A.2.4. Not this option must be enabled if CRYPTO_FIPS 842 ANSI X9.31 A.2.4. Note that this option must be enabled if
811 is selected 843 CRYPTO_FIPS is selected
812 844
813source "drivers/crypto/Kconfig" 845source "drivers/crypto/Kconfig"
814 846
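
The CRYPTO_PCRYPT entry above wraps an existing algorithm in the new pcrypt template so requests execute in parallel kernel threads. For context, a minimal sketch (not part of this patch) of how a kernel crypto API user might request such an instance; the inner authenc algorithm string is an assumed example, not mandated by the patch:

#include <linux/err.h>
#include <linux/crypto.h>

/* Sketch only: ask for a pcrypt-parallelized AEAD.  The "pcrypt(...)"
 * name follows the usual template instance-naming convention; the
 * wrapped algorithm here is an assumed example. */
static struct crypto_aead *demo_alloc_parallel_aead(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))",
				0, 0);
	return IS_ERR(tfm) ? NULL : tfm;
}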
diff --git a/crypto/Makefile b/crypto/Makefile
index 9e8f61908cb5..423b7de61f93 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -26,7 +26,7 @@ crypto_hash-objs += ahash.o
26crypto_hash-objs += shash.o 26crypto_hash-objs += shash.o
27obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o 27obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
28 28
29obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o 29obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
30 30
31cryptomgr-objs := algboss.o testmgr.o 31cryptomgr-objs := algboss.o testmgr.o
32 32
@@ -56,11 +56,12 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
56obj-$(CONFIG_CRYPTO_CTR) += ctr.o 56obj-$(CONFIG_CRYPTO_CTR) += ctr.o
57obj-$(CONFIG_CRYPTO_GCM) += gcm.o 57obj-$(CONFIG_CRYPTO_GCM) += gcm.o
58obj-$(CONFIG_CRYPTO_CCM) += ccm.o 58obj-$(CONFIG_CRYPTO_CCM) += ccm.o
59obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
59obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o 60obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
60obj-$(CONFIG_CRYPTO_DES) += des_generic.o 61obj-$(CONFIG_CRYPTO_DES) += des_generic.o
61obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o 62obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
62obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o 63obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
63obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o 64obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
64obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o 65obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
65obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o 66obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
66obj-$(CONFIG_CRYPTO_AES) += aes_generic.o 67obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index f6f08336df5d..a854df2a5a4b 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Asynchronous block chaining cipher operations. 2 * Asynchronous block chaining cipher operations.
3 * 3 *
4 * This is the asynchronous version of blkcipher.c indicating completion 4 * This is the asynchronous version of blkcipher.c indicating completion
5 * via a callback. 5 * via a callback.
6 * 6 *
@@ -8,7 +8,7 @@
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option) 11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version. 12 * any later version.
13 * 13 *
14 */ 14 */
@@ -24,10 +24,287 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26 26
27#include <crypto/scatterwalk.h>
28
27#include "internal.h" 29#include "internal.h"
28 30
29static const char *skcipher_default_geniv __read_mostly; 31static const char *skcipher_default_geniv __read_mostly;
30 32
33struct ablkcipher_buffer {
34 struct list_head entry;
35 struct scatter_walk dst;
36 unsigned int len;
37 void *data;
38};
39
40enum {
41 ABLKCIPHER_WALK_SLOW = 1 << 0,
42};
43
44static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
45{
46 scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
47}
48
49void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
50{
51 struct ablkcipher_buffer *p, *tmp;
52
53 list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
54 ablkcipher_buffer_write(p);
55 list_del(&p->entry);
56 kfree(p);
57 }
58}
59EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
60
61static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
62 struct ablkcipher_buffer *p)
63{
64 p->dst = walk->out;
65 list_add_tail(&p->entry, &walk->buffers);
66}
67
68/* Get a spot of the specified length that does not straddle a page.
69 * The caller needs to ensure that there is enough space for this operation.
70 */
71static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
72{
73 u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
74 return max(start, end_page);
75}
76
77static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
78 unsigned int bsize)
79{
80 unsigned int n = bsize;
81
82 for (;;) {
83 unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
84
85 if (len_this_page > n)
86 len_this_page = n;
87 scatterwalk_advance(&walk->out, n);
88 if (n == len_this_page)
89 break;
90 n -= len_this_page;
91 scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
92 }
93
94 return bsize;
95}
96
97static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
98 unsigned int n)
99{
100 scatterwalk_advance(&walk->in, n);
101 scatterwalk_advance(&walk->out, n);
102
103 return n;
104}
105
106static int ablkcipher_walk_next(struct ablkcipher_request *req,
107 struct ablkcipher_walk *walk);
108
109int ablkcipher_walk_done(struct ablkcipher_request *req,
110 struct ablkcipher_walk *walk, int err)
111{
112 struct crypto_tfm *tfm = req->base.tfm;
113 unsigned int nbytes = 0;
114
115 if (likely(err >= 0)) {
116 unsigned int n = walk->nbytes - err;
117
118 if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
119 n = ablkcipher_done_fast(walk, n);
120 else if (WARN_ON(err)) {
121 err = -EINVAL;
122 goto err;
123 } else
124 n = ablkcipher_done_slow(walk, n);
125
126 nbytes = walk->total - n;
127 err = 0;
128 }
129
130 scatterwalk_done(&walk->in, 0, nbytes);
131 scatterwalk_done(&walk->out, 1, nbytes);
132
133err:
134 walk->total = nbytes;
135 walk->nbytes = nbytes;
136
137 if (nbytes) {
138 crypto_yield(req->base.flags);
139 return ablkcipher_walk_next(req, walk);
140 }
141
142 if (walk->iv != req->info)
143 memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
144 if (walk->iv_buffer)
145 kfree(walk->iv_buffer);
146
147 return err;
148}
149EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
150
151static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
152 struct ablkcipher_walk *walk,
153 unsigned int bsize,
154 unsigned int alignmask,
155 void **src_p, void **dst_p)
156{
157 unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
158 struct ablkcipher_buffer *p;
159 void *src, *dst, *base;
160 unsigned int n;
161
162 n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
163 n += (aligned_bsize * 3 - (alignmask + 1) +
164 (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
165
166 p = kmalloc(n, GFP_ATOMIC);
167 if (!p)
168 return ablkcipher_walk_done(req, walk, -ENOMEM);
169
170 base = p + 1;
171
172 dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
173 src = dst = ablkcipher_get_spot(dst, bsize);
174
175 p->len = bsize;
176 p->data = dst;
177
178 scatterwalk_copychunks(src, &walk->in, bsize, 0);
179
180 ablkcipher_queue_write(walk, p);
181
182 walk->nbytes = bsize;
183 walk->flags |= ABLKCIPHER_WALK_SLOW;
184
185 *src_p = src;
186 *dst_p = dst;
187
188 return 0;
189}
190
191static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
192 struct crypto_tfm *tfm,
193 unsigned int alignmask)
194{
195 unsigned bs = walk->blocksize;
196 unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
197 unsigned aligned_bs = ALIGN(bs, alignmask + 1);
198 unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
199 (alignmask + 1);
200 u8 *iv;
201
202 size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
203 walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
204 if (!walk->iv_buffer)
205 return -ENOMEM;
206
207 iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
208 iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
209 iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
210 iv = ablkcipher_get_spot(iv, ivsize);
211
212 walk->iv = memcpy(iv, walk->iv, ivsize);
213 return 0;
214}
215
216static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
217 struct ablkcipher_walk *walk)
218{
219 walk->src.page = scatterwalk_page(&walk->in);
220 walk->src.offset = offset_in_page(walk->in.offset);
221 walk->dst.page = scatterwalk_page(&walk->out);
222 walk->dst.offset = offset_in_page(walk->out.offset);
223
224 return 0;
225}
226
227static int ablkcipher_walk_next(struct ablkcipher_request *req,
228 struct ablkcipher_walk *walk)
229{
230 struct crypto_tfm *tfm = req->base.tfm;
231 unsigned int alignmask, bsize, n;
232 void *src, *dst;
233 int err;
234
235 alignmask = crypto_tfm_alg_alignmask(tfm);
236 n = walk->total;
237 if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
238 req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
239 return ablkcipher_walk_done(req, walk, -EINVAL);
240 }
241
242 walk->flags &= ~ABLKCIPHER_WALK_SLOW;
243 src = dst = NULL;
244
245 bsize = min(walk->blocksize, n);
246 n = scatterwalk_clamp(&walk->in, n);
247 n = scatterwalk_clamp(&walk->out, n);
248
249 if (n < bsize ||
250 !scatterwalk_aligned(&walk->in, alignmask) ||
251 !scatterwalk_aligned(&walk->out, alignmask)) {
252 err = ablkcipher_next_slow(req, walk, bsize, alignmask,
253 &src, &dst);
254 goto set_phys_lowmem;
255 }
256
257 walk->nbytes = n;
258
259 return ablkcipher_next_fast(req, walk);
260
261set_phys_lowmem:
262 if (err >= 0) {
263 walk->src.page = virt_to_page(src);
264 walk->dst.page = virt_to_page(dst);
265 walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
266 walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
267 }
268
269 return err;
270}
271
272static int ablkcipher_walk_first(struct ablkcipher_request *req,
273 struct ablkcipher_walk *walk)
274{
275 struct crypto_tfm *tfm = req->base.tfm;
276 unsigned int alignmask;
277
278 alignmask = crypto_tfm_alg_alignmask(tfm);
279 if (WARN_ON_ONCE(in_irq()))
280 return -EDEADLK;
281
282 walk->nbytes = walk->total;
283 if (unlikely(!walk->total))
284 return 0;
285
286 walk->iv_buffer = NULL;
287 walk->iv = req->info;
288 if (unlikely(((unsigned long)walk->iv & alignmask))) {
289 int err = ablkcipher_copy_iv(walk, tfm, alignmask);
290 if (err)
291 return err;
292 }
293
294 scatterwalk_start(&walk->in, walk->in.sg);
295 scatterwalk_start(&walk->out, walk->out.sg);
296
297 return ablkcipher_walk_next(req, walk);
298}
299
300int ablkcipher_walk_phys(struct ablkcipher_request *req,
301 struct ablkcipher_walk *walk)
302{
303 walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
304 return ablkcipher_walk_first(req, walk);
305}
306EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
307
31static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, 308static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
32 unsigned int keylen) 309 unsigned int keylen)
33{ 310{
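
The hunk above adds a low-level walk interface (ablkcipher_walk_phys() / ablkcipher_walk_done()) that hands a driver one page-contiguous, alignment-checked chunk of the source and destination scatterlists at a time. A minimal sketch of the intended consumption pattern, assuming the usual ablkcipher_walk_init() helper in the matching header; demo_process_chunk() is hypothetical:

/* Hypothetical hardware submit routine: consumes up to nbytes from
 * walk->src.page/offset into walk->dst.page/offset and returns how
 * many bytes it actually handled. */
static unsigned int demo_process_chunk(struct ablkcipher_walk *walk,
				       unsigned int nbytes);

static int demo_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_walk walk;
	unsigned int nbytes;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (!err && (nbytes = walk.nbytes) != 0) {
		unsigned int done = demo_process_chunk(&walk, nbytes);

		/* the third argument reports bytes left unprocessed */
		err = ablkcipher_walk_done(req, &walk, nbytes - done);
	}
	return err;
}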
diff --git a/crypto/aead.c b/crypto/aead.c
index d9aa733db164..6729e8ff68e7 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * AEAD: Authenticated Encryption with Associated Data 2 * AEAD: Authenticated Encryption with Associated Data
3 * 3 *
4 * This file provides API support for AEAD algorithms. 4 * This file provides API support for AEAD algorithms.
5 * 5 *
6 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> 6 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option) 10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version. 11 * any later version.
12 * 12 *
13 */ 13 */
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/seq_file.h> 23#include <linux/seq_file.h>
23 24
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index e78b7ee44a74..a68c73dae15a 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * AES Cipher Algorithm. 4 * AES Cipher Algorithm.
@@ -1127,7 +1127,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
1127 1127
1128#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) 1128#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
1129 1129
1130#define imix_col(y,x) do { \ 1130#define imix_col(y, x) do { \
1131 u = star_x(x); \ 1131 u = star_x(x); \
1132 v = star_x(u); \ 1132 v = star_x(u); \
1133 w = star_x(v); \ 1133 w = star_x(v); \
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 33a4ff45f842..b8c59b889c6e 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -78,7 +78,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
78 walk->data -= walk->offset; 78 walk->data -= walk->offset;
79 79
80 if (nbytes && walk->offset & alignmask && !err) { 80 if (nbytes && walk->offset & alignmask && !err) {
81 walk->offset += alignmask - 1;
82 walk->offset = ALIGN(walk->offset, alignmask + 1); 81 walk->offset = ALIGN(walk->offset, alignmask + 1);
83 walk->data += walk->offset; 82 walk->data += walk->offset;
84 83
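
The dropped line pre-biased the offset before ALIGN(), which already rounds up to the next multiple of (alignmask + 1), so offsets just below a boundary were pushed one full step past it. A worked example with assumed values:

/* Assumed values: alignmask = 3, i.e. 4-byte alignment.  This branch
 * only runs for an unaligned offset; take walk->offset = 7.
 *
 *   before: offset += alignmask - 1;      offset = 9
 *           offset  = ALIGN(offset, 4);   offset = 12  (3 bytes skipped)
 *   after:  offset  = ALIGN(offset, 4);   offset = 8   (next boundary)
 */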
diff --git a/crypto/algapi.c b/crypto/algapi.c
index f149b1c8b76d..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -17,6 +17,7 @@
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/slab.h>
20#include <linux/string.h> 21#include <linux/string.h>
21 22
22#include "internal.h" 23#include "internal.h"
@@ -230,7 +231,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
230 list_add(&alg->cra_list, &crypto_alg_list); 231 list_add(&alg->cra_list, &crypto_alg_list);
231 list_add(&larval->alg.cra_list, &crypto_alg_list); 232 list_add(&larval->alg.cra_list, &crypto_alg_list);
232 233
233out: 234out:
234 return larval; 235 return larval;
235 236
236free_larval: 237free_larval:
@@ -388,7 +389,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
388{ 389{
389 int ret; 390 int ret;
390 LIST_HEAD(list); 391 LIST_HEAD(list);
391 392
392 down_write(&crypto_alg_sem); 393 down_write(&crypto_alg_sem);
393 ret = crypto_remove_alg(alg, &list); 394 ret = crypto_remove_alg(alg, &list);
394 up_write(&crypto_alg_sem); 395 up_write(&crypto_alg_sem);
@@ -543,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
543{ 544{
544 int err = -EINVAL; 545 int err = -EINVAL;
545 546
546 if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset) 547 if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
547 goto out; 548 goto out;
548 549
549 spawn->frontend = frontend; 550 spawn->frontend = frontend;
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 412241ce4cfa..40bd391f34d9 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -19,6 +19,7 @@
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/slab.h>
22#include <linux/string.h> 23#include <linux/string.h>
23 24
24#include "internal.h" 25#include "internal.h"
@@ -205,6 +206,7 @@ err:
205 return NOTIFY_OK; 206 return NOTIFY_OK;
206} 207}
207 208
209#ifdef CONFIG_CRYPTO_MANAGER_TESTS
208static int cryptomgr_test(void *data) 210static int cryptomgr_test(void *data)
209{ 211{
210 struct crypto_test_param *param = data; 212 struct crypto_test_param *param = data;
@@ -265,6 +267,7 @@ err_put_module:
265err: 267err:
266 return NOTIFY_OK; 268 return NOTIFY_OK;
267} 269}
270#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
268 271
269static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, 272static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
270 void *data) 273 void *data)
@@ -272,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
272 switch (msg) { 275 switch (msg) {
273 case CRYPTO_MSG_ALG_REQUEST: 276 case CRYPTO_MSG_ALG_REQUEST:
274 return cryptomgr_schedule_probe(data); 277 return cryptomgr_schedule_probe(data);
278#ifdef CONFIG_CRYPTO_MANAGER_TESTS
275 case CRYPTO_MSG_ALG_REGISTER: 279 case CRYPTO_MSG_ALG_REGISTER:
276 return cryptomgr_schedule_test(data); 280 return cryptomgr_schedule_test(data);
281#endif
277 } 282 }
278 283
279 return NOTIFY_DONE; 284 return NOTIFY_DONE;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 3aa6e3834bfe..2bc332142849 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2,
85 * Returns DEFAULT_BLK_SZ bytes of random data per call 85 * Returns DEFAULT_BLK_SZ bytes of random data per call
86 * returns 0 if generation succeded, <0 if something went wrong 86 * returns 0 if generation succeded, <0 if something went wrong
87 */ 87 */
88static int _get_more_prng_bytes(struct prng_context *ctx) 88static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
89{ 89{
90 int i; 90 int i;
91 unsigned char tmp[DEFAULT_BLK_SZ]; 91 unsigned char tmp[DEFAULT_BLK_SZ];
@@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
132 */ 132 */
133 if (!memcmp(ctx->rand_data, ctx->last_rand_data, 133 if (!memcmp(ctx->rand_data, ctx->last_rand_data,
134 DEFAULT_BLK_SZ)) { 134 DEFAULT_BLK_SZ)) {
135 if (fips_enabled) { 135 if (cont_test) {
136 panic("cprng %p Failed repetition check!\n", 136 panic("cprng %p Failed repetition check!\n",
137 ctx); 137 ctx);
138 } 138 }
@@ -185,16 +185,14 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
185} 185}
186 186
187/* Our exported functions */ 187/* Our exported functions */
188static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) 188static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
189 int do_cont_test)
189{ 190{
190 unsigned char *ptr = buf; 191 unsigned char *ptr = buf;
191 unsigned int byte_count = (unsigned int)nbytes; 192 unsigned int byte_count = (unsigned int)nbytes;
192 int err; 193 int err;
193 194
194 195
195 if (nbytes < 0)
196 return -EINVAL;
197
198 spin_lock_bh(&ctx->prng_lock); 196 spin_lock_bh(&ctx->prng_lock);
199 197
200 err = -EINVAL; 198 err = -EINVAL;
@@ -220,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
220 218
221remainder: 219remainder:
222 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { 220 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
223 if (_get_more_prng_bytes(ctx) < 0) { 221 if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
224 memset(buf, 0, nbytes); 222 memset(buf, 0, nbytes);
225 err = -EINVAL; 223 err = -EINVAL;
226 goto done; 224 goto done;
@@ -247,7 +245,7 @@ empty_rbuf:
247 */ 245 */
248 for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { 246 for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
249 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { 247 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
250 if (_get_more_prng_bytes(ctx) < 0) { 248 if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
251 memset(buf, 0, nbytes); 249 memset(buf, 0, nbytes);
252 err = -EINVAL; 250 err = -EINVAL;
253 goto done; 251 goto done;
@@ -356,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
356{ 354{
357 struct prng_context *prng = crypto_rng_ctx(tfm); 355 struct prng_context *prng = crypto_rng_ctx(tfm);
358 356
359 return get_prng_bytes(rdata, dlen, prng); 357 return get_prng_bytes(rdata, dlen, prng, 0);
360} 358}
361 359
362/* 360/*
@@ -404,19 +402,79 @@ static struct crypto_alg rng_alg = {
404 } 402 }
405}; 403};
406 404
405#ifdef CONFIG_CRYPTO_FIPS
406static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
407 unsigned int dlen)
408{
409 struct prng_context *prng = crypto_rng_ctx(tfm);
410
411 return get_prng_bytes(rdata, dlen, prng, 1);
412}
413
414static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
415{
416 u8 rdata[DEFAULT_BLK_SZ];
417 int rc;
418
419 struct prng_context *prng = crypto_rng_ctx(tfm);
420
421 rc = cprng_reset(tfm, seed, slen);
422
423 if (!rc)
424 goto out;
425
426 /* this primes our continuity test */
427 rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
428 prng->rand_data_valid = DEFAULT_BLK_SZ;
429
430out:
431 return rc;
432}
433
434static struct crypto_alg fips_rng_alg = {
435 .cra_name = "fips(ansi_cprng)",
436 .cra_driver_name = "fips_ansi_cprng",
437 .cra_priority = 300,
438 .cra_flags = CRYPTO_ALG_TYPE_RNG,
439 .cra_ctxsize = sizeof(struct prng_context),
440 .cra_type = &crypto_rng_type,
441 .cra_module = THIS_MODULE,
442 .cra_list = LIST_HEAD_INIT(rng_alg.cra_list),
443 .cra_init = cprng_init,
444 .cra_exit = cprng_exit,
445 .cra_u = {
446 .rng = {
447 .rng_make_random = fips_cprng_get_random,
448 .rng_reset = fips_cprng_reset,
449 .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
450 }
451 }
452};
453#endif
407 454
408/* Module initalization */ 455/* Module initalization */
409static int __init prng_mod_init(void) 456static int __init prng_mod_init(void)
410{ 457{
411 if (fips_enabled) 458 int rc = 0;
412 rng_alg.cra_priority += 200;
413 459
414 return crypto_register_alg(&rng_alg); 460 rc = crypto_register_alg(&rng_alg);
461#ifdef CONFIG_CRYPTO_FIPS
462 if (rc)
463 goto out;
464
465 rc = crypto_register_alg(&fips_rng_alg);
466
467out:
468#endif
469 return rc;
415} 470}
416 471
417static void __exit prng_mod_fini(void) 472static void __exit prng_mod_fini(void)
418{ 473{
419 crypto_unregister_alg(&rng_alg); 474 crypto_unregister_alg(&rng_alg);
475#ifdef CONFIG_CRYPTO_FIPS
476 crypto_unregister_alg(&fips_rng_alg);
477#endif
420 return; 478 return;
421} 479}
422 480
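
Beyond plain ansi_cprng, the hunk registers a second instance whose generate path runs the X9.31 continuity check on every block, and whose reset primes that check by generating and discarding one block. A minimal sketch (not part of this patch) of selecting it by name:

#include <linux/err.h>
#include <crypto/rng.h>

/* Sketch only: the FIPS variant is reached through its cra_name.  Per
 * the .seedsize above, it must be reseeded with
 * DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ bytes before use. */
static struct crypto_rng *demo_get_fips_cprng(void)
{
	struct crypto_rng *rng = crypto_alloc_rng("fips(ansi_cprng)", 0, 0);

	return IS_ERR(rng) ? NULL : rng;
}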
diff --git a/crypto/anubis.c b/crypto/anubis.c
index e42c3a8ba4aa..77530d571c96 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -469,14 +469,13 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
469 u32 kappa[ANUBIS_MAX_N]; 469 u32 kappa[ANUBIS_MAX_N];
470 u32 inter[ANUBIS_MAX_N]; 470 u32 inter[ANUBIS_MAX_N];
471 471
472 switch (key_len) 472 switch (key_len) {
473 {
474 case 16: case 20: case 24: case 28: 473 case 16: case 20: case 24: case 28:
475 case 32: case 36: case 40: 474 case 32: case 36: case 40:
476 break; 475 break;
477 default: 476 default:
478 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 477 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
479 return - EINVAL; 478 return -EINVAL;
480 } 479 }
481 480
482 ctx->key_len = key_len * 8; 481 ctx->key_len = key_len * 8;
@@ -530,23 +529,24 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
530 /* 529 /*
531 * compute kappa^{r+1} from kappa^r: 530 * compute kappa^{r+1} from kappa^r:
532 */ 531 */
533 if (r == R) { 532 if (r == R)
534 break; 533 break;
535 }
536 for (i = 0; i < N; i++) { 534 for (i = 0; i < N; i++) {
537 int j = i; 535 int j = i;
538 inter[i] = T0[(kappa[j--] >> 24) ]; 536 inter[i] = T0[(kappa[j--] >> 24) ];
539 if (j < 0) j = N - 1; 537 if (j < 0)
538 j = N - 1;
540 inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; 539 inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
541 if (j < 0) j = N - 1; 540 if (j < 0)
541 j = N - 1;
542 inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; 542 inter[i] ^= T2[(kappa[j--] >> 8) & 0xff];
543 if (j < 0) j = N - 1; 543 if (j < 0)
544 j = N - 1;
544 inter[i] ^= T3[(kappa[j ] ) & 0xff]; 545 inter[i] ^= T3[(kappa[j ] ) & 0xff];
545 } 546 }
546 kappa[0] = inter[0] ^ rc[r]; 547 kappa[0] = inter[0] ^ rc[r];
547 for (i = 1; i < N; i++) { 548 for (i = 1; i < N; i++)
548 kappa[i] = inter[i]; 549 kappa[i] = inter[i];
549 }
550 } 550 }
551 551
552 /* 552 /*
@@ -690,7 +690,7 @@ static struct crypto_alg anubis_alg = {
690static int __init anubis_mod_init(void) 690static int __init anubis_mod_init(void)
691{ 691{
692 int ret = 0; 692 int ret = 0;
693 693
694 ret = crypto_register_alg(&anubis_alg); 694 ret = crypto_register_alg(&anubis_alg);
695 return ret; 695 return ret;
696} 696}
diff --git a/crypto/api.c b/crypto/api.c
index 798526d90538..033a7147e5eb 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -10,7 +10,7 @@
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free 12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option) 13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 */ 16 */
@@ -288,11 +288,11 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
288 288
289 case CRYPTO_ALG_TYPE_COMPRESS: 289 case CRYPTO_ALG_TYPE_COMPRESS:
290 return crypto_init_compress_ops(tfm); 290 return crypto_init_compress_ops(tfm);
291 291
292 default: 292 default:
293 break; 293 break;
294 } 294 }
295 295
296 BUG(); 296 BUG();
297 return -EINVAL; 297 return -EINVAL;
298} 298}
@@ -315,10 +315,9 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
315 case CRYPTO_ALG_TYPE_COMPRESS: 315 case CRYPTO_ALG_TYPE_COMPRESS:
316 crypto_exit_compress_ops(tfm); 316 crypto_exit_compress_ops(tfm);
317 break; 317 break;
318 318
319 default: 319 default:
320 BUG(); 320 BUG();
321
322 } 321 }
323} 322}
324 323
@@ -593,12 +592,12 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
593{ 592{
594 int ret = 0; 593 int ret = 0;
595 struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); 594 struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
596 595
597 if (!IS_ERR(alg)) { 596 if (!IS_ERR(alg)) {
598 crypto_mod_put(alg); 597 crypto_mod_put(alg);
599 ret = 1; 598 ret = 1;
600 } 599 }
601 600
602 return ret; 601 return ret;
603} 602}
604EXPORT_SYMBOL_GPL(crypto_has_alg); 603EXPORT_SYMBOL_GPL(crypto_has_alg);
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e24aa80087ad..5de2ed13b35d 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -37,3 +37,8 @@ config ASYNC_RAID6_TEST
37 37
38 If unsure, say N. 38 If unsure, say N.
39 39
40config ASYNC_TX_DISABLE_PQ_VAL_DMA
41 bool
42
43config ASYNC_TX_DISABLE_XOR_VAL_DMA
44 bool
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..fdd8257d35d9 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -24,16 +24,13 @@
24#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/raid/pq.h> 25#include <linux/raid/pq.h>
26#include <linux/async_tx.h> 26#include <linux/async_tx.h>
27#include <linux/gfp.h>
27 28
28/** 29/**
29 * scribble - space to hold throwaway P buffer for synchronous gen_syndrome 30 * pq_scribble_page - space to hold throwaway P or Q buffer for
31 * synchronous gen_syndrome
30 */ 32 */
31static struct page *scribble; 33static struct page *pq_scribble_page;
32
33static bool is_raid6_zero_block(struct page *p)
34{
35 return p == (void *) raid6_empty_zero_page;
36}
37 34
38/* the struct page *blocks[] parameter passed to async_gen_syndrome() 35/* the struct page *blocks[] parameter passed to async_gen_syndrome()
39 * and async_syndrome_val() contains the 'P' destination address at 36 * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +80,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
83 * sources and update the coefficients accordingly 80 * sources and update the coefficients accordingly
84 */ 81 */
85 for (i = 0, idx = 0; i < src_cnt; i++) { 82 for (i = 0, idx = 0; i < src_cnt; i++) {
86 if (is_raid6_zero_block(blocks[i])) 83 if (blocks[i] == NULL)
87 continue; 84 continue;
88 dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, 85 dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
89 DMA_TO_DEVICE); 86 DMA_TO_DEVICE);
@@ -160,9 +157,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
160 srcs = (void **) blocks; 157 srcs = (void **) blocks;
161 158
162 for (i = 0; i < disks; i++) { 159 for (i = 0; i < disks; i++) {
163 if (is_raid6_zero_block(blocks[i])) { 160 if (blocks[i] == NULL) {
164 BUG_ON(i > disks - 3); /* P or Q can't be zero */ 161 BUG_ON(i > disks - 3); /* P or Q can't be zero */
165 srcs[i] = blocks[i]; 162 srcs[i] = (void*)raid6_empty_zero_page;
166 } else 163 } else
167 srcs[i] = page_address(blocks[i]) + offset; 164 srcs[i] = page_address(blocks[i]) + offset;
168 } 165 }
@@ -186,10 +183,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
186 * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= 183 * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
187 * PAGE_SIZE as a temporary buffer of this size is used in the 184 * PAGE_SIZE as a temporary buffer of this size is used in the
188 * synchronous path. 'disks' always accounts for both destination 185 * synchronous path. 'disks' always accounts for both destination
189 * buffers. 186 * buffers. If any source buffers (blocks[i] where i < disks - 2) are
187 * set to NULL those buffers will be replaced with the raid6_zero_page
188 * in the synchronous path and omitted in the hardware-asynchronous
189 * path.
190 * 190 *
191 * 'blocks' note: if submit->scribble is NULL then the contents of 191 * 'blocks' note: if submit->scribble is NULL then the contents of
192 * 'blocks' may be overridden 192 * 'blocks' may be overwritten to perform address conversions
193 * (dma_map_page() or page_address()).
193 */ 194 */
194struct dma_async_tx_descriptor * 195struct dma_async_tx_descriptor *
195async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, 196async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +228,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
227 async_tx_quiesce(&submit->depend_tx); 228 async_tx_quiesce(&submit->depend_tx);
228 229
229 if (!P(blocks, disks)) { 230 if (!P(blocks, disks)) {
230 P(blocks, disks) = scribble; 231 P(blocks, disks) = pq_scribble_page;
231 BUG_ON(len + offset > PAGE_SIZE); 232 BUG_ON(len + offset > PAGE_SIZE);
232 } 233 }
233 if (!Q(blocks, disks)) { 234 if (!Q(blocks, disks)) {
234 Q(blocks, disks) = scribble; 235 Q(blocks, disks) = pq_scribble_page;
235 BUG_ON(len + offset > PAGE_SIZE); 236 BUG_ON(len + offset > PAGE_SIZE);
236 } 237 }
237 do_sync_gen_syndrome(blocks, offset, disks, len, submit); 238 do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -240,6 +241,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
240} 241}
241EXPORT_SYMBOL_GPL(async_gen_syndrome); 242EXPORT_SYMBOL_GPL(async_gen_syndrome);
242 243
244static inline struct dma_chan *
245pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
246{
247 #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
248 return NULL;
249 #endif
250 return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
251 disks, len);
252}
253
243/** 254/**
244 * async_syndrome_val - asynchronously validate a raid6 syndrome 255 * async_syndrome_val - asynchronously validate a raid6 syndrome
245 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 256 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,13 +271,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
260 size_t len, enum sum_check_flags *pqres, struct page *spare, 271 size_t len, enum sum_check_flags *pqres, struct page *spare,
261 struct async_submit_ctl *submit) 272 struct async_submit_ctl *submit)
262{ 273{
263 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL, 274 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
264 NULL, 0, blocks, disks,
265 len);
266 struct dma_device *device = chan ? chan->device : NULL; 275 struct dma_device *device = chan ? chan->device : NULL;
267 struct dma_async_tx_descriptor *tx; 276 struct dma_async_tx_descriptor *tx;
277 unsigned char coefs[disks-2];
268 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; 278 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
269 dma_addr_t *dma_src = NULL; 279 dma_addr_t *dma_src = NULL;
280 int src_cnt = 0;
270 281
271 BUG_ON(disks < 4); 282 BUG_ON(disks < 4);
272 283
@@ -285,22 +296,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
285 __func__, disks, len); 296 __func__, disks, len);
286 if (!P(blocks, disks)) 297 if (!P(blocks, disks))
287 dma_flags |= DMA_PREP_PQ_DISABLE_P; 298 dma_flags |= DMA_PREP_PQ_DISABLE_P;
299 else
300 pq[0] = dma_map_page(dev, P(blocks, disks),
301 offset, len,
302 DMA_TO_DEVICE);
288 if (!Q(blocks, disks)) 303 if (!Q(blocks, disks))
289 dma_flags |= DMA_PREP_PQ_DISABLE_Q; 304 dma_flags |= DMA_PREP_PQ_DISABLE_Q;
305 else
306 pq[1] = dma_map_page(dev, Q(blocks, disks),
307 offset, len,
308 DMA_TO_DEVICE);
309
290 if (submit->flags & ASYNC_TX_FENCE) 310 if (submit->flags & ASYNC_TX_FENCE)
291 dma_flags |= DMA_PREP_FENCE; 311 dma_flags |= DMA_PREP_FENCE;
292 for (i = 0; i < disks; i++) 312 for (i = 0; i < disks-2; i++)
293 if (likely(blocks[i])) { 313 if (likely(blocks[i])) {
294 BUG_ON(is_raid6_zero_block(blocks[i])); 314 dma_src[src_cnt] = dma_map_page(dev, blocks[i],
295 dma_src[i] = dma_map_page(dev, blocks[i], 315 offset, len,
296 offset, len, 316 DMA_TO_DEVICE);
297 DMA_TO_DEVICE); 317 coefs[src_cnt] = raid6_gfexp[i];
318 src_cnt++;
298 } 319 }
299 320
300 for (;;) { 321 for (;;) {
301 tx = device->device_prep_dma_pq_val(chan, pq, dma_src, 322 tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
302 disks - 2, 323 src_cnt,
303 raid6_gfexp, 324 coefs,
304 len, pqres, 325 len, pqres,
305 dma_flags); 326 dma_flags);
306 if (likely(tx)) 327 if (likely(tx))
@@ -373,9 +394,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
373 394
374static int __init async_pq_init(void) 395static int __init async_pq_init(void)
375{ 396{
376 scribble = alloc_page(GFP_KERNEL); 397 pq_scribble_page = alloc_page(GFP_KERNEL);
377 398
378 if (scribble) 399 if (pq_scribble_page)
379 return 0; 400 return 0;
380 401
381 pr_err("%s: failed to allocate required spare page\n", __func__); 402 pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +406,7 @@ static int __init async_pq_init(void)
385 406
386static void __exit async_pq_exit(void) 407static void __exit async_pq_exit(void)
387{ 408{
388 put_page(scribble); 409 put_page(pq_scribble_page);
389} 410}
390 411
391module_init(async_pq_init); 412module_init(async_pq_init);
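
The updated kernel-doc above spells out the blocks[] convention: data sources at indices 0..disks-3, P at disks-2, Q at disks-1; a NULL source stands for a known-zero block (substituted with the zero page on the synchronous path, omitted on the DMA path), and a NULL P or Q destination disables that output. A sketch of a call following that convention, with pages[] assumed to be caller-allocated:

#include <linux/async_tx.h>

/* Sketch only: 6-disk layout (4 data + P + Q); blocks[1] is NULL to
 * mark a known-zero data block, per the convention documented above. */
static struct dma_async_tx_descriptor *
demo_gen_syndrome(struct page **pages, struct async_submit_ctl *submit)
{
	struct page *blocks[6];

	blocks[0] = pages[0];
	blocks[1] = NULL;	/* known-zero source */
	blocks[2] = pages[2];
	blocks[3] = pages[3];
	blocks[4] = pages[4];	/* P destination */
	blocks[5] = pages[5];	/* Q destination */

	return async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, submit);
}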
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 6d73dde4786d..ce038d861eb9 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
131} 131}
132 132
133static struct dma_async_tx_descriptor * 133static struct dma_async_tx_descriptor *
134__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, 134__2data_recov_4(int disks, size_t bytes, int faila, int failb,
135 struct async_submit_ctl *submit) 135 struct page **blocks, struct async_submit_ctl *submit)
136{ 136{
137 struct dma_async_tx_descriptor *tx = NULL; 137 struct dma_async_tx_descriptor *tx = NULL;
138 struct page *p, *q, *a, *b; 138 struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
143 void *cb_param = submit->cb_param; 143 void *cb_param = submit->cb_param;
144 void *scribble = submit->scribble; 144 void *scribble = submit->scribble;
145 145
146 p = blocks[4-2]; 146 p = blocks[disks-2];
147 q = blocks[4-1]; 147 q = blocks[disks-1];
148 148
149 a = blocks[faila]; 149 a = blocks[faila];
150 b = blocks[failb]; 150 b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
170} 170}
171 171
172static struct dma_async_tx_descriptor * 172static struct dma_async_tx_descriptor *
173__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, 173__2data_recov_5(int disks, size_t bytes, int faila, int failb,
174 struct async_submit_ctl *submit) 174 struct page **blocks, struct async_submit_ctl *submit)
175{ 175{
176 struct dma_async_tx_descriptor *tx = NULL; 176 struct dma_async_tx_descriptor *tx = NULL;
177 struct page *p, *q, *g, *dp, *dq; 177 struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
181 dma_async_tx_callback cb_fn = submit->cb_fn; 181 dma_async_tx_callback cb_fn = submit->cb_fn;
182 void *cb_param = submit->cb_param; 182 void *cb_param = submit->cb_param;
183 void *scribble = submit->scribble; 183 void *scribble = submit->scribble;
184 int uninitialized_var(good); 184 int good_srcs, good, i;
185 int i;
186 185
187 for (i = 0; i < 3; i++) { 186 good_srcs = 0;
187 good = -1;
188 for (i = 0; i < disks-2; i++) {
189 if (blocks[i] == NULL)
190 continue;
188 if (i == faila || i == failb) 191 if (i == faila || i == failb)
189 continue; 192 continue;
190 else { 193 good = i;
191 good = i; 194 good_srcs++;
192 break;
193 }
194 } 195 }
195 BUG_ON(i >= 3); 196 BUG_ON(good_srcs > 1);
196 197
197 p = blocks[5-2]; 198 p = blocks[disks-2];
198 q = blocks[5-1]; 199 q = blocks[disks-1];
199 g = blocks[good]; 200 g = blocks[good];
200 201
201 /* Compute syndrome with zero for the missing data pages 202 /* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
263 * delta p and delta q 264 * delta p and delta q
264 */ 265 */
265 dp = blocks[faila]; 266 dp = blocks[faila];
266 blocks[faila] = (void *)raid6_empty_zero_page; 267 blocks[faila] = NULL;
267 blocks[disks-2] = dp; 268 blocks[disks-2] = dp;
268 dq = blocks[failb]; 269 dq = blocks[failb];
269 blocks[failb] = (void *)raid6_empty_zero_page; 270 blocks[failb] = NULL;
270 blocks[disks-1] = dq; 271 blocks[disks-1] = dq;
271 272
272 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); 273 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,22 +324,29 @@ struct dma_async_tx_descriptor *
323async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, 324async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
324 struct page **blocks, struct async_submit_ctl *submit) 325 struct page **blocks, struct async_submit_ctl *submit)
325{ 326{
327 void *scribble = submit->scribble;
328 int non_zero_srcs, i;
329
326 BUG_ON(faila == failb); 330 BUG_ON(faila == failb);
327 if (failb < faila) 331 if (failb < faila)
328 swap(faila, failb); 332 swap(faila, failb);
329 333
330 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); 334 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
331 335
332 /* we need to preserve the contents of 'blocks' for the async 336 /* if a dma resource is not available or a scribble buffer is not
333 * case, so punt to synchronous if a scribble buffer is not available 337 * available punt to the synchronous path. In the 'dma not
338 * available' case be sure to use the scribble buffer to
339 * preserve the content of 'blocks' as the caller intended.
334 */ 340 */
335 if (!submit->scribble) { 341 if (!async_dma_find_channel(DMA_PQ) || !scribble) {
336 void **ptrs = (void **) blocks; 342 void **ptrs = scribble ? scribble : (void **) blocks;
337 int i;
338 343
339 async_tx_quiesce(&submit->depend_tx); 344 async_tx_quiesce(&submit->depend_tx);
340 for (i = 0; i < disks; i++) 345 for (i = 0; i < disks; i++)
341 ptrs[i] = page_address(blocks[i]); 346 if (blocks[i] == NULL)
347 ptrs[i] = (void *) raid6_empty_zero_page;
348 else
349 ptrs[i] = page_address(blocks[i]);
342 350
343 raid6_2data_recov(disks, bytes, faila, failb, ptrs); 351 raid6_2data_recov(disks, bytes, faila, failb, ptrs);
344 352
@@ -347,19 +355,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
347 return NULL; 355 return NULL;
348 } 356 }
349 357
350 switch (disks) { 358 non_zero_srcs = 0;
351 case 4: 359 for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
360 if (blocks[i])
361 non_zero_srcs++;
362 switch (non_zero_srcs) {
363 case 0:
364 case 1:
365 /* There must be at least 2 sources - the failed devices. */
366 BUG();
367
368 case 2:
352 /* dma devices do not uniformly understand a zero source pq 369 /* dma devices do not uniformly understand a zero source pq
353 * operation (in contrast to the synchronous case), so 370 * operation (in contrast to the synchronous case), so
354 * explicitly handle the 4 disk special case 371 * explicitly handle the special case of a 4 disk array with
372 * both data disks missing.
355 */ 373 */
356 return __2data_recov_4(bytes, faila, failb, blocks, submit); 374 return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
357 case 5: 375 case 3:
358 /* dma devices do not uniformly understand a single 376 /* dma devices do not uniformly understand a single
359 * source pq operation (in contrast to the synchronous 377 * source pq operation (in contrast to the synchronous
360 * case), so explicitly handle the 5 disk special case 378 * case), so explicitly handle the special case of a 5 disk
379 * array with 2 of 3 data disks missing.
361 */ 380 */
362 return __2data_recov_5(bytes, faila, failb, blocks, submit); 381 return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
363 default: 382 default:
364 return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); 383 return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
365 } 384 }
@@ -385,20 +404,25 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
385 dma_async_tx_callback cb_fn = submit->cb_fn; 404 dma_async_tx_callback cb_fn = submit->cb_fn;
386 void *cb_param = submit->cb_param; 405 void *cb_param = submit->cb_param;
387 void *scribble = submit->scribble; 406 void *scribble = submit->scribble;
407 int good_srcs, good, i;
388 struct page *srcs[2]; 408 struct page *srcs[2];
389 409
390 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); 410 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
391 411
392 /* we need to preserve the contents of 'blocks' for the async 412 /* if a dma resource is not available or a scribble buffer is not
393 * case, so punt to synchronous if a scribble buffer is not available 413 * available punt to the synchronous path. In the 'dma not
414 * available' case be sure to use the scribble buffer to
415 * preserve the content of 'blocks' as the caller intended.
394 */ 416 */
395 if (!scribble) { 417 if (!async_dma_find_channel(DMA_PQ) || !scribble) {
396 void **ptrs = (void **) blocks; 418 void **ptrs = scribble ? scribble : (void **) blocks;
397 int i;
398 419
399 async_tx_quiesce(&submit->depend_tx); 420 async_tx_quiesce(&submit->depend_tx);
400 for (i = 0; i < disks; i++) 421 for (i = 0; i < disks; i++)
401 ptrs[i] = page_address(blocks[i]); 422 if (blocks[i] == NULL)
423 ptrs[i] = (void*)raid6_empty_zero_page;
424 else
425 ptrs[i] = page_address(blocks[i]);
402 426
403 raid6_datap_recov(disks, bytes, faila, ptrs); 427 raid6_datap_recov(disks, bytes, faila, ptrs);
404 428
@@ -407,6 +431,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
407 return NULL; 431 return NULL;
408 } 432 }
409 433
434 good_srcs = 0;
435 good = -1;
436 for (i = 0; i < disks-2; i++) {
437 if (i == faila)
438 continue;
439 if (blocks[i]) {
440 good = i;
441 good_srcs++;
442 if (good_srcs > 1)
443 break;
444 }
445 }
446 BUG_ON(good_srcs == 0);
447
410 p = blocks[disks-2]; 448 p = blocks[disks-2];
411 q = blocks[disks-1]; 449 q = blocks[disks-1];
412 450
@@ -414,14 +452,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
414 * Use the dead data page as temporary storage for delta q 452 * Use the dead data page as temporary storage for delta q
415 */ 453 */
416 dq = blocks[faila]; 454 dq = blocks[faila];
417 blocks[faila] = (void *)raid6_empty_zero_page; 455 blocks[faila] = NULL;
418 blocks[disks-1] = dq; 456 blocks[disks-1] = dq;
419 457
420 /* in the 4 disk case we only need to perform a single source 458 /* in the 4-disk case we only need to perform a single source
421 * multiplication 459 * multiplication with the one good data block.
422 */ 460 */
423 if (disks == 4) { 461 if (good_srcs == 1) {
424 int good = faila == 0 ? 1 : 0;
425 struct page *g = blocks[good]; 462 struct page *g = blocks[good];
426 463
427 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, 464 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f9cdf04fe7c0..7f2c00a45205 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
81 struct dma_device *device = chan->device; 81 struct dma_device *device = chan->device;
82 struct dma_async_tx_descriptor *intr_tx = (void *) ~0; 82 struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
83 83
84 #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
85 BUG();
86 #endif
87
88 /* first check to see if we can still append to depend_tx */ 84 /* first check to see if we can still append to depend_tx */
89 spin_lock_bh(&depend_tx->lock); 85 txd_lock(depend_tx);
90 if (depend_tx->parent && depend_tx->chan == tx->chan) { 86 if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
91 tx->parent = depend_tx; 87 txd_chain(depend_tx, tx);
92 depend_tx->next = tx;
93 intr_tx = NULL; 88 intr_tx = NULL;
94 } 89 }
95 spin_unlock_bh(&depend_tx->lock); 90 txd_unlock(depend_tx);
96 91
97 /* attached dependency, flush the parent channel */ 92 /* attached dependency, flush the parent channel */
98 if (!intr_tx) { 93 if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
111 if (intr_tx) { 106 if (intr_tx) {
112 intr_tx->callback = NULL; 107 intr_tx->callback = NULL;
113 intr_tx->callback_param = NULL; 108 intr_tx->callback_param = NULL;
114 tx->parent = intr_tx; 109 /* safe to chain outside the lock since we know we are
115 /* safe to set ->next outside the lock since we know we are
116 * not submitted yet 110 * not submitted yet
117 */ 111 */
118 intr_tx->next = tx; 112 txd_chain(intr_tx, tx);
119 113
120 /* check if we need to append */ 114 /* check if we need to append */
121 spin_lock_bh(&depend_tx->lock); 115 txd_lock(depend_tx);
122 if (depend_tx->parent) { 116 if (txd_parent(depend_tx)) {
123 intr_tx->parent = depend_tx; 117 txd_chain(depend_tx, intr_tx);
124 depend_tx->next = intr_tx;
125 async_tx_ack(intr_tx); 118 async_tx_ack(intr_tx);
126 intr_tx = NULL; 119 intr_tx = NULL;
127 } 120 }
128 spin_unlock_bh(&depend_tx->lock); 121 txd_unlock(depend_tx);
129 122
130 if (intr_tx) { 123 if (intr_tx) {
131 intr_tx->parent = NULL; 124 txd_clear_parent(intr_tx);
132 intr_tx->tx_submit(intr_tx); 125 intr_tx->tx_submit(intr_tx);
133 async_tx_ack(intr_tx); 126 async_tx_ack(intr_tx);
134 } 127 }
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
176 * 2/ dependencies are 1:1 i.e. two transactions can 169 * 2/ dependencies are 1:1 i.e. two transactions can
177 * not depend on the same parent 170 * not depend on the same parent
178 */ 171 */
179 BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || 172 BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
180 tx->parent); 173 txd_parent(tx));
181 174
182 /* the lock prevents async_tx_run_dependencies from missing 175 /* the lock prevents async_tx_run_dependencies from missing
183 * the setting of ->next when ->parent != NULL 176 * the setting of ->next when ->parent != NULL
184 */ 177 */
185 spin_lock_bh(&depend_tx->lock); 178 txd_lock(depend_tx);
186 if (depend_tx->parent) { 179 if (txd_parent(depend_tx)) {
187 /* we have a parent so we can not submit directly 180 /* we have a parent so we can not submit directly
188 * if we are staying on the same channel: append 181 * if we are staying on the same channel: append
189 * else: channel switch 182 * else: channel switch
190 */ 183 */
191 if (depend_tx->chan == chan) { 184 if (depend_tx->chan == chan) {
192 tx->parent = depend_tx; 185 txd_chain(depend_tx, tx);
193 depend_tx->next = tx;
194 s = ASYNC_TX_SUBMITTED; 186 s = ASYNC_TX_SUBMITTED;
195 } else 187 } else
196 s = ASYNC_TX_CHANNEL_SWITCH; 188 s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
203 else 195 else
204 s = ASYNC_TX_CHANNEL_SWITCH; 196 s = ASYNC_TX_CHANNEL_SWITCH;
205 } 197 }
206 spin_unlock_bh(&depend_tx->lock); 198 txd_unlock(depend_tx);
207 199
208 switch (s) { 200 switch (s) {
209 case ASYNC_TX_SUBMITTED: 201 case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
212 async_tx_channel_switch(depend_tx, tx); 204 async_tx_channel_switch(depend_tx, tx);
213 break; 205 break;
214 case ASYNC_TX_DIRECT_SUBMIT: 206 case ASYNC_TX_DIRECT_SUBMIT:
215 tx->parent = NULL; 207 txd_clear_parent(tx);
216 tx->tx_submit(tx); 208 tx->tx_submit(tx);
217 break; 209 break;
218 } 210 }
219 } else { 211 } else {
220 tx->parent = NULL; 212 txd_clear_parent(tx);
221 tx->tx_submit(tx); 213 tx->tx_submit(tx);
222 } 214 }
223 215
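
The txd_*() accessors above replace open-coded manipulation of the descriptor's parent/next/lock fields. Inferred from the sequences they replace in this hunk (the real definitions live in the dmaengine headers and compile away differently when channel switching is disabled), the chaining helper amounts to:

/* Sketch, not the real definition: what txd_chain() stands for,
 * judging by the two assignments it replaces above. */
static inline void demo_txd_chain(struct dma_async_tx_descriptor *parent,
				  struct dma_async_tx_descriptor *tx)
{
	tx->parent = parent;	/* was: tx->parent = depend_tx; */
	parent->next = tx;	/* was: depend_tx->next = tx;   */
}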
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index b459a9034aac..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
44 void *cb_param_orig = submit->cb_param; 44 void *cb_param_orig = submit->cb_param;
45 enum async_tx_flags flags_orig = submit->flags; 45 enum async_tx_flags flags_orig = submit->flags;
46 enum dma_ctrl_flags dma_flags; 46 enum dma_ctrl_flags dma_flags;
47 int xor_src_cnt; 47 int xor_src_cnt = 0;
48 dma_addr_t dma_dest; 48 dma_addr_t dma_dest;
49 49
50 /* map the dest bidrectional in case it is re-used as a source */ 50 /* map the dest bidrectional in case it is re-used as a source */
51 dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); 51 dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
52 for (i = 0; i < src_cnt; i++) { 52 for (i = 0; i < src_cnt; i++) {
53 /* only map the dest once */ 53 /* only map the dest once */
54 if (!src_list[i])
55 continue;
54 if (unlikely(src_list[i] == dest)) { 56 if (unlikely(src_list[i] == dest)) {
55 dma_src[i] = dma_dest; 57 dma_src[xor_src_cnt++] = dma_dest;
56 continue; 58 continue;
57 } 59 }
58 dma_src[i] = dma_map_page(dma->dev, src_list[i], offset, 60 dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
59 len, DMA_TO_DEVICE); 61 len, DMA_TO_DEVICE);
60 } 62 }
63 src_cnt = xor_src_cnt;
61 64
62 while (src_cnt) { 65 while (src_cnt) {
63 submit->flags = flags_orig; 66 submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
123 int src_cnt, size_t len, struct async_submit_ctl *submit) 126 int src_cnt, size_t len, struct async_submit_ctl *submit)
124{ 127{
125 int i; 128 int i;
126 int xor_src_cnt; 129 int xor_src_cnt = 0;
127 int src_off = 0; 130 int src_off = 0;
128 void *dest_buf; 131 void *dest_buf;
129 void **srcs; 132 void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
135 138
136 /* convert to buffer pointers */ 139 /* convert to buffer pointers */
137 for (i = 0; i < src_cnt; i++) 140 for (i = 0; i < src_cnt; i++)
138 srcs[i] = page_address(src_list[i]) + offset; 141 if (src_list[i])
139 142 srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
143 src_cnt = xor_src_cnt;
140 /* set destination address */ 144 /* set destination address */
141 dest_buf = page_address(dest) + offset; 145 dest_buf = page_address(dest) + offset;
142 146
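
Both loops above now tolerate NULL entries in src_list, the holes RAID6 callers leave for absent P/Q blocks, by packing the surviving sources densely and shrinking src_cnt to match. The idiom in isolation (a hypothetical helper, not kernel API):

/* drop NULL pointers, return the new logical length */
static int pack_srcs(void **packed, void * const *src, int cnt)
{
	int i, n = 0;

	for (i = 0; i < cnt; i++)
		if (src[i])
			packed[n++] = src[i];	/* dense from index 0 */
	return n;
}

Because the count is rewritten along with the array, the chunked while (src_cnt) loop that follows never has to re-check for holes.
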
@@ -230,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
230 memcmp(a, a + 4, len - 4) == 0); 234 memcmp(a, a + 4, len - 4) == 0);
231} 235}
232 236
237static inline struct dma_chan *
238xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
239 struct page **src_list, int src_cnt, size_t len)
240{
241 #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
242 return NULL;
243 #endif
244 return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
245 src_cnt, len);
246}
247
233/** 248/**
234 * async_xor_val - attempt a xor parity check with a dma engine. 249 * async_xor_val - attempt a xor parity check with a dma engine.
235 * @dest: destination page used if the xor is performed synchronously 250 * @dest: destination page used if the xor is performed synchronously
@@ -251,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
251 int src_cnt, size_t len, enum sum_check_flags *result, 266 int src_cnt, size_t len, enum sum_check_flags *result,
252 struct async_submit_ctl *submit) 267 struct async_submit_ctl *submit)
253{ 268{
254 struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, 269 struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
255 &dest, 1, src_list,
256 src_cnt, len);
257 struct dma_device *device = chan ? chan->device : NULL; 270 struct dma_device *device = chan ? chan->device : NULL;
258 struct dma_async_tx_descriptor *tx = NULL; 271 struct dma_async_tx_descriptor *tx = NULL;
259 dma_addr_t *dma_src = NULL; 272 dma_addr_t *dma_src = NULL;
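
xor_val_chan() centralizes the channel choice for parity validation: with CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA set it returns NULL and async_xor_val() falls through to the synchronous path. The guard reduced to its essentials (function and config names here are illustrative):

static struct dma_chan *pick_chan(void)
{
#ifdef CONFIG_DISABLE_HW_PATH
	return NULL;		/* force the software fallback */
#endif
	return find_hw_chan();	/* may itself return NULL */
}

Returning NULL instead of #ifdef-ing every call site preserves a single control flow; "no channel" already means "do it in software" everywhere downstream.
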
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 3ec27c7e62ea..c1321935ebcc 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22#include <linux/async_tx.h> 22#include <linux/async_tx.h>
23#include <linux/gfp.h>
23#include <linux/random.h> 24#include <linux/random.h>
24 25
25#undef pr 26#undef pr
@@ -214,6 +215,13 @@ static int raid6_test(void)
214 err += test(4, &tests); 215 err += test(4, &tests);
215 if (NDISKS > 5) 216 if (NDISKS > 5)
216 err += test(5, &tests); 217 err += test(5, &tests);
218 /* the 11 and 12 disk cases are special for ioatdma (p-disabled
219 * q-continuation without extended descriptor)
220 */
221 if (NDISKS > 12) {
222 err += test(11, &tests);
223 err += test(12, &tests);
224 }
217 err += test(NDISKS, &tests); 225 err += test(NDISKS, &tests);
218 226
219 pr("\n"); 227 pr("\n");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4d6f49a5daeb..a5a22cfcd07b 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -46,6 +46,12 @@ struct authenc_request_ctx {
46 char tail[]; 46 char tail[];
47}; 47};
48 48
49static void authenc_request_complete(struct aead_request *req, int err)
50{
51 if (err != -EINPROGRESS)
52 aead_request_complete(req, err);
53}
54
49static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 55static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
50 unsigned int keylen) 56 unsigned int keylen)
51{ 57{
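
authenc_request_complete() filters the -EINPROGRESS notification a backlogged sub-request emits when it moves from the queue onto the hardware; reporting that to the AEAD user and then reporting the final result would complete the same request twice. The hazard in callback form (the callback name is illustrative):

static void my_cipher_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;

	/* -EINPROGRESS means "still running, real completion follows";
	 * completing req now would complete it a second time later */
	authenc_request_complete(req, err);
}

Every completion path below is converted to the helper for this reason.
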
@@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
142 crypto_aead_authsize(authenc), 1); 148 crypto_aead_authsize(authenc), 1);
143 149
144out: 150out:
145 aead_request_complete(req, err); 151 authenc_request_complete(req, err);
146} 152}
147 153
148static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) 154static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
@@ -175,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
175 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 181 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
176 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); 182 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
177 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); 183 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
184 unsigned int cryptlen = req->cryptlen;
178 185
179 if (err) 186 if (err)
180 goto out; 187 goto out;
@@ -190,11 +197,12 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
190 goto out; 197 goto out;
191 198
192 authsize = crypto_aead_authsize(authenc); 199 authsize = crypto_aead_authsize(authenc);
200 cryptlen -= authsize;
193 ihash = ahreq->result + authsize; 201 ihash = ahreq->result + authsize;
194 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 202 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
195 authsize, 0); 203 authsize, 0);
196 204
197 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; 205 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
198 if (err) 206 if (err)
199 goto out; 207 goto out;
200 208
@@ -203,12 +211,12 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
203 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 211 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
204 req->base.complete, req->base.data); 212 req->base.complete, req->base.data);
205 ablkcipher_request_set_crypt(abreq, req->src, req->dst, 213 ablkcipher_request_set_crypt(abreq, req->src, req->dst,
206 req->cryptlen, req->iv); 214 cryptlen, req->iv);
207 215
208 err = crypto_ablkcipher_decrypt(abreq); 216 err = crypto_ablkcipher_decrypt(abreq);
209 217
210out: 218out:
211 aead_request_complete(req, err); 219 authenc_request_complete(req, err);
212} 220}
213 221
214static void authenc_verify_ahash_done(struct crypto_async_request *areq, 222static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -222,16 +230,18 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
222 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 230 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
223 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); 231 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
224 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); 232 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
233 unsigned int cryptlen = req->cryptlen;
225 234
226 if (err) 235 if (err)
227 goto out; 236 goto out;
228 237
229 authsize = crypto_aead_authsize(authenc); 238 authsize = crypto_aead_authsize(authenc);
239 cryptlen -= authsize;
230 ihash = ahreq->result + authsize; 240 ihash = ahreq->result + authsize;
231 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 241 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
232 authsize, 0); 242 authsize, 0);
233 243
234 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; 244 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
235 if (err) 245 if (err)
236 goto out; 246 goto out;
237 247
@@ -240,12 +250,12 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
240 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 250 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
241 req->base.complete, req->base.data); 251 req->base.complete, req->base.data);
242 ablkcipher_request_set_crypt(abreq, req->src, req->dst, 252 ablkcipher_request_set_crypt(abreq, req->src, req->dst,
243 req->cryptlen, req->iv); 253 cryptlen, req->iv);
244 254
245 err = crypto_ablkcipher_decrypt(abreq); 255 err = crypto_ablkcipher_decrypt(abreq);
246 256
247out: 257out:
248 aead_request_complete(req, err); 258 authenc_request_complete(req, err);
249} 259}
250 260
251static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) 261static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
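
Both verify completions now decrypt req->cryptlen minus the tag: authenc's on-the-wire ciphertext is enc(plaintext) followed by the auth tag, so decrypting the full cryptlen would run the block cipher across the appended digest as well. With illustrative numbers (HMAC-SHA1, 20-byte tag):

	unsigned int cryptlen = req->cryptlen;			/* 4116 */
	unsigned int authsize = crypto_aead_authsize(authenc);	/*   20 */

	cryptlen -= authsize;		/* 4096: encrypted payload only */

The tag is still read: scatterwalk_map_and_copy() extracts it for the memcmp(); it is just no longer fed to the cipher.
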
@@ -379,18 +389,20 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
379 err = crypto_authenc_genicv(areq, iv, 0); 389 err = crypto_authenc_genicv(areq, iv, 0);
380 } 390 }
381 391
382 aead_request_complete(areq, err); 392 authenc_request_complete(areq, err);
383} 393}
384 394
385static int crypto_authenc_encrypt(struct aead_request *req) 395static int crypto_authenc_encrypt(struct aead_request *req)
386{ 396{
387 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 397 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
388 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 398 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
389 struct ablkcipher_request *abreq = aead_request_ctx(req); 399 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
390 struct crypto_ablkcipher *enc = ctx->enc; 400 struct crypto_ablkcipher *enc = ctx->enc;
391 struct scatterlist *dst = req->dst; 401 struct scatterlist *dst = req->dst;
392 unsigned int cryptlen = req->cryptlen; 402 unsigned int cryptlen = req->cryptlen;
393 u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc); 403 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
404 + ctx->reqoff);
405 u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
394 int err; 406 int err;
395 407
396 ablkcipher_request_set_tfm(abreq, enc); 408 ablkcipher_request_set_tfm(abreq, enc);
@@ -418,7 +430,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
418 err = crypto_authenc_genicv(areq, greq->giv, 0); 430 err = crypto_authenc_genicv(areq, greq->giv, 0);
419 } 431 }
420 432
421 aead_request_complete(areq, err); 433 authenc_request_complete(areq, err);
422} 434}
423 435
424static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) 436static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
@@ -454,7 +466,7 @@ static int crypto_authenc_verify(struct aead_request *req,
454 unsigned int authsize; 466 unsigned int authsize;
455 467
456 areq_ctx->complete = authenc_verify_ahash_done; 468 areq_ctx->complete = authenc_verify_ahash_done;
457 areq_ctx->complete = authenc_verify_ahash_update_done; 469 areq_ctx->update_complete = authenc_verify_ahash_update_done;
458 470
459 ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); 471 ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
460 if (IS_ERR(ohash)) 472 if (IS_ERR(ohash))
@@ -464,7 +476,7 @@ static int crypto_authenc_verify(struct aead_request *req,
464 ihash = ohash + authsize; 476 ihash = ohash + authsize;
465 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 477 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
466 authsize, 0); 478 authsize, 0);
467 return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; 479 return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
468} 480}
469 481
470static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, 482static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
@@ -546,10 +558,6 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
546 if (IS_ERR(auth)) 558 if (IS_ERR(auth))
547 return PTR_ERR(auth); 559 return PTR_ERR(auth);
548 560
549 ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
550 crypto_ahash_alignmask(auth),
551 crypto_ahash_alignmask(auth) + 1);
552
553 enc = crypto_spawn_skcipher(&ictx->enc); 561 enc = crypto_spawn_skcipher(&ictx->enc);
554 err = PTR_ERR(enc); 562 err = PTR_ERR(enc);
555 if (IS_ERR(enc)) 563 if (IS_ERR(enc))
@@ -557,14 +565,19 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
557 565
558 ctx->auth = auth; 566 ctx->auth = auth;
559 ctx->enc = enc; 567 ctx->enc = enc;
560 568
561 tfm->crt_aead.reqsize = max_t(unsigned int, 569 ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
562 crypto_ahash_reqsize(auth) + ctx->reqoff + 570 crypto_ahash_alignmask(auth),
563 sizeof(struct authenc_request_ctx) + 571 crypto_ahash_alignmask(auth) + 1) +
564 sizeof(struct ahash_request), 572 crypto_ablkcipher_ivsize(enc);
573
574 tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) +
575 ctx->reqoff +
576 max_t(unsigned int,
577 crypto_ahash_reqsize(auth) +
578 sizeof(struct ahash_request),
565 sizeof(struct skcipher_givcrypt_request) + 579 sizeof(struct skcipher_givcrypt_request) +
566 crypto_ablkcipher_reqsize(enc) + 580 crypto_ablkcipher_reqsize(enc));
567 crypto_ablkcipher_ivsize(enc));
568 581
569 return 0; 582 return 0;
570 583
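
Computing ctx->reqoff after the skcipher spawn lets it fold in crypto_ablkcipher_ivsize(enc), and the reqsize formula changes to match: the request context and the reqoff scratch area are counted once, with only the hash-side and cipher-side sub-requests competing inside the max_t(). The resulting layout of areq_ctx->tail, reconstructed from the arithmetic above (an inference, not a documented contract):

	/*
	 * tail + 0                two hash digests plus alignment pad
	 * tail + reqoff - ivsize  cipher IV (crypto_authenc_encrypt
	 *                         recovers it as (u8 *)abreq - ivsize)
	 * tail + reqoff           ahash_request + its reqsize, or
	 *                         skcipher_givcrypt_request + its
	 *                         reqsize, whichever is larger
	 */

This is also why crypto_authenc_encrypt() above now addresses abreq at areq_ctx->tail + ctx->reqoff instead of carving it out of the raw request context.
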
@@ -603,7 +616,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
603 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, 616 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
604 CRYPTO_ALG_TYPE_AHASH_MASK); 617 CRYPTO_ALG_TYPE_AHASH_MASK);
605 if (IS_ERR(auth)) 618 if (IS_ERR(auth))
606 return ERR_PTR(PTR_ERR(auth)); 619 return ERR_CAST(auth);
607 620
608 auth_base = &auth->base; 621 auth_base = &auth->base;
609 622
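
ERR_CAST() replaces the ERR_PTR(PTR_ERR(auth)) round-trip: it re-types an error-carrying pointer without decoding and re-encoding the errno. In miniature (the helper comes from <linux/err.h>; lookup_foo() is illustrative):

	struct foo *f = lookup_foo(name);

	if (IS_ERR(f))
		return ERR_CAST(f);	/* same errno, different type */

Functionally identical to the old spelling, with one conversion instead of two.
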
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index 6f5b48731922..a67d52ee0580 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * Blowfish Cipher Algorithm, by Bruce Schneier. 4 * Blowfish Cipher Algorithm, by Bruce Schneier.
@@ -299,7 +299,7 @@ static const u32 bf_sbox[256 * 4] = {
299 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, 299 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
300}; 300};
301 301
302/* 302/*
303 * Round loop unrolling macros, S is a pointer to an S-Box array 303 * Round loop unrolling macros, S is a pointer to an S-Box array
304 * organized as 4 unsigned longs per row. 304 * organized as 4 unsigned longs per row.
305 */ 305 */
@@ -315,7 +315,7 @@ static const u32 bf_sbox[256 * 4] = {
315 315
316/* 316/*
317 * The blowfish encipher processes 64-bit blocks. 317 * The blowfish encipher processes 64-bit blocks.
318 * NOTE: This function MUSTN'T respect endianness 318 * NOTE: This function MUSTN'T respect endianness
319 */ 319 */
320static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) 320static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
321{ 321{
@@ -395,7 +395,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
395 out_blk[1] = cpu_to_be32(yl); 395 out_blk[1] = cpu_to_be32(yl);
396} 396}
397 397
398/* 398/*
399 * Calculates the blowfish S and P boxes for encryption and decryption. 399 * Calculates the blowfish S and P boxes for encryption and decryption.
400 */ 400 */
401static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) 401static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -417,10 +417,10 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
417 417
418 /* Actual subkey generation */ 418 /* Actual subkey generation */
419 for (j = 0, i = 0; i < 16 + 2; i++) { 419 for (j = 0, i = 0; i < 16 + 2; i++) {
420 temp = (((u32 )key[j] << 24) | 420 temp = (((u32)key[j] << 24) |
421 ((u32 )key[(j + 1) % keylen] << 16) | 421 ((u32)key[(j + 1) % keylen] << 16) |
422 ((u32 )key[(j + 2) % keylen] << 8) | 422 ((u32)key[(j + 2) % keylen] << 8) |
423 ((u32 )key[(j + 3) % keylen])); 423 ((u32)key[(j + 3) % keylen]));
424 424
425 P[i] = P[i] ^ temp; 425 P[i] = P[i] ^ temp;
426 j = (j + 4) % keylen; 426 j = (j + 4) % keylen;
@@ -444,7 +444,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
444 S[count + 1] = data[1]; 444 S[count + 1] = data[1];
445 } 445 }
446 } 446 }
447 447
448 /* Bruce says not to bother with the weak key check. */ 448 /* Bruce says not to bother with the weak key check. */
449 return 0; 449 return 0;
450} 450}
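
The re-spaced casts sit in bf_setkey()'s key-folding loop, which reads four key bytes big-endian with wraparound modulo keylen and XORs them into the P-array. The same load written byte-at-a-time (a sketch; get_unaligned_be32() does not apply here because of the wraparound):

	u32 temp = 0;
	int k;

	for (k = 0; k < 4; k++)
		temp = (temp << 8) | key[(j + k) % keylen];

	P[i] ^= temp;
	j = (j + 4) % keylen;

Either form accepts any key length in the supported range; keys shorter than the 18-entry P-array simply wrap and repeat.
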
diff --git a/crypto/camellia.c b/crypto/camellia.c
index 964635d163f4..64cff46ea5e4 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -39,271 +39,271 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41static const u32 camellia_sp1110[256] = { 41static const u32 camellia_sp1110[256] = {
42 0x70707000,0x82828200,0x2c2c2c00,0xececec00, 42 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00,
43 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500, 43 0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500,
44 0xe4e4e400,0x85858500,0x57575700,0x35353500, 44 0xe4e4e400, 0x85858500, 0x57575700, 0x35353500,
45 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100, 45 0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100,
46 0x23232300,0xefefef00,0x6b6b6b00,0x93939300, 46 0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300,
47 0x45454500,0x19191900,0xa5a5a500,0x21212100, 47 0x45454500, 0x19191900, 0xa5a5a500, 0x21212100,
48 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00, 48 0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00,
49 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00, 49 0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00,
50 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00, 50 0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00,
51 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00, 51 0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00,
52 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00, 52 0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00,
53 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00, 53 0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00,
54 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00, 54 0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00,
55 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00, 55 0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00,
56 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600, 56 0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600,
57 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00, 57 0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00,
58 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600, 58 0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600,
59 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00, 59 0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00,
60 0x74747400,0x12121200,0x2b2b2b00,0x20202000, 60 0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000,
61 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900, 61 0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900,
62 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200, 62 0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200,
63 0x34343400,0x7e7e7e00,0x76767600,0x05050500, 63 0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500,
64 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100, 64 0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100,
65 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700, 65 0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700,
66 0x14141400,0x58585800,0x3a3a3a00,0x61616100, 66 0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100,
67 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00, 67 0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00,
68 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600, 68 0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600,
69 0x53535300,0x18181800,0xf2f2f200,0x22222200, 69 0x53535300, 0x18181800, 0xf2f2f200, 0x22222200,
70 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200, 70 0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200,
71 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100, 71 0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100,
72 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800, 72 0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800,
73 0x60606000,0xfcfcfc00,0x69696900,0x50505000, 73 0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000,
74 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00, 74 0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00,
75 0xa1a1a100,0x89898900,0x62626200,0x97979700, 75 0xa1a1a100, 0x89898900, 0x62626200, 0x97979700,
76 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500, 76 0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500,
77 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200, 77 0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200,
78 0x10101000,0xc4c4c400,0x00000000,0x48484800, 78 0x10101000, 0xc4c4c400, 0x00000000, 0x48484800,
79 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00, 79 0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00,
80 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00, 80 0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00,
81 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400, 81 0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400,
82 0x87878700,0x5c5c5c00,0x83838300,0x02020200, 82 0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200,
83 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300, 83 0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300,
84 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300, 84 0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300,
85 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200, 85 0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200,
86 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600, 86 0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600,
87 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00, 87 0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00,
88 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00, 88 0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00,
89 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00, 89 0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00,
90 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00, 90 0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00,
91 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00, 91 0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00,
92 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600, 92 0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600,
93 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900, 93 0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900,
94 0x78787800,0x98989800,0x06060600,0x6a6a6a00, 94 0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00,
95 0xe7e7e700,0x46464600,0x71717100,0xbababa00, 95 0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00,
96 0xd4d4d400,0x25252500,0xababab00,0x42424200, 96 0xd4d4d400, 0x25252500, 0xababab00, 0x42424200,
97 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00, 97 0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00,
98 0x72727200,0x07070700,0xb9b9b900,0x55555500, 98 0x72727200, 0x07070700, 0xb9b9b900, 0x55555500,
99 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00, 99 0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00,
100 0x36363600,0x49494900,0x2a2a2a00,0x68686800, 100 0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800,
101 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400, 101 0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400,
102 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00, 102 0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00,
103 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100, 103 0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100,
104 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400, 104 0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400,
105 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00, 105 0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00,
106}; 106};
107 107
108static const u32 camellia_sp0222[256] = { 108static const u32 camellia_sp0222[256] = {
109 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9, 109 0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9,
110 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb, 110 0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb,
111 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a, 111 0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a,
112 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282, 112 0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282,
113 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727, 113 0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727,
114 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242, 114 0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242,
115 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c, 115 0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c,
116 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b, 116 0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b,
117 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f, 117 0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f,
118 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d, 118 0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d,
119 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe, 119 0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe,
120 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434, 120 0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434,
121 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595, 121 0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595,
122 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a, 122 0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a,
123 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad, 123 0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad,
124 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a, 124 0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a,
125 0x00171717,0x001a1a1a,0x00353535,0x00cccccc, 125 0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc,
126 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a, 126 0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a,
127 0x00e8e8e8,0x00242424,0x00565656,0x00404040, 127 0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040,
128 0x00e1e1e1,0x00636363,0x00090909,0x00333333, 128 0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333,
129 0x00bfbfbf,0x00989898,0x00979797,0x00858585, 129 0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585,
130 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a, 130 0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a,
131 0x00dadada,0x006f6f6f,0x00535353,0x00626262, 131 0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262,
132 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf, 132 0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf,
133 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2, 133 0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2,
134 0x00bdbdbd,0x00363636,0x00222222,0x00383838, 134 0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838,
135 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c, 135 0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c,
136 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444, 136 0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444,
137 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565, 137 0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565,
138 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323, 138 0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323,
139 0x00484848,0x00101010,0x00d1d1d1,0x00515151, 139 0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151,
140 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0, 140 0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0,
141 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa, 141 0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa,
142 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f, 142 0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f,
143 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b, 143 0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b,
144 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5, 144 0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5,
145 0x00202020,0x00898989,0x00000000,0x00909090, 145 0x00202020, 0x00898989, 0x00000000, 0x00909090,
146 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7, 146 0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7,
147 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5, 147 0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5,
148 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929, 148 0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929,
149 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404, 149 0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404,
150 0x009b9b9b,0x00949494,0x00212121,0x00666666, 150 0x009b9b9b, 0x00949494, 0x00212121, 0x00666666,
151 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7, 151 0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7,
152 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5, 152 0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5,
153 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c, 153 0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c,
154 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676, 154 0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676,
155 0x00030303,0x002d2d2d,0x00dedede,0x00969696, 155 0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696,
156 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c, 156 0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c,
157 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919, 157 0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919,
158 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d, 158 0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d,
159 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d, 159 0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d,
160 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2, 160 0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2,
161 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4, 161 0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4,
162 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575, 162 0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575,
163 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484, 163 0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484,
164 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5, 164 0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5,
165 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa, 165 0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa,
166 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414, 166 0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414,
167 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0, 167 0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0,
168 0x00787878,0x00707070,0x00e3e3e3,0x00494949, 168 0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949,
169 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6, 169 0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6,
170 0x00777777,0x00939393,0x00868686,0x00838383, 170 0x00777777, 0x00939393, 0x00868686, 0x00838383,
171 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9, 171 0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9,
172 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d, 172 0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d,
173}; 173};
174 174
175static const u32 camellia_sp3033[256] = { 175static const u32 camellia_sp3033[256] = {
176 0x38003838,0x41004141,0x16001616,0x76007676, 176 0x38003838, 0x41004141, 0x16001616, 0x76007676,
177 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2, 177 0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2,
178 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a, 178 0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a,
179 0x75007575,0x06000606,0x57005757,0xa000a0a0, 179 0x75007575, 0x06000606, 0x57005757, 0xa000a0a0,
180 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9, 180 0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9,
181 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090, 181 0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090,
182 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727, 182 0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727,
183 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede, 183 0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede,
184 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7, 184 0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7,
185 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767, 185 0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767,
186 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf, 186 0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf,
187 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d, 187 0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d,
188 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565, 188 0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565,
189 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e, 189 0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e,
190 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b, 190 0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b,
191 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6, 191 0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6,
192 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333, 192 0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333,
193 0xfd00fdfd,0x66006666,0x58005858,0x96009696, 193 0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696,
194 0x3a003a3a,0x09000909,0x95009595,0x10001010, 194 0x3a003a3a, 0x09000909, 0x95009595, 0x10001010,
195 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc, 195 0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc,
196 0xef00efef,0x26002626,0xe500e5e5,0x61006161, 196 0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161,
197 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282, 197 0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282,
198 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898, 198 0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898,
199 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb, 199 0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb,
200 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0, 200 0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0,
201 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e, 201 0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e,
202 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b, 202 0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b,
203 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111, 203 0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111,
204 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959, 204 0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959,
205 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8, 205 0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8,
206 0x12001212,0x04000404,0x74007474,0x54005454, 206 0x12001212, 0x04000404, 0x74007474, 0x54005454,
207 0x30003030,0x7e007e7e,0xb400b4b4,0x28002828, 207 0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828,
208 0x55005555,0x68006868,0x50005050,0xbe00bebe, 208 0x55005555, 0x68006868, 0x50005050, 0xbe00bebe,
209 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb, 209 0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb,
210 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca, 210 0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca,
211 0x70007070,0xff00ffff,0x32003232,0x69006969, 211 0x70007070, 0xff00ffff, 0x32003232, 0x69006969,
212 0x08000808,0x62006262,0x00000000,0x24002424, 212 0x08000808, 0x62006262, 0x00000000, 0x24002424,
213 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded, 213 0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded,
214 0x45004545,0x81008181,0x73007373,0x6d006d6d, 214 0x45004545, 0x81008181, 0x73007373, 0x6d006d6d,
215 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a, 215 0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a,
216 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101, 216 0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101,
217 0xe600e6e6,0x25002525,0x48004848,0x99009999, 217 0xe600e6e6, 0x25002525, 0x48004848, 0x99009999,
218 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9, 218 0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9,
219 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171, 219 0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171,
220 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313, 220 0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313,
221 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d, 221 0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d,
222 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5, 222 0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5,
223 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717, 223 0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717,
224 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646, 224 0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646,
225 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747, 225 0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747,
226 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b, 226 0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b,
227 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac, 227 0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac,
228 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535, 228 0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535,
229 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d, 229 0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d,
230 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121, 230 0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121,
231 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d, 231 0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d,
232 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa, 232 0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa,
233 0x7c007c7c,0x77007777,0x56005656,0x05000505, 233 0x7c007c7c, 0x77007777, 0x56005656, 0x05000505,
234 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434, 234 0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434,
235 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252, 235 0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252,
236 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd, 236 0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd,
237 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0, 237 0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0,
238 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a, 238 0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a,
239 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f, 239 0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f,
240}; 240};
241 241
242static const u32 camellia_sp4404[256] = { 242static const u32 camellia_sp4404[256] = {
243 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0, 243 0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0,
244 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae, 244 0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae,
245 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5, 245 0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5,
246 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092, 246 0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092,
247 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f, 247 0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f,
248 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b, 248 0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b,
249 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d, 249 0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d,
250 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c, 250 0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c,
251 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0, 251 0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0,
252 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084, 252 0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084,
253 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076, 253 0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076,
254 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004, 254 0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004,
255 0x14140014,0x3a3a003a,0xdede00de,0x11110011, 255 0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011,
256 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2, 256 0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2,
257 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a, 257 0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a,
258 0x24240024,0xe8e800e8,0x60600060,0x69690069, 258 0x24240024, 0xe8e800e8, 0x60600060, 0x69690069,
259 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062, 259 0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062,
260 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064, 260 0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064,
261 0x10100010,0x00000000,0xa3a300a3,0x75750075, 261 0x10100010, 0x00000000, 0xa3a300a3, 0x75750075,
262 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd, 262 0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd,
263 0x87870087,0x83830083,0xcdcd00cd,0x90900090, 263 0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090,
264 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf, 264 0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf,
265 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6, 265 0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6,
266 0x81810081,0x6f6f006f,0x13130013,0x63630063, 266 0x81810081, 0x6f6f006f, 0x13130013, 0x63630063,
267 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc, 267 0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc,
268 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4, 268 0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4,
269 0x78780078,0x06060006,0xe7e700e7,0x71710071, 269 0x78780078, 0x06060006, 0xe7e700e7, 0x71710071,
270 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d, 270 0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d,
271 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac, 271 0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac,
272 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1, 272 0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1,
273 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043, 273 0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043,
274 0x15150015,0xadad00ad,0x77770077,0x80800080, 274 0x15150015, 0xadad00ad, 0x77770077, 0x80800080,
275 0x82820082,0xecec00ec,0x27270027,0xe5e500e5, 275 0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5,
276 0x85850085,0x35350035,0x0c0c000c,0x41410041, 276 0x85850085, 0x35350035, 0x0c0c000c, 0x41410041,
277 0xefef00ef,0x93930093,0x19190019,0x21210021, 277 0xefef00ef, 0x93930093, 0x19190019, 0x21210021,
278 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd, 278 0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd,
279 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce, 279 0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce,
280 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a, 280 0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a,
281 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d, 281 0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d,
282 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d, 282 0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d,
283 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d, 283 0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d,
284 0x12120012,0x20200020,0xb1b100b1,0x99990099, 284 0x12120012, 0x20200020, 0xb1b100b1, 0x99990099,
285 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005, 285 0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005,
286 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7, 286 0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7,
287 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c, 287 0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c,
288 0x0f0f000f,0x16160016,0x18180018,0x22220022, 288 0x0f0f000f, 0x16160016, 0x18180018, 0x22220022,
289 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091, 289 0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091,
290 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050, 290 0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050,
291 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097, 291 0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097,
292 0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2, 292 0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2,
293 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db, 293 0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db,
294 0x03030003,0xdada00da,0x3f3f003f,0x94940094, 294 0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094,
295 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033, 295 0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033,
296 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2, 296 0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2,
297 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b, 297 0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b,
298 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e, 298 0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e,
299 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e, 299 0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e,
300 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059, 300 0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059,
301 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba, 301 0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba,
302 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa, 302 0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa,
303 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a, 303 0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a,
304 0x49490049,0x68680068,0x38380038,0xa4a400a4, 304 0x49490049, 0x68680068, 0x38380038, 0xa4a400a4,
305 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1, 305 0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1,
306 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e, 306 0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e,
307}; 307};
308 308
309 309
@@ -344,7 +344,7 @@ static const u32 camellia_sp4404[256] = {
344 lr = (lr << bits) + (rl >> (32 - bits)); \ 344 lr = (lr << bits) + (rl >> (32 - bits)); \
345 rl = (rl << bits) + (rr >> (32 - bits)); \ 345 rl = (rl << bits) + (rr >> (32 - bits)); \
346 rr = (rr << bits) + (w0 >> (32 - bits)); \ 346 rr = (rr << bits) + (w0 >> (32 - bits)); \
347 } while(0) 347 } while (0)
348 348
349#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ 349#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \
350 do { \ 350 do { \
@@ -354,7 +354,7 @@ static const u32 camellia_sp4404[256] = {
354 lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ 354 lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
355 rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ 355 rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
356 rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ 356 rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \
357 } while(0) 357 } while (0)
358 358
359#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ 359#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
360 do { \ 360 do { \
@@ -373,7 +373,7 @@ static const u32 camellia_sp4404[256] = {
373 yl ^= yr; \ 373 yl ^= yr; \
374 yr = ror32(yr, 8); \ 374 yr = ror32(yr, 8); \
375 yr ^= yl; \ 375 yr ^= yl; \
376 } while(0) 376 } while (0)
377 377
378#define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) 378#define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
379#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) 379#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
@@ -835,7 +835,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
835static void camellia_setup192(const unsigned char *key, u32 *subkey) 835static void camellia_setup192(const unsigned char *key, u32 *subkey)
836{ 836{
837 unsigned char kk[32]; 837 unsigned char kk[32];
838 u32 krll, krlr, krrl,krrr; 838 u32 krll, krlr, krrl, krrr;
839 839
840 memcpy(kk, key, 24); 840 memcpy(kk, key, 24);
841 memcpy((unsigned char *)&krll, key+16, 4); 841 memcpy((unsigned char *)&krll, key+16, 4);
@@ -865,7 +865,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
865 t1 |= lr; \ 865 t1 |= lr; \
866 ll ^= t1; \ 866 ll ^= t1; \
867 rr ^= rol32(t3, 1); \ 867 rr ^= rol32(t3, 1); \
868 } while(0) 868 } while (0)
869 869
870#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ 870#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
871 do { \ 871 do { \
@@ -881,12 +881,12 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
881 ir ^= il ^ kr; \ 881 ir ^= il ^ kr; \
882 yl ^= ir; \ 882 yl ^= ir; \
883 yr ^= ror32(il, 8) ^ ir; \ 883 yr ^= ror32(il, 8) ^ ir; \
884 } while(0) 884 } while (0)
885 885
886/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ 886/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
887static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) 887static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
888{ 888{
889 u32 il,ir,t0,t1; /* temporary variables */ 889 u32 il, ir, t0, t1; /* temporary variables */
890 890
891 /* pre whitening but absorb kw2 */ 891 /* pre whitening but absorb kw2 */
892 io[0] ^= SUBKEY_L(0); 892 io[0] ^= SUBKEY_L(0);
@@ -894,30 +894,30 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
894 894
895 /* main iteration */ 895 /* main iteration */
896#define ROUNDS(i) do { \ 896#define ROUNDS(i) do { \
897 CAMELLIA_ROUNDSM(io[0],io[1], \ 897 CAMELLIA_ROUNDSM(io[0], io[1], \
898 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ 898 SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
899 io[2],io[3],il,ir); \ 899 io[2], io[3], il, ir); \
900 CAMELLIA_ROUNDSM(io[2],io[3], \ 900 CAMELLIA_ROUNDSM(io[2], io[3], \
901 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ 901 SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
902 io[0],io[1],il,ir); \ 902 io[0], io[1], il, ir); \
903 CAMELLIA_ROUNDSM(io[0],io[1], \ 903 CAMELLIA_ROUNDSM(io[0], io[1], \
904 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ 904 SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
905 io[2],io[3],il,ir); \ 905 io[2], io[3], il, ir); \
906 CAMELLIA_ROUNDSM(io[2],io[3], \ 906 CAMELLIA_ROUNDSM(io[2], io[3], \
907 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ 907 SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
908 io[0],io[1],il,ir); \ 908 io[0], io[1], il, ir); \
909 CAMELLIA_ROUNDSM(io[0],io[1], \ 909 CAMELLIA_ROUNDSM(io[0], io[1], \
910 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ 910 SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
911 io[2],io[3],il,ir); \ 911 io[2], io[3], il, ir); \
912 CAMELLIA_ROUNDSM(io[2],io[3], \ 912 CAMELLIA_ROUNDSM(io[2], io[3], \
913 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ 913 SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
914 io[0],io[1],il,ir); \ 914 io[0], io[1], il, ir); \
915} while (0) 915} while (0)
916#define FLS(i) do { \ 916#define FLS(i) do { \
917 CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ 917 CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
918 SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ 918 SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
919 SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ 919 SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
920 t0,t1,il,ir); \ 920 t0, t1, il, ir); \
921} while (0) 921} while (0)
922 922
923 ROUNDS(0); 923 ROUNDS(0);
@@ -941,7 +941,7 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
941 941
942static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) 942static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
943{ 943{
944 u32 il,ir,t0,t1; /* temporary variables */ 944 u32 il, ir, t0, t1; /* temporary variables */
945 945
946 /* pre whitening but absorb kw2 */ 946 /* pre whitening but absorb kw2 */
947 io[0] ^= SUBKEY_L(i); 947 io[0] ^= SUBKEY_L(i);
@@ -949,30 +949,30 @@ static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
949 949
950 /* main iteration */ 950 /* main iteration */
951#define ROUNDS(i) do { \ 951#define ROUNDS(i) do { \
952 CAMELLIA_ROUNDSM(io[0],io[1], \ 952 CAMELLIA_ROUNDSM(io[0], io[1], \
953 SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ 953 SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
954 io[2],io[3],il,ir); \ 954 io[2], io[3], il, ir); \
955 CAMELLIA_ROUNDSM(io[2],io[3], \ 955 CAMELLIA_ROUNDSM(io[2], io[3], \
956 SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ 956 SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
957 io[0],io[1],il,ir); \ 957 io[0], io[1], il, ir); \
958 CAMELLIA_ROUNDSM(io[0],io[1], \ 958 CAMELLIA_ROUNDSM(io[0], io[1], \
959 SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ 959 SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
960 io[2],io[3],il,ir); \ 960 io[2], io[3], il, ir); \
961 CAMELLIA_ROUNDSM(io[2],io[3], \ 961 CAMELLIA_ROUNDSM(io[2], io[3], \
962 SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ 962 SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
963 io[0],io[1],il,ir); \ 963 io[0], io[1], il, ir); \
964 CAMELLIA_ROUNDSM(io[0],io[1], \ 964 CAMELLIA_ROUNDSM(io[0], io[1], \
965 SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ 965 SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
966 io[2],io[3],il,ir); \ 966 io[2], io[3], il, ir); \
967 CAMELLIA_ROUNDSM(io[2],io[3], \ 967 CAMELLIA_ROUNDSM(io[2], io[3], \
968 SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ 968 SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
969 io[0],io[1],il,ir); \ 969 io[0], io[1], il, ir); \
970} while (0) 970} while (0)
971#define FLS(i) do { \ 971#define FLS(i) do { \
972 CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ 972 CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
973 SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ 973 SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
974 SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ 974 SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
975 t0,t1,il,ir); \ 975 t0, t1, il, ir); \
976} while (0) 976} while (0)
977 977
978 if (i == 32) { 978 if (i == 32) {
diff --git a/crypto/cast5.c b/crypto/cast5.c
index 8cbe28fa0e0c..a1d2294b50ad 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -569,12 +569,12 @@ static const u32 sb8[256] = {
569 0xeaee6801, 0x8db2a283, 0xea8bf59e 569 0xeaee6801, 0x8db2a283, 0xea8bf59e
570}; 570};
571 571
572#define F1(D,m,r) ( (I = ((m) + (D))), (I=rol32(I,(r))), \ 572#define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \
573 (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) ) 573 (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
574#define F2(D,m,r) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \ 574#define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
575 (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) ) 575 (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
576#define F3(D,m,r) ( (I = ((m) - (D))), (I=rol32(I,(r))), \ 576#define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \
577 (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) 577 (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
578 578
579 579
580static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) 580static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -694,7 +694,7 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
694 dst[1] = cpu_to_be32(l); 694 dst[1] = cpu_to_be32(l);
695} 695}
696 696
697static void key_schedule(u32 * x, u32 * z, u32 * k) 697static void key_schedule(u32 *x, u32 *z, u32 *k)
698{ 698{
699 699
700#define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) 700#define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff)
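
The F1/F2/F3 macros are CAST's three round-function types: mix the data word into the rotate input with +, ^ or -, rotate, then combine the four S-box lookups with an alternating pattern. Type 1 rewritten as an inline function under the same assumptions (the s1..s4 tables in scope, rol32() from <linux/bitops.h>); a readability sketch, not a proposed change:

static inline u32 cast_f1(u32 d, u32 m, u8 r)
{
	u32 i = rol32(m + d, r);

	return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff]) -
		s3[(i >> 8) & 0xff]) + s4[i & 0xff];
}

The macro form stays in the source; the checkpatch pass above only normalizes its spacing.
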
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 007d02beed67..e0c15a6c7c34 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -11,7 +11,7 @@
11 * under the terms of GNU General Public License as published by the Free 11 * under the terms of GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option) 12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version. 13 * any later version.
14 * 14 *
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
@@ -35,12 +35,12 @@ struct cast6_ctx {
35 u8 Kr[12][4]; 35 u8 Kr[12][4];
36}; 36};
37 37
38#define F1(D,r,m) ( (I = ((m) + (D))), (I=rol32(I,(r))), \ 38#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
39 (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) ) 39 (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
40#define F2(D,r,m) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \ 40#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
41 (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) ) 41 (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
42#define F3(D,r,m) ( (I = ((m) - (D))), (I=rol32(I,(r))), \ 42#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
43 (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) 43 (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
44 44
45static const u32 s1[256] = { 45static const u32 s1[256] = {
46 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 46 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
@@ -312,7 +312,7 @@ static const u32 s4[256] = {
312 312
313static const u32 Tm[24][8] = { 313static const u32 Tm[24][8] = {
314 { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d, 314 { 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
315 0x84c413be, 0xf39dff5f, 0x6277eb00 } , 315 0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
316 { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525, 316 { 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
317 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } , 317 0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
318 { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d, 318 { 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
@@ -369,7 +369,8 @@ static const u8 Tr[4][8] = {
369}; 369};
370 370
371/* forward octave */ 371/* forward octave */
372static void W(u32 *key, unsigned int i) { 372static void W(u32 *key, unsigned int i)
373{
373 u32 I; 374 u32 I;
374 key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); 375 key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
375 key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); 376 key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
@@ -377,7 +378,7 @@ static void W(u32 *key, unsigned int i) {
377 key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]); 378 key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
378 key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]); 379 key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
379 key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]); 380 key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
380 key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]); 381 key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
381 key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); 382 key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
382} 383}
383 384
@@ -393,11 +394,11 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
393 if (key_len % 4 != 0) { 394 if (key_len % 4 != 0) {
394 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 395 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
395 return -EINVAL; 396 return -EINVAL;
396 } 397 }
398
399 memset(p_key, 0, 32);
400 memcpy(p_key, in_key, key_len);
397 401
398 memset (p_key, 0, 32);
399 memcpy (p_key, in_key, key_len);
400
401 key[0] = be32_to_cpu(p_key[0]); /* A */ 402 key[0] = be32_to_cpu(p_key[0]); /* A */
402 key[1] = be32_to_cpu(p_key[1]); /* B */ 403 key[1] = be32_to_cpu(p_key[1]); /* B */
403 key[2] = be32_to_cpu(p_key[2]); /* C */ 404 key[2] = be32_to_cpu(p_key[2]); /* C */
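
The reordered memset()/memcpy() pair zero-pads any accepted key (a multiple of 4 bytes, at most 32) out to the full 256-bit buffer before the eight be32 loads, so the words beyond the supplied key are defined as zero. With a hypothetical 128-bit key:

	__be32 p_key[8];

	memset(p_key, 0, 32);		/* words E..H default to zero */
	memcpy(p_key, in_key, 16);	/* words A..D from the caller  */
	/* be32_to_cpu(p_key[0..7]) is now fully defined */

Only spacing and placement change in this hunk; the padding semantics are untouched.
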
@@ -406,18 +407,16 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
406 key[5] = be32_to_cpu(p_key[5]); /* F */ 407 key[5] = be32_to_cpu(p_key[5]); /* F */
407 key[6] = be32_to_cpu(p_key[6]); /* G */ 408 key[6] = be32_to_cpu(p_key[6]); /* G */
408 key[7] = be32_to_cpu(p_key[7]); /* H */ 409 key[7] = be32_to_cpu(p_key[7]); /* H */
409
410
411 410
412 for (i = 0; i < 12; i++) { 411 for (i = 0; i < 12; i++) {
413 W (key, 2 * i); 412 W(key, 2 * i);
414 W (key, 2 * i + 1); 413 W(key, 2 * i + 1);
415 414
416 c->Kr[i][0] = key[0] & 0x1f; 415 c->Kr[i][0] = key[0] & 0x1f;
417 c->Kr[i][1] = key[2] & 0x1f; 416 c->Kr[i][1] = key[2] & 0x1f;
418 c->Kr[i][2] = key[4] & 0x1f; 417 c->Kr[i][2] = key[4] & 0x1f;
419 c->Kr[i][3] = key[6] & 0x1f; 418 c->Kr[i][3] = key[6] & 0x1f;
420 419
421 c->Km[i][0] = key[7]; 420 c->Km[i][0] = key[7];
422 c->Km[i][1] = key[5]; 421 c->Km[i][1] = key[5];
423 c->Km[i][2] = key[3]; 422 c->Km[i][2] = key[3];
@@ -428,21 +427,23 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
428} 427}
429 428
430/*forward quad round*/ 429/*forward quad round*/
431static void Q (u32 * block, u8 * Kr, u32 * Km) { 430static void Q(u32 *block, u8 *Kr, u32 *Km)
431{
432 u32 I; 432 u32 I;
433 block[2] ^= F1(block[3], Kr[0], Km[0]); 433 block[2] ^= F1(block[3], Kr[0], Km[0]);
434 block[1] ^= F2(block[2], Kr[1], Km[1]); 434 block[1] ^= F2(block[2], Kr[1], Km[1]);
435 block[0] ^= F3(block[1], Kr[2], Km[2]); 435 block[0] ^= F3(block[1], Kr[2], Km[2]);
436 block[3] ^= F1(block[0], Kr[3], Km[3]); 436 block[3] ^= F1(block[0], Kr[3], Km[3]);
437} 437}
438 438
439/*reverse quad round*/ 439/*reverse quad round*/
440static void QBAR (u32 * block, u8 * Kr, u32 * Km) { 440static void QBAR(u32 *block, u8 *Kr, u32 *Km)
441{
441 u32 I; 442 u32 I;
442 block[3] ^= F1(block[0], Kr[3], Km[3]); 443 block[3] ^= F1(block[0], Kr[3], Km[3]);
443 block[0] ^= F3(block[1], Kr[2], Km[2]); 444 block[0] ^= F3(block[1], Kr[2], Km[2]);
444 block[1] ^= F2(block[2], Kr[1], Km[1]); 445 block[1] ^= F2(block[2], Kr[1], Km[1]);
445 block[2] ^= F1(block[3], Kr[0], Km[0]); 446 block[2] ^= F1(block[3], Kr[0], Km[0]);
446} 447}
447 448
448static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) 449static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -451,64 +452,65 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
451 const __be32 *src = (const __be32 *)inbuf; 452 const __be32 *src = (const __be32 *)inbuf;
452 __be32 *dst = (__be32 *)outbuf; 453 __be32 *dst = (__be32 *)outbuf;
453 u32 block[4]; 454 u32 block[4];
454 u32 * Km; 455 u32 *Km;
455 u8 * Kr; 456 u8 *Kr;
456 457
457 block[0] = be32_to_cpu(src[0]); 458 block[0] = be32_to_cpu(src[0]);
458 block[1] = be32_to_cpu(src[1]); 459 block[1] = be32_to_cpu(src[1]);
459 block[2] = be32_to_cpu(src[2]); 460 block[2] = be32_to_cpu(src[2]);
460 block[3] = be32_to_cpu(src[3]); 461 block[3] = be32_to_cpu(src[3]);
461 462
462 Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); 463 Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
463 Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); 464 Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
464 Km = c->Km[2]; Kr = c->Kr[2]; Q (block, Kr, Km); 465 Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km);
465 Km = c->Km[3]; Kr = c->Kr[3]; Q (block, Kr, Km); 466 Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km);
466 Km = c->Km[4]; Kr = c->Kr[4]; Q (block, Kr, Km); 467 Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km);
467 Km = c->Km[5]; Kr = c->Kr[5]; Q (block, Kr, Km); 468 Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km);
468 Km = c->Km[6]; Kr = c->Kr[6]; QBAR (block, Kr, Km); 469 Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km);
469 Km = c->Km[7]; Kr = c->Kr[7]; QBAR (block, Kr, Km); 470 Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km);
470 Km = c->Km[8]; Kr = c->Kr[8]; QBAR (block, Kr, Km); 471 Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km);
471 Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); 472 Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km);
472 Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); 473 Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
473 Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); 474 Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);
474 475
475 dst[0] = cpu_to_be32(block[0]); 476 dst[0] = cpu_to_be32(block[0]);
476 dst[1] = cpu_to_be32(block[1]); 477 dst[1] = cpu_to_be32(block[1]);
477 dst[2] = cpu_to_be32(block[2]); 478 dst[2] = cpu_to_be32(block[2]);
478 dst[3] = cpu_to_be32(block[3]); 479 dst[3] = cpu_to_be32(block[3]);
479} 480}
480 481
481static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { 482static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
482 struct cast6_ctx * c = crypto_tfm_ctx(tfm); 483{
484 struct cast6_ctx *c = crypto_tfm_ctx(tfm);
483 const __be32 *src = (const __be32 *)inbuf; 485 const __be32 *src = (const __be32 *)inbuf;
484 __be32 *dst = (__be32 *)outbuf; 486 __be32 *dst = (__be32 *)outbuf;
485 u32 block[4]; 487 u32 block[4];
486 u32 * Km; 488 u32 *Km;
487 u8 * Kr; 489 u8 *Kr;
488 490
489 block[0] = be32_to_cpu(src[0]); 491 block[0] = be32_to_cpu(src[0]);
490 block[1] = be32_to_cpu(src[1]); 492 block[1] = be32_to_cpu(src[1]);
491 block[2] = be32_to_cpu(src[2]); 493 block[2] = be32_to_cpu(src[2]);
492 block[3] = be32_to_cpu(src[3]); 494 block[3] = be32_to_cpu(src[3]);
493 495
494 Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); 496 Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
495 Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); 497 Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
496 Km = c->Km[9]; Kr = c->Kr[9]; Q (block, Kr, Km); 498 Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km);
497 Km = c->Km[8]; Kr = c->Kr[8]; Q (block, Kr, Km); 499 Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km);
498 Km = c->Km[7]; Kr = c->Kr[7]; Q (block, Kr, Km); 500 Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km);
499 Km = c->Km[6]; Kr = c->Kr[6]; Q (block, Kr, Km); 501 Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km);
500 Km = c->Km[5]; Kr = c->Kr[5]; QBAR (block, Kr, Km); 502 Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km);
501 Km = c->Km[4]; Kr = c->Kr[4]; QBAR (block, Kr, Km); 503 Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km);
502 Km = c->Km[3]; Kr = c->Kr[3]; QBAR (block, Kr, Km); 504 Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km);
503 Km = c->Km[2]; Kr = c->Kr[2]; QBAR (block, Kr, Km); 505 Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km);
504 Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); 506 Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
505 Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); 507 Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);
506 508
507 dst[0] = cpu_to_be32(block[0]); 509 dst[0] = cpu_to_be32(block[0]);
508 dst[1] = cpu_to_be32(block[1]); 510 dst[1] = cpu_to_be32(block[1]);
509 dst[2] = cpu_to_be32(block[2]); 511 dst[2] = cpu_to_be32(block[2]);
510 dst[3] = cpu_to_be32(block[3]); 512 dst[3] = cpu_to_be32(block[3]);
511} 513}
512 514
513static struct crypto_alg alg = { 515static struct crypto_alg alg = {
514 .cra_name = "cast6", 516 .cra_name = "cast6",
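
The twelve unrolled quad-round calls in cast6_encrypt() and cast6_decrypt() follow a fixed shape: six forward Q rounds, then six reverse QBAR rounds (decryption walks the same schedule backwards). A minimal sketch, not part of the patch, of the same schedule written as loops, assuming the Q/QBAR helpers and struct cast6_ctx defined above; the function name is hypothetical:

static void cast6_encrypt_rounds(struct cast6_ctx *c, u32 *block)
{
	int i;

	for (i = 0; i < 6; i++)		/* forward quad rounds */
		Q(block, c->Kr[i], c->Km[i]);
	for (i = 6; i < 12; i++)	/* reverse quad rounds */
		QBAR(block, c->Kr[i], c->Km[i]);
}

The unrolled form kept by the patch presumably trades code size for avoiding loop overhead on the hot path.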
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 9a1a7316eeac..39541e0e537d 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -8,7 +8,7 @@
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option) 11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version. 12 * any later version.
13 * 13 *
14 */ 14 */
diff --git a/crypto/compress.c b/crypto/compress.c
index 1ee357085d3a..c33f0763a956 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free 9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option) 10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version. 11 * any later version.
12 * 12 *
13 */ 13 */
@@ -39,7 +39,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
39 39
40 ops->cot_compress = crypto_compress; 40 ops->cot_compress = crypto_compress;
41 ops->cot_decompress = crypto_decompress; 41 ops->cot_decompress = crypto_decompress;
42 42
43 return 0; 43 return 0;
44} 44}
45 45
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 973bc2cfab2e..de9e55c29794 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * CRC32C chksum 4 * CRC32C chksum
@@ -30,7 +30,7 @@
30 * 30 *
31 * This program is free software; you can redistribute it and/or modify it 31 * This program is free software; you can redistribute it and/or modify it
32 * under the terms of the GNU General Public License as published by the Free 32 * under the terms of the GNU General Public License as published by the Free
33 * Software Foundation; either version 2 of the License, or (at your option) 33 * Software Foundation; either version 2 of the License, or (at your option)
34 * any later version. 34 * any later version.
35 * 35 *
36 */ 36 */
@@ -142,7 +142,7 @@ static u32 crc32c(u32 crc, const u8 *data, unsigned int length)
142} 142}
143 143
144/* 144/*
145 * Steps through buffer one byte at a time, calculates reflected 145 * Steps through buffer one byte at a time, calculates reflected
146 * crc using table. 146 * crc using table.
147 */ 147 */
148 148
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 35335825a4ef..ef71318976c7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -31,7 +31,7 @@ struct cryptd_cpu_queue {
31}; 31};
32 32
33struct cryptd_queue { 33struct cryptd_queue {
34 struct cryptd_cpu_queue *cpu_queue; 34 struct cryptd_cpu_queue __percpu *cpu_queue;
35}; 35};
36 36
37struct cryptd_instance_ctx { 37struct cryptd_instance_ctx {
@@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
99 struct cryptd_cpu_queue *cpu_queue; 99 struct cryptd_cpu_queue *cpu_queue;
100 100
101 cpu = get_cpu(); 101 cpu = get_cpu();
102 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); 102 cpu_queue = this_cpu_ptr(queue->cpu_queue);
103 err = crypto_enqueue_request(&cpu_queue->queue, request); 103 err = crypto_enqueue_request(&cpu_queue->queue, request);
104 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 104 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
105 put_cpu(); 105 put_cpu();
@@ -711,6 +711,13 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
711} 711}
712EXPORT_SYMBOL_GPL(cryptd_ahash_child); 712EXPORT_SYMBOL_GPL(cryptd_ahash_child);
713 713
714struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
715{
716 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
717 return &rctx->desc;
718}
719EXPORT_SYMBOL_GPL(cryptd_shash_desc);
720
714void cryptd_free_ahash(struct cryptd_ahash *tfm) 721void cryptd_free_ahash(struct cryptd_ahash *tfm)
715{ 722{
716 crypto_free_ahash(&tfm->base); 723 crypto_free_ahash(&tfm->base);
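
The new cryptd_shash_desc() helper exposes the shash_desc embedded in a cryptd hash request, so a caller that also holds the child transform (via cryptd_ahash_child()) can finish an operation synchronously. A minimal sketch assuming both helpers from this file; the wrapper function itself is hypothetical:

static int cryptd_hash_finish_sync(struct cryptd_ahash *tfm,
				   struct ahash_request *req, u8 *out)
{
	struct shash_desc *desc = cryptd_shash_desc(req);

	/* Point the embedded descriptor at the underlying shash. */
	desc->tfm = cryptd_ahash_child(tfm);
	desc->flags = req->base.flags;

	return crypto_shash_final(desc, out);
}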
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index cb71c9122bc0..07a8a96d46fc 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -1,11 +1,11 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * Null algorithms, aka Much Ado About Nothing. 4 * Null algorithms, aka Much Ado About Nothing.
5 * 5 *
6 * These are needed for IPsec, and may be useful in general for 6 * These are needed for IPsec, and may be useful in general for
7 * testing & debugging. 7 * testing & debugging.
8 * 8 *
9 * The null cipher is compliant with RFC2410. 9 * The null cipher is compliant with RFC2410.
10 * 10 *
11 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 11 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -163,7 +163,7 @@ MODULE_ALIAS("cipher_null");
163static int __init crypto_null_mod_init(void) 163static int __init crypto_null_mod_init(void)
164{ 164{
165 int ret = 0; 165 int ret = 0;
166 166
167 ret = crypto_register_alg(&cipher_null); 167 ret = crypto_register_alg(&cipher_null);
168 if (ret < 0) 168 if (ret < 0)
169 goto out; 169 goto out;
@@ -180,7 +180,7 @@ static int __init crypto_null_mod_init(void)
180 if (ret < 0) 180 if (ret < 0)
181 goto out_unregister_digest; 181 goto out_unregister_digest;
182 182
183out: 183out:
184 return ret; 184 return ret;
185 185
186out_unregister_digest: 186out_unregister_digest:
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 6c3bfabb9d1d..4ca7222cfeb6 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
185 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, 185 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
186 CRYPTO_ALG_TYPE_MASK); 186 CRYPTO_ALG_TYPE_MASK);
187 if (IS_ERR(alg)) 187 if (IS_ERR(alg))
188 return ERR_PTR(PTR_ERR(alg)); 188 return ERR_CAST(alg);
189 189
190 /* Block size must be >= 4 bytes. */ 190 /* Block size must be >= 4 bytes. */
191 err = -EINVAL; 191 err = -EINVAL;
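
ERR_CAST(alg) is shorthand for ERR_PTR(PTR_ERR(alg)): it forwards an encoded error pointer while changing its type in one step. A minimal sketch of the idiom, assuming <linux/err.h>; struct foo and foo_from_alg() are hypothetical:

struct foo {
	struct crypto_alg alg;
};

static struct foo *foo_from_alg(struct crypto_alg *alg)
{
	if (IS_ERR(alg))
		return ERR_CAST(alg);	/* same errno payload, new pointer type */

	return container_of(alg, struct foo, alg);
}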
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 9128da44e953..463dc859aa05 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -1,14 +1,14 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * Deflate algorithm (RFC 1951), implemented here primarily for use 4 * Deflate algorithm (RFC 1951), implemented here primarily for use
5 * by IPCOMP (RFC 3173 & RFC 2394). 5 * by IPCOMP (RFC 3173 & RFC 2394).
6 * 6 *
7 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> 7 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option) 11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version. 12 * any later version.
13 * 13 *
14 * FIXME: deflate transforms will require up to a total of about 436k of kernel 14 * FIXME: deflate transforms will require up to a total of about 436k of kernel
@@ -49,7 +49,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
49 struct z_stream_s *stream = &ctx->comp_stream; 49 struct z_stream_s *stream = &ctx->comp_stream;
50 50
51 stream->workspace = vmalloc(zlib_deflate_workspacesize()); 51 stream->workspace = vmalloc(zlib_deflate_workspacesize());
52 if (!stream->workspace ) { 52 if (!stream->workspace) {
53 ret = -ENOMEM; 53 ret = -ENOMEM;
54 goto out; 54 goto out;
55 } 55 }
@@ -61,7 +61,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
61 ret = -EINVAL; 61 ret = -EINVAL;
62 goto out_free; 62 goto out_free;
63 } 63 }
64out: 64out:
65 return ret; 65 return ret;
66out_free: 66out_free:
67 vfree(stream->workspace); 67 vfree(stream->workspace);
@@ -74,7 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
74 struct z_stream_s *stream = &ctx->decomp_stream; 74 struct z_stream_s *stream = &ctx->decomp_stream;
75 75
76 stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); 76 stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
77 if (!stream->workspace ) { 77 if (!stream->workspace) {
78 ret = -ENOMEM; 78 ret = -ENOMEM;
79 goto out; 79 goto out;
80 } 80 }
@@ -106,7 +106,7 @@ static int deflate_init(struct crypto_tfm *tfm)
106{ 106{
107 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); 107 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
108 int ret; 108 int ret;
109 109
110 ret = deflate_comp_init(ctx); 110 ret = deflate_comp_init(ctx);
111 if (ret) 111 if (ret)
112 goto out; 112 goto out;
@@ -153,11 +153,11 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
153out: 153out:
154 return ret; 154 return ret;
155} 155}
156 156
157static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, 157static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
158 unsigned int slen, u8 *dst, unsigned int *dlen) 158 unsigned int slen, u8 *dst, unsigned int *dlen)
159{ 159{
160 160
161 int ret = 0; 161 int ret = 0;
162 struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); 162 struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
163 struct z_stream_s *stream = &dctx->decomp_stream; 163 struct z_stream_s *stream = &dctx->decomp_stream;
@@ -182,7 +182,7 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
182 if (ret == Z_OK && !stream->avail_in && stream->avail_out) { 182 if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
183 u8 zerostuff = 0; 183 u8 zerostuff = 0;
184 stream->next_in = &zerostuff; 184 stream->next_in = &zerostuff;
185 stream->avail_in = 1; 185 stream->avail_in = 1;
186 ret = zlib_inflate(stream, Z_FINISH); 186 ret = zlib_inflate(stream, Z_FINISH);
187 } 187 }
188 if (ret != Z_STREAM_END) { 188 if (ret != Z_STREAM_END) {
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 5bd3ee345a64..249f903cc453 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
869 869
870 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 870 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
871 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && 871 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
872 (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) 872 (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
873 {
874 *flags |= CRYPTO_TFM_RES_WEAK_KEY; 873 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
875 return -EINVAL; 874 return -EINVAL;
876 } 875 }
diff --git a/crypto/digest.c b/crypto/digest.c
deleted file mode 100644
index 5d3f1303da98..000000000000
--- a/crypto/digest.c
+++ /dev/null
@@ -1,240 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Digest operations.
5 *
6 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 */
14
15#include <crypto/internal/hash.h>
16#include <crypto/scatterwalk.h>
17#include <linux/mm.h>
18#include <linux/errno.h>
19#include <linux/hardirq.h>
20#include <linux/highmem.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/scatterlist.h>
24
25#include "internal.h"
26
27static int init(struct hash_desc *desc)
28{
29 struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
30
31 tfm->__crt_alg->cra_digest.dia_init(tfm);
32 return 0;
33}
34
35static int update2(struct hash_desc *desc,
36 struct scatterlist *sg, unsigned int nbytes)
37{
38 struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
39 unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
40
41 if (!nbytes)
42 return 0;
43
44 for (;;) {
45 struct page *pg = sg_page(sg);
46 unsigned int offset = sg->offset;
47 unsigned int l = sg->length;
48
49 if (unlikely(l > nbytes))
50 l = nbytes;
51 nbytes -= l;
52
53 do {
54 unsigned int bytes_from_page = min(l, ((unsigned int)
55 (PAGE_SIZE)) -
56 offset);
57 char *src = crypto_kmap(pg, 0);
58 char *p = src + offset;
59
60 if (unlikely(offset & alignmask)) {
61 unsigned int bytes =
62 alignmask + 1 - (offset & alignmask);
63 bytes = min(bytes, bytes_from_page);
64 tfm->__crt_alg->cra_digest.dia_update(tfm, p,
65 bytes);
66 p += bytes;
67 bytes_from_page -= bytes;
68 l -= bytes;
69 }
70 tfm->__crt_alg->cra_digest.dia_update(tfm, p,
71 bytes_from_page);
72 crypto_kunmap(src, 0);
73 crypto_yield(desc->flags);
74 offset = 0;
75 pg++;
76 l -= bytes_from_page;
77 } while (l > 0);
78
79 if (!nbytes)
80 break;
81 sg = scatterwalk_sg_next(sg);
82 }
83
84 return 0;
85}
86
87static int update(struct hash_desc *desc,
88 struct scatterlist *sg, unsigned int nbytes)
89{
90 if (WARN_ON_ONCE(in_irq()))
91 return -EDEADLK;
92 return update2(desc, sg, nbytes);
93}
94
95static int final(struct hash_desc *desc, u8 *out)
96{
97 struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
98 unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
99 struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
100
101 if (unlikely((unsigned long)out & alignmask)) {
102 unsigned long align = alignmask + 1;
103 unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
104 u8 *dst = (u8 *)ALIGN(addr, align) +
105 ALIGN(tfm->__crt_alg->cra_ctxsize, align);
106
107 digest->dia_final(tfm, dst);
108 memcpy(out, dst, digest->dia_digestsize);
109 } else
110 digest->dia_final(tfm, out);
111
112 return 0;
113}
114
115static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
116{
117 crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
118 return -ENOSYS;
119}
120
121static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
122{
123 struct crypto_tfm *tfm = crypto_hash_tfm(hash);
124
125 crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
126 return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
127}
128
129static int digest(struct hash_desc *desc,
130 struct scatterlist *sg, unsigned int nbytes, u8 *out)
131{
132 if (WARN_ON_ONCE(in_irq()))
133 return -EDEADLK;
134
135 init(desc);
136 update2(desc, sg, nbytes);
137 return final(desc, out);
138}
139
140int crypto_init_digest_ops(struct crypto_tfm *tfm)
141{
142 struct hash_tfm *ops = &tfm->crt_hash;
143 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
144
145 if (dalg->dia_digestsize > PAGE_SIZE / 8)
146 return -EINVAL;
147
148 ops->init = init;
149 ops->update = update;
150 ops->final = final;
151 ops->digest = digest;
152 ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
153 ops->digestsize = dalg->dia_digestsize;
154
155 return 0;
156}
157
158void crypto_exit_digest_ops(struct crypto_tfm *tfm)
159{
160}
161
162static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
163 unsigned int keylen)
164{
165 crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
166 return -ENOSYS;
167}
168
169static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
170 unsigned int keylen)
171{
172 struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
173 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
174
175 crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
176 return dalg->dia_setkey(tfm, key, keylen);
177}
178
179static int digest_async_init(struct ahash_request *req)
180{
181 struct crypto_tfm *tfm = req->base.tfm;
182 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
183
184 dalg->dia_init(tfm);
185 return 0;
186}
187
188static int digest_async_update(struct ahash_request *req)
189{
190 struct crypto_tfm *tfm = req->base.tfm;
191 struct hash_desc desc = {
192 .tfm = __crypto_hash_cast(tfm),
193 .flags = req->base.flags,
194 };
195
196 update(&desc, req->src, req->nbytes);
197 return 0;
198}
199
200static int digest_async_final(struct ahash_request *req)
201{
202 struct crypto_tfm *tfm = req->base.tfm;
203 struct hash_desc desc = {
204 .tfm = __crypto_hash_cast(tfm),
205 .flags = req->base.flags,
206 };
207
208 final(&desc, req->result);
209 return 0;
210}
211
212static int digest_async_digest(struct ahash_request *req)
213{
214 struct crypto_tfm *tfm = req->base.tfm;
215 struct hash_desc desc = {
216 .tfm = __crypto_hash_cast(tfm),
217 .flags = req->base.flags,
218 };
219
220 return digest(&desc, req->src, req->nbytes, req->result);
221}
222
223int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
224{
225 struct ahash_tfm *crt = &tfm->crt_ahash;
226 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
227
228 if (dalg->dia_digestsize > PAGE_SIZE / 8)
229 return -EINVAL;
230
231 crt->init = digest_async_init;
232 crt->update = digest_async_update;
233 crt->final = digest_async_final;
234 crt->digest = digest_async_digest;
235 crt->setkey = dalg->dia_setkey ? digest_async_setkey :
236 digest_async_nosetkey;
237 crt->digestsize = dalg->dia_digestsize;
238
239 return 0;
240}
diff --git a/crypto/ecb.c b/crypto/ecb.c
index a46838e98a71..935cfef4aa84 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -55,7 +55,7 @@ static int crypto_ecb_crypt(struct blkcipher_desc *desc,
55 55
56 do { 56 do {
57 fn(crypto_cipher_tfm(tfm), wdst, wsrc); 57 fn(crypto_cipher_tfm(tfm), wdst, wsrc);
58 58
59 wsrc += bsize; 59 wsrc += bsize;
60 wdst += bsize; 60 wdst += bsize;
61 } while ((nbytes -= bsize) >= bsize); 61 } while ((nbytes -= bsize) >= bsize);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index b82d61f4e26c..c33107e340b6 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -60,13 +60,13 @@ do { \
60 u32 t = lo & ((1 << n) - 1); \ 60 u32 t = lo & ((1 << n) - 1); \
61 lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \ 61 lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \
62 hi = (hi >> n) | (t << (24-n)); \ 62 hi = (hi >> n) | (t << (24-n)); \
63} while(0) 63} while (0)
64 64
65/* Rotate right one 64 bit number as a 56 bit number */ 65/* Rotate right one 64 bit number as a 56 bit number */
66#define ror56_64(k, n) \ 66#define ror56_64(k, n) \
67do { \ 67do { \
68 k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ 68 k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
69} while(0) 69} while (0)
70 70
71/* 71/*
72 * Sboxes for Feistel network derived from 72 * Sboxes for Feistel network derived from
@@ -228,7 +228,7 @@ do { \
228 union lc4 { __be32 l; u8 c[4]; } u; \ 228 union lc4 { __be32 l; u8 c[4]; } u; \
229 u.l = sched ^ R; \ 229 u.l = sched ^ R; \
230 L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \ 230 L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
231} while(0) 231} while (0)
232 232
233/* 233/*
234 * encryptor 234 * encryptor
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 5fc3292483ef..2f5fbba6576c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -37,10 +37,23 @@ struct crypto_rfc4106_ctx {
37 u8 nonce[4]; 37 u8 nonce[4];
38}; 38};
39 39
40struct crypto_rfc4543_ctx {
41 struct crypto_aead *child;
42 u8 nonce[4];
43};
44
45struct crypto_rfc4543_req_ctx {
46 u8 auth_tag[16];
47 struct scatterlist cipher[1];
48 struct scatterlist payload[2];
49 struct scatterlist assoc[2];
50 struct aead_request subreq;
51};
52
40struct crypto_gcm_ghash_ctx { 53struct crypto_gcm_ghash_ctx {
41 unsigned int cryptlen; 54 unsigned int cryptlen;
42 struct scatterlist *src; 55 struct scatterlist *src;
43 crypto_completion_t complete; 56 void (*complete)(struct aead_request *req, int err);
44}; 57};
45 58
46struct crypto_gcm_req_priv_ctx { 59struct crypto_gcm_req_priv_ctx {
@@ -267,23 +280,26 @@ static int gcm_hash_final(struct aead_request *req,
267 return crypto_ahash_final(ahreq); 280 return crypto_ahash_final(ahreq);
268} 281}
269 282
270static void gcm_hash_final_done(struct crypto_async_request *areq, 283static void __gcm_hash_final_done(struct aead_request *req, int err)
271 int err)
272{ 284{
273 struct aead_request *req = areq->data;
274 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 285 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
275 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 286 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
276 287
277 if (!err) 288 if (!err)
278 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); 289 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
279 290
280 gctx->complete(areq, err); 291 gctx->complete(req, err);
281} 292}
282 293
283static void gcm_hash_len_done(struct crypto_async_request *areq, 294static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
284 int err)
285{ 295{
286 struct aead_request *req = areq->data; 296 struct aead_request *req = areq->data;
297
298 __gcm_hash_final_done(req, err);
299}
300
301static void __gcm_hash_len_done(struct aead_request *req, int err)
302{
287 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 303 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
288 304
289 if (!err) { 305 if (!err) {
@@ -292,13 +308,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
292 return; 308 return;
293 } 309 }
294 310
295 gcm_hash_final_done(areq, err); 311 __gcm_hash_final_done(req, err);
296} 312}
297 313
298static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, 314static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
299 int err)
300{ 315{
301 struct aead_request *req = areq->data; 316 struct aead_request *req = areq->data;
317
318 __gcm_hash_len_done(req, err);
319}
320
321static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
322{
302 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 323 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
303 324
304 if (!err) { 325 if (!err) {
@@ -307,13 +328,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
307 return; 328 return;
308 } 329 }
309 330
310 gcm_hash_len_done(areq, err); 331 __gcm_hash_len_done(req, err);
311} 332}
312 333
313static void gcm_hash_crypt_done(struct crypto_async_request *areq, 334static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
314 int err) 335 int err)
315{ 336{
316 struct aead_request *req = areq->data; 337 struct aead_request *req = areq->data;
338
339 __gcm_hash_crypt_remain_done(req, err);
340}
341
342static void __gcm_hash_crypt_done(struct aead_request *req, int err)
343{
317 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 344 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
318 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 345 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
319 unsigned int remain; 346 unsigned int remain;
@@ -327,13 +354,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
327 return; 354 return;
328 } 355 }
329 356
330 gcm_hash_crypt_remain_done(areq, err); 357 __gcm_hash_crypt_remain_done(req, err);
331} 358}
332 359
333static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, 360static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
334 int err)
335{ 361{
336 struct aead_request *req = areq->data; 362 struct aead_request *req = areq->data;
363
364 __gcm_hash_crypt_done(req, err);
365}
366
367static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
368{
337 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 369 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
338 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 370 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
339 crypto_completion_t complete; 371 crypto_completion_t complete;
@@ -350,15 +382,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
350 } 382 }
351 383
352 if (remain) 384 if (remain)
353 gcm_hash_crypt_done(areq, err); 385 __gcm_hash_crypt_done(req, err);
354 else 386 else
355 gcm_hash_crypt_remain_done(areq, err); 387 __gcm_hash_crypt_remain_done(req, err);
356} 388}
357 389
358static void gcm_hash_assoc_done(struct crypto_async_request *areq, 390static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
359 int err) 391 int err)
360{ 392{
361 struct aead_request *req = areq->data; 393 struct aead_request *req = areq->data;
394
395 __gcm_hash_assoc_remain_done(req, err);
396}
397
398static void __gcm_hash_assoc_done(struct aead_request *req, int err)
399{
362 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 400 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
363 unsigned int remain; 401 unsigned int remain;
364 402
@@ -371,13 +409,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
371 return; 409 return;
372 } 410 }
373 411
374 gcm_hash_assoc_remain_done(areq, err); 412 __gcm_hash_assoc_remain_done(req, err);
375} 413}
376 414
377static void gcm_hash_init_done(struct crypto_async_request *areq, 415static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
378 int err)
379{ 416{
380 struct aead_request *req = areq->data; 417 struct aead_request *req = areq->data;
418
419 __gcm_hash_assoc_done(req, err);
420}
421
422static void __gcm_hash_init_done(struct aead_request *req, int err)
423{
381 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 424 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
382 crypto_completion_t complete; 425 crypto_completion_t complete;
383 unsigned int remain = 0; 426 unsigned int remain = 0;
@@ -393,9 +436,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
393 } 436 }
394 437
395 if (remain) 438 if (remain)
396 gcm_hash_assoc_done(areq, err); 439 __gcm_hash_assoc_done(req, err);
397 else 440 else
398 gcm_hash_assoc_remain_done(areq, err); 441 __gcm_hash_assoc_remain_done(req, err);
442}
443
444static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
445{
446 struct aead_request *req = areq->data;
447
448 __gcm_hash_init_done(req, err);
399} 449}
400 450
401static int gcm_hash(struct aead_request *req, 451static int gcm_hash(struct aead_request *req,
@@ -457,10 +507,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
457 crypto_aead_authsize(aead), 1); 507 crypto_aead_authsize(aead), 1);
458} 508}
459 509
460static void gcm_enc_hash_done(struct crypto_async_request *areq, 510static void gcm_enc_hash_done(struct aead_request *req, int err)
461 int err)
462{ 511{
463 struct aead_request *req = areq->data;
464 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 512 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
465 513
466 if (!err) 514 if (!err)
@@ -469,8 +517,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
469 aead_request_complete(req, err); 517 aead_request_complete(req, err);
470} 518}
471 519
472static void gcm_encrypt_done(struct crypto_async_request *areq, 520static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
473 int err)
474{ 521{
475 struct aead_request *req = areq->data; 522 struct aead_request *req = areq->data;
476 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 523 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +526,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
479 err = gcm_hash(req, pctx); 526 err = gcm_hash(req, pctx);
480 if (err == -EINPROGRESS || err == -EBUSY) 527 if (err == -EINPROGRESS || err == -EBUSY)
481 return; 528 return;
529 else if (!err) {
530 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
531 gcm_enc_copy_hash(req, pctx);
532 }
482 } 533 }
483 534
484 gcm_enc_hash_done(areq, err); 535 aead_request_complete(req, err);
485} 536}
486 537
487static int crypto_gcm_encrypt(struct aead_request *req) 538static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +589,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
538 aead_request_complete(req, err); 589 aead_request_complete(req, err);
539} 590}
540 591
541static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) 592static void gcm_dec_hash_done(struct aead_request *req, int err)
542{ 593{
543 struct aead_request *req = areq->data;
544 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 594 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
545 struct ablkcipher_request *abreq = &pctx->u.abreq; 595 struct ablkcipher_request *abreq = &pctx->u.abreq;
546 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 596 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +602,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
552 err = crypto_ablkcipher_decrypt(abreq); 602 err = crypto_ablkcipher_decrypt(abreq);
553 if (err == -EINPROGRESS || err == -EBUSY) 603 if (err == -EINPROGRESS || err == -EBUSY)
554 return; 604 return;
605 else if (!err)
606 err = crypto_gcm_verify(req, pctx);
555 } 607 }
556 608
557 gcm_decrypt_done(areq, err); 609 aead_request_complete(req, err);
558} 610}
559 611
560static int crypto_gcm_decrypt(struct aead_request *req) 612static int crypto_gcm_decrypt(struct aead_request *req)
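
The transformation repeated throughout the hunks above splits each completion handler in two: an inner __gcm_*_done() that takes the aead_request directly, which the synchronous path can call in a plain chain, and a thin crypto_completion_t wrapper that unpacks areq->data for the asynchronous path, so a synchronous continuation no longer has to reuse a subordinate crypto_async_request. A minimal sketch of the shape, with hypothetical names:

/* Inner handler: usable from both the sync and async paths. */
static void __step_done(struct aead_request *req, int err)
{
	if (!err) {
		err = next_step(req);		/* hypothetical next stage */
		if (err == -EINPROGRESS || err == -EBUSY)
			return;			/* async path will complete later */
	}

	aead_request_complete(req, err);
}

/* Thin async wrapper with the crypto_completion_t signature. */
static void step_done(struct crypto_async_request *areq, int err)
{
	__step_done(areq->data, err);
}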
@@ -1008,6 +1060,272 @@ static struct crypto_template crypto_rfc4106_tmpl = {
1008 .module = THIS_MODULE, 1060 .module = THIS_MODULE,
1009}; 1061};
1010 1062
1063static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
1064 struct aead_request *req)
1065{
1066 unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
1067
1068 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
1069}
1070
1071static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
1072 unsigned int keylen)
1073{
1074 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
1075 struct crypto_aead *child = ctx->child;
1076 int err;
1077
1078 if (keylen < 4)
1079 return -EINVAL;
1080
1081 keylen -= 4;
1082 memcpy(ctx->nonce, key + keylen, 4);
1083
1084 crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
1085 crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
1086 CRYPTO_TFM_REQ_MASK);
1087 err = crypto_aead_setkey(child, key, keylen);
1088 crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
1089 CRYPTO_TFM_RES_MASK);
1090
1091 return err;
1092}
1093
1094static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
1095 unsigned int authsize)
1096{
1097 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
1098
1099 if (authsize != 16)
1100 return -EINVAL;
1101
1102 return crypto_aead_setauthsize(ctx->child, authsize);
1103}
1104
1105/* this is the same as crypto_authenc_chain */
1106static void crypto_rfc4543_chain(struct scatterlist *head,
1107 struct scatterlist *sg, int chain)
1108{
1109 if (chain) {
1110 head->length += sg->length;
1111 sg = scatterwalk_sg_next(sg);
1112 }
1113
1114 if (sg)
1115 scatterwalk_sg_chain(head, 2, sg);
1116 else
1117 sg_mark_end(head);
1118}
1119
1120static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
1121 int enc)
1122{
1123 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1124 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
1125 struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
1126 struct aead_request *subreq = &rctx->subreq;
1127 struct scatterlist *dst = req->dst;
1128 struct scatterlist *cipher = rctx->cipher;
1129 struct scatterlist *payload = rctx->payload;
1130 struct scatterlist *assoc = rctx->assoc;
1131 unsigned int authsize = crypto_aead_authsize(aead);
1132 unsigned int assoclen = req->assoclen;
1133 struct page *dstp;
1134 u8 *vdst;
1135 u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
1136 crypto_aead_alignmask(ctx->child) + 1);
1137
1138 memcpy(iv, ctx->nonce, 4);
1139 memcpy(iv + 4, req->iv, 8);
1140
1141 /* construct cipher/plaintext */
1142 if (enc)
1143 memset(rctx->auth_tag, 0, authsize);
1144 else
1145 scatterwalk_map_and_copy(rctx->auth_tag, dst,
1146 req->cryptlen - authsize,
1147 authsize, 0);
1148
1149 sg_init_one(cipher, rctx->auth_tag, authsize);
1150
1151 /* construct the aad */
1152 dstp = sg_page(dst);
1153 vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
1154
1155 sg_init_table(payload, 2);
1156 sg_set_buf(payload, req->iv, 8);
1157 crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
1158 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
1159
1160 sg_init_table(assoc, 2);
1161 sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
1162 req->assoc->offset);
1163 crypto_rfc4543_chain(assoc, payload, 0);
1164
1165 aead_request_set_tfm(subreq, ctx->child);
1166 aead_request_set_callback(subreq, req->base.flags, req->base.complete,
1167 req->base.data);
1168 aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
1169 aead_request_set_assoc(subreq, assoc, assoclen);
1170
1171 return subreq;
1172}
1173
1174static int crypto_rfc4543_encrypt(struct aead_request *req)
1175{
1176 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1177 struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
1178 struct aead_request *subreq;
1179 int err;
1180
1181 subreq = crypto_rfc4543_crypt(req, 1);
1182 err = crypto_aead_encrypt(subreq);
1183 if (err)
1184 return err;
1185
1186 scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
1187 crypto_aead_authsize(aead), 1);
1188
1189 return 0;
1190}
1191
1192static int crypto_rfc4543_decrypt(struct aead_request *req)
1193{
1194 req = crypto_rfc4543_crypt(req, 0);
1195
1196 return crypto_aead_decrypt(req);
1197}
1198
1199static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
1200{
1201 struct crypto_instance *inst = (void *)tfm->__crt_alg;
1202 struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
1203 struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
1204 struct crypto_aead *aead;
1205 unsigned long align;
1206
1207 aead = crypto_spawn_aead(spawn);
1208 if (IS_ERR(aead))
1209 return PTR_ERR(aead);
1210
1211 ctx->child = aead;
1212
1213 align = crypto_aead_alignmask(aead);
1214 align &= ~(crypto_tfm_ctx_alignment() - 1);
1215 tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
1216 ALIGN(crypto_aead_reqsize(aead),
1217 crypto_tfm_ctx_alignment()) +
1218 align + 16;
1219
1220 return 0;
1221}
1222
1223static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
1224{
1225 struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
1226
1227 crypto_free_aead(ctx->child);
1228}
1229
1230static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
1231{
1232 struct crypto_attr_type *algt;
1233 struct crypto_instance *inst;
1234 struct crypto_aead_spawn *spawn;
1235 struct crypto_alg *alg;
1236 const char *ccm_name;
1237 int err;
1238
1239 algt = crypto_get_attr_type(tb);
1240 err = PTR_ERR(algt);
1241 if (IS_ERR(algt))
1242 return ERR_PTR(err);
1243
1244 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1245 return ERR_PTR(-EINVAL);
1246
1247 ccm_name = crypto_attr_alg_name(tb[1]);
1248 err = PTR_ERR(ccm_name);
1249 if (IS_ERR(ccm_name))
1250 return ERR_PTR(err);
1251
1252 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
1253 if (!inst)
1254 return ERR_PTR(-ENOMEM);
1255
1256 spawn = crypto_instance_ctx(inst);
1257 crypto_set_aead_spawn(spawn, inst);
1258 err = crypto_grab_aead(spawn, ccm_name, 0,
1259 crypto_requires_sync(algt->type, algt->mask));
1260 if (err)
1261 goto out_free_inst;
1262
1263 alg = crypto_aead_spawn_alg(spawn);
1264
1265 err = -EINVAL;
1266
1267 /* We only support 16-byte blocks. */
1268 if (alg->cra_aead.ivsize != 16)
1269 goto out_drop_alg;
1270
1271 /* Not a stream cipher? */
1272 if (alg->cra_blocksize != 1)
1273 goto out_drop_alg;
1274
1275 err = -ENAMETOOLONG;
1276 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
1277 "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
1278 snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1279 "rfc4543(%s)", alg->cra_driver_name) >=
1280 CRYPTO_MAX_ALG_NAME)
1281 goto out_drop_alg;
1282
1283 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
1284 inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
1285 inst->alg.cra_priority = alg->cra_priority;
1286 inst->alg.cra_blocksize = 1;
1287 inst->alg.cra_alignmask = alg->cra_alignmask;
1288 inst->alg.cra_type = &crypto_nivaead_type;
1289
1290 inst->alg.cra_aead.ivsize = 8;
1291 inst->alg.cra_aead.maxauthsize = 16;
1292
1293 inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
1294
1295 inst->alg.cra_init = crypto_rfc4543_init_tfm;
1296 inst->alg.cra_exit = crypto_rfc4543_exit_tfm;
1297
1298 inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
1299 inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
1300 inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
1301 inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;
1302
1303 inst->alg.cra_aead.geniv = "seqiv";
1304
1305out:
1306 return inst;
1307
1308out_drop_alg:
1309 crypto_drop_aead(spawn);
1310out_free_inst:
1311 kfree(inst);
1312 inst = ERR_PTR(err);
1313 goto out;
1314}
1315
1316static void crypto_rfc4543_free(struct crypto_instance *inst)
1317{
1318 crypto_drop_spawn(crypto_instance_ctx(inst));
1319 kfree(inst);
1320}
1321
1322static struct crypto_template crypto_rfc4543_tmpl = {
1323 .name = "rfc4543",
1324 .alloc = crypto_rfc4543_alloc,
1325 .free = crypto_rfc4543_free,
1326 .module = THIS_MODULE,
1327};
1328
1011static int __init crypto_gcm_module_init(void) 1329static int __init crypto_gcm_module_init(void)
1012{ 1330{
1013 int err; 1331 int err;
@@ -1028,8 +1346,14 @@ static int __init crypto_gcm_module_init(void)
1028 if (err) 1346 if (err)
1029 goto out_undo_gcm; 1347 goto out_undo_gcm;
1030 1348
1349 err = crypto_register_template(&crypto_rfc4543_tmpl);
1350 if (err)
1351 goto out_undo_rfc4106;
1352
1031 return 0; 1353 return 0;
1032 1354
1355out_undo_rfc4106:
1356 crypto_unregister_template(&crypto_rfc4106_tmpl);
1033out_undo_gcm: 1357out_undo_gcm:
1034 crypto_unregister_template(&crypto_gcm_tmpl); 1358 crypto_unregister_template(&crypto_gcm_tmpl);
1035out_undo_base: 1359out_undo_base:
@@ -1042,6 +1366,7 @@ out:
1042static void __exit crypto_gcm_module_exit(void) 1366static void __exit crypto_gcm_module_exit(void)
1043{ 1367{
1044 kfree(gcm_zeroes); 1368 kfree(gcm_zeroes);
1369 crypto_unregister_template(&crypto_rfc4543_tmpl);
1045 crypto_unregister_template(&crypto_rfc4106_tmpl); 1370 crypto_unregister_template(&crypto_rfc4106_tmpl);
1046 crypto_unregister_template(&crypto_gcm_tmpl); 1371 crypto_unregister_template(&crypto_gcm_tmpl);
1047 crypto_unregister_template(&crypto_gcm_base_tmpl); 1372 crypto_unregister_template(&crypto_gcm_base_tmpl);
@@ -1055,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode");
1055MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); 1380MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
1056MODULE_ALIAS("gcm_base"); 1381MODULE_ALIAS("gcm_base");
1057MODULE_ALIAS("rfc4106"); 1382MODULE_ALIAS("rfc4106");
1383MODULE_ALIAS("rfc4543");
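
The new "rfc4543" template implements GMAC (RFC 4543): the IV and the would-be plaintext are all fed to GCM as authenticated data, so the output carries a 16-byte tag and nothing is encrypted. A minimal user-side sketch, assuming this patch is applied; per crypto_rfc4543_setkey() above, the final four key bytes are the nonce/salt, and per crypto_rfc4543_setauthsize() only a 16-byte tag is accepted. The key material and function name here are illustrative:

static struct crypto_aead *rfc4543_alloc_demo(void)
{
	struct crypto_aead *tfm;
	u8 key[20];	/* 16-byte AES key followed by 4-byte nonce */
	int err;

	tfm = crypto_alloc_aead("rfc4543(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	get_random_bytes(key, sizeof(key));	/* illustrative key only */
	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err) {
		crypto_free_aead(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}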
diff --git a/crypto/hash.c b/crypto/hash.c
deleted file mode 100644
index cb86b19fd105..000000000000
--- a/crypto/hash.c
+++ /dev/null
@@ -1,183 +0,0 @@
1/*
2 * Cryptographic Hash operations.
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
12#include <crypto/internal/hash.h>
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/seq_file.h>
18
19#include "internal.h"
20
21static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
22 u32 mask)
23{
24 return alg->cra_ctxsize;
25}
26
27static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key,
28 unsigned int keylen)
29{
30 struct crypto_tfm *tfm = crypto_hash_tfm(crt);
31 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
32 unsigned long alignmask = crypto_hash_alignmask(crt);
33 int ret;
34 u8 *buffer, *alignbuffer;
35 unsigned long absize;
36
37 absize = keylen + alignmask;
38 buffer = kmalloc(absize, GFP_ATOMIC);
39 if (!buffer)
40 return -ENOMEM;
41
42 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
43 memcpy(alignbuffer, key, keylen);
44 ret = alg->setkey(crt, alignbuffer, keylen);
45 memset(alignbuffer, 0, keylen);
46 kfree(buffer);
47 return ret;
48}
49
50static int hash_setkey(struct crypto_hash *crt, const u8 *key,
51 unsigned int keylen)
52{
53 struct crypto_tfm *tfm = crypto_hash_tfm(crt);
54 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
55 unsigned long alignmask = crypto_hash_alignmask(crt);
56
57 if ((unsigned long)key & alignmask)
58 return hash_setkey_unaligned(crt, key, keylen);
59
60 return alg->setkey(crt, key, keylen);
61}
62
63static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
64 unsigned int keylen)
65{
66 struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
67 struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
68 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
69
70 return alg->setkey(tfm_hash, key, keylen);
71}
72
73static int hash_async_init(struct ahash_request *req)
74{
75 struct crypto_tfm *tfm = req->base.tfm;
76 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
77 struct hash_desc desc = {
78 .tfm = __crypto_hash_cast(tfm),
79 .flags = req->base.flags,
80 };
81
82 return alg->init(&desc);
83}
84
85static int hash_async_update(struct ahash_request *req)
86{
87 struct crypto_tfm *tfm = req->base.tfm;
88 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
89 struct hash_desc desc = {
90 .tfm = __crypto_hash_cast(tfm),
91 .flags = req->base.flags,
92 };
93
94 return alg->update(&desc, req->src, req->nbytes);
95}
96
97static int hash_async_final(struct ahash_request *req)
98{
99 struct crypto_tfm *tfm = req->base.tfm;
100 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
101 struct hash_desc desc = {
102 .tfm = __crypto_hash_cast(tfm),
103 .flags = req->base.flags,
104 };
105
106 return alg->final(&desc, req->result);
107}
108
109static int hash_async_digest(struct ahash_request *req)
110{
111 struct crypto_tfm *tfm = req->base.tfm;
112 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
113 struct hash_desc desc = {
114 .tfm = __crypto_hash_cast(tfm),
115 .flags = req->base.flags,
116 };
117
118 return alg->digest(&desc, req->src, req->nbytes, req->result);
119}
120
121static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
122{
123 struct ahash_tfm *crt = &tfm->crt_ahash;
124 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
125
126 crt->init = hash_async_init;
127 crt->update = hash_async_update;
128 crt->final = hash_async_final;
129 crt->digest = hash_async_digest;
130 crt->setkey = hash_async_setkey;
131 crt->digestsize = alg->digestsize;
132
133 return 0;
134}
135
136static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
137{
138 struct hash_tfm *crt = &tfm->crt_hash;
139 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
140
141 crt->init = alg->init;
142 crt->update = alg->update;
143 crt->final = alg->final;
144 crt->digest = alg->digest;
145 crt->setkey = hash_setkey;
146 crt->digestsize = alg->digestsize;
147
148 return 0;
149}
150
151static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
152{
153 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
154
155 if (alg->digestsize > PAGE_SIZE / 8)
156 return -EINVAL;
157
158 if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
159 return crypto_init_hash_ops_async(tfm);
160 else
161 return crypto_init_hash_ops_sync(tfm);
162}
163
164static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
165 __attribute__ ((unused));
166static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
167{
168 seq_printf(m, "type : hash\n");
169 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
170 seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
171}
172
173const struct crypto_type crypto_hash_type = {
174 .ctxsize = crypto_hash_ctxsize,
175 .init = crypto_init_hash_ops,
176#ifdef CONFIG_PROC_FS
177 .show = crypto_hash_show,
178#endif
179};
180EXPORT_SYMBOL_GPL(crypto_hash_type);
181
182MODULE_LICENSE("GPL");
183MODULE_DESCRIPTION("Generic cryptographic hash type");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 15c2eb534541..8d9544cf8169 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -23,7 +23,6 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/slab.h>
27#include <linux/string.h> 26#include <linux/string.h>
28 27
29struct hmac_ctx { 28struct hmac_ctx {
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d226362e594..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free 8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option) 9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version. 10 * any later version.
11 * 11 *
12 */ 12 */
diff --git a/crypto/md5.c b/crypto/md5.c
index 83eb52961750..30efc7dad891 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -16,17 +16,13 @@
16 * 16 *
17 */ 17 */
18#include <crypto/internal/hash.h> 18#include <crypto/internal/hash.h>
19#include <crypto/md5.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/string.h> 22#include <linux/string.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <asm/byteorder.h> 24#include <asm/byteorder.h>
24 25
25#define MD5_DIGEST_SIZE 16
26#define MD5_HMAC_BLOCK_SIZE 64
27#define MD5_BLOCK_WORDS 16
28#define MD5_HASH_WORDS 4
29
30#define F1(x, y, z) (z ^ (x & (y ^ z))) 26#define F1(x, y, z) (z ^ (x & (y ^ z)))
31#define F2(x, y, z) F1(z, x, y) 27#define F2(x, y, z) F1(z, x, y)
32#define F3(x, y, z) (x ^ y ^ z) 28#define F3(x, y, z) (x ^ y ^ z)
@@ -35,12 +31,6 @@
35#define MD5STEP(f, w, x, y, z, in, s) \ 31#define MD5STEP(f, w, x, y, z, in, s) \
36 (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) 32 (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
37 33
38struct md5_ctx {
39 u32 hash[MD5_HASH_WORDS];
40 u32 block[MD5_BLOCK_WORDS];
41 u64 byte_count;
42};
43
44static void md5_transform(u32 *hash, u32 const *in) 34static void md5_transform(u32 *hash, u32 const *in)
45{ 35{
46 u32 a, b, c, d; 36 u32 a, b, c, d;
@@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
141 } 131 }
142} 132}
143 133
144static inline void md5_transform_helper(struct md5_ctx *ctx) 134static inline void md5_transform_helper(struct md5_state *ctx)
145{ 135{
146 le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); 136 le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
147 md5_transform(ctx->hash, ctx->block); 137 md5_transform(ctx->hash, ctx->block);
@@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
149 139
150static int md5_init(struct shash_desc *desc) 140static int md5_init(struct shash_desc *desc)
151{ 141{
152 struct md5_ctx *mctx = shash_desc_ctx(desc); 142 struct md5_state *mctx = shash_desc_ctx(desc);
153 143
154 mctx->hash[0] = 0x67452301; 144 mctx->hash[0] = 0x67452301;
155 mctx->hash[1] = 0xefcdab89; 145 mctx->hash[1] = 0xefcdab89;
@@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc)
162 152
163static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) 153static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
164{ 154{
165 struct md5_ctx *mctx = shash_desc_ctx(desc); 155 struct md5_state *mctx = shash_desc_ctx(desc);
166 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); 156 const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
167 157
168 mctx->byte_count += len; 158 mctx->byte_count += len;
@@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
194 184
195static int md5_final(struct shash_desc *desc, u8 *out) 185static int md5_final(struct shash_desc *desc, u8 *out)
196{ 186{
197 struct md5_ctx *mctx = shash_desc_ctx(desc); 187 struct md5_state *mctx = shash_desc_ctx(desc);
198 const unsigned int offset = mctx->byte_count & 0x3f; 188 const unsigned int offset = mctx->byte_count & 0x3f;
199 char *p = (char *)mctx->block + offset; 189 char *p = (char *)mctx->block + offset;
200 int padding = 56 - (offset + 1); 190 int padding = 56 - (offset + 1);
@@ -220,12 +210,31 @@ static int md5_final(struct shash_desc *desc, u8 *out)
220 return 0; 210 return 0;
221} 211}
222 212
213static int md5_export(struct shash_desc *desc, void *out)
214{
215 struct md5_state *ctx = shash_desc_ctx(desc);
216
217 memcpy(out, ctx, sizeof(*ctx));
218 return 0;
219}
220
221static int md5_import(struct shash_desc *desc, const void *in)
222{
223 struct md5_state *ctx = shash_desc_ctx(desc);
224
225 memcpy(ctx, in, sizeof(*ctx));
226 return 0;
227}
228
223static struct shash_alg alg = { 229static struct shash_alg alg = {
224 .digestsize = MD5_DIGEST_SIZE, 230 .digestsize = MD5_DIGEST_SIZE,
225 .init = md5_init, 231 .init = md5_init,
226 .update = md5_update, 232 .update = md5_update,
227 .final = md5_final, 233 .final = md5_final,
228 .descsize = sizeof(struct md5_ctx), 234 .export = md5_export,
235 .import = md5_import,
236 .descsize = sizeof(struct md5_state),
237 .statesize = sizeof(struct md5_state),
229 .base = { 238 .base = {
230 .cra_name = "md5", 239 .cra_name = "md5",
231 .cra_flags = CRYPTO_ALG_TYPE_SHASH, 240 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
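
With .export/.import wired up and the descriptor switched to the shared struct md5_state (now taken from <crypto/md5.h>), an in-progress MD5 computation can be checkpointed and resumed, which callers such as hmac rely on. A minimal sketch over the synchronous shash API; buffer names are illustrative and error checking is elided:

static void md5_checkpoint_demo(struct crypto_shash *tfm,
				const u8 *part1, unsigned int len1,
				const u8 *part2, unsigned int len2,
				u8 *out)
{
	struct md5_state saved;
	struct {
		struct shash_desc shash;
		char ctx[sizeof(struct md5_state)];
	} desc;

	desc.shash.tfm = tfm;	/* e.g. from crypto_alloc_shash("md5", 0, 0) */
	desc.shash.flags = 0;

	crypto_shash_init(&desc.shash);
	crypto_shash_update(&desc.shash, part1, len1);
	crypto_shash_export(&desc.shash, &saved);	/* checkpoint */

	crypto_shash_import(&desc.shash, &saved);	/* resume later */
	crypto_shash_update(&desc.shash, part2, len2);
	crypto_shash_final(&desc.shash, out);
}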
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
new file mode 100644
index 000000000000..de3078215fe6
--- /dev/null
+++ b/crypto/pcrypt.c
@@ -0,0 +1,567 @@
1/*
2 * pcrypt - Parallel crypto wrapper.
3 *
4 * Copyright (C) 2009 secunet Security Networks AG
5 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <crypto/algapi.h>
22#include <crypto/internal/aead.h>
23#include <linux/err.h>
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/slab.h>
27#include <linux/notifier.h>
28#include <linux/kobject.h>
29#include <linux/cpu.h>
30#include <crypto/pcrypt.h>
31
32struct padata_pcrypt {
33 struct padata_instance *pinst;
34 struct workqueue_struct *wq;
35
36 /*
37 * Cpumask for callback CPUs. It should be equal to the
38 * serial cpumask of the corresponding padata instance, so it
39 * is updated when padata notifies us about a serial cpumask
40 * change.
41 *
42 * cb_cpumask is protected by RCU. This prevents us from using
43 * cpumask_var_t directly because the actual type of
44 * cpumask_var_t depends on the kernel configuration (in
45 * particular on CONFIG_CPUMASK_OFFSTACK). Depending on the
46 * configuration, cpumask_var_t may be either a pointer to a
47 * struct cpumask or a variable allocated on the stack. Thus
48 * we cannot safely use cpumask_var_t with RCU operations
49 * such as rcu_assign_pointer or rcu_dereference. So
50 * cpumask_var_t is wrapped with struct pcrypt_cpumask, which makes it possible to use it with RCU.
51 */
52 struct pcrypt_cpumask {
53 cpumask_var_t mask;
54 } *cb_cpumask;
55 struct notifier_block nblock;
56};
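
The comment above is describing the classic RCU publish/read discipline; condensed from the code later in this file (reader side as in pcrypt_do_parallel(), writer side as in pcrypt_cpumask_change_notify(); allocation and error handling elided), the pattern the wrapper enables is:

	struct pcrypt_cpumask *cpumask, *old_mask, *new_mask;

	/* Reader: runs under the BH-disabling RCU read lock. */
	rcu_read_lock_bh();
	cpumask = rcu_dereference(pcrypt->cb_cpumask);
	/* ... test or iterate cpumask->mask ... */
	rcu_read_unlock_bh();

	/* Writer: publish the new mask, wait out readers, free the old. */
	old_mask = pcrypt->cb_cpumask;
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();
	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
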
57
58static struct padata_pcrypt pencrypt;
59static struct padata_pcrypt pdecrypt;
60static struct kset *pcrypt_kset;
61
62struct pcrypt_instance_ctx {
63 struct crypto_spawn spawn;
64 unsigned int tfm_count;
65};
66
67struct pcrypt_aead_ctx {
68 struct crypto_aead *child;
69 unsigned int cb_cpu;
70};
71
72static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
73 struct padata_pcrypt *pcrypt)
74{
75 unsigned int cpu_index, cpu, i;
76 struct pcrypt_cpumask *cpumask;
77
78 cpu = *cb_cpu;
79
80 rcu_read_lock_bh();
81 cpumask = rcu_dereference(pcrypt->cb_cpumask);
82 if (cpumask_test_cpu(cpu, cpumask->mask))
83 goto out;
84
85 if (!cpumask_weight(cpumask->mask))
86 goto out;
87
88 cpu_index = cpu % cpumask_weight(cpumask->mask);
89
90 cpu = cpumask_first(cpumask->mask);
91 for (i = 0; i < cpu_index; i++)
92 cpu = cpumask_next(cpu, cpumask->mask);
93
94 *cb_cpu = cpu;
95
96out:
97 rcu_read_unlock_bh();
98 return padata_do_parallel(pcrypt->pinst, padata, cpu);
99}
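
A worked example of the steering above, with hypothetical values: suppose the callback cpumask currently holds CPUs {0, 2, 4} and the request carries cb_cpu == 7, which is not in the mask. Then

	cpu_index = 7 % cpumask_weight = 7 % 3 = 1

so starting from cpumask_first() == 0, one cpumask_next() step selects CPU 2; *cb_cpu is rewritten to 2 and the request's serial callback will run there.
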
100
101static int pcrypt_aead_setkey(struct crypto_aead *parent,
102 const u8 *key, unsigned int keylen)
103{
104 struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
105
106 return crypto_aead_setkey(ctx->child, key, keylen);
107}
108
109static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
110 unsigned int authsize)
111{
112 struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
113
114 return crypto_aead_setauthsize(ctx->child, authsize);
115}
116
117static void pcrypt_aead_serial(struct padata_priv *padata)
118{
119 struct pcrypt_request *preq = pcrypt_padata_request(padata);
120 struct aead_request *req = pcrypt_request_ctx(preq);
121
122 aead_request_complete(req->base.data, padata->info);
123}
124
125static void pcrypt_aead_giv_serial(struct padata_priv *padata)
126{
127 struct pcrypt_request *preq = pcrypt_padata_request(padata);
128 struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
129
130 aead_request_complete(req->areq.base.data, padata->info);
131}
132
133static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
134{
135 struct aead_request *req = areq->data;
136 struct pcrypt_request *preq = aead_request_ctx(req);
137 struct padata_priv *padata = pcrypt_request_padata(preq);
138
139 padata->info = err;
140 req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
141
142 padata_do_serial(padata);
143}
144
145static void pcrypt_aead_enc(struct padata_priv *padata)
146{
147 struct pcrypt_request *preq = pcrypt_padata_request(padata);
148 struct aead_request *req = pcrypt_request_ctx(preq);
149
150 padata->info = crypto_aead_encrypt(req);
151
152 if (padata->info == -EINPROGRESS)
153 return;
154
155 padata_do_serial(padata);
156}
157
158static int pcrypt_aead_encrypt(struct aead_request *req)
159{
160 int err;
161 struct pcrypt_request *preq = aead_request_ctx(req);
162 struct aead_request *creq = pcrypt_request_ctx(preq);
163 struct padata_priv *padata = pcrypt_request_padata(preq);
164 struct crypto_aead *aead = crypto_aead_reqtfm(req);
165 struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
166 u32 flags = aead_request_flags(req);
167
168 memset(padata, 0, sizeof(struct padata_priv));
169
170 padata->parallel = pcrypt_aead_enc;
171 padata->serial = pcrypt_aead_serial;
172
173 aead_request_set_tfm(creq, ctx->child);
174 aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
175 pcrypt_aead_done, req);
176 aead_request_set_crypt(creq, req->src, req->dst,
177 req->cryptlen, req->iv);
178 aead_request_set_assoc(creq, req->assoc, req->assoclen);
179
180 err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
181 if (!err)
182 return -EINPROGRESS;
183
184 return err;
185}
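
For context, a consumer instantiates the wrapper purely by name. A minimal allocation sketch (assumed usage, not part of this patch; "pcrypt" is the template name registered at the bottom of this file, and the inner authenc(...) algorithm is an illustrative choice -- any AEAD can be wrapped):

	#include <crypto/aead.h>

	/* Sketch only: allocate a parallelized AEAD by template name. */
	static struct crypto_aead *alloc_parallel_aead(void)
	{
		return crypto_alloc_aead(
			"pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
	}

Requests against such a tfm return -EINPROGRESS and complete through the usual asynchronous callback, with padata serializing completions back into submission order.
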
186
187static void pcrypt_aead_dec(struct padata_priv *padata)
188{
189 struct pcrypt_request *preq = pcrypt_padata_request(padata);
190 struct aead_request *req = pcrypt_request_ctx(preq);
191
192 padata->info = crypto_aead_decrypt(req);
193
194 if (padata->info == -EINPROGRESS)
195 return;
196
197 padata_do_serial(padata);
198}
199
200static int pcrypt_aead_decrypt(struct aead_request *req)
201{
202 int err;
203 struct pcrypt_request *preq = aead_request_ctx(req);
204 struct aead_request *creq = pcrypt_request_ctx(preq);
205 struct padata_priv *padata = pcrypt_request_padata(preq);
206 struct crypto_aead *aead = crypto_aead_reqtfm(req);
207 struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
208 u32 flags = aead_request_flags(req);
209
210 memset(padata, 0, sizeof(struct padata_priv));
211
212 padata->parallel = pcrypt_aead_dec;
213 padata->serial = pcrypt_aead_serial;
214
215 aead_request_set_tfm(creq, ctx->child);
216 aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
217 pcrypt_aead_done, req);
218 aead_request_set_crypt(creq, req->src, req->dst,
219 req->cryptlen, req->iv);
220 aead_request_set_assoc(creq, req->assoc, req->assoclen);
221
222 err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
223 if (!err)
224 return -EINPROGRESS;
225
226 return err;
227}
228
229static void pcrypt_aead_givenc(struct padata_priv *padata)
230{
231 struct pcrypt_request *preq = pcrypt_padata_request(padata);
232 struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
233
234 padata->info = crypto_aead_givencrypt(req);
235
236 if (padata->info == -EINPROGRESS)
237 return;
238
239 padata_do_serial(padata);
240}
241
242static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
243{
244 int err;
245 struct aead_request *areq = &req->areq;
246 struct pcrypt_request *preq = aead_request_ctx(areq);
247 struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
248 struct padata_priv *padata = pcrypt_request_padata(preq);
249 struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
250 struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
251 u32 flags = aead_request_flags(areq);
252
253 memset(padata, 0, sizeof(struct padata_priv));
254
255 padata->parallel = pcrypt_aead_givenc;
256 padata->serial = pcrypt_aead_giv_serial;
257
258 aead_givcrypt_set_tfm(creq, ctx->child);
259 aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
260 pcrypt_aead_done, areq);
261 aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
262 areq->cryptlen, areq->iv);
263 aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
264 aead_givcrypt_set_giv(creq, req->giv, req->seq);
265
266 err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
267 if (!err)
268 return -EINPROGRESS;
269
270 return err;
271}
272
273static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
274{
275 int cpu, cpu_index;
276 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
277 struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
278 struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
279 struct crypto_aead *cipher;
280
281 ictx->tfm_count++;
282
283 cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
284
285 ctx->cb_cpu = cpumask_first(cpu_active_mask);
286 for (cpu = 0; cpu < cpu_index; cpu++)
287 ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
288
289 cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
290
291 if (IS_ERR(cipher))
292 return PTR_ERR(cipher);
293
294 ctx->child = cipher;
295 tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
296 + sizeof(struct aead_givcrypt_request)
297 + crypto_aead_reqsize(cipher);
298
299 return 0;
300}
301
302static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
303{
304 struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
305
306 crypto_free_aead(ctx->child);
307}
308
309static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
310{
311 struct crypto_instance *inst;
312 struct pcrypt_instance_ctx *ctx;
313 int err;
314
315 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
316 if (!inst) {
317 inst = ERR_PTR(-ENOMEM);
318 goto out;
319 }
320
321 err = -ENAMETOOLONG;
322 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
323 "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
324 goto out_free_inst;
325
326 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
327
328 ctx = crypto_instance_ctx(inst);
329 err = crypto_init_spawn(&ctx->spawn, alg, inst,
330 CRYPTO_ALG_TYPE_MASK);
331 if (err)
332 goto out_free_inst;
333
334 inst->alg.cra_priority = alg->cra_priority + 100;
335 inst->alg.cra_blocksize = alg->cra_blocksize;
336 inst->alg.cra_alignmask = alg->cra_alignmask;
337
338out:
339 return inst;
340
341out_free_inst:
342 kfree(inst);
343 inst = ERR_PTR(err);
344 goto out;
345}
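
To illustrate the naming and priority scheme (algorithm names here are illustrative): wrapping an algorithm whose cra_driver_name is "authenc(hmac(sha1-generic),cbc(aes-generic))" yields an instance whose driver name is "pcrypt(authenc(hmac(sha1-generic),cbc(aes-generic)))", and its cra_priority is the inner algorithm's priority plus 100, so the crypto API prefers the parallel wrapper once it is instantiated. A wrapped name exceeding CRYPTO_MAX_ALG_NAME is rejected with -ENAMETOOLONG.
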
346
347static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
348 u32 type, u32 mask)
349{
350 struct crypto_instance *inst;
351 struct crypto_alg *alg;
352
353 alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
354 if (IS_ERR(alg))
355 return ERR_CAST(alg);
356
357 inst = pcrypt_alloc_instance(alg);
358 if (IS_ERR(inst))
359 goto out_put_alg;
360
361 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
362 inst->alg.cra_type = &crypto_aead_type;
363
364 inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
365 inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
366 inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
367
368 inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
369
370 inst->alg.cra_init = pcrypt_aead_init_tfm;
371 inst->alg.cra_exit = pcrypt_aead_exit_tfm;
372
373 inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
374 inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
375 inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
376 inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
377 inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
378
379out_put_alg:
380 crypto_mod_put(alg);
381 return inst;
382}
383
384static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
385{
386 struct crypto_attr_type *algt;
387
388 algt = crypto_get_attr_type(tb);
389 if (IS_ERR(algt))
390 return ERR_CAST(algt);
391
392 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
393 case CRYPTO_ALG_TYPE_AEAD:
394 return pcrypt_alloc_aead(tb, algt->type, algt->mask);
395 }
396
397 return ERR_PTR(-EINVAL);
398}
399
400static void pcrypt_free(struct crypto_instance *inst)
401{
402 struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
403
404 crypto_drop_spawn(&ctx->spawn);
405 kfree(inst);
406}
407
408static int pcrypt_cpumask_change_notify(struct notifier_block *self,
409 unsigned long val, void *data)
410{
411 struct padata_pcrypt *pcrypt;
412 struct pcrypt_cpumask *new_mask, *old_mask;
413 struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
414
415 if (!(val & PADATA_CPU_SERIAL))
416 return 0;
417
418 pcrypt = container_of(self, struct padata_pcrypt, nblock);
419 new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
420 if (!new_mask)
421 return -ENOMEM;
422 if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
423 kfree(new_mask);
424 return -ENOMEM;
425 }
426
427 old_mask = pcrypt->cb_cpumask;
428
429 cpumask_copy(new_mask->mask, cpumask->cbcpu);
430 rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
431 synchronize_rcu_bh();
432
433 free_cpumask_var(old_mask->mask);
434 kfree(old_mask);
435 return 0;
436}
437
438static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
439{
440 int ret;
441
442 pinst->kobj.kset = pcrypt_kset;
443 ret = kobject_add(&pinst->kobj, NULL, name);
444 if (!ret)
445 kobject_uevent(&pinst->kobj, KOBJ_ADD);
446
447 return ret;
448}
449
450static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
451 const char *name)
452{
453 int ret = -ENOMEM;
454 struct pcrypt_cpumask *mask;
455
456 get_online_cpus();
457
458 pcrypt->wq = create_workqueue(name);
459 if (!pcrypt->wq)
460 goto err;
461
462 pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
463 if (!pcrypt->pinst)
464 goto err_destroy_workqueue;
465
466 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
467 if (!mask)
468 goto err_free_padata;
469 if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
470 kfree(mask);
471 goto err_free_padata;
472 }
473
474 cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
475 rcu_assign_pointer(pcrypt->cb_cpumask, mask);
476
477 pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
478 ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
479 if (ret)
480 goto err_free_cpumask;
481
482 ret = pcrypt_sysfs_add(pcrypt->pinst, name);
483 if (ret)
484 goto err_unregister_notifier;
485
486 put_online_cpus();
487
488 return ret;
489
490err_unregister_notifier:
491 padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
492err_free_cpumask:
493 free_cpumask_var(mask->mask);
494 kfree(mask);
495err_free_padata:
496 padata_free(pcrypt->pinst);
497err_destroy_workqueue:
498 destroy_workqueue(pcrypt->wq);
499err:
500 put_online_cpus();
501
502 return ret;
503}
504
505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
506{
507 kobject_put(&pcrypt->pinst->kobj);
508 free_cpumask_var(pcrypt->cb_cpumask->mask);
509 kfree(pcrypt->cb_cpumask);
510
511 padata_stop(pcrypt->pinst);
512 padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
513 destroy_workqueue(pcrypt->wq);
514 padata_free(pcrypt->pinst);
515}
516
517static struct crypto_template pcrypt_tmpl = {
518 .name = "pcrypt",
519 .alloc = pcrypt_alloc,
520 .free = pcrypt_free,
521 .module = THIS_MODULE,
522};
523
524static int __init pcrypt_init(void)
525{
526 int err = -ENOMEM;
527
528 pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
529 if (!pcrypt_kset)
530 goto err;
531
532 err = pcrypt_init_padata(&pencrypt, "pencrypt");
533 if (err)
534 goto err_unreg_kset;
535
536 err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
537 if (err)
538 goto err_deinit_pencrypt;
539
540 padata_start(pencrypt.pinst);
541 padata_start(pdecrypt.pinst);
542
543 return crypto_register_template(&pcrypt_tmpl);
544
545err_deinit_pencrypt:
546 pcrypt_fini_padata(&pencrypt);
547err_unreg_kset:
548 kset_unregister(pcrypt_kset);
549err:
550 return err;
551}
552
553static void __exit pcrypt_exit(void)
554{
555 pcrypt_fini_padata(&pencrypt);
556 pcrypt_fini_padata(&pdecrypt);
557
558 kset_unregister(pcrypt_kset);
559 crypto_unregister_template(&pcrypt_tmpl);
560}
561
562module_init(pcrypt_init);
563module_exit(pcrypt_exit);
564
565MODULE_LICENSE("GPL");
566MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
567MODULE_DESCRIPTION("Parallel crypto wrapper");
diff --git a/crypto/proc.c b/crypto/proc.c
index 5dc07e442fca..58fef67d4f4d 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -25,28 +25,22 @@
25#ifdef CONFIG_CRYPTO_FIPS 25#ifdef CONFIG_CRYPTO_FIPS
26static struct ctl_table crypto_sysctl_table[] = { 26static struct ctl_table crypto_sysctl_table[] = {
27 { 27 {
28 .ctl_name = CTL_UNNUMBERED,
29 .procname = "fips_enabled", 28 .procname = "fips_enabled",
30 .data = &fips_enabled, 29 .data = &fips_enabled,
31 .maxlen = sizeof(int), 30 .maxlen = sizeof(int),
32 .mode = 0444, 31 .mode = 0444,
33 .proc_handler = &proc_dointvec 32 .proc_handler = proc_dointvec
34 },
35 {
36 .ctl_name = 0,
37 }, 33 },
34 {}
38}; 35};
39 36
40static struct ctl_table crypto_dir_table[] = { 37static struct ctl_table crypto_dir_table[] = {
41 { 38 {
42 .ctl_name = CTL_UNNUMBERED,
43 .procname = "crypto", 39 .procname = "crypto",
44 .mode = 0555, 40 .mode = 0555,
45 .child = crypto_sysctl_table 41 .child = crypto_sysctl_table
46 }, 42 },
47 { 43 {}
48 .ctl_name = 0,
49 },
50}; 44};
51 45
52static struct ctl_table_header *crypto_sysctls; 46static struct ctl_table_header *crypto_sysctls;
@@ -115,13 +109,6 @@ static int c_show(struct seq_file *m, void *p)
115 seq_printf(m, "max keysize : %u\n", 109 seq_printf(m, "max keysize : %u\n",
116 alg->cra_cipher.cia_max_keysize); 110 alg->cra_cipher.cia_max_keysize);
117 break; 111 break;
118
119 case CRYPTO_ALG_TYPE_DIGEST:
120 seq_printf(m, "type : digest\n");
121 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
122 seq_printf(m, "digestsize : %u\n",
123 alg->cra_digest.dia_digestsize);
124 break;
125 case CRYPTO_ALG_TYPE_COMPRESS: 112 case CRYPTO_ALG_TYPE_COMPRESS:
126 seq_printf(m, "type : compression\n"); 113 seq_printf(m, "type : compression\n");
127 break; 114 break;
diff --git a/crypto/rng.c b/crypto/rng.c
index ba05e7380e76..f93cb5311182 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -19,6 +19,7 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/random.h> 20#include <linux/random.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/slab.h>
22#include <linux/string.h> 23#include <linux/string.h>
23 24
24static DEFINE_MUTEX(crypto_default_rng_lock); 25static DEFINE_MUTEX(crypto_default_rng_lock);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a424401..41e529af0773 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
68 68
69void scatterwalk_done(struct scatter_walk *walk, int out, int more) 69void scatterwalk_done(struct scatter_walk *walk, int out, int more)
70{ 70{
71 if (!offset_in_page(walk->offset) || !more) 71 if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
72 scatterwalk_pagedone(walk, out, more); 72 scatterwalk_pagedone(walk, out, more);
73} 73}
74EXPORT_SYMBOL_GPL(scatterwalk_done); 74EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 5a013a8bf87a..4c4491229417 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/slab.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/string.h> 25#include <linux/string.h>
25 26
diff --git a/crypto/shash.c b/crypto/shash.c
index 91f7b9d83881..22fd9433141f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
37 u8 *buffer, *alignbuffer; 37 u8 *buffer, *alignbuffer;
38 int err; 38 int err;
39 39
40 absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1)); 40 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
41 buffer = kmalloc(absize, GFP_KERNEL); 41 buffer = kmalloc(absize, GFP_KERNEL);
42 if (!buffer) 42 if (!buffer)
43 return -ENOMEM; 43 return -ENOMEM;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index aa3f84ccc786..3ca68f9fc14d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -18,8 +18,8 @@
18#include <crypto/hash.h> 18#include <crypto/hash.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/gfp.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
@@ -394,6 +394,17 @@ out:
394 return 0; 394 return 0;
395} 395}
396 396
397static void test_hash_sg_init(struct scatterlist *sg)
398{
399 int i;
400
401 sg_init_table(sg, TVMEMSIZE);
402 for (i = 0; i < TVMEMSIZE; i++) {
403 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
404 memset(tvmem[i], 0xff, PAGE_SIZE);
405 }
406}
407
397static void test_hash_speed(const char *algo, unsigned int sec, 408static void test_hash_speed(const char *algo, unsigned int sec,
398 struct hash_speed *speed) 409 struct hash_speed *speed)
399{ 410{
@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
423 goto out; 434 goto out;
424 } 435 }
425 436
426 sg_init_table(sg, TVMEMSIZE); 437 test_hash_sg_init(sg);
427 for (i = 0; i < TVMEMSIZE; i++) {
428 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
429 memset(tvmem[i], 0xff, PAGE_SIZE);
430 }
431
432 for (i = 0; speed[i].blen != 0; i++) { 438 for (i = 0; speed[i].blen != 0; i++) {
433 if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { 439 if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
434 printk(KERN_ERR 440 printk(KERN_ERR
@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
437 goto out; 443 goto out;
438 } 444 }
439 445
446 if (speed[i].klen)
447 crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
448
440 printk(KERN_INFO "test%3u " 449 printk(KERN_INFO "test%3u "
441 "(%5u byte blocks,%5u bytes per update,%4u updates): ", 450 "(%5u byte blocks,%5u bytes per update,%4u updates): ",
442 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); 451 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -458,6 +467,250 @@ out:
458 crypto_free_hash(tfm); 467 crypto_free_hash(tfm);
459} 468}
460 469
470struct tcrypt_result {
471 struct completion completion;
472 int err;
473};
474
475static void tcrypt_complete(struct crypto_async_request *req, int err)
476{
477 struct tcrypt_result *res = req->data;
478
479 if (err == -EINPROGRESS)
480 return;
481
482 res->err = err;
483 complete(&res->completion);
484}
485
486static inline int do_one_ahash_op(struct ahash_request *req, int ret)
487{
488 if (ret == -EINPROGRESS || ret == -EBUSY) {
489 struct tcrypt_result *tr = req->base.data;
490
491 ret = wait_for_completion_interruptible(&tr->completion);
492 if (!ret)
493 ret = tr->err;
494 INIT_COMPLETION(tr->completion);
495 }
496 return ret;
497}
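
The two transient return codes handled above follow the asynchronous crypto convention; as a comment sketch of what they mean here (the MAY_BACKLOG flag is set by test_ahash_speed() below):

	/*
	 * -EINPROGRESS: request accepted, completion arrives via the
	 *  callback.  -EBUSY: request was put on the backlog (possible
	 *  because CRYPTO_TFM_REQ_MAY_BACKLOG is set), but it too
	 *  completes through the callback.  Either way we can block on
	 *  the same completion and then re-arm it for the next op.
	 */
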
498
499static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
500 char *out, int sec)
501{
502 unsigned long start, end;
503 int bcount;
504 int ret;
505
506 for (start = jiffies, end = start + sec * HZ, bcount = 0;
507 time_before(jiffies, end); bcount++) {
508 ret = do_one_ahash_op(req, crypto_ahash_digest(req));
509 if (ret)
510 return ret;
511 }
512
513 printk("%6u opers/sec, %9lu bytes/sec\n",
514 bcount / sec, ((long)bcount * blen) / sec);
515
516 return 0;
517}
518
519static int test_ahash_jiffies(struct ahash_request *req, int blen,
520 int plen, char *out, int sec)
521{
522 unsigned long start, end;
523 int bcount, pcount;
524 int ret;
525
526 if (plen == blen)
527 return test_ahash_jiffies_digest(req, blen, out, sec);
528
529 for (start = jiffies, end = start + sec * HZ, bcount = 0;
530 time_before(jiffies, end); bcount++) {
531 ret = crypto_ahash_init(req);
532 if (ret)
533 return ret;
534 for (pcount = 0; pcount < blen; pcount += plen) {
535 ret = do_one_ahash_op(req, crypto_ahash_update(req));
536 if (ret)
537 return ret;
538 }
539 /* we assume there is enough space in 'out' for the result */
540 ret = do_one_ahash_op(req, crypto_ahash_final(req));
541 if (ret)
542 return ret;
543 }
544
545 pr_cont("%6u opers/sec, %9lu bytes/sec\n",
546 bcount / sec, ((long)bcount * blen) / sec);
547
548 return 0;
549}
550
551static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
552 char *out)
553{
554 unsigned long cycles = 0;
555 int ret, i;
556
557 /* Warm-up run. */
558 for (i = 0; i < 4; i++) {
559 ret = do_one_ahash_op(req, crypto_ahash_digest(req));
560 if (ret)
561 goto out;
562 }
563
564 /* The real thing. */
565 for (i = 0; i < 8; i++) {
566 cycles_t start, end;
567
568 start = get_cycles();
569
570 ret = do_one_ahash_op(req, crypto_ahash_digest(req));
571 if (ret)
572 goto out;
573
574 end = get_cycles();
575
576 cycles += end - start;
577 }
578
579out:
580 if (ret)
581 return ret;
582
583 pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
584 cycles / 8, cycles / (8 * blen));
585
586 return 0;
587}
588
589static int test_ahash_cycles(struct ahash_request *req, int blen,
590 int plen, char *out)
591{
592 unsigned long cycles = 0;
593 int i, pcount, ret;
594
595 if (plen == blen)
596 return test_ahash_cycles_digest(req, blen, out);
597
598 /* Warm-up run. */
599 for (i = 0; i < 4; i++) {
600 ret = crypto_ahash_init(req);
601 if (ret)
602 goto out;
603 for (pcount = 0; pcount < blen; pcount += plen) {
604 ret = do_one_ahash_op(req, crypto_ahash_update(req));
605 if (ret)
606 goto out;
607 }
608 ret = do_one_ahash_op(req, crypto_ahash_final(req));
609 if (ret)
610 goto out;
611 }
612
613 /* The real thing. */
614 for (i = 0; i < 8; i++) {
615 cycles_t start, end;
616
617 start = get_cycles();
618
619 ret = crypto_ahash_init(req);
620 if (ret)
621 goto out;
622 for (pcount = 0; pcount < blen; pcount += plen) {
623 ret = do_one_ahash_op(req, crypto_ahash_update(req));
624 if (ret)
625 goto out;
626 }
627 ret = do_one_ahash_op(req, crypto_ahash_final(req));
628 if (ret)
629 goto out;
630
631 end = get_cycles();
632
633 cycles += end - start;
634 }
635
636out:
637 if (ret)
638 return ret;
639
640 pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
641 cycles / 8, cycles / (8 * blen));
642
643 return 0;
644}
645
646static void test_ahash_speed(const char *algo, unsigned int sec,
647 struct hash_speed *speed)
648{
649 struct scatterlist sg[TVMEMSIZE];
650 struct tcrypt_result tresult;
651 struct ahash_request *req;
652 struct crypto_ahash *tfm;
653 static char output[1024];
654 int i, ret;
655
656 printk(KERN_INFO "\ntesting speed of async %s\n", algo);
657
658 tfm = crypto_alloc_ahash(algo, 0, 0);
659 if (IS_ERR(tfm)) {
660 pr_err("failed to load transform for %s: %ld\n",
661 algo, PTR_ERR(tfm));
662 return;
663 }
664
665 if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
666 pr_err("digestsize(%u) > outputbuffer(%zu)\n",
667 crypto_ahash_digestsize(tfm), sizeof(output));
668 goto out;
669 }
670
671 test_hash_sg_init(sg);
672 req = ahash_request_alloc(tfm, GFP_KERNEL);
673 if (!req) {
674 pr_err("ahash request allocation failure\n");
675 goto out;
676 }
677
678 init_completion(&tresult.completion);
679 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
680 tcrypt_complete, &tresult);
681
682 for (i = 0; speed[i].blen != 0; i++) {
683 if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
684 pr_err("template (%u) too big for tvmem (%lu)\n",
685 speed[i].blen, TVMEMSIZE * PAGE_SIZE);
686 break;
687 }
688
689 pr_info("test%3u "
690 "(%5u byte blocks,%5u bytes per update,%4u updates): ",
691 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
692
693 ahash_request_set_crypt(req, sg, output, speed[i].plen);
694
695 if (sec)
696 ret = test_ahash_jiffies(req, speed[i].blen,
697 speed[i].plen, output, sec);
698 else
699 ret = test_ahash_cycles(req, speed[i].blen,
700 speed[i].plen, output);
701
702 if (ret) {
703 pr_err("hashing failed ret=%d\n", ret);
704 break;
705 }
706 }
707
708 ahash_request_free(req);
709
710out:
711 crypto_free_ahash(tfm);
712}
713
461static void test_available(void) 714static void test_available(void)
462{ 715{
463 char **name = check; 716 char **name = check;
@@ -881,9 +1134,87 @@ static int do_test(int m)
881 test_hash_speed("rmd320", sec, generic_hash_speed_template); 1134 test_hash_speed("rmd320", sec, generic_hash_speed_template);
882 if (mode > 300 && mode < 400) break; 1135 if (mode > 300 && mode < 400) break;
883 1136
1137 case 318:
1138 test_hash_speed("ghash-generic", sec, hash_speed_template_16);
1139 if (mode > 300 && mode < 400) break;
1140
884 case 399: 1141 case 399:
885 break; 1142 break;
886 1143
1144 case 400:
1145 /* fall through */
1146
1147 case 401:
1148 test_ahash_speed("md4", sec, generic_hash_speed_template);
1149 if (mode > 400 && mode < 500) break;
1150
1151 case 402:
1152 test_ahash_speed("md5", sec, generic_hash_speed_template);
1153 if (mode > 400 && mode < 500) break;
1154
1155 case 403:
1156 test_ahash_speed("sha1", sec, generic_hash_speed_template);
1157 if (mode > 400 && mode < 500) break;
1158
1159 case 404:
1160 test_ahash_speed("sha256", sec, generic_hash_speed_template);
1161 if (mode > 400 && mode < 500) break;
1162
1163 case 405:
1164 test_ahash_speed("sha384", sec, generic_hash_speed_template);
1165 if (mode > 400 && mode < 500) break;
1166
1167 case 406:
1168 test_ahash_speed("sha512", sec, generic_hash_speed_template);
1169 if (mode > 400 && mode < 500) break;
1170
1171 case 407:
1172 test_ahash_speed("wp256", sec, generic_hash_speed_template);
1173 if (mode > 400 && mode < 500) break;
1174
1175 case 408:
1176 test_ahash_speed("wp384", sec, generic_hash_speed_template);
1177 if (mode > 400 && mode < 500) break;
1178
1179 case 409:
1180 test_ahash_speed("wp512", sec, generic_hash_speed_template);
1181 if (mode > 400 && mode < 500) break;
1182
1183 case 410:
1184 test_ahash_speed("tgr128", sec, generic_hash_speed_template);
1185 if (mode > 400 && mode < 500) break;
1186
1187 case 411:
1188 test_ahash_speed("tgr160", sec, generic_hash_speed_template);
1189 if (mode > 400 && mode < 500) break;
1190
1191 case 412:
1192 test_ahash_speed("tgr192", sec, generic_hash_speed_template);
1193 if (mode > 400 && mode < 500) break;
1194
1195 case 413:
1196 test_ahash_speed("sha224", sec, generic_hash_speed_template);
1197 if (mode > 400 && mode < 500) break;
1198
1199 case 414:
1200 test_ahash_speed("rmd128", sec, generic_hash_speed_template);
1201 if (mode > 400 && mode < 500) break;
1202
1203 case 415:
1204 test_ahash_speed("rmd160", sec, generic_hash_speed_template);
1205 if (mode > 400 && mode < 500) break;
1206
1207 case 416:
1208 test_ahash_speed("rmd256", sec, generic_hash_speed_template);
1209 if (mode > 400 && mode < 500) break;
1210
1211 case 417:
1212 test_ahash_speed("rmd320", sec, generic_hash_speed_template);
1213 if (mode > 400 && mode < 500) break;
1214
1215 case 499:
1216 break;
1217
887 case 1000: 1218 case 1000:
888 test_available(); 1219 test_available();
889 break; 1220 break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfaf95b1..10cb925132c9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@ struct cipher_speed_template {
25struct hash_speed { 25struct hash_speed {
26 unsigned int blen; /* buffer length */ 26 unsigned int blen; /* buffer length */
27 unsigned int plen; /* per-update length */ 27 unsigned int plen; /* per-update length */
28 unsigned int klen; /* key length */
28}; 29};
29 30
30/* 31/*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
83 { .blen = 0, .plen = 0, } 84 { .blen = 0, .plen = 0, }
84}; 85};
85 86
87static struct hash_speed hash_speed_template_16[] = {
88 { .blen = 16, .plen = 16, .klen = 16, },
89 { .blen = 64, .plen = 16, .klen = 16, },
90 { .blen = 64, .plen = 64, .klen = 16, },
91 { .blen = 256, .plen = 16, .klen = 16, },
92 { .blen = 256, .plen = 64, .klen = 16, },
93 { .blen = 256, .plen = 256, .klen = 16, },
94 { .blen = 1024, .plen = 16, .klen = 16, },
95 { .blen = 1024, .plen = 256, .klen = 16, },
96 { .blen = 1024, .plen = 1024, .klen = 16, },
97 { .blen = 2048, .plen = 16, .klen = 16, },
98 { .blen = 2048, .plen = 256, .klen = 16, },
99 { .blen = 2048, .plen = 1024, .klen = 16, },
100 { .blen = 2048, .plen = 2048, .klen = 16, },
101 { .blen = 4096, .plen = 16, .klen = 16, },
102 { .blen = 4096, .plen = 256, .klen = 16, },
103 { .blen = 4096, .plen = 1024, .klen = 16, },
104 { .blen = 4096, .plen = 4096, .klen = 16, },
105 { .blen = 8192, .plen = 16, .klen = 16, },
106 { .blen = 8192, .plen = 256, .klen = 16, },
107 { .blen = 8192, .plen = 1024, .klen = 16, },
108 { .blen = 8192, .plen = 4096, .klen = 16, },
109 { .blen = 8192, .plen = 8192, .klen = 16, },
110
111 /* End marker */
112 { .blen = 0, .plen = 0, .klen = 0, }
113};
114
86#endif /* _CRYPTO_TCRYPT_H */ 115#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6d5b746637be..abd980c729eb 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -22,6 +22,17 @@
22#include <crypto/rng.h> 22#include <crypto/rng.h>
23 23
24#include "internal.h" 24#include "internal.h"
25
26#ifndef CONFIG_CRYPTO_MANAGER_TESTS
27
28/* a perfect nop */
29int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
30{
31 return 0;
32}
33
34#else
35
25#include "testmgr.h" 36#include "testmgr.h"
26 37
27/* 38/*
@@ -153,8 +164,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
153 free_page((unsigned long)buf[i]); 164 free_page((unsigned long)buf[i]);
154} 165}
155 166
167static int do_one_async_hash_op(struct ahash_request *req,
168 struct tcrypt_result *tr,
169 int ret)
170{
171 if (ret == -EINPROGRESS || ret == -EBUSY) {
172 ret = wait_for_completion_interruptible(&tr->completion);
173 if (!ret)
174 ret = tr->err;
175 INIT_COMPLETION(tr->completion);
176 }
177 return ret;
178}
179
156static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, 180static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
157 unsigned int tcount) 181 unsigned int tcount, bool use_digest)
158{ 182{
159 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); 183 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
160 unsigned int i, j, k, temp; 184 unsigned int i, j, k, temp;
@@ -206,23 +230,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
206 } 230 }
207 231
208 ahash_request_set_crypt(req, sg, result, template[i].psize); 232 ahash_request_set_crypt(req, sg, result, template[i].psize);
209 ret = crypto_ahash_digest(req); 233 if (use_digest) {
210 switch (ret) { 234 ret = do_one_async_hash_op(req, &tresult,
211 case 0: 235 crypto_ahash_digest(req));
212 break; 236 if (ret) {
213 case -EINPROGRESS: 237 pr_err("alg: hash: digest failed on test %d "
214 case -EBUSY: 238 "for %s: ret=%d\n", j, algo, -ret);
215 ret = wait_for_completion_interruptible( 239 goto out;
216 &tresult.completion); 240 }
217 if (!ret && !(ret = tresult.err)) { 241 } else {
218 INIT_COMPLETION(tresult.completion); 242 ret = do_one_async_hash_op(req, &tresult,
219 break; 243 crypto_ahash_init(req));
244 if (ret) {
245 pr_err("alt: hash: init failed on test %d "
246 "for %s: ret=%d\n", j, algo, -ret);
247 goto out;
248 }
249 ret = do_one_async_hash_op(req, &tresult,
250 crypto_ahash_update(req));
251 if (ret) {
252 pr_err("alt: hash: update failed on test %d "
253 "for %s: ret=%d\n", j, algo, -ret);
254 goto out;
255 }
256 ret = do_one_async_hash_op(req, &tresult,
257 crypto_ahash_final(req));
258 if (ret) {
259 pr_err("alt: hash: final failed on test %d "
260 "for %s: ret=%d\n", j, algo, -ret);
261 goto out;
220 } 262 }
221 /* fall through */
222 default:
223 printk(KERN_ERR "alg: hash: digest failed on test %d "
224 "for %s: ret=%d\n", j, algo, -ret);
225 goto out;
226 } 263 }
227 264
228 if (memcmp(result, template[i].digest, 265 if (memcmp(result, template[i].digest,
@@ -1201,7 +1238,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1201 unsigned int tcount) 1238 unsigned int tcount)
1202{ 1239{
1203 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); 1240 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1204 int err, i, j, seedsize; 1241 int err = 0, i, j, seedsize;
1205 u8 *seed; 1242 u8 *seed;
1206 char result[32]; 1243 char result[32];
1207 1244
@@ -1402,7 +1439,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1402 return PTR_ERR(tfm); 1439 return PTR_ERR(tfm);
1403 } 1440 }
1404 1441
1405 err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); 1442 err = test_hash(tfm, desc->suite.hash.vecs,
1443 desc->suite.hash.count, true);
1444 if (!err)
1445 err = test_hash(tfm, desc->suite.hash.vecs,
1446 desc->suite.hash.count, false);
1406 1447
1407 crypto_free_ahash(tfm); 1448 crypto_free_ahash(tfm);
1408 return err; 1449 return err;
@@ -1477,9 +1518,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
1477 return err; 1518 return err;
1478} 1519}
1479 1520
1521static int alg_test_null(const struct alg_test_desc *desc,
1522 const char *driver, u32 type, u32 mask)
1523{
1524 return 0;
1525}
1526
1480/* Please keep this list sorted by algorithm name. */ 1527/* Please keep this list sorted by algorithm name. */
1481static const struct alg_test_desc alg_test_descs[] = { 1528static const struct alg_test_desc alg_test_descs[] = {
1482 { 1529 {
1530 .alg = "__driver-cbc-aes-aesni",
1531 .test = alg_test_null,
1532 .suite = {
1533 .cipher = {
1534 .enc = {
1535 .vecs = NULL,
1536 .count = 0
1537 },
1538 .dec = {
1539 .vecs = NULL,
1540 .count = 0
1541 }
1542 }
1543 }
1544 }, {
1545 .alg = "__driver-ecb-aes-aesni",
1546 .test = alg_test_null,
1547 .suite = {
1548 .cipher = {
1549 .enc = {
1550 .vecs = NULL,
1551 .count = 0
1552 },
1553 .dec = {
1554 .vecs = NULL,
1555 .count = 0
1556 }
1557 }
1558 }
1559 }, {
1560 .alg = "__ghash-pclmulqdqni",
1561 .test = alg_test_null,
1562 .suite = {
1563 .hash = {
1564 .vecs = NULL,
1565 .count = 0
1566 }
1567 }
1568 }, {
1483 .alg = "ansi_cprng", 1569 .alg = "ansi_cprng",
1484 .test = alg_test_cprng, 1570 .test = alg_test_cprng,
1485 .fips_allowed = 1, 1571 .fips_allowed = 1,
@@ -1623,6 +1709,30 @@ static const struct alg_test_desc alg_test_descs[] = {
1623 } 1709 }
1624 } 1710 }
1625 }, { 1711 }, {
1712 .alg = "cryptd(__driver-ecb-aes-aesni)",
1713 .test = alg_test_null,
1714 .suite = {
1715 .cipher = {
1716 .enc = {
1717 .vecs = NULL,
1718 .count = 0
1719 },
1720 .dec = {
1721 .vecs = NULL,
1722 .count = 0
1723 }
1724 }
1725 }
1726 }, {
1727 .alg = "cryptd(__ghash-pclmulqdqni)",
1728 .test = alg_test_null,
1729 .suite = {
1730 .hash = {
1731 .vecs = NULL,
1732 .count = 0
1733 }
1734 }
1735 }, {
1626 .alg = "ctr(aes)", 1736 .alg = "ctr(aes)",
1627 .test = alg_test_skcipher, 1737 .test = alg_test_skcipher,
1628 .fips_allowed = 1, 1738 .fips_allowed = 1,
@@ -1669,6 +1779,21 @@ static const struct alg_test_desc alg_test_descs[] = {
1669 } 1779 }
1670 } 1780 }
1671 }, { 1781 }, {
1782 .alg = "ecb(__aes-aesni)",
1783 .test = alg_test_null,
1784 .suite = {
1785 .cipher = {
1786 .enc = {
1787 .vecs = NULL,
1788 .count = 0
1789 },
1790 .dec = {
1791 .vecs = NULL,
1792 .count = 0
1793 }
1794 }
1795 }
1796 }, {
1672 .alg = "ecb(aes)", 1797 .alg = "ecb(aes)",
1673 .test = alg_test_skcipher, 1798 .test = alg_test_skcipher,
1674 .fips_allowed = 1, 1799 .fips_allowed = 1,
@@ -1943,6 +2068,15 @@ static const struct alg_test_desc alg_test_descs[] = {
1943 } 2068 }
1944 } 2069 }
1945 }, { 2070 }, {
2071 .alg = "ghash",
2072 .test = alg_test_hash,
2073 .suite = {
2074 .hash = {
2075 .vecs = ghash_tv_template,
2076 .count = GHASH_TEST_VECTORS
2077 }
2078 }
2079 }, {
1946 .alg = "hmac(md5)", 2080 .alg = "hmac(md5)",
1947 .test = alg_test_hash, 2081 .test = alg_test_hash,
1948 .suite = { 2082 .suite = {
@@ -2407,4 +2541,7 @@ notest:
2407non_fips_alg: 2541non_fips_alg:
2408 return -EINVAL; 2542 return -EINVAL;
2409} 2543}
2544
2545#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
2546
2410EXPORT_SYMBOL_GPL(alg_test); 2547EXPORT_SYMBOL_GPL(alg_test);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 9963b18983ab..74e35377fd30 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = {
1003 }, 1003 },
1004}; 1004};
1005 1005
1006#define GHASH_TEST_VECTORS 1
1007
1008static struct hash_testvec ghash_tv_template[] = {
1009 {
1012 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
1013 .ksize = 16,
1014 .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
1015 .psize = 16,
1016 .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
1017 "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
1018 },
1019};
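
For reference, and assuming the standard GCM construction in which the 16-byte key is the hash subkey H itself, this single vector checks one carry-less multiplication in GF(2^128):

	digest = X * H   mod (x^128 + x^7 + x^2 + x + 1)

where X is the lone 16-byte plaintext block and the initial GHASH state is zero.
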
1020
1006/* 1021/*
1007 * HMAC-MD5 test vectors from RFC2202 1022 * HMAC-MD5 test vectors from RFC2202
1008 * (These need to be fixed to not use strlen). 1023 * (These need to be fixed to not use strlen).
@@ -1654,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
1654 } 1669 }
1655}; 1670};
1656 1671
1657#define VMAC_AES_TEST_VECTORS 1 1672#define VMAC_AES_TEST_VECTORS 8
1658static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', 1673static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
1659 '\x02', '\x03', '\x02', '\x02', 1674 '\x02', '\x03', '\x02', '\x02',
1660 '\x02', '\x04', '\x01', '\x07', 1675 '\x02', '\x04', '\x01', '\x07',
1661 '\x04', '\x01', '\x04', '\x03',}; 1676 '\x04', '\x01', '\x04', '\x03',};
1677static char vmac_string2[128] = {'a', 'b', 'c',};
1678static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
1679 'a', 'b', 'c', 'a', 'b', 'c',
1680 'a', 'b', 'c', 'a', 'b', 'c',
1681 'a', 'b', 'c', 'a', 'b', 'c',
1682 'a', 'b', 'c', 'a', 'b', 'c',
1683 'a', 'b', 'c', 'a', 'b', 'c',
1684 'a', 'b', 'c', 'a', 'b', 'c',
1685 'a', 'b', 'c', 'a', 'b', 'c',
1686 };
1687
1662static struct hash_testvec aes_vmac128_tv_template[] = { 1688static struct hash_testvec aes_vmac128_tv_template[] = {
1663 { 1689 {
1690 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1691 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1692 .plaintext = NULL,
1693 .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
1694 .psize = 0,
1695 .ksize = 16,
1696 }, {
1664 .key = "\x00\x01\x02\x03\x04\x05\x06\x07" 1697 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1665 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", 1698 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1666 .plaintext = vmac_string, 1699 .plaintext = vmac_string1,
1667 .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", 1700 .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
1701 .psize = 128,
1702 .ksize = 16,
1703 }, {
1704 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1705 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1706 .plaintext = vmac_string2,
1707 .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
1708 .psize = 128,
1709 .ksize = 16,
1710 }, {
1711 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1712 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1713 .plaintext = vmac_string3,
1714 .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
1715 .psize = 128,
1716 .ksize = 16,
1717 }, {
1718 .key = "abcdefghijklmnop",
1719 .plaintext = NULL,
1720 .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
1721 .psize = 0,
1722 .ksize = 16,
1723 }, {
1724 .key = "abcdefghijklmnop",
1725 .plaintext = vmac_string1,
1726 .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
1727 .psize = 128,
1728 .ksize = 16,
1729 }, {
1730 .key = "abcdefghijklmnop",
1731 .plaintext = vmac_string2,
1732 .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
1733 .psize = 128,
1734 .ksize = 16,
1735 }, {
1736 .key = "abcdefghijklmnop",
1737 .plaintext = vmac_string3,
1738 .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
1668 .psize = 128, 1739 .psize = 128,
1669 .ksize = 16, 1740 .ksize = 16,
1670 }, 1741 },
diff --git a/crypto/twofish.c b/crypto/twofish_generic.c
index dfcda231f87a..1f07b843e07c 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
212 212
213MODULE_LICENSE("GPL"); 213MODULE_LICENSE("GPL");
214MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); 214MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
215MODULE_ALIAS("twofish");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0a9468e575de..0999274a27ac 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
43const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ 43const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
44const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ 44const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
45 45
46#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
47
46#ifdef __LITTLE_ENDIAN 48#ifdef __LITTLE_ENDIAN
47#define INDEX_HIGH 1 49#define INDEX_HIGH 1
48#define INDEX_LOW 0 50#define INDEX_LOW 0
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
110 int i; u64 th, tl; \ 112 int i; u64 th, tl; \
111 rh = rl = 0; \ 113 rh = rl = 0; \
112 for (i = 0; i < nw; i += 2) { \ 114 for (i = 0; i < nw; i += 2) { \
113 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ 115 MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
114 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ 116 pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
115 ADD128(rh, rl, th, tl); \ 117 ADD128(rh, rl, th, tl); \
116 } \ 118 } \
117 } while (0) 119 } while (0)
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
121 int i; u64 th, tl; \ 123 int i; u64 th, tl; \
122 rh1 = rl1 = rh = rl = 0; \ 124 rh1 = rl1 = rh = rl = 0; \
123 for (i = 0; i < nw; i += 2) { \ 125 for (i = 0; i < nw; i += 2) { \
124 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ 126 MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
125 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ 127 pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
126 ADD128(rh, rl, th, tl); \ 128 ADD128(rh, rl, th, tl); \
127 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ 129 MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
128 le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ 130 pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
129 ADD128(rh1, rl1, th, tl); \ 131 ADD128(rh1, rl1, th, tl); \
130 } \ 132 } \
131 } while (0) 133 } while (0)
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
136 int i; u64 th, tl; \ 138 int i; u64 th, tl; \
137 rh = rl = 0; \ 139 rh = rl = 0; \
138 for (i = 0; i < nw; i += 8) { \ 140 for (i = 0; i < nw; i += 8) { \
139 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ 141 MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
140 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ 142 pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
141 ADD128(rh, rl, th, tl); \ 143 ADD128(rh, rl, th, tl); \
142 MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ 144 MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
143 le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ 145 pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
144 ADD128(rh, rl, th, tl); \ 146 ADD128(rh, rl, th, tl); \
145 MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ 147 MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
146 le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ 148 pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
147 ADD128(rh, rl, th, tl); \ 149 ADD128(rh, rl, th, tl); \
148 MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ 150 MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
149 le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ 151 pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
150 ADD128(rh, rl, th, tl); \ 152 ADD128(rh, rl, th, tl); \
151 } \ 153 } \
152 } while (0) 154 } while (0)
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
156 int i; u64 th, tl; \ 158 int i; u64 th, tl; \
157 rh1 = rl1 = rh = rl = 0; \ 159 rh1 = rl1 = rh = rl = 0; \
158 for (i = 0; i < nw; i += 8) { \ 160 for (i = 0; i < nw; i += 8) { \
159 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ 161 MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
160 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ 162 pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
161 ADD128(rh, rl, th, tl); \ 163 ADD128(rh, rl, th, tl); \
162 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ 164 MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
163 le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ 165 pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
164 ADD128(rh1, rl1, th, tl); \ 166 ADD128(rh1, rl1, th, tl); \
165 MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ 167 MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
166 le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ 168 pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
167 ADD128(rh, rl, th, tl); \ 169 ADD128(rh, rl, th, tl); \
168 MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ 170 MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
169 le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ 171 pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
170 ADD128(rh1, rl1, th, tl); \ 172 ADD128(rh1, rl1, th, tl); \
171 MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ 173 MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
172 le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ 174 pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
173 ADD128(rh, rl, th, tl); \ 175 ADD128(rh, rl, th, tl); \
174 MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ 176 MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
175 le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ 177 pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
176 ADD128(rh1, rl1, th, tl); \ 178 ADD128(rh1, rl1, th, tl); \
177 MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ 179 MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
178 le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ 180 pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
179 ADD128(rh, rl, th, tl); \ 181 ADD128(rh, rl, th, tl); \
180 MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ 182 MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
181 le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ 183 pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
182 ADD128(rh1, rl1, th, tl); \ 184 ADD128(rh1, rl1, th, tl); \
183 } \ 185 } \
184 } while (0) 186 } while (0)
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
216 int i; \ 218 int i; \
217 rh = rl = t = 0; \ 219 rh = rl = t = 0; \
218 for (i = 0; i < nw; i += 2) { \ 220 for (i = 0; i < nw; i += 2) { \
219 t1 = le64_to_cpup(mp+i) + kp[i]; \ 221 t1 = pe64_to_cpup(mp+i) + kp[i]; \
220 t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ 222 t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
221 m2 = MUL32(t1 >> 32, t2); \ 223 m2 = MUL32(t1 >> 32, t2); \
222 m1 = MUL32(t1, t2 >> 32); \ 224 m1 = MUL32(t1, t2 >> 32); \
223 ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ 225 ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
322 ctx->first_block_processed = 0; 324 ctx->first_block_processed = 0;
323} 325}
324 326
325static u64 l3hash(u64 p1, u64 p2, 327static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
326 u64 k1, u64 k2, u64 len)
327{ 328{
328 u64 rh, rl, t, z = 0; 329 u64 rh, rl, t, z = 0;
329 330
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
474 } 475 }
475 p = be64_to_cpup(out_p + i); 476 p = be64_to_cpup(out_p + i);
476 h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); 477 h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
477 return p + h; 478 return le64_to_cpu(p + h);
478} 479}
479 480
480static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) 481static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
549 550
550static int vmac_init(struct shash_desc *pdesc) 551static int vmac_init(struct shash_desc *pdesc)
551{ 552{
552 struct crypto_shash *parent = pdesc->tfm;
553 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
554
555 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
556 return 0; 553 return 0;
557} 554}
558 555
diff --git a/crypto/xor.c b/crypto/xor.c
index fc5b836f3430..b75182d8ab14 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -18,6 +18,7 @@
18 18
19#define BH_TRACE 0 19#define BH_TRACE 0
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/gfp.h>
21#include <linux/raid/xor.h> 22#include <linux/raid/xor.h>
22#include <linux/jiffies.h> 23#include <linux/jiffies.h>
23#include <asm/xor.h> 24#include <asm/xor.h>
diff --git a/crypto/xts.c b/crypto/xts.c
index d87b0f3102c3..555ecaab1e54 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
224 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, 224 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
225 CRYPTO_ALG_TYPE_MASK); 225 CRYPTO_ALG_TYPE_MASK);
226 if (IS_ERR(alg)) 226 if (IS_ERR(alg))
227 return ERR_PTR(PTR_ERR(alg)); 227 return ERR_CAST(alg);
228 228
229 inst = crypto_alloc_instance("xts", alg); 229 inst = crypto_alloc_instance("xts", alg);
230 if (IS_ERR(inst)) 230 if (IS_ERR(inst))