Diffstat (limited to 'crypto')
-rw-r--r--   crypto/ablkcipher.c    277
-rw-r--r--   crypto/algapi.c          2
-rw-r--r--   crypto/authenc.c         8
-rw-r--r--   crypto/internal.h        2
-rw-r--r--   crypto/pcrypt.c         11
-rw-r--r--   crypto/scatterwalk.c     2
-rw-r--r--   crypto/shash.c           2
-rw-r--r--   crypto/tcrypt.c        343
-rw-r--r--   crypto/tcrypt.h         29
-rw-r--r--   crypto/testmgr.c        66
-rw-r--r--   crypto/testmgr.h        64
-rw-r--r--   crypto/vmac.c           75
12 files changed, 801 insertions, 80 deletions
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980dae1727..98a66103f4f2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
+#include <crypto/scatterwalk.h>
+
 #include "internal.h"
 
 static const char *skcipher_default_geniv __read_mostly;
 
+struct ablkcipher_buffer {
+	struct list_head entry;
+	struct scatter_walk dst;
+	unsigned int len;
+	void *data;
+};
+
+enum {
+	ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	struct ablkcipher_buffer *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+		ablkcipher_buffer_write(p);
+		list_del(&p->entry);
+		kfree(p);
+	}
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+					  struct ablkcipher_buffer *p)
+{
+	p->dst = walk->out;
+	list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+	return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+						unsigned int bsize)
+{
+	unsigned int n = bsize;
+
+	for (;;) {
+		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+		if (len_this_page > n)
+			len_this_page = n;
+		scatterwalk_advance(&walk->out, n);
+		if (n == len_this_page)
+			break;
+		n -= len_this_page;
+		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+	}
+
+	return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+						unsigned int n)
+{
+	scatterwalk_advance(&walk->in, n);
+	scatterwalk_advance(&walk->out, n);
+
+	return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int nbytes = 0;
+
+	if (likely(err >= 0)) {
+		unsigned int n = walk->nbytes - err;
+
+		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+			n = ablkcipher_done_fast(walk, n);
+		else if (WARN_ON(err)) {
+			err = -EINVAL;
+			goto err;
+		} else
+			n = ablkcipher_done_slow(walk, n);
+
+		nbytes = walk->total - n;
+		err = 0;
+	}
+
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+	walk->total = nbytes;
+	walk->nbytes = nbytes;
+
+	if (nbytes) {
+		crypto_yield(req->base.flags);
+		return ablkcipher_walk_next(req, walk);
+	}
+
+	if (walk->iv != req->info)
+		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+	if (walk->iv_buffer)
+		kfree(walk->iv_buffer);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk,
+				       unsigned int bsize,
+				       unsigned int alignmask,
+				       void **src_p, void **dst_p)
+{
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+	struct ablkcipher_buffer *p;
+	void *src, *dst, *base;
+	unsigned int n;
+
+	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+	n += (aligned_bsize * 3 - (alignmask + 1) +
+	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+	p = kmalloc(n, GFP_ATOMIC);
+	if (!p)
+		ablkcipher_walk_done(req, walk, -ENOMEM);
+
+	base = p + 1;
+
+	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+	src = dst = ablkcipher_get_spot(dst, bsize);
+
+	p->len = bsize;
+	p->data = dst;
+
+	scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+	ablkcipher_queue_write(walk, p);
+
+	walk->nbytes = bsize;
+	walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+	*src_p = src;
+	*dst_p = dst;
+
+	return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+				     struct crypto_tfm *tfm,
+				     unsigned int alignmask)
+{
+	unsigned bs = walk->blocksize;
+	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
+	u8 *iv;
+
+	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+	if (!walk->iv_buffer)
+		return -ENOMEM;
+
+	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, ivsize);
+
+	walk->iv = memcpy(iv, walk->iv, ivsize);
+	return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk)
+{
+	walk->src.page = scatterwalk_page(&walk->in);
+	walk->src.offset = offset_in_page(walk->in.offset);
+	walk->dst.page = scatterwalk_page(&walk->out);
+	walk->dst.offset = offset_in_page(walk->out.offset);
+
+	return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask, bsize, n;
+	void *src, *dst;
+	int err;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	n = walk->total;
+	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return ablkcipher_walk_done(req, walk, -EINVAL);
+	}
+
+	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+	src = dst = NULL;
+
+	bsize = min(walk->blocksize, n);
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (n < bsize ||
+	    !scatterwalk_aligned(&walk->in, alignmask) ||
+	    !scatterwalk_aligned(&walk->out, alignmask)) {
+		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+					   &src, &dst);
+		goto set_phys_lowmem;
+	}
+
+	walk->nbytes = n;
+
+	return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+	if (err >= 0) {
+		walk->src.page = virt_to_page(src);
+		walk->dst.page = virt_to_page(dst);
+		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+	}
+
+	return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+				 struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
+	walk->nbytes = walk->total;
+	if (unlikely(!walk->total))
+		return 0;
+
+	walk->iv_buffer = NULL;
+	walk->iv = req->info;
+	if (unlikely(((unsigned long)walk->iv & alignmask))) {
+		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+		if (err)
+			return err;
+	}
+
+	scatterwalk_start(&walk->in, walk->in.sg);
+	scatterwalk_start(&walk->out, walk->out.sg);
+
+	return ablkcipher_walk_next(req, walk);
+}
+
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk)
+{
+	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+	return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
+
 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fae27ed01c..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
 {
 	int err = -EINVAL;
 
-	if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+	if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
 		goto out;
 
 	spawn->frontend = frontend;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 05eb32e0d949..b9884ee0adb6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int cryptlen = req->cryptlen;
 
 	if (err)
 		goto out;
@@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
 		goto out;
 
 	authsize = crypto_aead_authsize(authenc);
+	cryptlen -= authsize;
 	ihash = ahreq->result + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
@@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					req->base.complete, req->base.data);
 	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     req->cryptlen, req->iv);
+				     cryptlen, req->iv);
 
 	err = crypto_ablkcipher_decrypt(abreq);
 
@@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int cryptlen = req->cryptlen;
 
 	if (err)
 		goto out;
 
 	authsize = crypto_aead_authsize(authenc);
+	cryptlen -= authsize;
 	ihash = ahreq->result + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
@@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					req->base.complete, req->base.data);
 	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     req->cryptlen, req->iv);
+				     cryptlen, req->iv);
 
 	err = crypto_ablkcipher_decrypt(abreq);
 
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d226362e594..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
+ * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 80201241b698..247178cb98ec 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -315,16 +315,13 @@ out_free_inst:
 	goto out;
 }
 
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+						 u32 type, u32 mask)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
-	struct crypto_attr_type *algt;
-
-	algt = crypto_get_attr_type(tb);
 
-	alg = crypto_get_attr_alg(tb, algt->type,
-				  (algt->mask & CRYPTO_ALG_TYPE_MASK));
+	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
@@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
-		return pcrypt_alloc_aead(tb);
+		return pcrypt_alloc_aead(tb, algt->type, algt->mask);
 	}
 
 	return ERR_PTR(-EINVAL);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a424401..41e529af0773 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 
 void scatterwalk_done(struct scatter_walk *walk, int out, int more)
 {
-	if (!offset_in_page(walk->offset) || !more)
+	if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
 		scatterwalk_pagedone(walk, out, more);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/crypto/shash.c b/crypto/shash.c
index 91f7b9d83881..22fd9433141f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 	u8 *buffer, *alignbuffer;
 	int err;
 
-	absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
+	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
 	buffer = kmalloc(absize, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a35159947a26..3ca68f9fc14d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -394,6 +394,17 @@ out:
 	return 0;
 }
 
+static void test_hash_sg_init(struct scatterlist *sg)
+{
+	int i;
+
+	sg_init_table(sg, TVMEMSIZE);
+	for (i = 0; i < TVMEMSIZE; i++) {
+		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+		memset(tvmem[i], 0xff, PAGE_SIZE);
+	}
+}
+
 static void test_hash_speed(const char *algo, unsigned int sec,
 			    struct hash_speed *speed)
 {
@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
 		goto out;
 	}
 
-	sg_init_table(sg, TVMEMSIZE);
-	for (i = 0; i < TVMEMSIZE; i++) {
-		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
-		memset(tvmem[i], 0xff, PAGE_SIZE);
-	}
-
+	test_hash_sg_init(sg);
 	for (i = 0; speed[i].blen != 0; i++) {
 		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
 			printk(KERN_ERR
@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
 			goto out;
 		}
 
+		if (speed[i].klen)
+			crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
 		printk(KERN_INFO "test%3u "
 		       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
 		       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -458,6 +467,250 @@ out:
 	crypto_free_hash(tfm);
 }
 
+struct tcrypt_result {
+	struct completion completion;
+	int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+	struct tcrypt_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		struct tcrypt_result *tr = req->base.data;
+
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		INIT_COMPLETION(tr->completion);
+	}
+	return ret;
+}
+
+static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
+				     char *out, int sec)
+{
+	unsigned long start, end;
+	int bcount;
+	int ret;
+
+	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	     time_before(jiffies, end); bcount++) {
+		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+		if (ret)
+			return ret;
+	}
+
+	printk("%6u opers/sec, %9lu bytes/sec\n",
+	       bcount / sec, ((long)bcount * blen) / sec);
+
+	return 0;
+}
+
+static int test_ahash_jiffies(struct ahash_request *req, int blen,
+			      int plen, char *out, int sec)
+{
+	unsigned long start, end;
+	int bcount, pcount;
+	int ret;
+
+	if (plen == blen)
+		return test_ahash_jiffies_digest(req, blen, out, sec);
+
+	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	     time_before(jiffies, end); bcount++) {
+		ret = crypto_ahash_init(req);
+		if (ret)
+			return ret;
+		for (pcount = 0; pcount < blen; pcount += plen) {
+			ret = do_one_ahash_op(req, crypto_ahash_update(req));
+			if (ret)
+				return ret;
+		}
+		/* we assume there is enough space in 'out' for the result */
+		ret = do_one_ahash_op(req, crypto_ahash_final(req));
+		if (ret)
+			return ret;
+	}
+
+	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+		bcount / sec, ((long)bcount * blen) / sec);
+
+	return 0;
+}
+
+static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
+				    char *out)
+{
+	unsigned long cycles = 0;
+	int ret, i;
+
+	/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+
+		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+		if (ret)
+			goto out;
+
+		end = get_cycles();
+
+		cycles += end - start;
+	}
+
+out:
+	if (ret)
+		return ret;
+
+	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+		cycles / 8, cycles / (8 * blen));
+
+	return 0;
+}
+
+static int test_ahash_cycles(struct ahash_request *req, int blen,
+			     int plen, char *out)
+{
+	unsigned long cycles = 0;
+	int i, pcount, ret;
+
+	if (plen == blen)
+		return test_ahash_cycles_digest(req, blen, out);
+
+	/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		ret = crypto_ahash_init(req);
+		if (ret)
+			goto out;
+		for (pcount = 0; pcount < blen; pcount += plen) {
+			ret = do_one_ahash_op(req, crypto_ahash_update(req));
+			if (ret)
+				goto out;
+		}
+		ret = do_one_ahash_op(req, crypto_ahash_final(req));
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+
+		ret = crypto_ahash_init(req);
+		if (ret)
+			goto out;
+		for (pcount = 0; pcount < blen; pcount += plen) {
+			ret = do_one_ahash_op(req, crypto_ahash_update(req));
+			if (ret)
+				goto out;
+		}
+		ret = do_one_ahash_op(req, crypto_ahash_final(req));
+		if (ret)
+			goto out;
+
+		end = get_cycles();
+
+		cycles += end - start;
+	}
+
+out:
+	if (ret)
+		return ret;
+
+	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+		cycles / 8, cycles / (8 * blen));
+
+	return 0;
+}
+
+static void test_ahash_speed(const char *algo, unsigned int sec,
+			     struct hash_speed *speed)
+{
+	struct scatterlist sg[TVMEMSIZE];
+	struct tcrypt_result tresult;
+	struct ahash_request *req;
+	struct crypto_ahash *tfm;
+	static char output[1024];
+	int i, ret;
+
+	printk(KERN_INFO "\ntesting speed of async %s\n", algo);
+
+	tfm = crypto_alloc_ahash(algo, 0, 0);
+	if (IS_ERR(tfm)) {
+		pr_err("failed to load transform for %s: %ld\n",
+		       algo, PTR_ERR(tfm));
+		return;
+	}
+
+	if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
+		pr_err("digestsize(%u) > outputbuffer(%zu)\n",
+		       crypto_ahash_digestsize(tfm), sizeof(output));
+		goto out;
+	}
+
+	test_hash_sg_init(sg);
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("ahash request allocation failure\n");
+		goto out;
+	}
+
+	init_completion(&tresult.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   tcrypt_complete, &tresult);
+
+	for (i = 0; speed[i].blen != 0; i++) {
+		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+			pr_err("template (%u) too big for tvmem (%lu)\n",
+			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+			break;
+		}
+
+		pr_info("test%3u "
+			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
+			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+		ahash_request_set_crypt(req, sg, output, speed[i].plen);
+
+		if (sec)
+			ret = test_ahash_jiffies(req, speed[i].blen,
+						 speed[i].plen, output, sec);
+		else
+			ret = test_ahash_cycles(req, speed[i].blen,
+						speed[i].plen, output);
+
+		if (ret) {
+			pr_err("hashing failed ret=%d\n", ret);
+			break;
+		}
+	}
+
+	ahash_request_free(req);
+
+out:
+	crypto_free_ahash(tfm);
+}
+
 static void test_available(void)
 {
 	char **name = check;
@@ -881,9 +1134,87 @@ static int do_test(int m)
 		test_hash_speed("rmd320", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
 
+	case 318:
+		test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+		if (mode > 300 && mode < 400) break;
+
 	case 399:
 		break;
 
+	case 400:
+		/* fall through */
+
+	case 401:
+		test_ahash_speed("md4", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 402:
+		test_ahash_speed("md5", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 403:
+		test_ahash_speed("sha1", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 404:
+		test_ahash_speed("sha256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 405:
+		test_ahash_speed("sha384", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 406:
+		test_ahash_speed("sha512", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 407:
+		test_ahash_speed("wp256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 408:
+		test_ahash_speed("wp384", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 409:
+		test_ahash_speed("wp512", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 410:
+		test_ahash_speed("tgr128", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 411:
+		test_ahash_speed("tgr160", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 412:
+		test_ahash_speed("tgr192", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 413:
+		test_ahash_speed("sha224", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 414:
+		test_ahash_speed("rmd128", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 415:
+		test_ahash_speed("rmd160", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 416:
+		test_ahash_speed("rmd256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 417:
+		test_ahash_speed("rmd320", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 499:
+		break;
+
 	case 1000:
 		test_available();
 		break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfaf95b1..10cb925132c9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@ struct cipher_speed_template {
 struct hash_speed {
 	unsigned int blen;	/* buffer length */
 	unsigned int plen;	/* per-update length */
+	unsigned int klen;	/* key length */
 };
 
 /*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
 	{ .blen = 0, .plen = 0, }
 };
 
+static struct hash_speed hash_speed_template_16[] = {
+	{ .blen = 16,	.plen = 16,	.klen = 16, },
+	{ .blen = 64,	.plen = 16,	.klen = 16, },
+	{ .blen = 64,	.plen = 64,	.klen = 16, },
+	{ .blen = 256,	.plen = 16,	.klen = 16, },
+	{ .blen = 256,	.plen = 64,	.klen = 16, },
+	{ .blen = 256,	.plen = 256,	.klen = 16, },
+	{ .blen = 1024,	.plen = 16,	.klen = 16, },
+	{ .blen = 1024,	.plen = 256,	.klen = 16, },
+	{ .blen = 1024,	.plen = 1024,	.klen = 16, },
+	{ .blen = 2048,	.plen = 16,	.klen = 16, },
+	{ .blen = 2048,	.plen = 256,	.klen = 16, },
+	{ .blen = 2048,	.plen = 1024,	.klen = 16, },
+	{ .blen = 2048,	.plen = 2048,	.klen = 16, },
+	{ .blen = 4096,	.plen = 16,	.klen = 16, },
+	{ .blen = 4096,	.plen = 256,	.klen = 16, },
+	{ .blen = 4096,	.plen = 1024,	.klen = 16, },
+	{ .blen = 4096,	.plen = 4096,	.klen = 16, },
+	{ .blen = 8192,	.plen = 16,	.klen = 16, },
+	{ .blen = 8192,	.plen = 256,	.klen = 16, },
+	{ .blen = 8192,	.plen = 1024,	.klen = 16, },
+	{ .blen = 8192,	.plen = 4096,	.klen = 16, },
+	{ .blen = 8192,	.plen = 8192,	.klen = 16, },
+
+	/* End marker */
+	{ .blen = 0,	.plen = 0,	.klen = 0, }
+};
+
 #endif	/* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c494d7610be1..5c8aaa0cb0b9 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
 		free_page((unsigned long)buf[i]);
 }
 
+static int do_one_async_hash_op(struct ahash_request *req,
+				struct tcrypt_result *tr,
+				int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		INIT_COMPLETION(tr->completion);
+	}
+	return ret;
+}
+
 static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
-		     unsigned int tcount)
+		     unsigned int tcount, bool use_digest)
 {
 	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
 	unsigned int i, j, k, temp;
@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 		}
 
 		ahash_request_set_crypt(req, sg, result, template[i].psize);
-		ret = crypto_ahash_digest(req);
-		switch (ret) {
-		case 0:
-			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&tresult.completion);
-			if (!ret && !(ret = tresult.err)) {
-				INIT_COMPLETION(tresult.completion);
-				break;
+		if (use_digest) {
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_digest(req));
+			if (ret) {
+				pr_err("alg: hash: digest failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+		} else {
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_init(req));
+			if (ret) {
+				pr_err("alt: hash: init failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_update(req));
+			if (ret) {
+				pr_err("alt: hash: update failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			ret = do_one_async_hash_op(req, &tresult,
+						   crypto_ahash_final(req));
+			if (ret) {
+				pr_err("alt: hash: final failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
 			}
-			/* fall through */
-		default:
-			printk(KERN_ERR "alg: hash: digest failed on test %d "
-			       "for %s: ret=%d\n", j, algo, -ret);
-			goto out;
 		}
 
 		if (memcmp(result, template[i].digest,
@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
 		return PTR_ERR(tfm);
 	}
 
-	err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
+	err = test_hash(tfm, desc->suite.hash.vecs,
+			desc->suite.hash.count, true);
+	if (!err)
+		err = test_hash(tfm, desc->suite.hash.vecs,
+				desc->suite.hash.count, false);
 
 	crypto_free_ahash(tfm);
 	return err;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index fb765173d41c..74e35377fd30 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
 	}
 };
 
-#define VMAC_AES_TEST_VECTORS	1
-static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+#define VMAC_AES_TEST_VECTORS	8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
 				'\x02', '\x03', '\x02', '\x02',
 				'\x02', '\x04', '\x01', '\x07',
 				'\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				'a', 'b', 'c', 'a', 'b', 'c',
+				};
+
 static struct hash_testvec aes_vmac128_tv_template[] = {
 	{
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.plaintext = NULL,
+		.digest	= "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+		.psize	= 0,
+		.ksize	= 16,
+	}, {
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.plaintext = vmac_string1,
+		.digest	= "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+		.psize	= 128,
+		.ksize	= 16,
+	}, {
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.plaintext = vmac_string2,
+		.digest	= "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+		.psize	= 128,
+		.ksize	= 16,
+	}, {
 		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.plaintext = vmac_string,
-		.digest	= "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+		.plaintext = vmac_string3,
+		.digest	= "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+		.psize	= 128,
+		.ksize	= 16,
+	}, {
+		.key	= "abcdefghijklmnop",
+		.plaintext = NULL,
+		.digest	= "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+		.psize	= 0,
+		.ksize	= 16,
+	}, {
+		.key	= "abcdefghijklmnop",
+		.plaintext = vmac_string1,
+		.digest	= "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+		.psize	= 128,
+		.ksize	= 16,
+	}, {
+		.key	= "abcdefghijklmnop",
+		.plaintext = vmac_string2,
+		.digest	= "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+		.psize	= 128,
+		.ksize	= 16,
+	}, {
+		.key	= "abcdefghijklmnop",
+		.plaintext = vmac_string3,
+		.digest	= "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
 		.psize	= 128,
 		.ksize	= 16,
 	},
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0a9468e575de..0999274a27ac 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
 const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
 const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 
+#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
+
 #ifdef __LITTLE_ENDIAN
 #define INDEX_HIGH 1
 #define INDEX_LOW 0
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 		int i; u64 th, tl; \
 		rh = rl = 0; \
 		for (i = 0; i < nw; i += 2) { \
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
 			ADD128(rh, rl, th, tl); \
 		} \
 	} while (0)
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 		int i; u64 th, tl; \
 		rh1 = rl1 = rh = rl = 0; \
 		for (i = 0; i < nw; i += 2) { \
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
-				le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+				pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
 			ADD128(rh1, rl1, th, tl); \
 		} \
 	} while (0)
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 		int i; u64 th, tl; \
 		rh = rl = 0; \
 		for (i = 0; i < nw; i += 8) { \
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
-				le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+				pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
-				le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+				pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
-				le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+				pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
 			ADD128(rh, rl, th, tl); \
 		} \
 	} while (0)
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 		int i; u64 th, tl; \
 		rh1 = rl1 = rh = rl = 0; \
 		for (i = 0; i < nw; i += 8) { \
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
-				le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+				pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
-				le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+				pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
 			ADD128(rh1, rl1, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
-				le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+				pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
-				le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
+				pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
 			ADD128(rh1, rl1, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
-				le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+				pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
-				le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
+				pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
 			ADD128(rh1, rl1, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
-				le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+				pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
 			ADD128(rh, rl, th, tl); \
-			MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
-				le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
+				pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
 			ADD128(rh1, rl1, th, tl); \
 		} \
 	} while (0)
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
 		int i; \
 		rh = rl = t = 0; \
 		for (i = 0; i < nw; i += 2) { \
-			t1 = le64_to_cpup(mp+i) + kp[i]; \
-			t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
+			t1 = pe64_to_cpup(mp+i) + kp[i]; \
+			t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
 			m2 = MUL32(t1 >> 32, t2); \
 			m1 = MUL32(t1, t2 >> 32); \
 			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
 	ctx->first_block_processed = 0;
 }
 
-static u64 l3hash(u64 p1, u64 p2,
-		u64 k1, u64 k2, u64 len)
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
 {
 	u64 rh, rl, t, z = 0;
 
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
 	}
 	p = be64_to_cpup(out_p + i);
 	h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
-	return p + h;
+	return le64_to_cpu(p + h);
 }
 
 static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
 
 static int vmac_init(struct shash_desc *pdesc)
 {
-	struct crypto_shash *parent = pdesc->tfm;
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
-	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
 	return 0;
 }
 