-rw-r--r--  MAINTAINERS                           |  16
-rw-r--r--  arch/arm/mach-omap2/clock2420_data.c  |   2
-rw-r--r--  arch/arm/mach-omap2/clock2430_data.c  |   2
-rw-r--r--  arch/arm/mach-omap2/clock3xxx_data.c  |   2
-rw-r--r--  arch/arm/mach-omap2/devices.c         |  71
-rw-r--r--  crypto/Kconfig                        |  21
-rw-r--r--  crypto/cryptd.c                       | 206
-rw-r--r--  drivers/crypto/Kconfig                |   9
-rw-r--r--  drivers/crypto/Makefile               |   3
-rw-r--r--  drivers/crypto/amcc/Makefile          |   2
-rw-r--r--  drivers/crypto/hifn_795x.c            |   3
-rw-r--r--  drivers/crypto/omap-aes.c             | 948
-rw-r--r--  drivers/crypto/omap-sham.c            |   6
-rw-r--r--  drivers/crypto/talitos.c              |  29
-rw-r--r--  include/crypto/cryptd.h               |  24
-rw-r--r--  include/linux/padata.h                |   4
16 files changed, 1310 insertions(+), 38 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 494e1a07366a..9bd4422618e9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4453,6 +4453,15 @@ L: linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/busses/i2c-pasemi.c
 
+PADATA PARALLEL EXECUTION MECHANISM
+M:	Steffen Klassert <steffen.klassert@secunet.com>
+L:	linux-kernel@vger.kernel.org
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	kernel/padata.c
+F:	include/linux/padata.h
+F:	Documentation/padata.txt
+
 PANASONIC LAPTOP ACPI EXTRAS DRIVER
 M:	Harald Welte <laforge@gnumonks.org>
 L:	platform-driver-x86@vger.kernel.org
@@ -4580,6 +4589,13 @@ L: netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/pcnet32.c
 
+PCRYPT PARALLEL CRYPTO ENGINE
+M:	Steffen Klassert <steffen.klassert@secunet.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	crypto/pcrypt.c
+F:	include/crypto/pcrypt.h
+
 PER-TASK DELAY ACCOUNTING
 M:	Balbir Singh <balbir@linux.vnet.ibm.com>
 S:	Maintained
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 37d65d62ed8f..5f2066a6ba74 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1838,7 +1838,7 @@ static struct omap_clk omap2420_clks[] = {
1838 CLK(NULL, "des_ick", &des_ick, CK_242X), 1838 CLK(NULL, "des_ick", &des_ick, CK_242X),
1839 CLK("omap-sham", "ick", &sha_ick, CK_242X), 1839 CLK("omap-sham", "ick", &sha_ick, CK_242X),
1840 CLK("omap_rng", "ick", &rng_ick, CK_242X), 1840 CLK("omap_rng", "ick", &rng_ick, CK_242X),
1841 CLK(NULL, "aes_ick", &aes_ick, CK_242X), 1841 CLK("omap-aes", "ick", &aes_ick, CK_242X),
1842 CLK(NULL, "pka_ick", &pka_ick, CK_242X), 1842 CLK(NULL, "pka_ick", &pka_ick, CK_242X),
1843 CLK(NULL, "usb_fck", &usb_fck, CK_242X), 1843 CLK(NULL, "usb_fck", &usb_fck, CK_242X),
1844 CLK("musb_hdrc", "fck", &osc_ck, CK_242X), 1844 CLK("musb_hdrc", "fck", &osc_ck, CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index b33118fb6a87..701a1716019e 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1926,7 +1926,7 @@ static struct omap_clk omap2430_clks[] = {
1926 CLK(NULL, "des_ick", &des_ick, CK_243X), 1926 CLK(NULL, "des_ick", &des_ick, CK_243X),
1927 CLK("omap-sham", "ick", &sha_ick, CK_243X), 1927 CLK("omap-sham", "ick", &sha_ick, CK_243X),
1928 CLK("omap_rng", "ick", &rng_ick, CK_243X), 1928 CLK("omap_rng", "ick", &rng_ick, CK_243X),
1929 CLK(NULL, "aes_ick", &aes_ick, CK_243X), 1929 CLK("omap-aes", "ick", &aes_ick, CK_243X),
1930 CLK(NULL, "pka_ick", &pka_ick, CK_243X), 1930 CLK(NULL, "pka_ick", &pka_ick, CK_243X),
1931 CLK(NULL, "usb_fck", &usb_fck, CK_243X), 1931 CLK(NULL, "usb_fck", &usb_fck, CK_243X),
1932 CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), 1932 CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index dfdce2d82779..c73906d17458 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3288,7 +3288,7 @@ static struct omap_clk omap3xxx_clks[] = {
3288 CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2 | CK_AM35XX), 3288 CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2 | CK_AM35XX),
3289 CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX), 3289 CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX),
3290 CLK(NULL, "icr_ick", &icr_ick, CK_343X), 3290 CLK(NULL, "icr_ick", &icr_ick, CK_343X),
3291 CLK(NULL, "aes2_ick", &aes2_ick, CK_343X), 3291 CLK("omap-aes", "ick", &aes2_ick, CK_343X),
3292 CLK("omap-sham", "ick", &sha12_ick, CK_343X), 3292 CLK("omap-sham", "ick", &sha12_ick, CK_343X),
3293 CLK(NULL, "des2_ick", &des2_ick, CK_343X), 3293 CLK(NULL, "des2_ick", &des2_ick, CK_343X),
3294 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX), 3294 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX),
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 2dbb265bedd4..b27e7cbb3f29 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -498,6 +498,76 @@ static void omap_init_sham(void)
 static inline void omap_init_sham(void) { }
 #endif
 
+#if defined(CONFIG_CRYPTO_DEV_OMAP_AES) || defined(CONFIG_CRYPTO_DEV_OMAP_AES_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+static struct resource omap2_aes_resources[] = {
+	{
+		.start	= OMAP24XX_SEC_AES_BASE,
+		.end	= OMAP24XX_SEC_AES_BASE + 0x4C,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= OMAP24XX_DMA_AES_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	{
+		.start	= OMAP24XX_DMA_AES_RX,
+		.flags	= IORESOURCE_DMA,
+	}
+};
+static int omap2_aes_resources_sz = ARRAY_SIZE(omap2_aes_resources);
+#else
+#define omap2_aes_resources		NULL
+#define omap2_aes_resources_sz		0
+#endif
+
+#ifdef CONFIG_ARCH_OMAP34XX
+static struct resource omap3_aes_resources[] = {
+	{
+		.start	= OMAP34XX_SEC_AES_BASE,
+		.end	= OMAP34XX_SEC_AES_BASE + 0x4C,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= OMAP34XX_DMA_AES2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	{
+		.start	= OMAP34XX_DMA_AES2_RX,
+		.flags	= IORESOURCE_DMA,
+	}
+};
+static int omap3_aes_resources_sz = ARRAY_SIZE(omap3_aes_resources);
+#else
+#define omap3_aes_resources		NULL
+#define omap3_aes_resources_sz		0
+#endif
+
+static struct platform_device aes_device = {
+	.name		= "omap-aes",
+	.id		= -1,
+};
+
+static void omap_init_aes(void)
+{
+	if (cpu_is_omap24xx()) {
+		aes_device.resource = omap2_aes_resources;
+		aes_device.num_resources = omap2_aes_resources_sz;
+	} else if (cpu_is_omap34xx()) {
+		aes_device.resource = omap3_aes_resources;
+		aes_device.num_resources = omap3_aes_resources_sz;
+	} else {
+		pr_err("%s: platform not supported\n", __func__);
+		return;
+	}
+	platform_device_register(&aes_device);
+}
+
+#else
+static inline void omap_init_aes(void) { }
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
@@ -854,6 +924,7 @@ static int __init omap2_init_devices(void)
 	omap_hdq_init();
 	omap_init_sti();
 	omap_init_sham();
+	omap_init_aes();
 	omap_init_vout();
 
 	return 0;
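The device registered by omap_init_aes() binds to its driver by plain platform-bus name matching; the resources stay in board code, the driver stays generic. For reference, a sketch of the consumer side (this mirrors the platform_driver declared at the end of drivers/crypto/omap-aes.c below; illustration only):

```c
#include <linux/module.h>
#include <linux/platform_device.h>

/* The platform bus binds aes_device (.name "omap-aes", .id -1) to any
 * registered driver carrying the same .name string. */
static int omap_aes_probe(struct platform_device *pdev);
static int omap_aes_remove(struct platform_device *pdev);

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",	/* must match aes_device.name above */
		.owner	= THIS_MODULE,
	},
};
```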
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e573077f1672..e4bac29a32e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,13 +23,12 @@ comment "Crypto core or helper"
 
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
-	depends on CRYPTO_ANSI_CPRNG
+	depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
 	help
 	  This options enables the fips boot option which is
 	  required if you want to system to operate in a FIPS 200
 	  certification. You should say no unless you know what
-	  this is. Note that CRYPTO_ANSI_CPRNG is required if this
-	  option is selected
+	  this is.
 
 config CRYPTO_ALGAPI
 	tristate
@@ -365,7 +364,7 @@ config CRYPTO_RMD128
 	  RIPEMD-160 should be used.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD160
 	tristate "RIPEMD-160 digest algorithm"
@@ -382,7 +381,7 @@ config CRYPTO_RMD160
 	  against RIPEMD-160.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD256
 	tristate "RIPEMD-256 digest algorithm"
@@ -394,7 +393,7 @@ config CRYPTO_RMD256
 	  (than RIPEMD-128).
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD320
 	tristate "RIPEMD-320 digest algorithm"
@@ -406,7 +405,7 @@ config CRYPTO_RMD320
 	  (than RIPEMD-160).
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
@@ -461,7 +460,7 @@ config CRYPTO_WP512
 	  Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
 
 	  See also:
-	  <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+	  <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
 
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
@@ -579,8 +578,8 @@ config CRYPTO_ANUBIS
 	  in the NESSIE competition.
 
 	  See also:
-	  <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/>
-	  <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html>
+	  <https://www.cosic.esat.kuleuven.be/nessie/reports/>
+	  <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
 
 config CRYPTO_ARC4
 	tristate "ARC4 cipher algorithm"
@@ -659,7 +658,7 @@ config CRYPTO_KHAZAD
 	  on 32-bit processors. Khazad uses an 128 bit key size.
 
 	  See also:
-	  <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html>
+	  <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
 
 config CRYPTO_SALSA20
 	tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ef71318976c7..e46d21ae26bc 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -3,6 +3,13 @@
  *
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -12,6 +19,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
 #include <linux/err.h>
@@ -44,6 +52,11 @@ struct hashd_instance_ctx {
 	struct cryptd_queue *queue;
 };
 
+struct aead_instance_ctx {
+	struct crypto_aead_spawn aead_spawn;
+	struct cryptd_queue *queue;
+};
+
 struct cryptd_blkcipher_ctx {
 	struct crypto_blkcipher *child;
 };
@@ -61,6 +74,14 @@ struct cryptd_hash_request_ctx {
 	struct shash_desc desc;
 };
 
+struct cryptd_aead_ctx {
+	struct crypto_aead *child;
+};
+
+struct cryptd_aead_request_ctx {
+	crypto_completion_t complete;
+};
+
 static void cryptd_queue_worker(struct work_struct *work);
 
 static int cryptd_init_queue(struct cryptd_queue *queue,
@@ -601,6 +622,144 @@ out_put_alg:
 	return err;
 }
 
+static void cryptd_aead_crypt(struct aead_request *req,
+			struct crypto_aead *child,
+			int err,
+			int (*crypt)(struct aead_request *req))
+{
+	struct cryptd_aead_request_ctx *rctx;
+	rctx = aead_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+	aead_request_set_tfm(req, child);
+	err = crypt(req);
+	req->base.complete = rctx->complete;
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+	struct crypto_aead *child = ctx->child;
+	struct aead_request *req;
+
+	req = container_of(areq, struct aead_request, base);
+	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+}
+
+static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+	struct crypto_aead *child = ctx->child;
+	struct aead_request *req;
+
+	req = container_of(areq, struct aead_request, base);
+	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+}
+
+static int cryptd_aead_enqueue(struct aead_request *req,
+				    crypto_completion_t complete)
+{
+	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+
+	rctx->complete = req->base.complete;
+	req->base.complete = complete;
+	return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
+{
+	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
+}
+
+static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
+{
+	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
+}
+
+static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *cipher;
+
+	cipher = crypto_spawn_aead(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
+	ctx->child = cipher;
+	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+	return 0;
+}
+
+static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	crypto_free_aead(ctx->child);
+}
+
+static int cryptd_create_aead(struct crypto_template *tmpl,
+			      struct rtattr **tb,
+			      struct cryptd_queue *queue)
+{
+	struct aead_instance_ctx *ctx;
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+	int err;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
+				CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(alg))
+		return PTR_ERR(alg);
+
+	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+	err = PTR_ERR(inst);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	ctx = crypto_instance_ctx(inst);
+	ctx->queue = queue;
+
+	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
+			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+	if (err)
+		goto out_free_inst;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_type = alg->cra_type;
+	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+	inst->alg.cra_init = cryptd_aead_init_tfm;
+	inst->alg.cra_exit = cryptd_aead_exit_tfm;
+	inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
+	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
+	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
+	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+	inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
+	inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
+	inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
+	inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;
+
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
+		crypto_drop_spawn(&ctx->aead_spawn.base);
+out_free_inst:
+		kfree(inst);
+	}
+out_put_alg:
+	crypto_mod_put(alg);
+	return err;
+}
+
 static struct cryptd_queue queue;
 
 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -616,6 +775,8 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 		return cryptd_create_blkcipher(tmpl, tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
 		return cryptd_create_hash(tmpl, tb, &queue);
+	case CRYPTO_ALG_TYPE_AEAD:
+		return cryptd_create_aead(tmpl, tb, &queue);
 	}
 
 	return -EINVAL;
@@ -625,16 +786,21 @@ static void cryptd_free(struct crypto_instance *inst)
 {
 	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
 	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
 
 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AHASH:
 		crypto_drop_shash(&hctx->spawn);
 		kfree(ahash_instance(inst));
 		return;
+	case CRYPTO_ALG_TYPE_AEAD:
+		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
+		kfree(inst);
+		return;
+	default:
+		crypto_drop_spawn(&ctx->spawn);
+		kfree(inst);
 	}
-
-	crypto_drop_spawn(&ctx->spawn);
-	kfree(inst);
 }
 
 static struct crypto_template cryptd_tmpl = {
@@ -724,6 +890,40 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
 
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+				      u32 type, u32 mask)
+{
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_aead *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_aead(tfm);
+		return ERR_PTR(-EINVAL);
+	}
+	return __cryptd_aead_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
+{
+	struct cryptd_aead_ctx *ctx;
+	ctx = crypto_aead_ctx(&tfm->base);
+	return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_child);
+
+void cryptd_free_aead(struct cryptd_aead *tfm)
+{
+	crypto_free_aead(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_aead);
+
 static int __init cryptd_init(void)
 {
 	int err;
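The three new exports give other kernel code a typed handle on a cryptd-wrapped AEAD. A minimal, hypothetical usage sketch (error handling trimmed; it assumes some synchronous AEAD such as "authenc(hmac(sha1),cbc(aes))" is available to wrap):

```c
#include <crypto/cryptd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Hypothetical caller: wrap a synchronous AEAD so that encrypt/decrypt
 * are queued to the cryptd workqueue and run in process context. */
static int example_wrap_aead(void)
{
	struct cryptd_aead *ctfm;
	struct crypto_aead *child;

	/* internally requests "cryptd(authenc(hmac(sha1),cbc(aes)))" */
	ctfm = cryptd_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	child = cryptd_aead_child(ctfm);	/* underlying synchronous tfm */
	pr_info("wrapped %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(child)));
	/* keys/authsize are set on &ctfm->base; requests are queued as usual */

	cryptd_free_aead(ctfm);
	return 0;
}
```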
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ea0b3863ad0f..eab2cf7a0269 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -172,6 +172,7 @@ config CRYPTO_DEV_MV_CESA
 
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
+	select CRYPTO_DES
 	select CRYPTO_ALGAPI
 	depends on SPARC64
 	help
@@ -243,4 +244,12 @@ config CRYPTO_DEV_OMAP_SHAM
 	  OMAP processors have SHA1/MD5 hw accelerator. Select this if you
 	  want to use the OMAP module for SHA1/MD5 algorithms.
 
+config CRYPTO_DEV_OMAP_AES
+	tristate "Support for OMAP AES hw engine"
+	depends on ARCH_OMAP2 || ARCH_OMAP3
+	select CRYPTO_AES
+	help
+	  OMAP processors have AES module accelerator. Select this if you
+	  want to use the OMAP module for AES algorithms.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6dbbe00c4524..256697330a41 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,11 +2,12 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-objs := n2_core.o n2_asm.o
+n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index aa376e8d5ed5..5c0c62b65d69 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e449ac5627a5..0eac3da566ba 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2700,8 +2700,7 @@ static void __devexit hifn_remove(struct pci_dev *pdev)
 	dev = pci_get_drvdata(pdev);
 
 	if (dev) {
-		cancel_delayed_work(&dev->work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&dev->work);
 
 		hifn_unregister_rng(dev);
 		hifn_unregister_alg(dev);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
new file mode 100644
index 000000000000..799ca517c121
--- /dev/null
+++ b/drivers/crypto/omap-aes.c
@@ -0,0 +1,948 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+
+/* OMAP TRM gives bitfields as start:end, where start is the higher bit
+   number. For example 7:0 */
+#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+
+#define AES_REG_KEY(x)		(0x1C - ((x ^ 0x01) * 0x04))
+#define AES_REG_IV(x)		(0x20 + ((x) * 0x04))
+
+#define AES_REG_CTRL		0x30
+#define AES_REG_CTRL_CTR_WIDTH	(1 << 7)
+#define AES_REG_CTRL_CTR	(1 << 6)
+#define AES_REG_CTRL_CBC	(1 << 5)
+#define AES_REG_CTRL_KEY_SIZE	(3 << 3)
+#define AES_REG_CTRL_DIRECTION	(1 << 2)
+#define AES_REG_CTRL_INPUT_READY	(1 << 1)
+#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)
+
+#define AES_REG_DATA		0x34
+#define AES_REG_DATA_N(x)	(0x34 + ((x) * 0x04))
+
+#define AES_REG_REV		0x44
+#define AES_REG_REV_MAJOR	0xF0
+#define AES_REG_REV_MINOR	0x0F
+
+#define AES_REG_MASK		0x48
+#define AES_REG_MASK_SIDLE	(1 << 6)
+#define AES_REG_MASK_START	(1 << 5)
+#define AES_REG_MASK_DMA_OUT_EN	(1 << 3)
+#define AES_REG_MASK_DMA_IN_EN	(1 << 2)
+#define AES_REG_MASK_SOFTRESET	(1 << 1)
+#define AES_REG_AUTOIDLE	(1 << 0)
+
+#define AES_REG_SYSSTATUS	0x4C
+#define AES_REG_SYSSTATUS_RESETDONE	(1 << 0)
+
+#define DEFAULT_TIMEOUT		(5*HZ)
+
+#define FLAGS_MODE_MASK		0x000f
+#define FLAGS_ENCRYPT		BIT(0)
+#define FLAGS_CBC		BIT(1)
+#define FLAGS_GIV		BIT(2)
+
+#define FLAGS_NEW_KEY		BIT(4)
+#define FLAGS_NEW_IV		BIT(5)
+#define FLAGS_INIT		BIT(6)
+#define FLAGS_FAST		BIT(7)
+#define FLAGS_BUSY		8
+
+struct omap_aes_ctx {
+	struct omap_aes_dev *dd;
+
+	int		keylen;
+	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
+	unsigned long	flags;
+};
+
+struct omap_aes_reqctx {
+	unsigned long mode;
+};
+
+#define OMAP_AES_QUEUE_LENGTH	1
+#define OMAP_AES_CACHE_SIZE	0
+
+struct omap_aes_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+	struct clk		*iclk;
+	struct omap_aes_ctx	*ctx;
+	struct device		*dev;
+	unsigned long		flags;
+
+	u32			*iv;
+	u32			ctrl;
+
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+
+	struct tasklet_struct	task;
+
+	struct ablkcipher_request	*req;
+	size_t				total;
+	struct scatterlist		*in_sg;
+	size_t				in_offset;
+	struct scatterlist		*out_sg;
+	size_t				out_offset;
+
+	size_t			buflen;
+	void			*buf_in;
+	size_t			dma_size;
+	int			dma_in;
+	int			dma_lch_in;
+	dma_addr_t		dma_addr_in;
+	void			*buf_out;
+	int			dma_out;
+	int			dma_lch_out;
+	dma_addr_t		dma_addr_out;
+};
+
+/* keep registered devices data here */
+static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(list_lock);
+
+static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
+{
+	return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
+				  u32 value)
+{
+	__raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
+					u32 value, u32 mask)
+{
+	u32 val;
+
+	val = omap_aes_read(dd, offset);
+	val &= ~mask;
+	val |= value;
+	omap_aes_write(dd, offset, val);
+}
+
+static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		omap_aes_write(dd, offset, *value);
+}
+
+static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
+{
+	unsigned long timeout = jiffies + DEFAULT_TIMEOUT;
+
+	while (!(omap_aes_read(dd, offset) & bit)) {
+		if (time_is_before_jiffies(timeout)) {
+			dev_err(dd->dev, "omap-aes timeout\n");
+			return -ETIMEDOUT;
+		}
+	}
+	return 0;
+}
+
+static int omap_aes_hw_init(struct omap_aes_dev *dd)
+{
+	int err = 0;
+
+	clk_enable(dd->iclk);
+	if (!(dd->flags & FLAGS_INIT)) {
+		/* is it necessary to reset before every operation? */
+		omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
+					AES_REG_MASK_SOFTRESET);
+		/*
+		 * prevent OCP bus error (SRESP) in case an access to the module
+		 * is performed while the module is coming out of soft reset
+		 */
+		__asm__ __volatile__("nop");
+		__asm__ __volatile__("nop");
+
+		err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
+				AES_REG_SYSSTATUS_RESETDONE);
+		if (!err)
+			dd->flags |= FLAGS_INIT;
+	}
+
+	return err;
+}
+
+static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
+{
+	clk_disable(dd->iclk);
+}
+
+static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+{
+	unsigned int key32;
+	int i;
+	u32 val, mask;
+
+	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+	if (dd->flags & FLAGS_CBC)
+		val |= AES_REG_CTRL_CBC;
+	if (dd->flags & FLAGS_ENCRYPT)
+		val |= AES_REG_CTRL_DIRECTION;
+
+	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
+	    !(dd->ctx->flags & FLAGS_NEW_KEY))
+		goto out;
+
+	/* only need to write control registers for new settings */
+
+	dd->ctrl = val;
+
+	val = 0;
+	if (dd->dma_lch_out >= 0)
+		val |= AES_REG_MASK_DMA_OUT_EN;
+	if (dd->dma_lch_in >= 0)
+		val |= AES_REG_MASK_DMA_IN_EN;
+
+	mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;
+
+	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
+
+	pr_debug("Set key\n");
+	key32 = dd->ctx->keylen / sizeof(u32);
+	/* set a key */
+	for (i = 0; i < key32; i++) {
+		omap_aes_write(dd, AES_REG_KEY(i),
+			__le32_to_cpu(dd->ctx->key[i]));
+	}
+	dd->ctx->flags &= ~FLAGS_NEW_KEY;
+
+	if (dd->flags & FLAGS_NEW_IV) {
+		pr_debug("Set IV\n");
+		omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
+		dd->flags &= ~FLAGS_NEW_IV;
+	}
+
+	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
+			AES_REG_CTRL_KEY_SIZE;
+
+	omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+
+out:
+	/* start DMA or disable idle mode */
+	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+			    AES_REG_MASK_START);
+}
+
+static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
+{
+	struct omap_aes_dev *dd = NULL, *tmp;
+
+	spin_lock_bh(&list_lock);
+	if (!ctx->dd) {
+		list_for_each_entry(tmp, &dev_list, list) {
+			/* FIXME: take first available AES core */
+			dd = tmp;
+			break;
+		}
+		ctx->dd = dd;
+	} else {
+		/* already found before */
+		dd = ctx->dd;
+	}
+	spin_unlock_bh(&list_lock);
+
+	return dd;
+}
+
+static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
+{
+	struct omap_aes_dev *dd = data;
+
+	if (lch == dd->dma_lch_out)
+		tasklet_schedule(&dd->task);
+}
+
+static int omap_aes_dma_init(struct omap_aes_dev *dd)
+{
+	int err = -ENOMEM;
+
+	dd->dma_lch_out = -1;
+	dd->dma_lch_in = -1;
+
+	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
+	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
+	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
+	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+	if (!dd->buf_in || !dd->buf_out) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		goto err_alloc;
+	}
+
+	/* MAP here */
+	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
+					 DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_in;
+	}
+
+	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
+					  DMA_FROM_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_out;
+	}
+
+	err = omap_request_dma(dd->dma_in, "omap-aes-rx",
+			       omap_aes_dma_callback, dd, &dd->dma_lch_in);
+	if (err) {
+		dev_err(dd->dev, "Unable to request DMA channel\n");
+		goto err_dma_in;
+	}
+	err = omap_request_dma(dd->dma_out, "omap-aes-tx",
+			       omap_aes_dma_callback, dd, &dd->dma_lch_out);
+	if (err) {
+		dev_err(dd->dev, "Unable to request DMA channel\n");
+		goto err_dma_out;
+	}
+
+	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+				 dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+				dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
+	return 0;
+
+err_dma_out:
+	omap_free_dma(dd->dma_lch_in);
+err_dma_in:
+	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+			 DMA_FROM_DEVICE);
+err_map_out:
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
+err_map_in:
+	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
+	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
+err_alloc:
+	if (err)
+		pr_err("error: %d\n", err);
+	return err;
+}
+
+static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
+{
+	omap_free_dma(dd->dma_lch_out);
+	omap_free_dma(dd->dma_lch_in);
+	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+			 DMA_FROM_DEVICE);
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
+	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
+	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+			unsigned int start, unsigned int nbytes, int out)
+{
+	struct scatter_walk walk;
+
+	if (!nbytes)
+		return;
+
+	scatterwalk_start(&walk, sg);
+	scatterwalk_advance(&walk, start);
+	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	scatterwalk_done(&walk, out, 0);
+}
+
+static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
+		   size_t buflen, size_t total, int out)
+{
+	unsigned int count, off = 0;
+
+	while (buflen && total) {
+		count = min((*sg)->length - *offset, total);
+		count = min(count, buflen);
+
+		if (!count)
+			return off;
+
+		sg_copy_buf(buf + off, *sg, *offset, count, out);
+
+		off += count;
+		buflen -= count;
+		*offset += count;
+		total -= count;
+
+		if (*offset == (*sg)->length) {
+			*sg = sg_next(*sg);
+			if (*sg)
+				*offset = 0;
+			else
+				total = 0;
+		}
+	}
+
+	return off;
+}
+
+static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+			      dma_addr_t dma_addr_out, int length)
+{
+	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct omap_aes_dev *dd = ctx->dd;
+	int len32;
+
+	pr_debug("len: %d\n", length);
+
+	dd->dma_size = length;
+
+	if (!(dd->flags & FLAGS_FAST))
+		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+					   DMA_TO_DEVICE);
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	/* IN */
+	omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
+				     len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
+				     OMAP_DMA_DST_SYNC);
+
+	omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
+				dma_addr_in, 0, 0);
+
+	/* OUT */
+	omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
+				     len32, 1, OMAP_DMA_SYNC_PACKET,
+				     dd->dma_out, OMAP_DMA_SRC_SYNC);
+
+	omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
+				 dma_addr_out, 0, 0);
+
+	omap_start_dma(dd->dma_lch_in);
+	omap_start_dma(dd->dma_lch_out);
+
+	omap_aes_write_ctrl(dd);
+
+	return 0;
+}
+
+static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
+					crypto_ablkcipher_reqtfm(dd->req));
+	int err, fast = 0, in, out;
+	size_t count;
+	dma_addr_t addr_in, addr_out;
+
+	pr_debug("total: %d\n", dd->total);
+
+	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
+		/* check for alignment */
+		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
+		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
+
+		fast = in && out;
+	}
+
+	if (fast) {
+		count = min(dd->total, sg_dma_len(dd->in_sg));
+		count = min(count, sg_dma_len(dd->out_sg));
+
+		if (count != dd->total)
+			return -EINVAL;
+
+		pr_debug("fast\n");
+
+		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+
+		addr_in = sg_dma_address(dd->in_sg);
+		addr_out = sg_dma_address(dd->out_sg);
+
+		dd->flags |= FLAGS_FAST;
+
+	} else {
+		/* use cache buffers */
+		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
+				dd->buflen, dd->total, 0);
+
+		addr_in = dd->dma_addr_in;
+		addr_out = dd->dma_addr_out;
+
+		dd->flags &= ~FLAGS_FAST;
+
+	}
+
+	dd->total -= count;
+
+	err = omap_aes_hw_init(dd);
+
+	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+
+	return err;
+}
+
+static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
+{
+	struct omap_aes_ctx *ctx;
+
+	pr_debug("err: %d\n", err);
+
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+
+	if (!dd->total)
+		dd->req->base.complete(&dd->req->base, err);
+}
+
+static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+{
+	int err = 0;
+	size_t count;
+
+	pr_debug("total: %d\n", dd->total);
+
+	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
+
+	omap_aes_hw_cleanup(dd);
+
+	omap_stop_dma(dd->dma_lch_in);
+	omap_stop_dma(dd->dma_lch_out);
+
+	if (dd->flags & FLAGS_FAST) {
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+	} else {
+		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+					   dd->dma_size, DMA_FROM_DEVICE);
+
+		/* copy data */
+		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
+				dd->buflen, dd->dma_size, 1);
+		if (count != dd->dma_size) {
+			err = -EINVAL;
+			pr_err("not all data converted: %u\n", count);
+		}
+	}
+
+	if (err || !dd->total)
+		omap_aes_finish_req(dd, err);
+
+	return err;
+}
+
+static int omap_aes_handle_req(struct omap_aes_dev *dd)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct omap_aes_ctx *ctx;
+	struct omap_aes_reqctx *rctx;
+	struct ablkcipher_request *req;
+	unsigned long flags;
+
+	if (dd->total)
+		goto start;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (!async_req)
+		clear_bit(FLAGS_BUSY, &dd->flags);
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return 0;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ablkcipher_request_cast(async_req);
+
+	pr_debug("get new req\n");
+
+	/* assign new request to device */
+	dd->req = req;
+	dd->total = req->nbytes;
+	dd->in_offset = 0;
+	dd->in_sg = req->src;
+	dd->out_offset = 0;
+	dd->out_sg = req->dst;
+
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx->mode &= FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+	dd->iv = req->info;
+	if ((dd->flags & FLAGS_CBC) && dd->iv)
+		dd->flags |= FLAGS_NEW_IV;
+	else
+		dd->flags &= ~FLAGS_NEW_IV;
+
+	ctx->dd = dd;
+	if (dd->ctx != ctx) {
+		/* assign new context to device */
+		dd->ctx = ctx;
+		ctx->flags |= FLAGS_NEW_KEY;
+	}
+
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
638 pr_err("request size is not exact amount of AES blocks\n");
+
+start:
+	return omap_aes_crypt_dma_start(dd);
+}
+
+static void omap_aes_task(unsigned long data)
+{
+	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+	int err;
+
+	pr_debug("enter\n");
+
+	err = omap_aes_crypt_dma_stop(dd);
+
+	err = omap_aes_handle_req(dd);
+
+	pr_debug("exit\n");
+}
+
+static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct omap_aes_dev *dd;
+	unsigned long flags;
+	int err;
+
+	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+		 !!(mode & FLAGS_ENCRYPT),
+		 !!(mode & FLAGS_CBC));
+
+	dd = omap_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	err = ablkcipher_enqueue_request(&dd->queue, req);
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
+		omap_aes_handle_req(dd);
+
+	pr_debug("exit\n");
+
+	return err;
+}
+
+/* ********************** ALG API ************************************ */
+
+static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+		   keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	pr_debug("enter, keylen: %d\n", keylen);
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+	ctx->flags |= FLAGS_NEW_KEY;
+
+	return 0;
+}
+
+static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, 0);
+}
+
+static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_CBC);
+}
+
+static int omap_aes_cra_init(struct crypto_tfm *tfm)
+{
+	pr_debug("enter\n");
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+
+	return 0;
+}
+
+static void omap_aes_cra_exit(struct crypto_tfm *tfm)
+{
+	pr_debug("enter\n");
+}
+
+/* ********************** ALGS ************************************ */
+
+static struct crypto_alg algs[] = {
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "ecb-aes-omap",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_aes_cra_init,
+	.cra_exit		= omap_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= omap_aes_setkey,
+		.encrypt	= omap_aes_ecb_encrypt,
+		.decrypt	= omap_aes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-omap",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_aes_cra_init,
+	.cra_exit		= omap_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= omap_aes_setkey,
+		.encrypt	= omap_aes_cbc_encrypt,
+		.decrypt	= omap_aes_cbc_decrypt,
+	}
+}
+};
+
+static int omap_aes_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct omap_aes_dev *dd;
+	struct resource *res;
+	int err = -ENOMEM, i, j;
+	u32 reg;
+
+	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		goto err_data;
+	}
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	spin_lock_init(&dd->lock);
+	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
+
+	/* Get the base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "invalid resource type\n");
+		err = -ENODEV;
+		goto err_res;
+	}
+	dd->phys_base = res->start;
+
+	/* Get the DMA */
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res)
+		dev_info(dev, "no DMA info\n");
+	else
+		dd->dma_out = res->start;
+
+	/* Get the DMA */
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (!res)
+		dev_info(dev, "no DMA info\n");
+	else
+		dd->dma_in = res->start;
+
+	/* Initializing the clock */
832 dd->iclk = clk_get(dev, "ick");
833 if (!dd->iclk) {
834 dev_err(dev, "clock intialization failed.\n");
835 err = -ENODEV;
836 goto err_res;
837 }
+
+	dd->io_base = ioremap(dd->phys_base, SZ_4K);
+	if (!dd->io_base) {
+		dev_err(dev, "can't ioremap\n");
+		err = -ENOMEM;
+		goto err_io;
+	}
+
+	clk_enable(dd->iclk);
+	reg = omap_aes_read(dd, AES_REG_REV);
+	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
+		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
+	clk_disable(dd->iclk);
+
+	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+
+	err = omap_aes_dma_init(dd);
+	if (err)
+		goto err_dma;
+
+	INIT_LIST_HEAD(&dd->list);
+	spin_lock(&list_lock);
+	list_add_tail(&dd->list, &dev_list);
+	spin_unlock(&list_lock);
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		pr_debug("i: %d\n", i);
+		INIT_LIST_HEAD(&algs[i].cra_list);
+		err = crypto_register_alg(&algs[i]);
+		if (err)
+			goto err_algs;
+	}
+
+	pr_info("probe() done\n");
+
+	return 0;
+err_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&algs[j]);
+	omap_aes_dma_cleanup(dd);
+err_dma:
+	tasklet_kill(&dd->task);
+	iounmap(dd->io_base);
+err_io:
+	clk_put(dd->iclk);
+err_res:
+	kfree(dd);
+	dd = NULL;
+err_data:
+	dev_err(dev, "initialization failed.\n");
+	return err;
+}
+
+static int omap_aes_remove(struct platform_device *pdev)
+{
+	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
+	int i;
+
+	if (!dd)
+		return -ENODEV;
+
+	spin_lock(&list_lock);
+	list_del(&dd->list);
+	spin_unlock(&list_lock);
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++)
+		crypto_unregister_alg(&algs[i]);
+
+	tasklet_kill(&dd->task);
+	omap_aes_dma_cleanup(dd);
+	iounmap(dd->io_base);
+	clk_put(dd->iclk);
+	kfree(dd);
+	dd = NULL;
+
+	return 0;
+}
+
+static struct platform_driver omap_aes_driver = {
+	.probe	= omap_aes_probe,
+	.remove	= omap_aes_remove,
+	.driver	= {
+		.name	= "omap-aes",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init omap_aes_mod_init(void)
+{
+	pr_info("loading %s driver\n", "omap-aes");
+
+	if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+		pr_err("Unsupported cpu\n");
+		return -ENODEV;
+	}
+
+	return platform_driver_register(&omap_aes_driver);
+}
+
+static void __exit omap_aes_mod_exit(void)
+{
+	platform_driver_unregister(&omap_aes_driver);
+}
+
+module_init(omap_aes_mod_init);
+module_exit(omap_aes_mod_exit);
+
+MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
+
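The driver registers plain "ecb(aes)" and "cbc(aes)" algorithms at priority 100, so callers reach it through the normal async block-cipher API rather than anything OMAP-specific. A hedged usage sketch (completion handling for the async -EINPROGRESS case is omitted; a real caller would set a callback with ablkcipher_request_set_callback()):

```c
#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical caller of the accelerated CBC mode registered above.
 * crypto_alloc_ablkcipher() picks "cbc-aes-omap" only if no higher-
 * priority cbc(aes) implementation is registered. */
static int example_cbc_aes(struct scatterlist *src, struct scatterlist *dst,
			   unsigned int nbytes, const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	err = crypto_ablkcipher_encrypt(req);	/* may return -EINPROGRESS */
	ablkcipher_request_free(req);
out:
	crypto_free_ablkcipher(tfm);
	return err;
}
```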
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7d1485676886..a081c7c7d03f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -311,7 +311,8 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
-			1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+			1, OMAP_DMA_SYNC_PACKET, dd->dma,
+			OMAP_DMA_DST_SYNC_PREFETCH);
 
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 			dma_addr, 0, 0);
@@ -1072,6 +1073,9 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
 	omap_set_dma_dest_burst_mode(dd->dma_lch,
 			OMAP_DMA_DATA_BURST_16);
 
+	omap_set_dma_src_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_4);
+
 	return 0;
 }
 
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 4bcd825b5739..b879c3f5d7c0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -161,7 +161,7 @@ struct talitos_private {
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
 	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
-	talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+	talitos_ptr->eptr = upper_32_bits(dma_addr);
 }
 
 /*
@@ -332,10 +332,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
 
 	/* GO! */
 	wmb();
-	out_be32(priv->reg + TALITOS_FF(ch),
-		 cpu_to_be32(upper_32_bits(request->dma_desc)));
-	out_be32(priv->reg + TALITOS_FF_LO(ch),
-		 cpu_to_be32(lower_32_bits(request->dma_desc)));
+	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
+	out_be32(priv->reg + TALITOS_FF_LO(ch),
+		 lower_32_bits(request->dma_desc));
 
 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 
@@ -1751,14 +1750,14 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
 	ahash_init(areq);
 	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
 
-	req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
-	req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
-	req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
-	req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
-	req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
-	req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
-	req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
-	req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+	req_ctx->hw_context[0] = SHA224_H0;
+	req_ctx->hw_context[1] = SHA224_H1;
+	req_ctx->hw_context[2] = SHA224_H2;
+	req_ctx->hw_context[3] = SHA224_H3;
+	req_ctx->hw_context[4] = SHA224_H4;
+	req_ctx->hw_context[5] = SHA224_H5;
+	req_ctx->hw_context[6] = SHA224_H6;
+	req_ctx->hw_context[7] = SHA224_H7;
 
 	/* init 64-bit count */
 	req_ctx->hw_context[8] = 0;
@@ -2333,8 +2332,7 @@ static int talitos_remove(struct platform_device *ofdev)
 	talitos_unregister_rng(dev);
 
 	for (i = 0; i < priv->num_channels; i++)
-		if (priv->chan[i].fifo)
-			kfree(priv->chan[i].fifo);
+		kfree(priv->chan[i].fifo);
 
 	kfree(priv->chan);
 
@@ -2389,6 +2387,9 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 				     DESC_HDR_MODE0_MDEU_SHA256;
 		}
 		break;
+	default:
+		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+		return ERR_PTR(-EINVAL);
 	}
 
 	alg->cra_module = THIS_MODULE;
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1c96b255017c..ba98918bbd9b 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -1,5 +1,12 @@
 /*
  * Software async crypto daemon
+ *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
  */
 
 #ifndef _CRYPTO_CRYPT_H
@@ -42,4 +49,21 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
 void cryptd_free_ahash(struct cryptd_ahash *tfm);
 
+struct cryptd_aead {
+	struct crypto_aead base;
+};
+
+static inline struct cryptd_aead *__cryptd_aead_cast(
+	struct crypto_aead *tfm)
+{
+	return (struct cryptd_aead *)tfm;
+}
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+				      u32 type, u32 mask);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+
+void cryptd_free_aead(struct cryptd_aead *tfm);
+
 #endif
diff --git a/include/linux/padata.h b/include/linux/padata.h
index bdcd1e9eacea..4633b2f726b6 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -127,8 +127,8 @@ struct padata_cpumask {
  */
 struct parallel_data {
 	struct padata_instance		*pinst;
-	struct padata_parallel_queue	*pqueue;
-	struct padata_serial_queue	*squeue;
+	struct padata_parallel_queue	__percpu *pqueue;
+	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			seq_nr;
 	atomic_t			reorder_objects;
 	atomic_t			refcnt;
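The padata change is a sparse-annotation fix only: pqueue and squeue were already per-CPU allocations in kernel/padata.c, and __percpu lets static checking reject direct dereferences of those pointers. A small sketch of the access pattern the annotation enforces (helper name is illustrative):

```c
#include <linux/padata.h>
#include <linux/percpu.h>

/* With __percpu in place, sparse warns if pd->pqueue is dereferenced
 * directly; accesses must go through the per-CPU accessors instead. */
static struct padata_parallel_queue *pqueue_on(struct parallel_data *pd,
					       int cpu)
{
	return per_cpu_ptr(pd->pqueue, cpu);
}
```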