path: root/drivers
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 12:38:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 12:38:37 -0400
commit     332a3392188e0ad966543c87b8da2b9d246f301d (patch)
tree       ac0d570590bffdd1924426adc5b255857d2f3297 /drivers
parent     a9c86d42599519f3d83b5f46bdab25046fe47b84 (diff)
parent     81bd5f6c966cf2f137c2759dfc78abdffcff055e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (102 commits)
  crypto: sha-s390 - Fix warnings in import function
  crypto: vmac - New hash algorithm for intel_txt support
  crypto: api - Do not displace newly registered algorithms
  crypto: ansi_cprng - Fix module initialization
  crypto: xcbc - Fix alignment calculation of xcbc_tfm_ctx
  crypto: fips - Depend on ansi_cprng
  crypto: blkcipher - Do not use eseqiv on stream ciphers
  crypto: ctr - Use chainiv on raw counter mode
  Revert crypto: fips - Select CPRNG
  crypto: rng - Fix typo
  crypto: talitos - add support for 36 bit addressing
  crypto: talitos - align locks on cache lines
  crypto: talitos - simplify hmac data size calculation
  crypto: mv_cesa - Add support for Orion5X crypto engine
  crypto: cryptd - Add support to access underlaying shash
  crypto: gcm - Use GHASH digest algorithm
  crypto: ghash - Add GHASH digest algorithm for GCM
  crypto: authenc - Convert to ahash
  crypto: api - Fix aligned ctx helper
  crypto: hmac - Prehash ipad/opad
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/amd-rng.c     |    4
-rw-r--r--  drivers/char/hw_random/geode-rng.c   |    3
-rw-r--r--  drivers/char/random.c                |   14
-rw-r--r--  drivers/crypto/Kconfig               |   15
-rw-r--r--  drivers/crypto/Makefile              |    1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c  |    3
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c |   73
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h |   25
-rw-r--r--  drivers/crypto/mv_cesa.c             |  606
-rw-r--r--  drivers/crypto/mv_cesa.h             |  119
-rw-r--r--  drivers/crypto/padlock-sha.c         |  329
-rw-r--r--  drivers/crypto/talitos.c             |  216
-rw-r--r--  drivers/crypto/talitos.h             |    1
13 files changed, 1082 insertions(+), 327 deletions(-)
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index cd0ba51f7c8..0d8c5788b8e 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -44,8 +44,8 @@
  * want to register another driver on the same PCI id.
  */
 static const struct pci_device_id pci_tbl[] = {
-	{ 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
-	{ 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+	{ PCI_VDEVICE(AMD, 0x7443), 0, },
+	{ PCI_VDEVICE(AMD, 0x746b), 0, },
 	{ 0, },	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 64d513f6836..4c4d4e140f9 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -46,8 +46,7 @@
  * want to register another driver on the same PCI id.
  */
 static const struct pci_device_id pci_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
 	{ 0, },	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8c7444857a4..d8a9255e1a3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -240,6 +240,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
+#include <linux/fips.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
 	unsigned add_ptr;
 	int entropy_count;
 	int input_rotate;
+	__u8 *last_data;
 };
 
 static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
+	unsigned long flags;
 
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
 
 	while (nbytes) {
 		extract_buf(r, tmp);
+
+		if (r->last_data) {
+			spin_lock_irqsave(&r->lock, flags);
+			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+				panic("Hardware RNG duplicated output!\n");
+			memcpy(r->last_data, tmp, EXTRACT_SIZE);
+			spin_unlock_irqrestore(&r->lock, flags);
+		}
 		i = min_t(int, nbytes, EXTRACT_SIZE);
 		memcpy(buf, tmp, i);
 		nbytes -= i;
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
 	now = ktime_get_real();
 	mix_pool_bytes(r, &now, sizeof(now));
 	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+	/* Enable continuous test in fips mode */
+	if (fips_enabled)
+		r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
 }
 
 static int rand_initialize(void)
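
The random.c hunks above wire the FIPS continuous self-test into extract_entropy(): when fips_enabled is set, init_std_data() allocates last_data and each freshly extracted block is compared against the previous one under the pool lock, with a repeated block treated as a fatal RNG failure. A minimal stand-alone sketch of the same check, with a hypothetical read_block() standing in for extract_buf() (not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXTRACT_SIZE 10	/* block size used by the kernel pool */

/* Stand-in block source for this sketch; the kernel uses extract_buf(). */
static void read_block(unsigned char out[EXTRACT_SIZE])
{
	FILE *f = fopen("/dev/urandom", "rb");

	if (!f || fread(out, 1, EXTRACT_SIZE, f) != EXTRACT_SIZE)
		abort();
	fclose(f);
}

/* Continuous test: every output block must differ from its predecessor. */
static void get_random_block(unsigned char out[EXTRACT_SIZE])
{
	static unsigned char last[EXTRACT_SIZE];
	static int have_last;

	read_block(out);
	if (have_last && !memcmp(out, last, EXTRACT_SIZE)) {
		fprintf(stderr, "RNG duplicated output!\n");
		abort();
	}
	memcpy(last, out, EXTRACT_SIZE);
	have_last = 1;
}

int main(void)
{
	unsigned char block[EXTRACT_SIZE];
	int i;

	for (i = 0; i < 4; i++) {
		get_random_block(block);
		printf("block %d: %02x%02x...\n", i, block[0], block[1]);
	}
	return 0;
}
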
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372b..b08403d7d1c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
 	depends on X86 && !UML
-	select CRYPTO_ALGAPI
 	help
 	  Some VIA processors come with an integrated crypto engine
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
 config CRYPTO_DEV_PADLOCK_SHA
 	tristate "PadLock driver for SHA1 and SHA256 algorithms"
 	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_HASH
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	help
@@ -157,6 +157,19 @@ config S390_PRNG
 	  ANSI X9.17 standard. The PRNG is usable via the char device
 	  /dev/prandom.
 
+config CRYPTO_DEV_MV_CESA
+	tristate "Marvell's Cryptographic Engine"
+	depends on PLAT_ORION
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER2
+	help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on the Marvell Orion
+	  and Kirkwood SoCs, such as QNAP's TS-209.
+
+	  Currently the driver supports AES in ECB and CBC mode without DMA.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc884..6ffcb3f7f94 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 61b6e1bec8c..a33243c17b0 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
 		}
 	}
 
-	tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct crypto4xx_ctx));
 	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
 	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
 				 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 4c0dfb2b872..46e899ac924 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -31,8 +31,6 @@
 #include <asm/dcr.h>
 #include <asm/dcr-regs.h>
 #include <asm/cacheflush.h>
-#include <crypto/internal/hash.h>
-#include <crypto/algapi.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
 #include "crypto4xx_reg_def.h"
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
 	ctx->sa_out_dma_addr = 0;
 	ctx->sa_len = 0;
 
-	if (alg->cra_type == &crypto_ablkcipher_type)
+	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	default:
 		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
-	else if (alg->cra_type == &crypto_ahash_type)
-		tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+					 sizeof(struct crypto4xx_ctx));
+		break;
+	}
 
 	return 0;
 }
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
 }
 
 int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
-			   struct crypto_alg *crypto_alg, int array_size)
+			   struct crypto4xx_alg_common *crypto_alg,
+			   int array_size)
 {
 	struct crypto4xx_alg *alg;
 	int i;
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
 			return -ENOMEM;
 
 		alg->alg = crypto_alg[i];
-		INIT_LIST_HEAD(&alg->alg.cra_list);
-		if (alg->alg.cra_init == NULL)
-			alg->alg.cra_init = crypto4xx_alg_init;
-		if (alg->alg.cra_exit == NULL)
-			alg->alg.cra_exit = crypto4xx_alg_exit;
 		alg->dev = sec_dev;
-		rc = crypto_register_alg(&alg->alg);
+
+		switch (alg->alg.type) {
+		case CRYPTO_ALG_TYPE_AHASH:
+			rc = crypto_register_ahash(&alg->alg.u.hash);
+			break;
+
+		default:
+			rc = crypto_register_alg(&alg->alg.u.cipher);
+			break;
+		}
+
 		if (rc) {
 			list_del(&alg->entry);
 			kfree(alg);
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
 
 	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
 		list_del(&alg->entry);
-		crypto_unregister_alg(&alg->alg);
+		switch (alg->alg.type) {
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&alg->alg.u.hash);
+			break;
+
+		default:
+			crypto_unregister_alg(&alg->alg.u.cipher);
+		}
 		kfree(alg);
 	}
 }
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
 /**
  * Supported Crypto Algorithms
  */
-struct crypto_alg crypto4xx_alg[] = {
+struct crypto4xx_alg_common crypto4xx_alg[] = {
 	/* Crypto AES modes */
-	{
+	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
 		.cra_name 	= "cbc(aes)",
 		.cra_driver_name = "cbc-aes-ppc4xx",
 		.cra_priority 	= CRYPTO4XX_CRYPTO_PRIORITY,
 		.cra_flags 	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 		.cra_blocksize 	= AES_BLOCK_SIZE,
 		.cra_ctxsize 	= sizeof(struct crypto4xx_ctx),
-		.cra_alignmask 	= 0,
 		.cra_type 	= &crypto_ablkcipher_type,
+		.cra_init	= crypto4xx_alg_init,
+		.cra_exit	= crypto4xx_alg_exit,
 		.cra_module 	= THIS_MODULE,
 		.cra_u 		= {
 			.ablkcipher = {
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
 				.decrypt = crypto4xx_decrypt,
 			}
 		}
-	},
-	/* Hash SHA1 */
-	{
-		.cra_name	= "sha1",
-		.cra_driver_name = "sha1-ppc4xx",
-		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
-		.cra_flags	= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
-		.cra_blocksize	= SHA1_BLOCK_SIZE,
-		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
-		.cra_alignmask	= 0,
-		.cra_type	= &crypto_ahash_type,
-		.cra_init	= crypto4xx_sha1_alg_init,
-		.cra_module	= THIS_MODULE,
-		.cra_u		= {
-			.ahash = {
-				.digestsize	= SHA1_DIGEST_SIZE,
-				.init		= crypto4xx_hash_init,
-				.update		= crypto4xx_hash_update,
-				.final		= crypto4xx_hash_final,
-				.digest		= crypto4xx_hash_digest,
-			}
-		}
-	},
+	}},
 };
 
 /**
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 1ef10344936..da9cbe3b9fc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,6 +22,8 @@
 #ifndef __CRYPTO4XX_CORE_H__
 #define __CRYPTO4XX_CORE_H__
 
+#include <crypto/internal/hash.h>
+
 #define PPC460SX_SDR0_SRST			0x201
 #define PPC405EX_SDR0_SRST			0x200
 #define PPC460EX_SDR0_SRST			0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
 	u16 sa_len;
 };
 
+struct crypto4xx_alg_common {
+	u32 type;
+	union {
+		struct crypto_alg cipher;
+		struct ahash_alg hash;
+	} u;
+};
+
 struct crypto4xx_alg {
 	struct list_head entry;
-	struct crypto_alg alg;
+	struct crypto4xx_alg_common alg;
 	struct crypto4xx_device *dev;
 };
 
-#define crypto_alg_to_crypto4xx_alg(x) \
-		container_of(x, struct crypto4xx_alg, alg)
+static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+	struct crypto_alg *x)
+{
+	switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		return container_of(__crypto_ahash_alg(x),
+				    struct crypto4xx_alg, alg.u.hash);
+	}
+
+	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+}
 
 extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
 extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
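
The crypto4xx changes above replace the flat crypto_alg table with a small tagged union (crypto4xx_alg_common) so one array can describe both ablkcipher and ahash entries, and registration dispatches on the type tag. A stand-alone sketch of that pattern, using illustrative names rather than the kernel's crypto API:

#include <stdio.h>

enum alg_type { ALG_CIPHER, ALG_HASH };

struct cipher_alg { const char *name; };
struct hash_alg   { const char *name; };

/* Tagged union: one array, two kinds of algorithm descriptors. */
struct alg_common {
	enum alg_type type;
	union {
		struct cipher_alg cipher;
		struct hash_alg hash;
	} u;
};

static void register_one(const struct alg_common *alg)
{
	switch (alg->type) {
	case ALG_HASH:
		printf("register hash %s\n", alg->u.hash.name);
		break;
	default:
		printf("register cipher %s\n", alg->u.cipher.name);
		break;
	}
}

int main(void)
{
	static const struct alg_common algs[] = {
		{ .type = ALG_CIPHER, .u.cipher = { "cbc(aes)" } },
		{ .type = ALG_HASH,   .u.hash   = { "sha1" } },
	};
	unsigned int i;

	for (i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		register_one(&algs[i]);
	return 0;
}
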
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 00000000000..b21ef635f35
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
1/*
2 * Support for Marvell's crypto engine which can be found on some Orion5X
3 * boards.
4 *
5 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
6 * License: GPLv2
7 *
8 */
9#include <crypto/aes.h>
10#include <crypto/algapi.h>
11#include <linux/crypto.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/kthread.h>
15#include <linux/platform_device.h>
16#include <linux/scatterlist.h>
17
18#include "mv_cesa.h"
19/*
20 * STM:
21 * /---------------------------------------\
22 * | | request complete
23 * \./ |
24 * IDLE -> new request -> BUSY -> done -> DEQUEUE
25 * /°\ |
26 * | | more scatter entries
27 * \________________/
28 */
29enum engine_status {
30 ENGINE_IDLE,
31 ENGINE_BUSY,
32 ENGINE_W_DEQUEUE,
33};
34
35/**
36 * struct req_progress - used for every crypt request
37 * @src_sg_it: sg iterator for src
38 * @dst_sg_it: sg iterator for dst
39 * @sg_src_left: bytes left in src to process (scatter list)
40 * @src_start: offset to add to src start position (scatter list)
41 * @crypt_len: length of current crypt process
42 * @sg_dst_left: bytes left dst to process in this scatter list
43 * @dst_start: offset to add to dst start position (scatter list)
44 * @total_req_bytes: total number of bytes processed (request).
45 *
46 * sg helper are used to iterate over the scatterlist. Since the size of the
47 * SRAM may be less than the scatter size, this struct struct is used to keep
48 * track of progress within current scatterlist.
49 */
50struct req_progress {
51 struct sg_mapping_iter src_sg_it;
52 struct sg_mapping_iter dst_sg_it;
53
54 /* src mostly */
55 int sg_src_left;
56 int src_start;
57 int crypt_len;
58 /* dst mostly */
59 int sg_dst_left;
60 int dst_start;
61 int total_req_bytes;
62};
63
64struct crypto_priv {
65 void __iomem *reg;
66 void __iomem *sram;
67 int irq;
68 struct task_struct *queue_th;
69
70 /* the lock protects queue and eng_st */
71 spinlock_t lock;
72 struct crypto_queue queue;
73 enum engine_status eng_st;
74 struct ablkcipher_request *cur_req;
75 struct req_progress p;
76 int max_req_size;
77 int sram_size;
78};
79
80static struct crypto_priv *cpg;
81
82struct mv_ctx {
83 u8 aes_enc_key[AES_KEY_LEN];
84 u32 aes_dec_key[8];
85 int key_len;
86 u32 need_calc_aes_dkey;
87};
88
89enum crypto_op {
90 COP_AES_ECB,
91 COP_AES_CBC,
92};
93
94struct mv_req_ctx {
95 enum crypto_op op;
96 int decrypt;
97};
98
99static void compute_aes_dec_key(struct mv_ctx *ctx)
100{
101 struct crypto_aes_ctx gen_aes_key;
102 int key_pos;
103
104 if (!ctx->need_calc_aes_dkey)
105 return;
106
107 crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
108
109 key_pos = ctx->key_len + 24;
110 memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
111 switch (ctx->key_len) {
112 case AES_KEYSIZE_256:
113 key_pos -= 2;
114 /* fall */
115 case AES_KEYSIZE_192:
116 key_pos -= 2;
117 memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
118 4 * 4);
119 break;
120 }
121 ctx->need_calc_aes_dkey = 0;
122}
123
124static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
125 unsigned int len)
126{
127 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
128 struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
129
130 switch (len) {
131 case AES_KEYSIZE_128:
132 case AES_KEYSIZE_192:
133 case AES_KEYSIZE_256:
134 break;
135 default:
136 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
137 return -EINVAL;
138 }
139 ctx->key_len = len;
140 ctx->need_calc_aes_dkey = 1;
141
142 memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
143 return 0;
144}
145
146static void setup_data_in(struct ablkcipher_request *req)
147{
148 int ret;
149 void *buf;
150
151 if (!cpg->p.sg_src_left) {
152 ret = sg_miter_next(&cpg->p.src_sg_it);
153 BUG_ON(!ret);
154 cpg->p.sg_src_left = cpg->p.src_sg_it.length;
155 cpg->p.src_start = 0;
156 }
157
158 cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
159
160 buf = cpg->p.src_sg_it.addr;
161 buf += cpg->p.src_start;
162
163 memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
164
165 cpg->p.sg_src_left -= cpg->p.crypt_len;
166 cpg->p.src_start += cpg->p.crypt_len;
167}
168
169static void mv_process_current_q(int first_block)
170{
171 struct ablkcipher_request *req = cpg->cur_req;
172 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
173 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
174 struct sec_accel_config op;
175
176 switch (req_ctx->op) {
177 case COP_AES_ECB:
178 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
179 break;
180 case COP_AES_CBC:
181 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
182 op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
183 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
184 if (first_block)
185 memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
186 break;
187 }
188 if (req_ctx->decrypt) {
189 op.config |= CFG_DIR_DEC;
190 memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
191 AES_KEY_LEN);
192 } else {
193 op.config |= CFG_DIR_ENC;
194 memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
195 AES_KEY_LEN);
196 }
197
198 switch (ctx->key_len) {
199 case AES_KEYSIZE_128:
200 op.config |= CFG_AES_LEN_128;
201 break;
202 case AES_KEYSIZE_192:
203 op.config |= CFG_AES_LEN_192;
204 break;
205 case AES_KEYSIZE_256:
206 op.config |= CFG_AES_LEN_256;
207 break;
208 }
209 op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
210 ENC_P_DST(SRAM_DATA_OUT_START);
211 op.enc_key_p = SRAM_DATA_KEY_P;
212
213 setup_data_in(req);
214 op.enc_len = cpg->p.crypt_len;
215 memcpy(cpg->sram + SRAM_CONFIG, &op,
216 sizeof(struct sec_accel_config));
217
218 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
219 /* GO */
220 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
221
222 /*
223 * XXX: add timer if the interrupt does not occur for some mystery
224 * reason
225 */
226}
227
228static void mv_crypto_algo_completion(void)
229{
230 struct ablkcipher_request *req = cpg->cur_req;
231 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
232
233 if (req_ctx->op != COP_AES_CBC)
234 return ;
235
236 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
237}
238
239static void dequeue_complete_req(void)
240{
241 struct ablkcipher_request *req = cpg->cur_req;
242 void *buf;
243 int ret;
244
245 cpg->p.total_req_bytes += cpg->p.crypt_len;
246 do {
247 int dst_copy;
248
249 if (!cpg->p.sg_dst_left) {
250 ret = sg_miter_next(&cpg->p.dst_sg_it);
251 BUG_ON(!ret);
252 cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
253 cpg->p.dst_start = 0;
254 }
255
256 buf = cpg->p.dst_sg_it.addr;
257 buf += cpg->p.dst_start;
258
259 dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
260
261 memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
262
263 cpg->p.sg_dst_left -= dst_copy;
264 cpg->p.crypt_len -= dst_copy;
265 cpg->p.dst_start += dst_copy;
266 } while (cpg->p.crypt_len > 0);
267
268 BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
269 if (cpg->p.total_req_bytes < req->nbytes) {
270 /* process next scatter list entry */
271 cpg->eng_st = ENGINE_BUSY;
272 mv_process_current_q(0);
273 } else {
274 sg_miter_stop(&cpg->p.src_sg_it);
275 sg_miter_stop(&cpg->p.dst_sg_it);
276 mv_crypto_algo_completion();
277 cpg->eng_st = ENGINE_IDLE;
278 req->base.complete(&req->base, 0);
279 }
280}
281
282static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
283{
284 int i = 0;
285
286 do {
287 total_bytes -= sl[i].length;
288 i++;
289
290 } while (total_bytes > 0);
291
292 return i;
293}
294
295static void mv_enqueue_new_req(struct ablkcipher_request *req)
296{
297 int num_sgs;
298
299 cpg->cur_req = req;
300 memset(&cpg->p, 0, sizeof(struct req_progress));
301
302 num_sgs = count_sgs(req->src, req->nbytes);
303 sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
304
305 num_sgs = count_sgs(req->dst, req->nbytes);
306 sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
307 mv_process_current_q(1);
308}
309
310static int queue_manag(void *data)
311{
312 cpg->eng_st = ENGINE_IDLE;
313 do {
314 struct ablkcipher_request *req;
315 struct crypto_async_request *async_req = NULL;
316 struct crypto_async_request *backlog;
317
318 __set_current_state(TASK_INTERRUPTIBLE);
319
320 if (cpg->eng_st == ENGINE_W_DEQUEUE)
321 dequeue_complete_req();
322
323 spin_lock_irq(&cpg->lock);
324 if (cpg->eng_st == ENGINE_IDLE) {
325 backlog = crypto_get_backlog(&cpg->queue);
326 async_req = crypto_dequeue_request(&cpg->queue);
327 if (async_req) {
328 BUG_ON(cpg->eng_st != ENGINE_IDLE);
329 cpg->eng_st = ENGINE_BUSY;
330 }
331 }
332 spin_unlock_irq(&cpg->lock);
333
334 if (backlog) {
335 backlog->complete(backlog, -EINPROGRESS);
336 backlog = NULL;
337 }
338
339 if (async_req) {
340 req = container_of(async_req,
341 struct ablkcipher_request, base);
342 mv_enqueue_new_req(req);
343 async_req = NULL;
344 }
345
346 schedule();
347
348 } while (!kthread_should_stop());
349 return 0;
350}
351
352static int mv_handle_req(struct ablkcipher_request *req)
353{
354 unsigned long flags;
355 int ret;
356
357 spin_lock_irqsave(&cpg->lock, flags);
358 ret = ablkcipher_enqueue_request(&cpg->queue, req);
359 spin_unlock_irqrestore(&cpg->lock, flags);
360 wake_up_process(cpg->queue_th);
361 return ret;
362}
363
364static int mv_enc_aes_ecb(struct ablkcipher_request *req)
365{
366 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
367
368 req_ctx->op = COP_AES_ECB;
369 req_ctx->decrypt = 0;
370
371 return mv_handle_req(req);
372}
373
374static int mv_dec_aes_ecb(struct ablkcipher_request *req)
375{
376 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
377 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
378
379 req_ctx->op = COP_AES_ECB;
380 req_ctx->decrypt = 1;
381
382 compute_aes_dec_key(ctx);
383 return mv_handle_req(req);
384}
385
386static int mv_enc_aes_cbc(struct ablkcipher_request *req)
387{
388 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
389
390 req_ctx->op = COP_AES_CBC;
391 req_ctx->decrypt = 0;
392
393 return mv_handle_req(req);
394}
395
396static int mv_dec_aes_cbc(struct ablkcipher_request *req)
397{
398 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
399 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
400
401 req_ctx->op = COP_AES_CBC;
402 req_ctx->decrypt = 1;
403
404 compute_aes_dec_key(ctx);
405 return mv_handle_req(req);
406}
407
408static int mv_cra_init(struct crypto_tfm *tfm)
409{
410 tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
411 return 0;
412}
413
414irqreturn_t crypto_int(int irq, void *priv)
415{
416 u32 val;
417
418 val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
419 if (!(val & SEC_INT_ACCEL0_DONE))
420 return IRQ_NONE;
421
422 val &= ~SEC_INT_ACCEL0_DONE;
423 writel(val, cpg->reg + FPGA_INT_STATUS);
424 writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
425 BUG_ON(cpg->eng_st != ENGINE_BUSY);
426 cpg->eng_st = ENGINE_W_DEQUEUE;
427 wake_up_process(cpg->queue_th);
428 return IRQ_HANDLED;
429}
430
431struct crypto_alg mv_aes_alg_ecb = {
432 .cra_name = "ecb(aes)",
433 .cra_driver_name = "mv-ecb-aes",
434 .cra_priority = 300,
435 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
436 .cra_blocksize = 16,
437 .cra_ctxsize = sizeof(struct mv_ctx),
438 .cra_alignmask = 0,
439 .cra_type = &crypto_ablkcipher_type,
440 .cra_module = THIS_MODULE,
441 .cra_init = mv_cra_init,
442 .cra_u = {
443 .ablkcipher = {
444 .min_keysize = AES_MIN_KEY_SIZE,
445 .max_keysize = AES_MAX_KEY_SIZE,
446 .setkey = mv_setkey_aes,
447 .encrypt = mv_enc_aes_ecb,
448 .decrypt = mv_dec_aes_ecb,
449 },
450 },
451};
452
453struct crypto_alg mv_aes_alg_cbc = {
454 .cra_name = "cbc(aes)",
455 .cra_driver_name = "mv-cbc-aes",
456 .cra_priority = 300,
457 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
458 .cra_blocksize = AES_BLOCK_SIZE,
459 .cra_ctxsize = sizeof(struct mv_ctx),
460 .cra_alignmask = 0,
461 .cra_type = &crypto_ablkcipher_type,
462 .cra_module = THIS_MODULE,
463 .cra_init = mv_cra_init,
464 .cra_u = {
465 .ablkcipher = {
466 .ivsize = AES_BLOCK_SIZE,
467 .min_keysize = AES_MIN_KEY_SIZE,
468 .max_keysize = AES_MAX_KEY_SIZE,
469 .setkey = mv_setkey_aes,
470 .encrypt = mv_enc_aes_cbc,
471 .decrypt = mv_dec_aes_cbc,
472 },
473 },
474};
475
476static int mv_probe(struct platform_device *pdev)
477{
478 struct crypto_priv *cp;
479 struct resource *res;
480 int irq;
481 int ret;
482
483 if (cpg) {
484 printk(KERN_ERR "Second crypto dev?\n");
485 return -EEXIST;
486 }
487
488 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
489 if (!res)
490 return -ENXIO;
491
492 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
493 if (!cp)
494 return -ENOMEM;
495
496 spin_lock_init(&cp->lock);
497 crypto_init_queue(&cp->queue, 50);
498 cp->reg = ioremap(res->start, res->end - res->start + 1);
499 if (!cp->reg) {
500 ret = -ENOMEM;
501 goto err;
502 }
503
504 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
505 if (!res) {
506 ret = -ENXIO;
507 goto err_unmap_reg;
508 }
509 cp->sram_size = res->end - res->start + 1;
510 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
511 cp->sram = ioremap(res->start, cp->sram_size);
512 if (!cp->sram) {
513 ret = -ENOMEM;
514 goto err_unmap_reg;
515 }
516
517 irq = platform_get_irq(pdev, 0);
518 if (irq < 0 || irq == NO_IRQ) {
519 ret = irq;
520 goto err_unmap_sram;
521 }
522 cp->irq = irq;
523
524 platform_set_drvdata(pdev, cp);
525 cpg = cp;
526
527 cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
528 if (IS_ERR(cp->queue_th)) {
529 ret = PTR_ERR(cp->queue_th);
530 goto err_thread;
531 }
532
533 ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
534 cp);
535 if (ret)
536 goto err_unmap_sram;
537
538 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
539 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
540
541 ret = crypto_register_alg(&mv_aes_alg_ecb);
542 if (ret)
543 goto err_reg;
544
545 ret = crypto_register_alg(&mv_aes_alg_cbc);
546 if (ret)
547 goto err_unreg_ecb;
548 return 0;
549err_unreg_ecb:
550 crypto_unregister_alg(&mv_aes_alg_ecb);
551err_thread:
552 free_irq(irq, cp);
553err_reg:
554 kthread_stop(cp->queue_th);
555err_unmap_sram:
556 iounmap(cp->sram);
557err_unmap_reg:
558 iounmap(cp->reg);
559err:
560 kfree(cp);
561 cpg = NULL;
562 platform_set_drvdata(pdev, NULL);
563 return ret;
564}
565
566static int mv_remove(struct platform_device *pdev)
567{
568 struct crypto_priv *cp = platform_get_drvdata(pdev);
569
570 crypto_unregister_alg(&mv_aes_alg_ecb);
571 crypto_unregister_alg(&mv_aes_alg_cbc);
572 kthread_stop(cp->queue_th);
573 free_irq(cp->irq, cp);
574 memset(cp->sram, 0, cp->sram_size);
575 iounmap(cp->sram);
576 iounmap(cp->reg);
577 kfree(cp);
578 cpg = NULL;
579 return 0;
580}
581
582static struct platform_driver marvell_crypto = {
583 .probe = mv_probe,
584 .remove = mv_remove,
585 .driver = {
586 .owner = THIS_MODULE,
587 .name = "mv_crypto",
588 },
589};
590MODULE_ALIAS("platform:mv_crypto");
591
592static int __init mv_crypto_init(void)
593{
594 return platform_driver_register(&marvell_crypto);
595}
596module_init(mv_crypto_init);
597
598static void __exit mv_crypto_exit(void)
599{
600 platform_driver_unregister(&marvell_crypto);
601}
602module_exit(mv_crypto_exit);
603
604MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
605MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
606MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 00000000000..c3e25d3bb17
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
1#ifndef __MV_CRYPTO_H__
2
3#define DIGEST_INITIAL_VAL_A 0xdd00
4#define DES_CMD_REG 0xdd58
5
6#define SEC_ACCEL_CMD 0xde00
7#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
8#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
9#define SEC_CMD_DISABLE_SEC (1 << 2)
10
11#define SEC_ACCEL_DESC_P0 0xde04
12#define SEC_DESC_P0_PTR(x) (x)
13
14#define SEC_ACCEL_DESC_P1 0xde14
15#define SEC_DESC_P1_PTR(x) (x)
16
17#define SEC_ACCEL_CFG 0xde08
18#define SEC_CFG_STOP_DIG_ERR (1 << 0)
19#define SEC_CFG_CH0_W_IDMA (1 << 7)
20#define SEC_CFG_CH1_W_IDMA (1 << 8)
21#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
22#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
23
24#define SEC_ACCEL_STATUS 0xde0c
25#define SEC_ST_ACT_0 (1 << 0)
26#define SEC_ST_ACT_1 (1 << 1)
27
28/*
29 * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata
30 * 4.12. It looks like that it was part of an IRQ-controller in FPGA and
31 * someone forgot to remove it while switching to the core and moving to
32 * SEC_ACCEL_INT_STATUS.
33 */
34#define FPGA_INT_STATUS 0xdd68
35#define SEC_ACCEL_INT_STATUS 0xde20
36#define SEC_INT_AUTH_DONE (1 << 0)
37#define SEC_INT_DES_E_DONE (1 << 1)
38#define SEC_INT_AES_E_DONE (1 << 2)
39#define SEC_INT_AES_D_DONE (1 << 3)
40#define SEC_INT_ENC_DONE (1 << 4)
41#define SEC_INT_ACCEL0_DONE (1 << 5)
42#define SEC_INT_ACCEL1_DONE (1 << 6)
43#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
44#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
45
46#define SEC_ACCEL_INT_MASK 0xde24
47
48#define AES_KEY_LEN (8 * 4)
49
50struct sec_accel_config {
51
52 u32 config;
53#define CFG_OP_MAC_ONLY 0
54#define CFG_OP_CRYPT_ONLY 1
55#define CFG_OP_MAC_CRYPT 2
56#define CFG_OP_CRYPT_MAC 3
57#define CFG_MACM_MD5 (4 << 4)
58#define CFG_MACM_SHA1 (5 << 4)
59#define CFG_MACM_HMAC_MD5 (6 << 4)
60#define CFG_MACM_HMAC_SHA1 (7 << 4)
61#define CFG_ENCM_DES (1 << 8)
62#define CFG_ENCM_3DES (2 << 8)
63#define CFG_ENCM_AES (3 << 8)
64#define CFG_DIR_ENC (0 << 12)
65#define CFG_DIR_DEC (1 << 12)
66#define CFG_ENC_MODE_ECB (0 << 16)
67#define CFG_ENC_MODE_CBC (1 << 16)
68#define CFG_3DES_EEE (0 << 20)
69#define CFG_3DES_EDE (1 << 20)
70#define CFG_AES_LEN_128 (0 << 24)
71#define CFG_AES_LEN_192 (1 << 24)
72#define CFG_AES_LEN_256 (2 << 24)
73
74 u32 enc_p;
75#define ENC_P_SRC(x) (x)
76#define ENC_P_DST(x) ((x) << 16)
77
78 u32 enc_len;
79#define ENC_LEN(x) (x)
80
81 u32 enc_key_p;
82#define ENC_KEY_P(x) (x)
83
84 u32 enc_iv;
85#define ENC_IV_POINT(x) ((x) << 0)
86#define ENC_IV_BUF_POINT(x) ((x) << 16)
87
88 u32 mac_src_p;
89#define MAC_SRC_DATA_P(x) (x)
90#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
91
92 u32 mac_digest;
93 u32 mac_iv;
94}__attribute__ ((packed));
95 /*
96 * /-----------\ 0
97 * | ACCEL CFG | 4 * 8
98 * |-----------| 0x20
99 * | CRYPT KEY | 8 * 4
100 * |-----------| 0x40
101 * | IV IN | 4 * 4
102 * |-----------| 0x40 (inplace)
103 * | IV BUF | 4 * 4
104 * |-----------| 0x50
105 * | DATA IN | 16 * x (max ->max_req_size)
106 * |-----------| 0x50 (inplace operation)
107 * | DATA OUT | 16 * x (max ->max_req_size)
108 * \-----------/ SRAM size
109 */
110#define SRAM_CONFIG 0x00
111#define SRAM_DATA_KEY_P 0x20
112#define SRAM_DATA_IV 0x40
113#define SRAM_DATA_IV_BUF 0x40
114#define SRAM_DATA_IN_START 0x50
115#define SRAM_DATA_OUT_START 0x50
116
117#define SRAM_CFG_SPACE 0x50
118
119#endif
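
The sec_accel_config word above is built by OR-ing the bit-field values the header defines; mv_process_current_q() in mv_cesa.c, for example, combines the operation, cipher, mode, direction and key-length fields. A tiny stand-alone sketch with the relevant values copied from the header, showing the word produced for AES-256 CBC decryption:

#include <stdio.h>

/* Field values copied from the mv_cesa.h defines above. */
#define CFG_OP_CRYPT_ONLY	1
#define CFG_ENCM_AES		(3 << 8)
#define CFG_DIR_DEC		(1 << 12)
#define CFG_ENC_MODE_CBC	(1 << 16)
#define CFG_AES_LEN_256		(2 << 24)

int main(void)
{
	unsigned int config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES |
			      CFG_ENC_MODE_CBC | CFG_DIR_DEC |
			      CFG_AES_LEN_256;

	/* op=crypt, enc=AES, dir=dec, mode=CBC, key=256 -> 0x02011301 */
	printf("config word: 0x%08x\n", config);
	return 0;
}
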
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b6..76cb6b345e7 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
12 * 12 *
13 */ 13 */
14 14
15#include <crypto/algapi.h> 15#include <crypto/internal/hash.h>
16#include <crypto/sha.h> 16#include <crypto/sha.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/cryptohash.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <asm/i387.h> 24#include <asm/i387.h>
26#include "padlock.h" 25#include "padlock.h"
27 26
28#define SHA1_DEFAULT_FALLBACK "sha1-generic" 27struct padlock_sha_desc {
29#define SHA256_DEFAULT_FALLBACK "sha256-generic" 28 struct shash_desc fallback;
29};
30 30
31struct padlock_sha_ctx { 31struct padlock_sha_ctx {
32 char *data; 32 struct crypto_shash *fallback;
33 size_t used;
34 int bypass;
35 void (*f_sha_padlock)(const char *in, char *out, int count);
36 struct hash_desc fallback;
37}; 33};
38 34
39static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) 35static int padlock_sha_init(struct shash_desc *desc)
40{
41 return crypto_tfm_ctx(tfm);
42}
43
44/* We'll need aligned address on the stack */
45#define NEAREST_ALIGNED(ptr) \
46 ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
47
48static struct crypto_alg sha1_alg, sha256_alg;
49
50static void padlock_sha_bypass(struct crypto_tfm *tfm)
51{ 36{
52 if (ctx(tfm)->bypass) 37 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
53 return; 38 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
54 39
55 crypto_hash_init(&ctx(tfm)->fallback); 40 dctx->fallback.tfm = ctx->fallback;
56 if (ctx(tfm)->data && ctx(tfm)->used) { 41 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
57 struct scatterlist sg; 42 return crypto_shash_init(&dctx->fallback);
58
59 sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
60 crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
61 }
62
63 ctx(tfm)->used = 0;
64 ctx(tfm)->bypass = 1;
65}
66
67static void padlock_sha_init(struct crypto_tfm *tfm)
68{
69 ctx(tfm)->used = 0;
70 ctx(tfm)->bypass = 0;
71} 43}
72 44
73static void padlock_sha_update(struct crypto_tfm *tfm, 45static int padlock_sha_update(struct shash_desc *desc,
74 const uint8_t *data, unsigned int length) 46 const u8 *data, unsigned int length)
75{ 47{
76 /* Our buffer is always one page. */ 48 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
77 if (unlikely(!ctx(tfm)->bypass &&
78 (ctx(tfm)->used + length > PAGE_SIZE)))
79 padlock_sha_bypass(tfm);
80
81 if (unlikely(ctx(tfm)->bypass)) {
82 struct scatterlist sg;
83 sg_init_one(&sg, (uint8_t *)data, length);
84 crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
85 return;
86 }
87 49
88 memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); 50 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
89 ctx(tfm)->used += length; 51 return crypto_shash_update(&dctx->fallback, data, length);
90} 52}
91 53
92static inline void padlock_output_block(uint32_t *src, 54static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
96 *dst++ = swab32(*src++); 58 *dst++ = swab32(*src++);
97} 59}
98 60
99static void padlock_do_sha1(const char *in, char *out, int count) 61static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
62 unsigned int count, u8 *out)
100{ 63{
101 /* We can't store directly to *out as it may be unaligned. */ 64 /* We can't store directly to *out as it may be unaligned. */
102 /* BTW Don't reduce the buffer size below 128 Bytes! 65 /* BTW Don't reduce the buffer size below 128 Bytes!
103 * PadLock microcode needs it that big. */ 66 * PadLock microcode needs it that big. */
104 char buf[128+16]; 67 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
105 char *result = NEAREST_ALIGNED(buf); 68 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
69 struct sha1_state state;
70 unsigned int space;
71 unsigned int leftover;
106 int ts_state; 72 int ts_state;
73 int err;
74
75 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
76 err = crypto_shash_export(&dctx->fallback, &state);
77 if (err)
78 goto out;
79
80 if (state.count + count > ULONG_MAX)
81 return crypto_shash_finup(&dctx->fallback, in, count, out);
82
83 leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
84 space = SHA1_BLOCK_SIZE - leftover;
85 if (space) {
86 if (count > space) {
87 err = crypto_shash_update(&dctx->fallback, in, space) ?:
88 crypto_shash_export(&dctx->fallback, &state);
89 if (err)
90 goto out;
91 count -= space;
92 in += space;
93 } else {
94 memcpy(state.buffer + leftover, in, count);
95 in = state.buffer;
96 count += leftover;
97 state.count &= ~(SHA1_BLOCK_SIZE - 1);
98 }
99 }
100
101 memcpy(result, &state.state, SHA1_DIGEST_SIZE);
107 102
108 ((uint32_t *)result)[0] = SHA1_H0;
109 ((uint32_t *)result)[1] = SHA1_H1;
110 ((uint32_t *)result)[2] = SHA1_H2;
111 ((uint32_t *)result)[3] = SHA1_H3;
112 ((uint32_t *)result)[4] = SHA1_H4;
113
114 /* prevent taking the spurious DNA fault with padlock. */ 103 /* prevent taking the spurious DNA fault with padlock. */
115 ts_state = irq_ts_save(); 104 ts_state = irq_ts_save();
116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ 105 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
117 : "+S"(in), "+D"(result) 106 : \
118 : "c"(count), "a"(0)); 107 : "c"((unsigned long)state.count + count), \
108 "a"((unsigned long)state.count), \
109 "S"(in), "D"(result));
119 irq_ts_restore(ts_state); 110 irq_ts_restore(ts_state);
120 111
121 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); 112 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
113
114out:
115 return err;
122} 116}
123 117
124static void padlock_do_sha256(const char *in, char *out, int count) 118static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
119{
120 u8 buf[4];
121
122 return padlock_sha1_finup(desc, buf, 0, out);
123}
124
125static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
126 unsigned int count, u8 *out)
125{ 127{
126 /* We can't store directly to *out as it may be unaligned. */ 128 /* We can't store directly to *out as it may be unaligned. */
127 /* BTW Don't reduce the buffer size below 128 Bytes! 129 /* BTW Don't reduce the buffer size below 128 Bytes!
128 * PadLock microcode needs it that big. */ 130 * PadLock microcode needs it that big. */
129 char buf[128+16]; 131 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
130 char *result = NEAREST_ALIGNED(buf); 132 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
133 struct sha256_state state;
134 unsigned int space;
135 unsigned int leftover;
131 int ts_state; 136 int ts_state;
137 int err;
138
139 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
140 err = crypto_shash_export(&dctx->fallback, &state);
141 if (err)
142 goto out;
143
144 if (state.count + count > ULONG_MAX)
145 return crypto_shash_finup(&dctx->fallback, in, count, out);
146
147 leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
148 space = SHA256_BLOCK_SIZE - leftover;
149 if (space) {
150 if (count > space) {
151 err = crypto_shash_update(&dctx->fallback, in, space) ?:
152 crypto_shash_export(&dctx->fallback, &state);
153 if (err)
154 goto out;
155 count -= space;
156 in += space;
157 } else {
158 memcpy(state.buf + leftover, in, count);
159 in = state.buf;
160 count += leftover;
161 state.count &= ~(SHA1_BLOCK_SIZE - 1);
162 }
163 }
132 164
133 ((uint32_t *)result)[0] = SHA256_H0; 165 memcpy(result, &state.state, SHA256_DIGEST_SIZE);
134 ((uint32_t *)result)[1] = SHA256_H1;
135 ((uint32_t *)result)[2] = SHA256_H2;
136 ((uint32_t *)result)[3] = SHA256_H3;
137 ((uint32_t *)result)[4] = SHA256_H4;
138 ((uint32_t *)result)[5] = SHA256_H5;
139 ((uint32_t *)result)[6] = SHA256_H6;
140 ((uint32_t *)result)[7] = SHA256_H7;
141 166
142 /* prevent taking the spurious DNA fault with padlock. */ 167 /* prevent taking the spurious DNA fault with padlock. */
143 ts_state = irq_ts_save(); 168 ts_state = irq_ts_save();
144 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ 169 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
145 : "+S"(in), "+D"(result) 170 : \
146 : "c"(count), "a"(0)); 171 : "c"((unsigned long)state.count + count), \
172 "a"((unsigned long)state.count), \
173 "S"(in), "D"(result));
147 irq_ts_restore(ts_state); 174 irq_ts_restore(ts_state);
148 175
149 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); 176 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
177
178out:
179 return err;
150} 180}
151 181
152static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) 182static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
153{ 183{
154 if (unlikely(ctx(tfm)->bypass)) { 184 u8 buf[4];
155 crypto_hash_final(&ctx(tfm)->fallback, out);
156 ctx(tfm)->bypass = 0;
157 return;
158 }
159 185
160 /* Pass the input buffer to PadLock microcode... */ 186 return padlock_sha256_finup(desc, buf, 0, out);
161 ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
162
163 ctx(tfm)->used = 0;
164} 187}
165 188
166static int padlock_cra_init(struct crypto_tfm *tfm) 189static int padlock_cra_init(struct crypto_tfm *tfm)
167{ 190{
191 struct crypto_shash *hash = __crypto_shash_cast(tfm);
168 const char *fallback_driver_name = tfm->__crt_alg->cra_name; 192 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
169 struct crypto_hash *fallback_tfm; 193 struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
170 194 struct crypto_shash *fallback_tfm;
171 /* For now we'll allocate one page. This 195 int err = -ENOMEM;
172 * could eventually be configurable one day. */
173 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
174 if (!ctx(tfm)->data)
175 return -ENOMEM;
176 196
177 /* Allocate a fallback and abort if it failed. */ 197 /* Allocate a fallback and abort if it failed. */
178 fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, 198 fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
179 CRYPTO_ALG_ASYNC | 199 CRYPTO_ALG_NEED_FALLBACK);
180 CRYPTO_ALG_NEED_FALLBACK);
181 if (IS_ERR(fallback_tfm)) { 200 if (IS_ERR(fallback_tfm)) {
182 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", 201 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
183 fallback_driver_name); 202 fallback_driver_name);
184 free_page((unsigned long)(ctx(tfm)->data)); 203 err = PTR_ERR(fallback_tfm);
185 return PTR_ERR(fallback_tfm); 204 goto out;
186 } 205 }
187 206
188 ctx(tfm)->fallback.tfm = fallback_tfm; 207 ctx->fallback = fallback_tfm;
208 hash->descsize += crypto_shash_descsize(fallback_tfm);
189 return 0; 209 return 0;
190}
191
192static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
193{
194 ctx(tfm)->f_sha_padlock = padlock_do_sha1;
195 210
196 return padlock_cra_init(tfm); 211out:
197} 212 return err;
198
199static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
200{
201 ctx(tfm)->f_sha_padlock = padlock_do_sha256;
202
203 return padlock_cra_init(tfm);
204} 213}
205 214
206static void padlock_cra_exit(struct crypto_tfm *tfm) 215static void padlock_cra_exit(struct crypto_tfm *tfm)
207{ 216{
208 if (ctx(tfm)->data) { 217 struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
209 free_page((unsigned long)(ctx(tfm)->data));
210 ctx(tfm)->data = NULL;
211 }
212 218
213 crypto_free_hash(ctx(tfm)->fallback.tfm); 219 crypto_free_shash(ctx->fallback);
214 ctx(tfm)->fallback.tfm = NULL;
215} 220}
216 221
217static struct crypto_alg sha1_alg = { 222static struct shash_alg sha1_alg = {
218 .cra_name = "sha1", 223 .digestsize = SHA1_DIGEST_SIZE,
219 .cra_driver_name = "sha1-padlock", 224 .init = padlock_sha_init,
220 .cra_priority = PADLOCK_CRA_PRIORITY, 225 .update = padlock_sha_update,
221 .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 226 .finup = padlock_sha1_finup,
222 CRYPTO_ALG_NEED_FALLBACK, 227 .final = padlock_sha1_final,
223 .cra_blocksize = SHA1_BLOCK_SIZE, 228 .descsize = sizeof(struct padlock_sha_desc),
224 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 229 .base = {
225 .cra_module = THIS_MODULE, 230 .cra_name = "sha1",
226 .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), 231 .cra_driver_name = "sha1-padlock",
227 .cra_init = padlock_sha1_cra_init, 232 .cra_priority = PADLOCK_CRA_PRIORITY,
228 .cra_exit = padlock_cra_exit, 233 .cra_flags = CRYPTO_ALG_TYPE_SHASH |
229 .cra_u = { 234 CRYPTO_ALG_NEED_FALLBACK,
230 .digest = { 235 .cra_blocksize = SHA1_BLOCK_SIZE,
231 .dia_digestsize = SHA1_DIGEST_SIZE, 236 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
232 .dia_init = padlock_sha_init, 237 .cra_module = THIS_MODULE,
233 .dia_update = padlock_sha_update, 238 .cra_init = padlock_cra_init,
234 .dia_final = padlock_sha_final, 239 .cra_exit = padlock_cra_exit,
235 }
236 } 240 }
237}; 241};
238 242
239static struct crypto_alg sha256_alg = { 243static struct shash_alg sha256_alg = {
240 .cra_name = "sha256", 244 .digestsize = SHA256_DIGEST_SIZE,
241 .cra_driver_name = "sha256-padlock", 245 .init = padlock_sha_init,
242 .cra_priority = PADLOCK_CRA_PRIORITY, 246 .update = padlock_sha_update,
243 .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 247 .finup = padlock_sha256_finup,
244 CRYPTO_ALG_NEED_FALLBACK, 248 .final = padlock_sha256_final,
245 .cra_blocksize = SHA256_BLOCK_SIZE, 249 .descsize = sizeof(struct padlock_sha_desc),
246 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 250 .base = {
247 .cra_module = THIS_MODULE, 251 .cra_name = "sha256",
248 .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), 252 .cra_driver_name = "sha256-padlock",
249 .cra_init = padlock_sha256_cra_init, 253 .cra_priority = PADLOCK_CRA_PRIORITY,
250 .cra_exit = padlock_cra_exit, 254 .cra_flags = CRYPTO_ALG_TYPE_SHASH |
251 .cra_u = { 255 CRYPTO_ALG_NEED_FALLBACK,
252 .digest = { 256 .cra_blocksize = SHA256_BLOCK_SIZE,
253 .dia_digestsize = SHA256_DIGEST_SIZE, 257 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
254 .dia_init = padlock_sha_init, 258 .cra_module = THIS_MODULE,
255 .dia_update = padlock_sha_update, 259 .cra_init = padlock_cra_init,
256 .dia_final = padlock_sha_final, 260 .cra_exit = padlock_cra_exit,
257 }
258 } 261 }
259}; 262};
260 263
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
 		return -ENODEV;
 	}
 
-	rc = crypto_register_alg(&sha1_alg);
+	rc = crypto_register_shash(&sha1_alg);
 	if (rc)
 		goto out;
 
-	rc = crypto_register_alg(&sha256_alg);
+	rc = crypto_register_shash(&sha256_alg);
 	if (rc)
 		goto out_unreg1;
 
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
 	return 0;
 
 out_unreg1:
-	crypto_unregister_alg(&sha1_alg);
+	crypto_unregister_shash(&sha1_alg);
 out:
 	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
 	return rc;
@@ -293,8 +296,8 @@ out:
 
 static void __exit padlock_fini(void)
 {
-	crypto_unregister_alg(&sha1_alg);
-	crypto_unregister_alg(&sha256_alg);
+	crypto_unregister_shash(&sha1_alg);
+	crypto_unregister_shash(&sha256_alg);
 }
 
 module_init(padlock_init);
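
The padlock_sha1_finup()/padlock_sha256_finup() paths above size their copy-and-update decisions with leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1, which maps the running byte count into the range 1..64 so that an exact block boundary yields 64 rather than 0 (and space = SHA1_BLOCK_SIZE - leftover is then 0). A small stand-alone sketch of just that arithmetic, outside the kernel:

#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

/* Bytes of the current block considered buffered after 'count' total bytes.
 * Maps 1..64 -> 1..64, 65..128 -> 1..64, ... so a completed block reports
 * 64 rather than 0. */
static unsigned int leftover_bytes(unsigned long count)
{
	return ((count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
}

int main(void)
{
	unsigned long counts[] = { 1, 63, 64, 65, 128, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("count=%3lu leftover=%2u space=%2u\n", counts[i],
		       leftover_bytes(counts[i]),
		       SHA1_BLOCK_SIZE - leftover_bytes(counts[i]));
	return 0;
}
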
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce..c47ffe8a73e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
86 void *context; 86 void *context;
87}; 87};
88 88
89/* per-channel fifo management */
90struct talitos_channel {
91 /* request fifo */
92 struct talitos_request *fifo;
93
94 /* number of requests pending in channel h/w fifo */
95 atomic_t submit_count ____cacheline_aligned;
96
97 /* request submission (head) lock */
98 spinlock_t head_lock ____cacheline_aligned;
99 /* index to next free descriptor request */
100 int head;
101
102 /* request release (tail) lock */
103 spinlock_t tail_lock ____cacheline_aligned;
104 /* index to next in-progress/done descriptor request */
105 int tail;
106};
107
89struct talitos_private { 108struct talitos_private {
90 struct device *dev; 109 struct device *dev;
91 struct of_device *ofdev; 110 struct of_device *ofdev;
@@ -101,15 +120,6 @@ struct talitos_private {
101 /* SEC Compatibility info */ 120 /* SEC Compatibility info */
102 unsigned long features; 121 unsigned long features;
103 122
104 /* next channel to be assigned next incoming descriptor */
105 atomic_t last_chan;
106
107 /* per-channel number of requests pending in channel h/w fifo */
108 atomic_t *submit_count;
109
110 /* per-channel request fifo */
111 struct talitos_request **fifo;
112
113 /* 123 /*
114 * length of the request fifo 124 * length of the request fifo
115 * fifo_len is chfifo_len rounded up to next power of 2 125 * fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
117 */ 127 */
118 unsigned int fifo_len; 128 unsigned int fifo_len;
119 129
120 /* per-channel index to next free descriptor request */ 130 struct talitos_channel *chan;
121 int *head;
122
123 /* per-channel index to next in-progress/done descriptor request */
124 int *tail;
125 131
126 /* per-channel request submission (head) and release (tail) locks */ 132 /* next channel to be assigned next incoming descriptor */
127 spinlock_t *head_lock; 133 atomic_t last_chan ____cacheline_aligned;
128 spinlock_t *tail_lock;
129 134
130 /* request callback tasklet */ 135 /* request callback tasklet */
131 struct tasklet_struct done_task; 136 struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
141#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 146#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
142#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 147#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
143 148
149static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
150{
151 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
152 talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
153}
154
144/* 155/*
145 * map virtual single (contiguous) pointer to h/w descriptor pointer 156 * map virtual single (contiguous) pointer to h/w descriptor pointer
146 */ 157 */
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
150 unsigned char extent, 161 unsigned char extent,
151 enum dma_data_direction dir) 162 enum dma_data_direction dir)
152{ 163{
164 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
165
153 talitos_ptr->len = cpu_to_be16(len); 166 talitos_ptr->len = cpu_to_be16(len);
154 talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); 167 to_talitos_ptr(talitos_ptr, dma_addr);
155 talitos_ptr->j_extent = extent; 168 talitos_ptr->j_extent = extent;
156} 169}
157 170
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
182 return -EIO; 195 return -EIO;
183 } 196 }
184 197
185 /* set done writeback and IRQ */ 198 /* set 36-bit addressing, done writeback enable and done IRQ enable */
186 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | 199 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
187 TALITOS_CCCR_LO_CDIE); 200 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
188 201
189 /* and ICCR writeback, if available */ 202 /* and ICCR writeback, if available */
190 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) 203 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
282 /* emulate SEC's round-robin channel fifo polling scheme */ 295 /* emulate SEC's round-robin channel fifo polling scheme */
283 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); 296 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
284 297
285 spin_lock_irqsave(&priv->head_lock[ch], flags); 298 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
286 299
287 if (!atomic_inc_not_zero(&priv->submit_count[ch])) { 300 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
288 /* h/w fifo is full */ 301 /* h/w fifo is full */
289 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 302 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
290 return -EAGAIN; 303 return -EAGAIN;
291 } 304 }
292 305
293 head = priv->head[ch]; 306 head = priv->chan[ch].head;
294 request = &priv->fifo[ch][head]; 307 request = &priv->chan[ch].fifo[head];
295 308
296 /* map descriptor and save caller data */ 309 /* map descriptor and save caller data */
297 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), 310 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
300 request->context = context; 313 request->context = context;
301 314
302 /* increment fifo head */ 315 /* increment fifo head */
303 priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1); 316 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
304 317
305 smp_wmb(); 318 smp_wmb();
306 request->desc = desc; 319 request->desc = desc;
307 320
308 /* GO! */ 321 /* GO! */
309 wmb(); 322 wmb();
310 out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); 323 out_be32(priv->reg + TALITOS_FF(ch),
324 cpu_to_be32(upper_32_bits(request->dma_desc)));
325 out_be32(priv->reg + TALITOS_FF_LO(ch),
326 cpu_to_be32(lower_32_bits(request->dma_desc)));
311 327
312 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 328 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
313 329
314 return -EINPROGRESS; 330 return -EINPROGRESS;
315} 331}
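
talitos_submit() picks the channel and the fifo slot with a power-of-two mask: atomic_inc_return() hands out monotonically increasing ticket numbers, and "& (num_channels - 1)" (likewise "& (fifo_len - 1)") reduces them modulo the count, which works only because both counts are rounded up to powers of two. A small sketch of that indexing, with made-up channel and fifo sizes:

#include <stdio.h>

int main(void)
{
	unsigned int last_chan = 0, num_channels = 4;   /* power of two */
	unsigned int head = 0, fifo_len = 8;            /* power of two */
	int i;

	/* round-robin channel assignment, like atomic_inc_return() & mask */
	for (i = 0; i < 6; i++) {
		unsigned int ch = ++last_chan & (num_channels - 1);
		printf("request %d -> channel %u\n", i, ch);
	}

	/* the per-channel head index wraps the same way */
	for (i = 0; i < 10; i++)
		head = (head + 1) & (fifo_len - 1);
	printf("head after 10 submissions: %u\n", head); /* 10 % 8 == 2 */
	return 0;
}
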
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
324 unsigned long flags; 340 unsigned long flags;
325 int tail, status; 341 int tail, status;
326 342
327 spin_lock_irqsave(&priv->tail_lock[ch], flags); 343 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
328 344
329 tail = priv->tail[ch]; 345 tail = priv->chan[ch].tail;
330 while (priv->fifo[ch][tail].desc) { 346 while (priv->chan[ch].fifo[tail].desc) {
331 request = &priv->fifo[ch][tail]; 347 request = &priv->chan[ch].fifo[tail];
332 348
333 /* descriptors with their done bits set don't get the error */ 349 /* descriptors with their done bits set don't get the error */
334 rmb(); 350 rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
354 request->desc = NULL; 370 request->desc = NULL;
355 371
356 /* increment fifo tail */ 372 /* increment fifo tail */
357 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); 373 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
358 374
359 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 375 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
360 376
361 atomic_dec(&priv->submit_count[ch]); 377 atomic_dec(&priv->chan[ch].submit_count);
362 378
363 saved_req.callback(dev, saved_req.desc, saved_req.context, 379 saved_req.callback(dev, saved_req.desc, saved_req.context,
364 status); 380 status);
365 /* channel may resume processing in single desc error case */ 381 /* channel may resume processing in single desc error case */
366 if (error && !reset_ch && status == error) 382 if (error && !reset_ch && status == error)
367 return; 383 return;
368 spin_lock_irqsave(&priv->tail_lock[ch], flags); 384 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
369 tail = priv->tail[ch]; 385 tail = priv->chan[ch].tail;
370 } 386 }
371 387
372 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 388 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
373} 389}
374 390
375/* 391/*
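
flush_channel() drains completions from the tail of the same ring: a slot whose desc pointer is still set holds an outstanding request, clearing it frees the slot, and the tail index wraps with the usual power-of-two mask. A stripped-down consumer loop to illustrate, with a dummy array in place of priv->chan[ch].fifo:

#include <stdio.h>
#include <string.h>

#define DEMO_FIFO_LEN 8                     /* power of two, like priv->fifo_len */

struct demo_request { void *desc; };        /* desc == NULL marks a free slot */

int main(void)
{
	struct demo_request fifo[DEMO_FIFO_LEN];
	unsigned int tail = 0;
	int i, dummy;

	memset(fifo, 0, sizeof(fifo));
	for (i = 0; i < 3; i++)             /* pretend three requests are pending */
		fifo[i].desc = &dummy;

	/* drain from the tail until an empty slot is reached */
	while (fifo[tail].desc) {
		fifo[tail].desc = NULL;     /* release the slot */
		tail = (tail + 1) & (DEMO_FIFO_LEN - 1);
	}
	printf("tail stopped at %u\n", tail);   /* 3 */
	return 0;
}
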
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
397static struct talitos_desc *current_desc(struct device *dev, int ch) 413static struct talitos_desc *current_desc(struct device *dev, int ch)
398{ 414{
399 struct talitos_private *priv = dev_get_drvdata(dev); 415 struct talitos_private *priv = dev_get_drvdata(dev);
400 int tail = priv->tail[ch]; 416 int tail = priv->chan[ch].tail;
401 dma_addr_t cur_desc; 417 dma_addr_t cur_desc;
402 418
403 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); 419 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
404 420
405 while (priv->fifo[ch][tail].dma_desc != cur_desc) { 421 while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
406 tail = (tail + 1) & (priv->fifo_len - 1); 422 tail = (tail + 1) & (priv->fifo_len - 1);
407 if (tail == priv->tail[ch]) { 423 if (tail == priv->chan[ch].tail) {
408 dev_err(dev, "couldn't locate current descriptor\n"); 424 dev_err(dev, "couldn't locate current descriptor\n");
409 return NULL; 425 return NULL;
410 } 426 }
411 } 427 }
412 428
413 return priv->fifo[ch][tail].desc; 429 return priv->chan[ch].fifo[tail].desc;
414} 430}
415 431
416/* 432/*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
929 int n_sg = sg_count; 945 int n_sg = sg_count;
930 946
931 while (n_sg--) { 947 while (n_sg--) {
932 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); 948 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
933 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); 949 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
934 link_tbl_ptr->j_extent = 0; 950 link_tbl_ptr->j_extent = 0;
935 link_tbl_ptr++; 951 link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
970 struct talitos_desc *desc = &edesc->desc; 986 struct talitos_desc *desc = &edesc->desc;
971 unsigned int cryptlen = areq->cryptlen; 987 unsigned int cryptlen = areq->cryptlen;
972 unsigned int authsize = ctx->authsize; 988 unsigned int authsize = ctx->authsize;
973 unsigned int ivsize; 989 unsigned int ivsize = crypto_aead_ivsize(aead);
974 int sg_count, ret; 990 int sg_count, ret;
975 int sg_link_tbl_len; 991 int sg_link_tbl_len;
976 992
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
978 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 994 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
979 0, DMA_TO_DEVICE); 995 0, DMA_TO_DEVICE);
980 /* hmac data */ 996 /* hmac data */
981 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - 997 map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
982 sg_virt(areq->assoc), sg_virt(areq->assoc), 0, 998 sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
983 DMA_TO_DEVICE);
984 /* cipher iv */ 999 /* cipher iv */
985 ivsize = crypto_aead_ivsize(aead);
986 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, 1000 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
987 DMA_TO_DEVICE); 1001 DMA_TO_DEVICE);
988 1002
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1006 edesc->src_is_chained); 1020 edesc->src_is_chained);
1007 1021
1008 if (sg_count == 1) { 1022 if (sg_count == 1) {
1009 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1023 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1010 } else { 1024 } else {
1011 sg_link_tbl_len = cryptlen; 1025 sg_link_tbl_len = cryptlen;
1012 1026
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1017 &edesc->link_tbl[0]); 1031 &edesc->link_tbl[0]);
1018 if (sg_count > 1) { 1032 if (sg_count > 1) {
1019 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1033 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1020 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 1034 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1021 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1035 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1022 edesc->dma_len, 1036 edesc->dma_len,
1023 DMA_BIDIRECTIONAL); 1037 DMA_BIDIRECTIONAL);
1024 } else { 1038 } else {
1025 /* Only one segment now, so no link tbl needed */ 1039 /* Only one segment now, so no link tbl needed */
1026 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> 1040 to_talitos_ptr(&desc->ptr[4],
1027 src)); 1041 sg_dma_address(areq->src));
1028 } 1042 }
1029 } 1043 }
1030 1044
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1039 edesc->dst_is_chained); 1053 edesc->dst_is_chained);
1040 1054
1041 if (sg_count == 1) { 1055 if (sg_count == 1) {
1042 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1056 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1043 } else { 1057 } else {
1044 struct talitos_ptr *link_tbl_ptr = 1058 struct talitos_ptr *link_tbl_ptr =
1045 &edesc->link_tbl[edesc->src_nents + 1]; 1059 &edesc->link_tbl[edesc->src_nents + 1];
1046 1060
1047 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 1061 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1048 edesc->dma_link_tbl + 1062 (edesc->src_nents + 1) *
1049 edesc->src_nents + 1); 1063 sizeof(struct talitos_ptr));
1050 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1064 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1051 link_tbl_ptr); 1065 link_tbl_ptr);
1052 1066
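
The ptr[5] change above also swaps the old pointer-arithmetic idiom, (struct talitos_ptr *)edesc->dma_link_tbl + n, for an explicit byte offset on the dma_addr_t, dma_link_tbl + n * sizeof(struct talitos_ptr). The two compute the same offset, but the latter keeps the value in the (possibly 64-bit) bus-address type and feeds it straight to to_talitos_ptr(). A small sketch of the arithmetic; the entry layout and the sample address are assumptions for the demo:

#include <stdint.h>
#include <stdio.h>

/* stand-in for struct talitos_ptr: 8 bytes per link-table entry */
struct demo_entry { uint16_t len; uint8_t j_extent; uint8_t eptr; uint32_t ptr; };

int main(void)
{
	uint64_t dma_link_tbl = 0x200000000ULL; /* hypothetical 36-bit bus address */
	unsigned int src_nents = 3;

	/* bus address of link-table entry src_nents + 1, kept in the wide type */
	uint64_t off = dma_link_tbl + (src_nents + 1) * sizeof(struct demo_entry);

	printf("entry %u at 0x%llx\n", src_nents + 1, (unsigned long long)off);
	return 0;
}
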
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1059 link_tbl_ptr->len = cpu_to_be16(authsize); 1073 link_tbl_ptr->len = cpu_to_be16(authsize);
1060 1074
1061 /* icv data follows link tables */ 1075 /* icv data follows link tables */
1062 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 1076 to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1063 edesc->dma_link_tbl + 1077 (edesc->src_nents + edesc->dst_nents + 2) *
1064 edesc->src_nents + 1078 sizeof(struct talitos_ptr));
1065 edesc->dst_nents + 2);
1066
1067 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 1079 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1068 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1080 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1069 edesc->dma_len, DMA_BIDIRECTIONAL); 1081 edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1338 1350
1339 /* first DWORD empty */ 1351 /* first DWORD empty */
1340 desc->ptr[0].len = 0; 1352 desc->ptr[0].len = 0;
1341 desc->ptr[0].ptr = 0; 1353 to_talitos_ptr(&desc->ptr[0], 0);
1342 desc->ptr[0].j_extent = 0; 1354 desc->ptr[0].j_extent = 0;
1343 1355
1344 /* cipher iv */ 1356 /* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1362 edesc->src_is_chained); 1374 edesc->src_is_chained);
1363 1375
1364 if (sg_count == 1) { 1376 if (sg_count == 1) {
1365 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1377 to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1366 } else { 1378 } else {
1367 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, 1379 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1368 &edesc->link_tbl[0]); 1380 &edesc->link_tbl[0]);
1369 if (sg_count > 1) { 1381 if (sg_count > 1) {
1382 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1370 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; 1383 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1371 desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
1372 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1384 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1373 edesc->dma_len, 1385 edesc->dma_len,
1374 DMA_BIDIRECTIONAL); 1386 DMA_BIDIRECTIONAL);
1375 } else { 1387 } else {
1376 /* Only one segment now, so no link tbl needed */ 1388 /* Only one segment now, so no link tbl needed */
1377 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> 1389 to_talitos_ptr(&desc->ptr[3],
1378 src)); 1390 sg_dma_address(areq->src));
1379 } 1391 }
1380 } 1392 }
1381 1393
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1390 edesc->dst_is_chained); 1402 edesc->dst_is_chained);
1391 1403
1392 if (sg_count == 1) { 1404 if (sg_count == 1) {
1393 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1405 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1394 } else { 1406 } else {
1395 struct talitos_ptr *link_tbl_ptr = 1407 struct talitos_ptr *link_tbl_ptr =
1396 &edesc->link_tbl[edesc->src_nents + 1]; 1408 &edesc->link_tbl[edesc->src_nents + 1];
1397 1409
1410 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1411 (edesc->src_nents + 1) *
1412 sizeof(struct talitos_ptr));
1398 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1413 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1399 desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
1400 edesc->dma_link_tbl +
1401 edesc->src_nents + 1);
1402 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1414 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1403 link_tbl_ptr); 1415 link_tbl_ptr);
1404 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1416 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1411 1423
1412 /* last DWORD empty */ 1424 /* last DWORD empty */
1413 desc->ptr[6].len = 0; 1425 desc->ptr[6].len = 0;
1414 desc->ptr[6].ptr = 0; 1426 to_talitos_ptr(&desc->ptr[6], 0);
1415 desc->ptr[6].j_extent = 0; 1427 desc->ptr[6].j_extent = 0;
1416 1428
1417 ret = talitos_submit(dev, desc, callback, areq); 1429 ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
1742 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 1754 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1743 talitos_unregister_rng(dev); 1755 talitos_unregister_rng(dev);
1744 1756
1745 kfree(priv->submit_count); 1757 for (i = 0; i < priv->num_channels; i++)
1746 kfree(priv->tail); 1758 if (priv->chan[i].fifo)
1747 kfree(priv->head); 1759 kfree(priv->chan[i].fifo);
1748
1749 if (priv->fifo)
1750 for (i = 0; i < priv->num_channels; i++)
1751 kfree(priv->fifo[i]);
1752 1760
1753 kfree(priv->fifo); 1761 kfree(priv->chan);
1754 kfree(priv->head_lock);
1755 kfree(priv->tail_lock);
1756 1762
1757 if (priv->irq != NO_IRQ) { 1763 if (priv->irq != NO_IRQ) {
1758 free_irq(priv->irq, dev); 1764 free_irq(priv->irq, dev);
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
1872 if (of_device_is_compatible(np, "fsl,sec2.1")) 1878 if (of_device_is_compatible(np, "fsl,sec2.1"))
1873 priv->features |= TALITOS_FTR_HW_AUTH_CHECK; 1879 priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
1874 1880
1875 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1881 priv->chan = kzalloc(sizeof(struct talitos_channel) *
1876 GFP_KERNEL); 1882 priv->num_channels, GFP_KERNEL);
1877 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1883 if (!priv->chan) {
1878 GFP_KERNEL); 1884 dev_err(dev, "failed to allocate channel management space\n");
1879 if (!priv->head_lock || !priv->tail_lock) {
1880 dev_err(dev, "failed to allocate fifo locks\n");
1881 err = -ENOMEM; 1885 err = -ENOMEM;
1882 goto err_out; 1886 goto err_out;
1883 } 1887 }
1884 1888
1885 for (i = 0; i < priv->num_channels; i++) { 1889 for (i = 0; i < priv->num_channels; i++) {
1886 spin_lock_init(&priv->head_lock[i]); 1890 spin_lock_init(&priv->chan[i].head_lock);
1887 spin_lock_init(&priv->tail_lock[i]); 1891 spin_lock_init(&priv->chan[i].tail_lock);
1888 }
1889
1890 priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1891 priv->num_channels, GFP_KERNEL);
1892 if (!priv->fifo) {
1893 dev_err(dev, "failed to allocate request fifo\n");
1894 err = -ENOMEM;
1895 goto err_out;
1896 } 1892 }
1897 1893
1898 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); 1894 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1899 1895
1900 for (i = 0; i < priv->num_channels; i++) { 1896 for (i = 0; i < priv->num_channels; i++) {
1901 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) * 1897 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
1902 priv->fifo_len, GFP_KERNEL); 1898 priv->fifo_len, GFP_KERNEL);
1903 if (!priv->fifo[i]) { 1899 if (!priv->chan[i].fifo) {
1904 dev_err(dev, "failed to allocate request fifo %d\n", i); 1900 dev_err(dev, "failed to allocate request fifo %d\n", i);
1905 err = -ENOMEM; 1901 err = -ENOMEM;
1906 goto err_out; 1902 goto err_out;
1907 } 1903 }
1908 } 1904 }
1909 1905
1910 priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
1911 GFP_KERNEL);
1912 if (!priv->submit_count) {
1913 dev_err(dev, "failed to allocate fifo submit count space\n");
1914 err = -ENOMEM;
1915 goto err_out;
1916 }
1917 for (i = 0; i < priv->num_channels; i++) 1906 for (i = 0; i < priv->num_channels; i++)
1918 atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); 1907 atomic_set(&priv->chan[i].submit_count,
1908 -(priv->chfifo_len - 1));
1919 1909
1920 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); 1910 dma_set_mask(dev, DMA_BIT_MASK(36));
1921 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1922 if (!priv->head || !priv->tail) {
1923 dev_err(dev, "failed to allocate request index space\n");
1924 err = -ENOMEM;
1925 goto err_out;
1926 }
1927 1911
1928 /* reset and initialize the h/w */ 1912 /* reset and initialize the h/w */
1929 err = init_device(dev); 1913 err = init_device(dev);
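
The probe and remove paths above replace the parallel per-channel arrays with a single kzalloc'd array of channel structures, and dma_set_mask(dev, DMA_BIT_MASK(36)) advertises the 36-bit addressing capability to the DMA API. The patch's definition of struct talitos_channel falls outside this excerpt; reconstructed from the fields referenced in the hunks (fifo, submit_count, head_lock/head, tail_lock/tail), it plausibly looks like the sketch below. Field order and the ____cacheline_aligned annotations are assumptions, and the sketch relies on kernel types (spinlock_t, atomic_t, struct talitos_request) from the driver's context.

/* sketch only, not the patch's literal definition */
struct talitos_channel {
	/* request fifo */
	struct talitos_request *fifo;

	/* number of requests pending in channel h/w fifo */
	atomic_t submit_count ____cacheline_aligned;

	/* request submission (head) lock */
	spinlock_t head_lock ____cacheline_aligned;
	/* index to next free descriptor request */
	int head;

	/* request release (tail) lock */
	spinlock_t tail_lock ____cacheline_aligned;
	/* index to next in-progress/done descriptor request */
	int tail;
};
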
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 575981f0cfd..ff5a1450e14 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -57,6 +57,7 @@
57#define TALITOS_CCCR_RESET 0x1 /* channel reset */ 57#define TALITOS_CCCR_RESET 0x1 /* channel reset */
58#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) 58#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
59#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ 59#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
60#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
60#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ 61#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
61#define TALITOS_CCCR_LO_NT 0x4 /* notification type */ 62#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
62#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ 63#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */