 arch/x86/crypto/aesni-intel_glue.c     |   24
 crypto/ablkcipher.c                    |    3
 crypto/tcrypt.c                        |    3
 crypto/testmgr.c                       |    2
 crypto/testmgr.h                       |   30
 drivers/char/hw_random/Kconfig         |   12
 drivers/char/hw_random/Makefile        |    1
 drivers/char/hw_random/omap-rng.c      |   14
 drivers/char/hw_random/picoxcell-rng.c |  208
 drivers/char/random.c                  |   13
 drivers/crypto/Kconfig                 |   17
 drivers/crypto/Makefile                |    2
 drivers/crypto/omap-aes.c              |    4
 drivers/crypto/omap-sham.c             |    4
 drivers/crypto/picoxcell_crypto.c      | 1867
 drivers/crypto/picoxcell_crypto_regs.h |  128
 16 files changed, 2299 insertions(+), 33 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index e1e60c7d581..e0e6340c8da 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -873,22 +873,18 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
 
 	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
-	if (ret) {
-		crypto_free_ablkcipher(ctr_tfm);
-		return ret;
-	}
+	if (ret)
+		goto out_free_ablkcipher;
 
+	ret = -ENOMEM;
 	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
-	if (!req) {
-		crypto_free_ablkcipher(ctr_tfm);
-		return -EINVAL;
-	}
+	if (!req)
+		goto out_free_ablkcipher;
 
 	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
-	if (!req_data) {
-		crypto_free_ablkcipher(ctr_tfm);
-		return -ENOMEM;
-	}
+	if (!req_data)
+		goto out_free_request;
+
 	memset(req_data->iv, 0, sizeof(req_data->iv));
 
 	/* Clear the data in the hash sub key container to zero.*/
@@ -913,8 +909,10 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 		if (!ret)
 			ret = req_data->result.err;
 	}
-	ablkcipher_request_free(req);
 	kfree(req_data);
+out_free_request:
+	ablkcipher_request_free(req);
+out_free_ablkcipher:
 	crypto_free_ablkcipher(ctr_tfm);
 	return ret;
 }
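
The change above replaces three copy-pasted cleanup paths with a single unwind sequence: each failure jumps to the label that releases only what has already been allocated, and the success path falls through the same labels. It also changes the request-allocation failure from -EINVAL to -ENOMEM, the conventional errno for a failed allocation. A minimal standalone sketch of the pattern (resource names hypothetical):

    #include <stdlib.h>

    static int setup(void)
    {
        int ret = -1;           /* stand-in for -ENOMEM */
        char *a, *b;

        a = malloc(16);
        if (!a)
            goto out;

        b = malloc(16);
        if (!b)
            goto out_free_a;

        /* ... use a and b ... */
        ret = 0;

        free(b);
    out_free_a:
        free(a);
    out:
        return ret;
    }

    int main(void)
    {
        return setup();
    }
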
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a854df2a5a4..fdc67d38660 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -141,8 +141,7 @@ err:
 
 	if (walk->iv != req->info)
 		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
-	if (walk->iv_buffer)
-		kfree(walk->iv_buffer);
+	kfree(walk->iv_buffer);
 
 	return err;
 }
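
The guard is dropped because kfree(), like free() in ISO C, is defined to do nothing when passed a NULL pointer, so the check was redundant. The same holds in portable C:

    #include <stdlib.h>

    int main(void)
    {
        char *p = NULL;

        free(p);    /* well-defined no-op for a null pointer */
        return 0;
    }
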
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9aac5e58be9..e912ea5def3 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -146,7 +146,8 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
 			      unsigned int tcount, u8 *keysize)
 {
 	unsigned int ret, i, j, iv_len;
-	const char *key, iv[128];
+	const char *key;
+	char iv[128];
 	struct crypto_blkcipher *tfm;
 	struct blkcipher_desc desc;
 	const char *e;
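
The bug being fixed is a declarator subtlety: in the old `const char *key, iv[128];` the qualified type `const char` applies to every declarator, so `iv` was an array of const char and could not be written. A standalone illustration of the corrected form:

    #include <string.h>

    int main(void)
    {
        /*
         * The old declaration was:
         *     const char *key, iv[128];
         * which makes iv an array of *const* char, so filling in the
         * IV is rejected by the compiler. Splitting the declarations
         * gives key and iv independent types.
         */
        const char *key = "example key";
        char iv[128];

        memset(iv, 0xff, sizeof(iv));
        (void)key;
        return 0;
    }
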
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 27ea9fe9476..2854865f243 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2077,6 +2077,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}, {
 		.alg = "ghash",
 		.test = alg_test_hash,
+		.fips_allowed = 1,
 		.suite = {
 			.hash = {
 				.vecs = ghash_tv_template,
@@ -2453,6 +2454,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}, {
 		.alg = "xts(aes)",
 		.test = alg_test_skcipher,
+		.fips_allowed = 1,
 		.suite = {
 			.cipher = {
 				.enc = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 834af7f2ade..aa6dac05f84 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -451,8 +451,9 @@ static struct hash_testvec rmd320_tv_template[] = {
 
 /*
  * SHA1 test vectors from from FIPS PUB 180-1
+ * Long vector from CAVS 5.0
  */
-#define SHA1_TEST_VECTORS	2
+#define SHA1_TEST_VECTORS	3
 
 static struct hash_testvec sha1_tv_template[] = {
 	{
@@ -467,6 +468,33 @@ static struct hash_testvec sha1_tv_template[] = {
467 "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1", 468 "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1",
468 .np = 2, 469 .np = 2,
469 .tap = { 28, 28 } 470 .tap = { 28, 28 }
471 }, {
472 .plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06"
473 "\xb6\xeb\x30\xa1\xc3\x71\xd7\x44"
474 "\x50\xa1\x05\xc3\xf9\x73\x5f\x7f"
475 "\xa9\xfe\x38\xcf\x67\xf3\x04\xa5"
476 "\x73\x6a\x10\x6e\x92\xe1\x71\x39"
477 "\xa6\x81\x3b\x1c\x81\xa4\xf3\xd3"
478 "\xfb\x95\x46\xab\x42\x96\xfa\x9f"
479 "\x72\x28\x26\xc0\x66\x86\x9e\xda"
480 "\xcd\x73\xb2\x54\x80\x35\x18\x58"
481 "\x13\xe2\x26\x34\xa9\xda\x44\x00"
482 "\x0d\x95\xa2\x81\xff\x9f\x26\x4e"
483 "\xcc\xe0\xa9\x31\x22\x21\x62\xd0"
484 "\x21\xcc\xa2\x8d\xb5\xf3\xc2\xaa"
485 "\x24\x94\x5a\xb1\xe3\x1c\xb4\x13"
486 "\xae\x29\x81\x0f\xd7\x94\xca\xd5"
487 "\xdf\xaf\x29\xec\x43\xcb\x38\xd1"
488 "\x98\xfe\x4a\xe1\xda\x23\x59\x78"
489 "\x02\x21\x40\x5b\xd6\x71\x2a\x53"
490 "\x05\xda\x4b\x1b\x73\x7f\xce\x7c"
491 "\xd2\x1c\x0e\xb7\x72\x8d\x08\x23"
492 "\x5a\x90\x11",
493 .psize = 163,
494 .digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20"
495 "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17",
496 .np = 4,
497 .tap = { 63, 64, 31, 5 }
470 } 498 }
471}; 499};
472 500
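
For chunked vectors, testmgr feeds the plaintext to the hash in `.np` pieces whose sizes are listed in `.tap`, exercising the partial-update path; the pieces must cover `.psize` exactly (here 63 + 64 + 31 + 5 = 163). A standalone check of that invariant:

    #include <assert.h>

    int main(void)
    {
        /* .tap chunk sizes must sum to .psize for the vector above. */
        unsigned int tap[] = { 63, 64, 31, 5 };
        unsigned int i, sum = 0;

        for (i = 0; i < 4; i++) /* .np == 4 */
            sum += tap[i];
        assert(sum == 163);     /* .psize */
        return 0;
    }
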
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index d31483c5488..beecd1cf9b9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -198,3 +198,15 @@ config HW_RANDOM_NOMADIK
 	  module will be called nomadik-rng.
 
 	  If unsure, say Y.
+
+config HW_RANDOM_PICOXCELL
+	tristate "Picochip picoXcell true random number generator support"
+	depends on HW_RANDOM && ARCH_PICOXCELL && PICOXCELL_PC3X3
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Picochip PC3x3 and later devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called picoxcell-rng.
+
+	  If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 4273308aa1e..3db4eb8b19c 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
 obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
+obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 06aad0831c7..2cc755a6430 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -91,7 +91,7 @@ static struct hwrng omap_rng_ops = {
 
 static int __devinit omap_rng_probe(struct platform_device *pdev)
 {
-	struct resource *res, *mem;
+	struct resource *res;
 	int ret;
 
 	/*
@@ -116,14 +116,12 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENOENT;
 
-	mem = request_mem_region(res->start, resource_size(res),
-				 pdev->name);
-	if (mem == NULL) {
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
 		ret = -EBUSY;
 		goto err_region;
 	}
 
-	dev_set_drvdata(&pdev->dev, mem);
+	dev_set_drvdata(&pdev->dev, res);
 	rng_base = ioremap(res->start, resource_size(res));
 	if (!rng_base) {
 		ret = -ENOMEM;
@@ -146,7 +144,7 @@ err_register:
 	iounmap(rng_base);
 	rng_base = NULL;
 err_ioremap:
-	release_resource(mem);
+	release_mem_region(res->start, resource_size(res));
 err_region:
 	if (cpu_is_omap24xx()) {
 		clk_disable(rng_ick);
@@ -157,7 +155,7 @@ err_region:
 
 static int __exit omap_rng_remove(struct platform_device *pdev)
 {
-	struct resource *mem = dev_get_drvdata(&pdev->dev);
+	struct resource *res = dev_get_drvdata(&pdev->dev);
 
 	hwrng_unregister(&omap_rng_ops);
 
@@ -170,7 +168,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
 		clk_put(rng_ick);
 	}
 
-	release_resource(mem);
+	release_mem_region(res->start, resource_size(res));
 	rng_base = NULL;
 
 	return 0;
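
The fix restores symmetry between acquisition and release: a region claimed with request_mem_region() is dropped by base and size via the matching release_mem_region(), instead of calling release_resource() on a saved pointer, which also lets the probe routine discard the extra `mem` variable. A schematic of the pairing (kernel-style sketch, error handling elided):

    #include <linux/ioport.h>
    #include <linux/io.h>

    /* Claim and map a MEM resource; res is the platform resource. */
    static void __iomem *claim_and_map(struct resource *res, const char *name)
    {
        if (!request_mem_region(res->start, resource_size(res), name))
            return NULL;
        return ioremap(res->start, resource_size(res));
    }

    /* Undo in reverse order with the matching release helper. */
    static void unmap_and_release(struct resource *res, void __iomem *base)
    {
        iounmap(base);
        release_mem_region(res->start, resource_size(res));
    }
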
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
new file mode 100644
index 00000000000..990d55a5e3e
--- /dev/null
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -0,0 +1,208 @@
1/*
2 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * All enquiries to support@picochip.com
9 */
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/err.h>
13#include <linux/hw_random.h>
14#include <linux/io.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18
19#define DATA_REG_OFFSET 0x0200
20#define CSR_REG_OFFSET 0x0278
21#define CSR_OUT_EMPTY_MASK (1 << 24)
22#define CSR_FAULT_MASK (1 << 1)
23#define TRNG_BLOCK_RESET_MASK (1 << 0)
24#define TAI_REG_OFFSET 0x0380
25
26/*
27 * The maximum amount of time in microseconds to spend waiting for data if the
28 * core wants us to wait. The TRNG should generate 32 bits every 320ns so a
29 * timeout of 20us seems reasonable. The TRNG does builtin tests of the data
30 * for randomness so we can't always assume there is data present.
31 */
32#define PICO_TRNG_TIMEOUT 20
33
34static void __iomem *rng_base;
35static struct clk *rng_clk;
36struct device *rng_dev;
37
38static inline u32 picoxcell_trng_read_csr(void)
39{
40 return __raw_readl(rng_base + CSR_REG_OFFSET);
41}
42
43static inline bool picoxcell_trng_is_empty(void)
44{
45 return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK;
46}
47
48/*
49 * Take the random number generator out of reset and make sure the interrupts
50 * are masked. We shouldn't need to get large amounts of random bytes so just
51 * poll the status register. The hardware generates 32 bits every 320ns so we
52 * shouldn't have to wait long enough to warrant waiting for an IRQ.
53 */
54static void picoxcell_trng_start(void)
55{
56 __raw_writel(0, rng_base + TAI_REG_OFFSET);
57 __raw_writel(0, rng_base + CSR_REG_OFFSET);
58}
59
60static void picoxcell_trng_reset(void)
61{
62 __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET);
63 __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET);
64 picoxcell_trng_start();
65}
66
67/*
68 * Get some random data from the random number generator. The hw_random core
69 * layer provides us with locking.
70 */
71static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max,
72 bool wait)
73{
74 int i;
75
76 /* Wait for some data to become available. */
77 for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) {
78 if (!wait)
79 return 0;
80
81 udelay(1);
82 }
83
84 if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) {
85 dev_err(rng_dev, "fault detected, resetting TRNG\n");
86 picoxcell_trng_reset();
87 return -EIO;
88 }
89
90 if (i == PICO_TRNG_TIMEOUT)
91 return 0;
92
93 *(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET);
94 return sizeof(u32);
95}
96
97static struct hwrng picoxcell_trng = {
98 .name = "picoxcell",
99 .read = picoxcell_trng_read,
100};
101
102static int picoxcell_trng_probe(struct platform_device *pdev)
103{
104 int ret;
105 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
106
107 if (!mem) {
108 dev_warn(&pdev->dev, "no memory resource\n");
109 return -ENOMEM;
110 }
111
112 if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
113 "picoxcell_trng")) {
114 dev_warn(&pdev->dev, "unable to request io mem\n");
115 return -EBUSY;
116 }
117
118 rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
119 if (!rng_base) {
120 dev_warn(&pdev->dev, "unable to remap io mem\n");
121 return -ENOMEM;
122 }
123
124 rng_clk = clk_get(&pdev->dev, NULL);
125 if (IS_ERR(rng_clk)) {
126 dev_warn(&pdev->dev, "no clk\n");
127 return PTR_ERR(rng_clk);
128 }
129
130 ret = clk_enable(rng_clk);
131 if (ret) {
132 dev_warn(&pdev->dev, "unable to enable clk\n");
133 goto err_enable;
134 }
135
136 picoxcell_trng_start();
137 ret = hwrng_register(&picoxcell_trng);
138 if (ret)
139 goto err_register;
140
141 rng_dev = &pdev->dev;
142	dev_info(&pdev->dev, "picoxcell random number generator active\n");
143
144 return 0;
145
146err_register:
147 clk_disable(rng_clk);
148err_enable:
149 clk_put(rng_clk);
150
151 return ret;
152}
153
154static int __devexit picoxcell_trng_remove(struct platform_device *pdev)
155{
156 hwrng_unregister(&picoxcell_trng);
157 clk_disable(rng_clk);
158 clk_put(rng_clk);
159
160 return 0;
161}
162
163#ifdef CONFIG_PM
164static int picoxcell_trng_suspend(struct device *dev)
165{
166 clk_disable(rng_clk);
167
168 return 0;
169}
170
171static int picoxcell_trng_resume(struct device *dev)
172{
173 return clk_enable(rng_clk);
174}
175
176static const struct dev_pm_ops picoxcell_trng_pm_ops = {
177 .suspend = picoxcell_trng_suspend,
178 .resume = picoxcell_trng_resume,
179};
180#endif /* CONFIG_PM */
181
182static struct platform_driver picoxcell_trng_driver = {
183 .probe = picoxcell_trng_probe,
184 .remove = __devexit_p(picoxcell_trng_remove),
185 .driver = {
186 .name = "picoxcell-trng",
187 .owner = THIS_MODULE,
188#ifdef CONFIG_PM
189 .pm = &picoxcell_trng_pm_ops,
190#endif /* CONFIG_PM */
191 },
192};
193
194static int __init picoxcell_trng_init(void)
195{
196 return platform_driver_register(&picoxcell_trng_driver);
197}
198module_init(picoxcell_trng_init);
199
200static void __exit picoxcell_trng_exit(void)
201{
202 platform_driver_unregister(&picoxcell_trng_driver);
203}
204module_exit(picoxcell_trng_exit);
205
206MODULE_LICENSE("GPL");
207MODULE_AUTHOR("Jamie Iles");
208MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
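
Once this driver registers with the hw_random core, the hardware is exposed through the kernel's hwrng interface and can be read from userspace via the standard character device (assuming the usual /dev/hwrng node). A minimal reader:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char buf[16];
        ssize_t n, i;
        int fd = open("/dev/hwrng", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/hwrng");
            return 1;
        }
        n = read(fd, buf, sizeof(buf));
        for (i = 0; i < n; i++)
            printf("%02x", buf[i]);
        putchar('\n');
        close(fd);
        return 0;
    }
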
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 72a4fcb1774..5e29e8031bb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -128,6 +128,7 @@
  * void add_input_randomness(unsigned int type, unsigned int code,
  *                                unsigned int value);
  * void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
  *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
@@ -136,9 +137,15 @@
  * inputs to the entropy pool. Note that not all interrupts are good
  * sources of randomness! For example, the timer interrupts is not a
  * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Disk interrupts are
- * a better measure, since the timing of the disk interrupts are more
- * unpredictable.
+ * regular, and hence predictable to an attacker. Network Interface
+ * Controller interrupts are a better measure, since the timing of the
+ * NIC interrupts are more unpredictable.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
  *
  * All of these routines try to estimate how many bits of randomness a
  * particular randomness source.  They do this by keeping track of the
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index eab2cf7a026..e54185223c8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -252,4 +252,21 @@ config CRYPTO_DEV_OMAP_AES
 	  OMAP processors have AES module accelerator. Select this if you
 	  want to use the OMAP module for AES algorithms.
 
+config CRYPTO_DEV_PICOXCELL
+	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
+	depends on ARCH_PICOXCELL
+	select CRYPTO_AES
+	select CRYPTO_AUTHENC
+	select CRYPTO_ALGAPI
+	select CRYPTO_DES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_SEQIV
+	help
+	  This option enables support for the hardware offload engines in the
+	  Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
+	  and for 3gpp Layer 2 ciphering support.
+
+	  Saying m here will build a module named picoxcell_crypto.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 256697330a4..5203e34248d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
-
+obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index add2a1a72ba..5b970d9e995 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -839,9 +839,9 @@ static int omap_aes_probe(struct platform_device *pdev)
 
 	/* Initializing the clock */
 	dd->iclk = clk_get(dev, "ick");
-	if (!dd->iclk) {
+	if (IS_ERR(dd->iclk)) {
 		dev_err(dev, "clock intialization failed.\n");
-		err = -ENODEV;
+		err = PTR_ERR(dd->iclk);
 		goto err_res;
 	}
 
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 2e71123516e..465cde3e4f6 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1206,9 +1206,9 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
 
 	/* Initializing the clock */
 	dd->iclk = clk_get(dev, "ick");
-	if (!dd->iclk) {
+	if (IS_ERR(dd->iclk)) {
 		dev_err(dev, "clock intialization failed.\n");
-		err = -ENODEV;
+		err = PTR_ERR(dd->iclk);
 		goto clk_err;
 	}
 
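
Both OMAP fixes address the same misuse: clk_get() never returns NULL on failure, it returns an ERR_PTR()-encoded error, so the old `if (!dd->iclk)` test could never fire and the hard-coded -ENODEV threw away the real errno. The corrected pattern, as a kernel-style sketch:

    #include <linux/clk.h>
    #include <linux/err.h>

    static int get_iface_clk(struct device *dev, struct clk **clk_out)
    {
        struct clk *clk = clk_get(dev, "ick");

        if (IS_ERR(clk))
            return PTR_ERR(clk);    /* propagate e.g. -ENOENT */

        *clk_out = clk;
        return 0;
    }
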
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
new file mode 100644
index 00000000000..b092d0a6583
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -0,0 +1,1867 @@
1/*
2 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#include <crypto/aead.h>
19#include <crypto/aes.h>
20#include <crypto/algapi.h>
21#include <crypto/authenc.h>
22#include <crypto/des.h>
23#include <crypto/md5.h>
24#include <crypto/sha.h>
25#include <crypto/internal/skcipher.h>
26#include <linux/clk.h>
27#include <linux/crypto.h>
28#include <linux/delay.h>
29#include <linux/dma-mapping.h>
30#include <linux/dmapool.h>
31#include <linux/err.h>
32#include <linux/init.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/list.h>
36#include <linux/module.h>
37#include <linux/platform_device.h>
38#include <linux/pm.h>
39#include <linux/rtnetlink.h>
40#include <linux/scatterlist.h>
41#include <linux/sched.h>
42#include <linux/slab.h>
43#include <linux/timer.h>
44
45#include "picoxcell_crypto_regs.h"
46
47/*
48 * The threshold for the number of entries in the CMD FIFO available before
49 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
50 * number of interrupts raised to the CPU.
51 */
52#define CMD0_IRQ_THRESHOLD 1
53
54/*
55 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
56 * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.
57 * When there are packets in flight but lower than the threshold, we enable
58 * the timer and at expiry, attempt to remove any processed packets from the
59 * queue and if there are still packets left, schedule the timer again.
60 */
61#define PACKET_TIMEOUT 1
62
63/* The priority to register each algorithm with. */
64#define SPACC_CRYPTO_ALG_PRIORITY 10000
65
66#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
67#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
68#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
69#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
70#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
71#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
72#define SPACC_CRYPTO_L2_HASH_PG_SZ 64
73#define SPACC_CRYPTO_L2_MAX_CTXS 128
74#define SPACC_CRYPTO_L2_FIFO_SZ 128
75
76#define MAX_DDT_LEN 16
77
78/* DDT format. This must match the hardware DDT format exactly. */
79struct spacc_ddt {
80 dma_addr_t p;
81 u32 len;
82};
83
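
A DDT is the engine's scatter list: an array of (bus address, length) descriptors that the hardware walks until it reaches an all-zero terminator, with at most MAX_DDT_LEN payload entries. A standalone illustration of the layout (plain integers stand in for dma_addr_t):

    #include <stdio.h>

    struct sketch_ddt {
        unsigned long p;        /* bus address of the fragment */
        unsigned int len;       /* fragment length in bytes */
    };

    int main(void)
    {
        struct sketch_ddt ddt[] = {
            { 0x10000000UL, 4096 },     /* first fragment */
            { 0x20000000UL, 512 },      /* second fragment */
            { 0, 0 },                   /* terminator the engine stops on */
        };
        unsigned int i;

        for (i = 0; ddt[i].p || ddt[i].len; i++)
            printf("frag %u: %#lx, %u bytes\n", i, ddt[i].p, ddt[i].len);
        return 0;
    }
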
84/*
85 * Asynchronous crypto request structure.
86 *
87 * This structure defines a request that is either queued for processing or
88 * being processed.
89 */
90struct spacc_req {
91 struct list_head list;
92 struct spacc_engine *engine;
93 struct crypto_async_request *req;
94 int result;
95 bool is_encrypt;
96 unsigned ctx_id;
97 dma_addr_t src_addr, dst_addr;
98 struct spacc_ddt *src_ddt, *dst_ddt;
99 void (*complete)(struct spacc_req *req);
100
101 /* AEAD specific bits. */
102 u8 *giv;
103 size_t giv_len;
104 dma_addr_t giv_pa;
105};
106
107struct spacc_engine {
108 void __iomem *regs;
109 struct list_head pending;
110 int next_ctx;
111 spinlock_t hw_lock;
112 int in_flight;
113 struct list_head completed;
114 struct list_head in_progress;
115 struct tasklet_struct complete;
116 unsigned long fifo_sz;
117 void __iomem *cipher_ctx_base;
118 void __iomem *hash_key_base;
119 struct spacc_alg *algs;
120 unsigned num_algs;
121 struct list_head registered_algs;
122 size_t cipher_pg_sz;
123 size_t hash_pg_sz;
124 const char *name;
125 struct clk *clk;
126 struct device *dev;
127 unsigned max_ctxs;
128 struct timer_list packet_timeout;
129 unsigned stat_irq_thresh;
130 struct dma_pool *req_pool;
131};
132
133/* Algorithm type mask. */
134#define SPACC_CRYPTO_ALG_MASK 0x7
135
136/* SPACC definition of a crypto algorithm. */
137struct spacc_alg {
138 unsigned long ctrl_default;
139 unsigned long type;
140 struct crypto_alg alg;
141 struct spacc_engine *engine;
142 struct list_head entry;
143 int key_offs;
144 int iv_offs;
145};
146
147/* Generic context structure for any algorithm type. */
148struct spacc_generic_ctx {
149 struct spacc_engine *engine;
150 int flags;
151 int key_offs;
152 int iv_offs;
153};
154
155/* Block cipher context. */
156struct spacc_ablk_ctx {
157 struct spacc_generic_ctx generic;
158 u8 key[AES_MAX_KEY_SIZE];
159 u8 key_len;
160 /*
161 * The fallback cipher. If the operation can't be done in hardware,
162 * fallback to a software version.
163 */
164 struct crypto_ablkcipher *sw_cipher;
165};
166
167/* AEAD cipher context. */
168struct spacc_aead_ctx {
169 struct spacc_generic_ctx generic;
170 u8 cipher_key[AES_MAX_KEY_SIZE];
171 u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
172 u8 cipher_key_len;
173 u8 hash_key_len;
174 struct crypto_aead *sw_cipher;
175 size_t auth_size;
176 u8 salt[AES_BLOCK_SIZE];
177};
178
179static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
180{
181 return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
182}
183
184static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
185{
186 u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
187
188 return fifo_stat & SPA_FIFO_CMD_FULL;
189}
190
191/*
192 * Given a cipher context, and a context number, get the base address of the
193 * context page.
194 *
195 * Returns the address of the context page where the key/context may
196 * be written.
197 */
198static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
199 unsigned indx,
200 bool is_cipher_ctx)
201{
202 return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
203 (indx * ctx->engine->cipher_pg_sz) :
204 ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
205}
206
207/* The context pages can only be written with 32-bit accesses. */
208static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
209 unsigned count)
210{
211 const u32 *src32 = (const u32 *) src;
212
213 while (count--)
214 writel(*src32++, dst++);
215}
216
217static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
218 void __iomem *page_addr, const u8 *key,
219 size_t key_len, const u8 *iv, size_t iv_len)
220{
221 void __iomem *key_ptr = page_addr + ctx->key_offs;
222 void __iomem *iv_ptr = page_addr + ctx->iv_offs;
223
224 memcpy_toio32(key_ptr, key, key_len / 4);
225 memcpy_toio32(iv_ptr, iv, iv_len / 4);
226}
227
228/*
229 * Load a context into the engines context memory.
230 *
231 * Returns the index of the context page where the context was loaded.
232 */
233static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
234 const u8 *ciph_key, size_t ciph_len,
235 const u8 *iv, size_t ivlen, const u8 *hash_key,
236 size_t hash_len)
237{
238 unsigned indx = ctx->engine->next_ctx++;
239 void __iomem *ciph_page_addr, *hash_page_addr;
240
241 ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
242 hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
243
244 ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
245 spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
246 ivlen);
247 writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
248 (1 << SPA_KEY_SZ_CIPHER_OFFSET),
249 ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
250
251 if (hash_key) {
252 memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
253 writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
254 ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
255 }
256
257 return indx;
258}
259
260/* Count the number of scatterlist entries in a scatterlist. */
261static int sg_count(struct scatterlist *sg_list, int nbytes)
262{
263 struct scatterlist *sg = sg_list;
264 int sg_nents = 0;
265
266 while (nbytes > 0) {
267 ++sg_nents;
268 nbytes -= sg->length;
269 sg = sg_next(sg);
270 }
271
272 return sg_nents;
273}
274
275static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
276{
277 ddt->p = phys;
278 ddt->len = len;
279}
280
281/*
282 * Take a crypto request and scatterlists for the data and turn them into DDTs
283 * for passing to the crypto engines. This also DMA maps the data so that the
284 * crypto engines can DMA to/from them.
285 */
286static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
287 struct scatterlist *payload,
288 unsigned nbytes,
289 enum dma_data_direction dir,
290 dma_addr_t *ddt_phys)
291{
292 unsigned nents, mapped_ents;
293 struct scatterlist *cur;
294 struct spacc_ddt *ddt;
295 int i;
296
297 nents = sg_count(payload, nbytes);
298 mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
299
300 if (mapped_ents + 1 > MAX_DDT_LEN)
301 goto out;
302
303 ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
304 if (!ddt)
305 goto out;
306
307 for_each_sg(payload, cur, mapped_ents, i)
308 ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
309 ddt_set(&ddt[mapped_ents], 0, 0);
310
311 return ddt;
312
313out:
314 dma_unmap_sg(engine->dev, payload, nents, dir);
315 return NULL;
316}
317
318static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
319{
320 struct aead_request *areq = container_of(req->req, struct aead_request,
321 base);
322 struct spacc_engine *engine = req->engine;
323 struct spacc_ddt *src_ddt, *dst_ddt;
324 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
325 unsigned nents = sg_count(areq->src, areq->cryptlen);
326 dma_addr_t iv_addr;
327 struct scatterlist *cur;
328 int i, dst_ents, src_ents, assoc_ents;
329 u8 *iv = giv ? giv : areq->iv;
330
331 src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
332 if (!src_ddt)
333 return -ENOMEM;
334
335 dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
336 if (!dst_ddt) {
337 dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
338 return -ENOMEM;
339 }
340
341 req->src_ddt = src_ddt;
342 req->dst_ddt = dst_ddt;
343
344 assoc_ents = dma_map_sg(engine->dev, areq->assoc,
345 sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
346 if (areq->src != areq->dst) {
347 src_ents = dma_map_sg(engine->dev, areq->src, nents,
348 DMA_TO_DEVICE);
349 dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
350 DMA_FROM_DEVICE);
351 } else {
352 src_ents = dma_map_sg(engine->dev, areq->src, nents,
353 DMA_BIDIRECTIONAL);
354 dst_ents = 0;
355 }
356
357 /*
358 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
359 * formed by the crypto block and sent as the ESP IV for IPSEC.
360 */
361 iv_addr = dma_map_single(engine->dev, iv, ivsize,
362 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
363 req->giv_pa = iv_addr;
364
365 /*
366 * Map the associated data. For decryption we don't copy the
367 * associated data.
368 */
369 for_each_sg(areq->assoc, cur, assoc_ents, i) {
370 ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
371 if (req->is_encrypt)
372 ddt_set(dst_ddt++, sg_dma_address(cur),
373 sg_dma_len(cur));
374 }
375 ddt_set(src_ddt++, iv_addr, ivsize);
376
377 if (giv || req->is_encrypt)
378 ddt_set(dst_ddt++, iv_addr, ivsize);
379
380 /*
381 * Now map in the payload for the source and destination and terminate
382 * with the NULL pointers.
383 */
384 for_each_sg(areq->src, cur, src_ents, i) {
385 ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
386 if (areq->src == areq->dst)
387 ddt_set(dst_ddt++, sg_dma_address(cur),
388 sg_dma_len(cur));
389 }
390
391 for_each_sg(areq->dst, cur, dst_ents, i)
392 ddt_set(dst_ddt++, sg_dma_address(cur),
393 sg_dma_len(cur));
394
395 ddt_set(src_ddt, 0, 0);
396 ddt_set(dst_ddt, 0, 0);
397
398 return 0;
399}
400
401static void spacc_aead_free_ddts(struct spacc_req *req)
402{
403 struct aead_request *areq = container_of(req->req, struct aead_request,
404 base);
405 struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
406 struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
407 struct spacc_engine *engine = aead_ctx->generic.engine;
408 unsigned ivsize = alg->alg.cra_aead.ivsize;
409 unsigned nents = sg_count(areq->src, areq->cryptlen);
410
411 if (areq->src != areq->dst) {
412 dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
413 dma_unmap_sg(engine->dev, areq->dst,
414 sg_count(areq->dst, areq->cryptlen),
415 DMA_FROM_DEVICE);
416 } else
417 dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
418
419 dma_unmap_sg(engine->dev, areq->assoc,
420 sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
421
422 dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
423
424 dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
425 dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
426}
427
428static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
429 dma_addr_t ddt_addr, struct scatterlist *payload,
430 unsigned nbytes, enum dma_data_direction dir)
431{
432 unsigned nents = sg_count(payload, nbytes);
433
434 dma_unmap_sg(req->engine->dev, payload, nents, dir);
435 dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
436}
437
438/*
439 * Set key for a DES operation in an AEAD cipher. This also performs weak key
440 * checking if required.
441 */
442static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
443 unsigned int len)
444{
445 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
446 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
447 u32 tmp[DES_EXPKEY_WORDS];
448
449 if (unlikely(!des_ekey(tmp, key)) &&
450 (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
451 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
452 return -EINVAL;
453 }
454
455 memcpy(ctx->cipher_key, key, len);
456 ctx->cipher_key_len = len;
457
458 return 0;
459}
460
461/* Set the key for the AES block cipher component of the AEAD transform. */
462static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
463 unsigned int len)
464{
465 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
466 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
467
468 /*
469 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
470 * request for any other size (192 bits) then we need to do a software
471 * fallback.
472 */
473 if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
474 /*
475 * Set the fallback transform to use the same request flags as
476 * the hardware transform.
477 */
478 ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
479 ctx->sw_cipher->base.crt_flags |=
480 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
481 return crypto_aead_setkey(ctx->sw_cipher, key, len);
482 }
483
484 memcpy(ctx->cipher_key, key, len);
485 ctx->cipher_key_len = len;
486
487 return 0;
488}
489
490static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
491 unsigned int keylen)
492{
493 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
494 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
495 struct rtattr *rta = (void *)key;
496 struct crypto_authenc_key_param *param;
497 unsigned int authkeylen, enckeylen;
498 int err = -EINVAL;
499
500 if (!RTA_OK(rta, keylen))
501 goto badkey;
502
503 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
504 goto badkey;
505
506 if (RTA_PAYLOAD(rta) < sizeof(*param))
507 goto badkey;
508
509 param = RTA_DATA(rta);
510 enckeylen = be32_to_cpu(param->enckeylen);
511
512 key += RTA_ALIGN(rta->rta_len);
513 keylen -= RTA_ALIGN(rta->rta_len);
514
515 if (keylen < enckeylen)
516 goto badkey;
517
518 authkeylen = keylen - enckeylen;
519
520 if (enckeylen > AES_MAX_KEY_SIZE)
521 goto badkey;
522
523 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
524 SPA_CTRL_CIPH_ALG_AES)
525 err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
526 else
527 err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
528
529 if (err)
530 goto badkey;
531
532 memcpy(ctx->hash_ctx, key, authkeylen);
533 ctx->hash_key_len = authkeylen;
534
535 return 0;
536
537badkey:
538 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
539 return -EINVAL;
540}
541
542static int spacc_aead_setauthsize(struct crypto_aead *tfm,
543 unsigned int authsize)
544{
545 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
546
547 ctx->auth_size = authsize;
548
549 return 0;
550}
551
552/*
553 * Check if an AEAD request requires a fallback operation. Some requests can't
554 * be completed in hardware because the hardware may not support certain key
555 * sizes. In these cases we need to complete the request in software.
556 */
557static int spacc_aead_need_fallback(struct spacc_req *req)
558{
559 struct aead_request *aead_req;
560 struct crypto_tfm *tfm = req->req->tfm;
561 struct crypto_alg *alg = req->req->tfm->__crt_alg;
562 struct spacc_alg *spacc_alg = to_spacc_alg(alg);
563 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
564
565 aead_req = container_of(req->req, struct aead_request, base);
566 /*
567 * If we have a non-supported key-length, then we need to do a
568 * software fallback.
569 */
570 if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
571 SPA_CTRL_CIPH_ALG_AES &&
572 ctx->cipher_key_len != AES_KEYSIZE_128 &&
573 ctx->cipher_key_len != AES_KEYSIZE_256)
574 return 1;
575
576 return 0;
577}
578
579static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
580 bool is_encrypt)
581{
582 struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
583 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
584 int err;
585
586 if (ctx->sw_cipher) {
587 /*
588 * Change the request to use the software fallback transform,
589 * and once the ciphering has completed, put the old transform
590 * back into the request.
591 */
592 aead_request_set_tfm(req, ctx->sw_cipher);
593 err = is_encrypt ? crypto_aead_encrypt(req) :
594 crypto_aead_decrypt(req);
595 aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
596 } else
597 err = -EINVAL;
598
599 return err;
600}
601
602static void spacc_aead_complete(struct spacc_req *req)
603{
604 spacc_aead_free_ddts(req);
605 req->req->complete(req->req, req->result);
606}
607
608static int spacc_aead_submit(struct spacc_req *req)
609{
610 struct crypto_tfm *tfm = req->req->tfm;
611 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
612 struct crypto_alg *alg = req->req->tfm->__crt_alg;
613 struct spacc_alg *spacc_alg = to_spacc_alg(alg);
614 struct spacc_engine *engine = ctx->generic.engine;
615 u32 ctrl, proc_len, assoc_len;
616 struct aead_request *aead_req =
617 container_of(req->req, struct aead_request, base);
618
619 req->result = -EINPROGRESS;
620 req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
621 ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
622 ctx->hash_ctx, ctx->hash_key_len);
623
624 /* Set the source and destination DDT pointers. */
625 writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
626 writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
627 writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
628
629 assoc_len = aead_req->assoclen;
630 proc_len = aead_req->cryptlen + assoc_len;
631
632 /*
633 * If we aren't generating an IV, then we need to include the IV in the
634 * associated data so that it is included in the hash.
635 */
636 if (!req->giv) {
637 assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
638 proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
639 } else
640 proc_len += req->giv_len;
641
642 /*
643 * If we are decrypting, we need to take the length of the ICV out of
644 * the processing length.
645 */
646 if (!req->is_encrypt)
647 proc_len -= ctx->auth_size;
648
649 writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
650 writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
651 writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
652 writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
653 writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
654
655 ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
656 (1 << SPA_CTRL_ICV_APPEND);
657 if (req->is_encrypt)
658 ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
659 else
660 ctrl |= (1 << SPA_CTRL_KEY_EXP);
661
662 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
663
664 writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
665
666 return -EINPROGRESS;
667}
668
669/*
670 * Setup an AEAD request for processing. This will configure the engine, load
671 * the context and then start the packet processing.
672 *
673 * @giv Pointer to destination address for a generated IV. If the
674 * request does not need to generate an IV then this should be set to NULL.
675 */
676static int spacc_aead_setup(struct aead_request *req, u8 *giv,
677 unsigned alg_type, bool is_encrypt)
678{
679 struct crypto_alg *alg = req->base.tfm->__crt_alg;
680 struct spacc_engine *engine = to_spacc_alg(alg)->engine;
681 struct spacc_req *dev_req = aead_request_ctx(req);
682 int err = -EINPROGRESS;
683 unsigned long flags;
684 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
685
686 dev_req->giv = giv;
687 dev_req->giv_len = ivsize;
688 dev_req->req = &req->base;
689 dev_req->is_encrypt = is_encrypt;
690 dev_req->result = -EBUSY;
691 dev_req->engine = engine;
692 dev_req->complete = spacc_aead_complete;
693
694 if (unlikely(spacc_aead_need_fallback(dev_req)))
695 return spacc_aead_do_fallback(req, alg_type, is_encrypt);
696
697 spacc_aead_make_ddts(dev_req, dev_req->giv);
698
699 err = -EINPROGRESS;
700 spin_lock_irqsave(&engine->hw_lock, flags);
701 if (unlikely(spacc_fifo_cmd_full(engine))) {
702 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
703 err = -EBUSY;
704 spin_unlock_irqrestore(&engine->hw_lock, flags);
705 goto out_free_ddts;
706 }
707 list_add_tail(&dev_req->list, &engine->pending);
708 } else {
709 ++engine->in_flight;
710 list_add_tail(&dev_req->list, &engine->in_progress);
711 spacc_aead_submit(dev_req);
712 }
713 spin_unlock_irqrestore(&engine->hw_lock, flags);
714
715 goto out;
716
717out_free_ddts:
718 spacc_aead_free_ddts(dev_req);
719out:
720 return err;
721}
722
723static int spacc_aead_encrypt(struct aead_request *req)
724{
725 struct crypto_aead *aead = crypto_aead_reqtfm(req);
726 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
727 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
728
729 return spacc_aead_setup(req, NULL, alg->type, 1);
730}
731
732static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
733{
734 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
735 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
736 size_t ivsize = crypto_aead_ivsize(tfm);
737 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
738 unsigned len;
739 __be64 seq;
740
741 memcpy(req->areq.iv, ctx->salt, ivsize);
742 len = ivsize;
743 if (ivsize > sizeof(u64)) {
744 memset(req->giv, 0, ivsize - sizeof(u64));
745 len = sizeof(u64);
746 }
747 seq = cpu_to_be64(req->seq);
748 memcpy(req->giv + ivsize - len, &seq, len);
749
750 return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
751}
752
753static int spacc_aead_decrypt(struct aead_request *req)
754{
755 struct crypto_aead *aead = crypto_aead_reqtfm(req);
756 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
757 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
758
759 return spacc_aead_setup(req, NULL, alg->type, 0);
760}
761
762/*
763 * Initialise a new AEAD context. This is responsible for allocating the
764 * fallback cipher and initialising the context.
765 */
766static int spacc_aead_cra_init(struct crypto_tfm *tfm)
767{
768 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
769 struct crypto_alg *alg = tfm->__crt_alg;
770 struct spacc_alg *spacc_alg = to_spacc_alg(alg);
771 struct spacc_engine *engine = spacc_alg->engine;
772
773 ctx->generic.flags = spacc_alg->type;
774 ctx->generic.engine = engine;
775 ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
776 CRYPTO_ALG_ASYNC |
777 CRYPTO_ALG_NEED_FALLBACK);
778 if (IS_ERR(ctx->sw_cipher)) {
779 dev_warn(engine->dev, "failed to allocate fallback for %s\n",
780 alg->cra_name);
781 ctx->sw_cipher = NULL;
782 }
783 ctx->generic.key_offs = spacc_alg->key_offs;
784 ctx->generic.iv_offs = spacc_alg->iv_offs;
785
786 get_random_bytes(ctx->salt, sizeof(ctx->salt));
787
788 tfm->crt_aead.reqsize = sizeof(struct spacc_req);
789
790 return 0;
791}
792
793/*
794 * Destructor for an AEAD context. This is called when the transform is freed
795 * and must free the fallback cipher.
796 */
797static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
798{
799 struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
800
801 if (ctx->sw_cipher)
802 crypto_free_aead(ctx->sw_cipher);
803 ctx->sw_cipher = NULL;
804}
805
806/*
807 * Set the DES key for a block cipher transform. This also performs weak key
808 * checking if the transform has requested it.
809 */
810static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
811 unsigned int len)
812{
813 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
814 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
815 u32 tmp[DES_EXPKEY_WORDS];
816
817 if (len > DES3_EDE_KEY_SIZE) {
818 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
819 return -EINVAL;
820 }
821
822 if (unlikely(!des_ekey(tmp, key)) &&
823 (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
824 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
825 return -EINVAL;
826 }
827
828 memcpy(ctx->key, key, len);
829 ctx->key_len = len;
830
831 return 0;
832}
833
834/*
835 * Set the key for an AES block cipher. Some key lengths are not supported in
836 * hardware so this must also check whether a fallback is needed.
837 */
838static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
839 unsigned int len)
840{
841 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
842 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
843 int err = 0;
844
845 if (len > AES_MAX_KEY_SIZE) {
846 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
847 return -EINVAL;
848 }
849
850 /*
851 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
852 * request for any other size (192 bits) then we need to do a software
853 * fallback.
854 */
855	if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
856 ctx->sw_cipher) {
857 /*
858 * Set the fallback transform to use the same request flags as
859 * the hardware transform.
860 */
861 ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
862 ctx->sw_cipher->base.crt_flags |=
863 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
864
865 err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
866 if (err)
867 goto sw_setkey_failed;
868	} else if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
869 !ctx->sw_cipher)
870 err = -EINVAL;
871
872 memcpy(ctx->key, key, len);
873 ctx->key_len = len;
874
875sw_setkey_failed:
876 if (err && ctx->sw_cipher) {
877 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
878 tfm->crt_flags |=
879 ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
880 }
881
882 return err;
883}
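
The fallback test has to be a conjunction: by De Morgan's laws, "not a supported size" is (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256), whereas the disjunction of the two inequalities is true for every length, since no value can equal both sizes at once. A standalone check:

    #include <assert.h>

    int main(void)
    {
        unsigned int len = 16;  /* AES_KEYSIZE_128 */

        /* Correct test: 16 is supported, so no fallback is needed. */
        assert(!(len != 16 && len != 32));
        /* The disjunction is a tautology and would always fall back. */
        assert(len != 16 || len != 32);
        return 0;
    }
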
884
885static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
886 const u8 *key, unsigned int len)
887{
888 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
889 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
890 int err = 0;
891
892 if (len > AES_MAX_KEY_SIZE) {
893 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
894 err = -EINVAL;
895 goto out;
896 }
897
898 memcpy(ctx->key, key, len);
899 ctx->key_len = len;
900
901out:
902 return err;
903}
904
905static int spacc_ablk_need_fallback(struct spacc_req *req)
906{
907 struct spacc_ablk_ctx *ctx;
908 struct crypto_tfm *tfm = req->req->tfm;
909 struct crypto_alg *alg = req->req->tfm->__crt_alg;
910 struct spacc_alg *spacc_alg = to_spacc_alg(alg);
911
912 ctx = crypto_tfm_ctx(tfm);
913
914 return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
915 SPA_CTRL_CIPH_ALG_AES &&
916 ctx->key_len != AES_KEYSIZE_128 &&
917 ctx->key_len != AES_KEYSIZE_256;
918}
919
920static void spacc_ablk_complete(struct spacc_req *req)
921{
922 struct ablkcipher_request *ablk_req =
923 container_of(req->req, struct ablkcipher_request, base);
924
925 if (ablk_req->src != ablk_req->dst) {
926 spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
927 ablk_req->nbytes, DMA_TO_DEVICE);
928 spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
929 ablk_req->nbytes, DMA_FROM_DEVICE);
930 } else
931 spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
932 ablk_req->nbytes, DMA_BIDIRECTIONAL);
933
934 req->req->complete(req->req, req->result);
935}
936
937static int spacc_ablk_submit(struct spacc_req *req)
938{
939 struct crypto_tfm *tfm = req->req->tfm;
940 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
941 struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
942 struct crypto_alg *alg = req->req->tfm->__crt_alg;
943 struct spacc_alg *spacc_alg = to_spacc_alg(alg);
944 struct spacc_engine *engine = ctx->generic.engine;
945 u32 ctrl;
946
947 req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
948 ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
949 NULL, 0);
950
951 writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
952 writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
953 writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
954
955 writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
956 writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
957 writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
958 writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
959
960 ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
961 (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
962 (1 << SPA_CTRL_KEY_EXP));
963
964 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
965
966 writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
967
968 return -EINPROGRESS;
969}
970
971static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
972 unsigned alg_type, bool is_encrypt)
973{
974 struct crypto_tfm *old_tfm =
975 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
976 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
977 int err;
978
979 if (!ctx->sw_cipher)
980 return -EINVAL;
981
982 /*
983 * Change the request to use the software fallback transform, and once
984 * the ciphering has completed, put the old transform back into the
985 * request.
986 */
987 ablkcipher_request_set_tfm(req, ctx->sw_cipher);
988 err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
989 crypto_ablkcipher_decrypt(req);
990 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
991
992 return err;
993}
994
995static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
996 bool is_encrypt)
997{
998 struct crypto_alg *alg = req->base.tfm->__crt_alg;
999 struct spacc_engine *engine = to_spacc_alg(alg)->engine;
1000 struct spacc_req *dev_req = ablkcipher_request_ctx(req);
1001 unsigned long flags;
1002 int err = -ENOMEM;
1003
1004 dev_req->req = &req->base;
1005 dev_req->is_encrypt = is_encrypt;
1006 dev_req->engine = engine;
1007 dev_req->complete = spacc_ablk_complete;
1008 dev_req->result = -EINPROGRESS;
1009
1010 if (unlikely(spacc_ablk_need_fallback(dev_req)))
1011 return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
1012
1013 /*
1014 * Create the DDT's for the engine. If we share the same source and
1015 * destination then we can optimize by reusing the DDT's.
1016 */
1017 if (req->src != req->dst) {
1018 dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
1019 req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
1020 if (!dev_req->src_ddt)
1021 goto out;
1022
1023 dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
1024 req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
1025 if (!dev_req->dst_ddt)
1026 goto out_free_src;
1027 } else {
1028 dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
1029 req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
1030 if (!dev_req->dst_ddt)
1031 goto out;
1032
1033 dev_req->src_ddt = NULL;
1034 dev_req->src_addr = dev_req->dst_addr;
1035 }
1036
1037 err = -EINPROGRESS;
1038 spin_lock_irqsave(&engine->hw_lock, flags);
1039 /*
1040 * Check if the engine will accept the operation now. If it won't then
1041 * we either stick it on the end of a pending list if we can backlog,
1042 * or bailout with an error if not.
1043 */
1044 if (unlikely(spacc_fifo_cmd_full(engine))) {
1045 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1046 err = -EBUSY;
1047 spin_unlock_irqrestore(&engine->hw_lock, flags);
1048 goto out_free_ddts;
1049 }
1050 list_add_tail(&dev_req->list, &engine->pending);
1051 } else {
1052 ++engine->in_flight;
1053 list_add_tail(&dev_req->list, &engine->in_progress);
1054 spacc_ablk_submit(dev_req);
1055 }
1056 spin_unlock_irqrestore(&engine->hw_lock, flags);
1057
1058 goto out;
1059
1060out_free_ddts:
1061 spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
1062 req->nbytes, req->src == req->dst ?
1063 DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
1064out_free_src:
1065 if (req->src != req->dst)
1066 spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
1067 req->src, req->nbytes, DMA_TO_DEVICE);
1068out:
1069 return err;
1070}
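
The submit path above makes a three-way decision: if the command FIFO has room, the request goes straight to the engine; if the FIFO is full and the caller allowed backlogging, the request is parked on the pending list for the completion tasklet to resubmit later; otherwise it fails fast with -EBUSY. Reduced to its control flow (standalone sketch, names hypothetical):

    #include <assert.h>
    #include <stdbool.h>

    enum outcome { SUBMITTED, QUEUED, REJECTED };

    static enum outcome submit_policy(bool fifo_full, bool may_backlog)
    {
        if (!fifo_full)
            return SUBMITTED;                   /* straight into the engine */
        return may_backlog ? QUEUED : REJECTED; /* REJECTED maps to -EBUSY */
    }

    int main(void)
    {
        assert(submit_policy(false, false) == SUBMITTED);
        assert(submit_policy(true, true) == QUEUED);
        assert(submit_policy(true, false) == REJECTED);
        return 0;
    }
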
1071
1072static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
1073{
1074 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1075 struct crypto_alg *alg = tfm->__crt_alg;
1076 struct spacc_alg *spacc_alg = to_spacc_alg(alg);
1077 struct spacc_engine *engine = spacc_alg->engine;
1078
1079 ctx->generic.flags = spacc_alg->type;
1080 ctx->generic.engine = engine;
1081 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
1082 ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
1083 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1084 if (IS_ERR(ctx->sw_cipher)) {
1085 dev_warn(engine->dev, "failed to allocate fallback for %s\n",
1086 alg->cra_name);
1087 ctx->sw_cipher = NULL;
1088 }
1089 }
1090 ctx->generic.key_offs = spacc_alg->key_offs;
1091 ctx->generic.iv_offs = spacc_alg->iv_offs;
1092
1093 tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
1094
1095 return 0;
1096}
1097
1098static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
1099{
1100 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1101
1102 if (ctx->sw_cipher)
1103 crypto_free_ablkcipher(ctx->sw_cipher);
1104 ctx->sw_cipher = NULL;
1105}
1106
1107static int spacc_ablk_encrypt(struct ablkcipher_request *req)
1108{
1109 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
1110 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1111 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
1112
1113 return spacc_ablk_setup(req, alg->type, 1);
1114}
1115
1116static int spacc_ablk_decrypt(struct ablkcipher_request *req)
1117{
1118 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
1119 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1120 struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
1121
1122 return spacc_ablk_setup(req, alg->type, 0);
1123}
1124
1125static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
1126{
1127 return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
1128 SPA_FIFO_STAT_EMPTY;
1129}
1130
1131static void spacc_process_done(struct spacc_engine *engine)
1132{
1133 struct spacc_req *req;
1134 unsigned long flags;
1135
1136 spin_lock_irqsave(&engine->hw_lock, flags);
1137
1138 while (!spacc_fifo_stat_empty(engine)) {
1139 req = list_first_entry(&engine->in_progress, struct spacc_req,
1140 list);
1141 list_move_tail(&req->list, &engine->completed);
1142
1143 /* POP the status register. */
1144 writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
1145 req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
1146 SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
1147
1148 /*
1149 * Convert the SPAcc error status into the standard POSIX error
1150 * codes.
1151 */
1152 if (unlikely(req->result)) {
1153 switch (req->result) {
1154 case SPA_STATUS_ICV_FAIL:
1155 req->result = -EBADMSG;
1156 break;
1157
1158 case SPA_STATUS_MEMORY_ERROR:
1159 dev_warn(engine->dev,
1160 "memory error triggered\n");
1161 req->result = -EFAULT;
1162 break;
1163
1164 case SPA_STATUS_BLOCK_ERROR:
1165 dev_warn(engine->dev,
1166 "block error triggered\n");
1167 req->result = -EIO;
1168 break;
1169 }
1170 }
1171 }
1172
1173 tasklet_schedule(&engine->complete);
1174
1175 spin_unlock_irqrestore(&engine->hw_lock, flags);
1176}
1177
1178static irqreturn_t spacc_spacc_irq(int irq, void *dev)
1179{
1180 struct spacc_engine *engine = (struct spacc_engine *)dev;
1181 u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1182
1183 writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
1184 spacc_process_done(engine);
1185
1186 return IRQ_HANDLED;
1187}
1188
1189static void spacc_packet_timeout(unsigned long data)
1190{
1191 struct spacc_engine *engine = (struct spacc_engine *)data;
1192
1193 spacc_process_done(engine);
1194}
1195
1196static int spacc_req_submit(struct spacc_req *req)
1197{
1198 struct crypto_alg *alg = req->req->tfm->__crt_alg;
1199
1200	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AEAD)
1201 return spacc_aead_submit(req);
1202 else
1203 return spacc_ablk_submit(req);
1204}
1205
1206static void spacc_spacc_complete(unsigned long data)
1207{
1208 struct spacc_engine *engine = (struct spacc_engine *)data;
1209 struct spacc_req *req, *tmp;
1210 unsigned long flags;
1211 int num_removed = 0;
1212 LIST_HEAD(completed);
1213
1214 spin_lock_irqsave(&engine->hw_lock, flags);
1215 list_splice_init(&engine->completed, &completed);
1216 spin_unlock_irqrestore(&engine->hw_lock, flags);
1217
1218 list_for_each_entry_safe(req, tmp, &completed, list) {
1219 ++num_removed;
1220 req->complete(req);
1221 }
1222
1223 /* Try and fill the engine back up again. */
1224 spin_lock_irqsave(&engine->hw_lock, flags);
1225
1226 engine->in_flight -= num_removed;
1227
1228 list_for_each_entry_safe(req, tmp, &engine->pending, list) {
1229 if (spacc_fifo_cmd_full(engine))
1230 break;
1231
1232 list_move_tail(&req->list, &engine->in_progress);
1233 ++engine->in_flight;
1234 req->result = spacc_req_submit(req);
1235 }
1236
1237 if (engine->in_flight)
1238 mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
1239
1240 spin_unlock_irqrestore(&engine->hw_lock, flags);
1241}
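
Taken together, the IRQ handler, spacc_process_done() and this tasklet drive a three-list pipeline per engine. A summary of the request lifecycle as implemented above (a comment-only recap, no new logic):

    /*
     *   spacc_*_setup() ------> engine->in_progress ------> engine->completed
     *        |                      ^    (IRQ/timeout pops        |
     *        | FIFO full and        |     the status FIFO)        v
     *        | MAY_BACKLOG set      | tasklet refill        req->complete()
     *        +--> engine->pending --+
     *
     * The packet timeout timer re-runs spacc_process_done() so that a tail
     * of requests still completes when the STAT_CNT IRQ threshold is never
     * reached.
     */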
1242
1243#ifdef CONFIG_PM
1244static int spacc_suspend(struct device *dev)
1245{
1246 struct platform_device *pdev = to_platform_device(dev);
1247 struct spacc_engine *engine = platform_get_drvdata(pdev);
1248
1249 /*
1250 * We only support standby mode. All we have to do is gate the clock to
1251 * the spacc. The hardware will preserve state until we turn it back
1252 * on again.
1253 */
1254 clk_disable(engine->clk);
1255
1256 return 0;
1257}
1258
1259static int spacc_resume(struct device *dev)
1260{
1261 struct platform_device *pdev = to_platform_device(dev);
1262 struct spacc_engine *engine = platform_get_drvdata(pdev);
1263
1264 return clk_enable(engine->clk);
1265}
1266
1267static const struct dev_pm_ops spacc_pm_ops = {
1268 .suspend = spacc_suspend,
1269 .resume = spacc_resume,
1270};
1271#endif /* CONFIG_PM */
1272
1273static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
1274{
1275 return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
1276}
1277
1278static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
1279 struct device_attribute *attr,
1280 char *buf)
1281{
1282 struct spacc_engine *engine = spacc_dev_to_engine(dev);
1283
1284 return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
1285}
1286
1287static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
1288 struct device_attribute *attr,
1289 const char *buf, size_t len)
1290{
1291 struct spacc_engine *engine = spacc_dev_to_engine(dev);
1292 unsigned long thresh;
1293
1294 if (strict_strtoul(buf, 0, &thresh))
1295 return -EINVAL;
1296
1297 thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
1298
1299 engine->stat_irq_thresh = thresh;
1300 writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1301 engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1302
1303 return len;
1304}
1305static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
1306 spacc_stat_irq_thresh_store);
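
The attribute pair above makes stat_irq_thresh tunable at runtime through sysfs. A hypothetical userspace helper is sketched below; the device path is an assumption that depends on how the platform device is named on a given board.

    #include <stdio.h>

    /* Write a new STAT_CNT IRQ threshold; returns 0 on success.  The
     * sysfs path is illustrative for a "picoxcell-ipsec" platform device. */
    static int set_stat_irq_thresh(unsigned int thresh)
    {
    	FILE *f = fopen("/sys/devices/platform/picoxcell-ipsec/"
    			"stat_irq_thresh", "w");

    	if (!f)
    		return -1;
    	fprintf(f, "%u\n", thresh);
    	return fclose(f) ? -1 : 0;
    }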
1307
1308static struct spacc_alg ipsec_engine_algs[] = {
1309 {
1310 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
1311 .key_offs = 0,
1312 .iv_offs = AES_MAX_KEY_SIZE,
1313 .alg = {
1314 .cra_name = "cbc(aes)",
1315 .cra_driver_name = "cbc-aes-picoxcell",
1316 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1317 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1318 CRYPTO_ALG_ASYNC |
1319 CRYPTO_ALG_NEED_FALLBACK,
1320 .cra_blocksize = AES_BLOCK_SIZE,
1321 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1322 .cra_type = &crypto_ablkcipher_type,
1323 .cra_module = THIS_MODULE,
1324 .cra_ablkcipher = {
1325 .setkey = spacc_aes_setkey,
1326 .encrypt = spacc_ablk_encrypt,
1327 .decrypt = spacc_ablk_decrypt,
1328 .min_keysize = AES_MIN_KEY_SIZE,
1329 .max_keysize = AES_MAX_KEY_SIZE,
1330 .ivsize = AES_BLOCK_SIZE,
1331 },
1332 .cra_init = spacc_ablk_cra_init,
1333 .cra_exit = spacc_ablk_cra_exit,
1334 },
1335 },
1336 {
1337 .key_offs = 0,
1338 .iv_offs = AES_MAX_KEY_SIZE,
1339 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
1340 .alg = {
1341 .cra_name = "ecb(aes)",
1342 .cra_driver_name = "ecb-aes-picoxcell",
1343 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1344 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1345 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1346 .cra_blocksize = AES_BLOCK_SIZE,
1347 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1348 .cra_type = &crypto_ablkcipher_type,
1349 .cra_module = THIS_MODULE,
1350 .cra_ablkcipher = {
1351 .setkey = spacc_aes_setkey,
1352 .encrypt = spacc_ablk_encrypt,
1353 .decrypt = spacc_ablk_decrypt,
1354 .min_keysize = AES_MIN_KEY_SIZE,
1355 .max_keysize = AES_MAX_KEY_SIZE,
1356 },
1357 .cra_init = spacc_ablk_cra_init,
1358 .cra_exit = spacc_ablk_cra_exit,
1359 },
1360 },
1361 {
1362 .key_offs = DES_BLOCK_SIZE,
1363 .iv_offs = 0,
1364 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1365 .alg = {
1366 .cra_name = "cbc(des)",
1367 .cra_driver_name = "cbc-des-picoxcell",
1368 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1369 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1370 .cra_blocksize = DES_BLOCK_SIZE,
1371 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1372 .cra_type = &crypto_ablkcipher_type,
1373 .cra_module = THIS_MODULE,
1374 .cra_ablkcipher = {
1375 .setkey = spacc_des_setkey,
1376 .encrypt = spacc_ablk_encrypt,
1377 .decrypt = spacc_ablk_decrypt,
1378 .min_keysize = DES_KEY_SIZE,
1379 .max_keysize = DES_KEY_SIZE,
1380 .ivsize = DES_BLOCK_SIZE,
1381 },
1382 .cra_init = spacc_ablk_cra_init,
1383 .cra_exit = spacc_ablk_cra_exit,
1384 },
1385 },
1386 {
1387 .key_offs = DES_BLOCK_SIZE,
1388 .iv_offs = 0,
1389 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1390 .alg = {
1391 .cra_name = "ecb(des)",
1392 .cra_driver_name = "ecb-des-picoxcell",
1393 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1394 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1395 .cra_blocksize = DES_BLOCK_SIZE,
1396 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1397 .cra_type = &crypto_ablkcipher_type,
1398 .cra_module = THIS_MODULE,
1399 .cra_ablkcipher = {
1400 .setkey = spacc_des_setkey,
1401 .encrypt = spacc_ablk_encrypt,
1402 .decrypt = spacc_ablk_decrypt,
1403 .min_keysize = DES_KEY_SIZE,
1404 .max_keysize = DES_KEY_SIZE,
1405 },
1406 .cra_init = spacc_ablk_cra_init,
1407 .cra_exit = spacc_ablk_cra_exit,
1408 },
1409 },
1410 {
1411 .key_offs = DES_BLOCK_SIZE,
1412 .iv_offs = 0,
1413 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1414 .alg = {
1415 .cra_name = "cbc(des3_ede)",
1416 .cra_driver_name = "cbc-des3-ede-picoxcell",
1417 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1418 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1419 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1420 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1421 .cra_type = &crypto_ablkcipher_type,
1422 .cra_module = THIS_MODULE,
1423 .cra_ablkcipher = {
1424 .setkey = spacc_des_setkey,
1425 .encrypt = spacc_ablk_encrypt,
1426 .decrypt = spacc_ablk_decrypt,
1427 .min_keysize = DES3_EDE_KEY_SIZE,
1428 .max_keysize = DES3_EDE_KEY_SIZE,
1429 .ivsize = DES3_EDE_BLOCK_SIZE,
1430 },
1431 .cra_init = spacc_ablk_cra_init,
1432 .cra_exit = spacc_ablk_cra_exit,
1433 },
1434 },
1435 {
1436 .key_offs = DES_BLOCK_SIZE,
1437 .iv_offs = 0,
1438 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
1439 .alg = {
1440 .cra_name = "ecb(des3_ede)",
1441 .cra_driver_name = "ecb-des3-ede-picoxcell",
1442 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1443 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1444 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1445 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1446 .cra_type = &crypto_ablkcipher_type,
1447 .cra_module = THIS_MODULE,
1448 .cra_ablkcipher = {
1449 .setkey = spacc_des_setkey,
1450 .encrypt = spacc_ablk_encrypt,
1451 .decrypt = spacc_ablk_decrypt,
1452 .min_keysize = DES3_EDE_KEY_SIZE,
1453 .max_keysize = DES3_EDE_KEY_SIZE,
1454 },
1455 .cra_init = spacc_ablk_cra_init,
1456 .cra_exit = spacc_ablk_cra_exit,
1457 },
1458 },
1459 {
1460 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1461 SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
1462 .key_offs = 0,
1463 .iv_offs = AES_MAX_KEY_SIZE,
1464 .alg = {
1465 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1466 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
1467 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1468 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1469 .cra_blocksize = AES_BLOCK_SIZE,
1470 .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1471 .cra_type = &crypto_aead_type,
1472 .cra_module = THIS_MODULE,
1473 .cra_aead = {
1474 .setkey = spacc_aead_setkey,
1475 .setauthsize = spacc_aead_setauthsize,
1476 .encrypt = spacc_aead_encrypt,
1477 .decrypt = spacc_aead_decrypt,
1478 .givencrypt = spacc_aead_givencrypt,
1479 .ivsize = AES_BLOCK_SIZE,
1480 .maxauthsize = SHA1_DIGEST_SIZE,
1481 },
1482 .cra_init = spacc_aead_cra_init,
1483 .cra_exit = spacc_aead_cra_exit,
1484 },
1485 },
1486 {
1487 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1488 SPA_CTRL_HASH_ALG_SHA256 |
1489 SPA_CTRL_HASH_MODE_HMAC,
1490 .key_offs = 0,
1491 .iv_offs = AES_MAX_KEY_SIZE,
1492 .alg = {
1493 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1494 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
1495 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1496 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1497 .cra_blocksize = AES_BLOCK_SIZE,
1498 .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1499 .cra_type = &crypto_aead_type,
1500 .cra_module = THIS_MODULE,
1501 .cra_aead = {
1502 .setkey = spacc_aead_setkey,
1503 .setauthsize = spacc_aead_setauthsize,
1504 .encrypt = spacc_aead_encrypt,
1505 .decrypt = spacc_aead_decrypt,
1506 .givencrypt = spacc_aead_givencrypt,
1507 .ivsize = AES_BLOCK_SIZE,
1508 .maxauthsize = SHA256_DIGEST_SIZE,
1509 },
1510 .cra_init = spacc_aead_cra_init,
1511 .cra_exit = spacc_aead_cra_exit,
1512 },
1513 },
1514 {
1515 .key_offs = 0,
1516 .iv_offs = AES_MAX_KEY_SIZE,
1517 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1518 SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
1519 .alg = {
1520 .cra_name = "authenc(hmac(md5),cbc(aes))",
1521 .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
1522 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1523 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1524 .cra_blocksize = AES_BLOCK_SIZE,
1525 .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1526 .cra_type = &crypto_aead_type,
1527 .cra_module = THIS_MODULE,
1528 .cra_aead = {
1529 .setkey = spacc_aead_setkey,
1530 .setauthsize = spacc_aead_setauthsize,
1531 .encrypt = spacc_aead_encrypt,
1532 .decrypt = spacc_aead_decrypt,
1533 .givencrypt = spacc_aead_givencrypt,
1534 .ivsize = AES_BLOCK_SIZE,
1535 .maxauthsize = MD5_DIGEST_SIZE,
1536 },
1537 .cra_init = spacc_aead_cra_init,
1538 .cra_exit = spacc_aead_cra_exit,
1539 },
1540 },
1541 {
1542 .key_offs = DES_BLOCK_SIZE,
1543 .iv_offs = 0,
1544 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
1545 SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
1546 .alg = {
1547 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1548 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
1549 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1550 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1551 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1552 .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1553 .cra_type = &crypto_aead_type,
1554 .cra_module = THIS_MODULE,
1555 .cra_aead = {
1556 .setkey = spacc_aead_setkey,
1557 .setauthsize = spacc_aead_setauthsize,
1558 .encrypt = spacc_aead_encrypt,
1559 .decrypt = spacc_aead_decrypt,
1560 .givencrypt = spacc_aead_givencrypt,
1561 .ivsize = DES3_EDE_BLOCK_SIZE,
1562 .maxauthsize = SHA1_DIGEST_SIZE,
1563 },
1564 .cra_init = spacc_aead_cra_init,
1565 .cra_exit = spacc_aead_cra_exit,
1566 },
1567 },
1568 {
1569 .key_offs = DES_BLOCK_SIZE,
1570 .iv_offs = 0,
1571		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
1572 SPA_CTRL_HASH_ALG_SHA256 |
1573 SPA_CTRL_HASH_MODE_HMAC,
1574 .alg = {
1575 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1576 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
1577 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1578 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1579 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1580 .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1581 .cra_type = &crypto_aead_type,
1582 .cra_module = THIS_MODULE,
1583 .cra_aead = {
1584 .setkey = spacc_aead_setkey,
1585 .setauthsize = spacc_aead_setauthsize,
1586 .encrypt = spacc_aead_encrypt,
1587 .decrypt = spacc_aead_decrypt,
1588 .givencrypt = spacc_aead_givencrypt,
1589 .ivsize = DES3_EDE_BLOCK_SIZE,
1590 .maxauthsize = SHA256_DIGEST_SIZE,
1591 },
1592 .cra_init = spacc_aead_cra_init,
1593 .cra_exit = spacc_aead_cra_exit,
1594 },
1595 },
1596 {
1597 .key_offs = DES_BLOCK_SIZE,
1598 .iv_offs = 0,
1599 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
1600 SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
1601 .alg = {
1602 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1603 .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
1604 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1605 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1606 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1607 .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1608 .cra_type = &crypto_aead_type,
1609 .cra_module = THIS_MODULE,
1610 .cra_aead = {
1611 .setkey = spacc_aead_setkey,
1612 .setauthsize = spacc_aead_setauthsize,
1613 .encrypt = spacc_aead_encrypt,
1614 .decrypt = spacc_aead_decrypt,
1615 .givencrypt = spacc_aead_givencrypt,
1616 .ivsize = DES3_EDE_BLOCK_SIZE,
1617 .maxauthsize = MD5_DIGEST_SIZE,
1618 },
1619 .cra_init = spacc_aead_cra_init,
1620 .cra_exit = spacc_aead_cra_exit,
1621 },
1622 },
1623};
1624
1625static struct spacc_alg l2_engine_algs[] = {
1626 {
1627 .key_offs = 0,
1628 .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
1629 .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
1630 SPA_CTRL_CIPH_MODE_F8,
1631 .alg = {
1632 .cra_name = "f8(kasumi)",
1633 .cra_driver_name = "f8-kasumi-picoxcell",
1634 .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1635 .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC,
1636 .cra_blocksize = 8,
1637 .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1638 .cra_type = &crypto_ablkcipher_type,
1639 .cra_module = THIS_MODULE,
1640 .cra_ablkcipher = {
1641 .setkey = spacc_kasumi_f8_setkey,
1642 .encrypt = spacc_ablk_encrypt,
1643 .decrypt = spacc_ablk_decrypt,
1644 .min_keysize = 16,
1645 .max_keysize = 16,
1646 .ivsize = 8,
1647 },
1648 .cra_init = spacc_ablk_cra_init,
1649 .cra_exit = spacc_ablk_cra_exit,
1650 },
1651 },
1652};
1653
1654static int __devinit spacc_probe(struct platform_device *pdev,
1655 unsigned max_ctxs, size_t cipher_pg_sz,
1656 size_t hash_pg_sz, size_t fifo_sz,
1657 struct spacc_alg *algs, size_t num_algs)
1658{
1659 int i, err, ret = -EINVAL;
1660 struct resource *mem, *irq;
1661 struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
1662 GFP_KERNEL);
1663 if (!engine)
1664 return -ENOMEM;
1665
1666 engine->max_ctxs = max_ctxs;
1667 engine->cipher_pg_sz = cipher_pg_sz;
1668 engine->hash_pg_sz = hash_pg_sz;
1669 engine->fifo_sz = fifo_sz;
1670 engine->algs = algs;
1671 engine->num_algs = num_algs;
1672 engine->name = dev_name(&pdev->dev);
1673
1674 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1675 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1676 if (!mem || !irq) {
1677 dev_err(&pdev->dev, "no memory/irq resource for engine\n");
1678 return -ENXIO;
1679 }
1680
1681 if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
1682 engine->name))
1683 return -ENOMEM;
1684
1685 engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
1686 if (!engine->regs) {
1687 dev_err(&pdev->dev, "memory map failed\n");
1688 return -ENOMEM;
1689 }
1690
1691 if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
1692 engine->name, engine)) {
1693		dev_err(&pdev->dev, "failed to request IRQ\n");
1694 return -EBUSY;
1695 }
1696
1697 engine->dev = &pdev->dev;
1698 engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
1699 engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
1700
1701 engine->req_pool = dmam_pool_create(engine->name, engine->dev,
1702 MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
1703 if (!engine->req_pool)
1704 return -ENOMEM;
1705
1706 spin_lock_init(&engine->hw_lock);
1707
1708 engine->clk = clk_get(&pdev->dev, NULL);
1709 if (IS_ERR(engine->clk)) {
1710 dev_info(&pdev->dev, "clk unavailable\n");
1712 return PTR_ERR(engine->clk);
1713 }
1714
1715 if (clk_enable(engine->clk)) {
1716 dev_info(&pdev->dev, "unable to enable clk\n");
1717 clk_put(engine->clk);
1718 return -EIO;
1719 }
1720
1721 err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1722 if (err) {
1723 clk_disable(engine->clk);
1724 clk_put(engine->clk);
1725 return err;
1726 }
1727
1729 /*
1730 * Use an IRQ threshold of 50% as a default. This seems to be a
1731	 * reasonable trade-off between latency and throughput but can be
1732 * changed at runtime.
1733 */
1734 engine->stat_irq_thresh = (engine->fifo_sz / 2);
1735
1736 /*
1737 * Configure the interrupts. We only use the STAT_CNT interrupt as we
1738 * only submit a new packet for processing when we complete another in
1739 * the queue. This minimizes time spent in the interrupt handler.
1740 */
1741 writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1742 engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1743 writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
1744 engine->regs + SPA_IRQ_EN_REG_OFFSET);
1745
1746 setup_timer(&engine->packet_timeout, spacc_packet_timeout,
1747 (unsigned long)engine);
1748
1749 INIT_LIST_HEAD(&engine->pending);
1750 INIT_LIST_HEAD(&engine->completed);
1751 INIT_LIST_HEAD(&engine->in_progress);
1752 engine->in_flight = 0;
1753 tasklet_init(&engine->complete, spacc_spacc_complete,
1754 (unsigned long)engine);
1755
1756 platform_set_drvdata(pdev, engine);
1757
1758 INIT_LIST_HEAD(&engine->registered_algs);
1759 for (i = 0; i < engine->num_algs; ++i) {
1760 engine->algs[i].engine = engine;
1761 err = crypto_register_alg(&engine->algs[i].alg);
1762		if (err) {
1763			dev_err(engine->dev, "failed to register alg \"%s\"\n",
1764				engine->algs[i].alg.cra_name);
1765			continue;
1766		}
1767
1768		list_add_tail(&engine->algs[i].entry,
1769			      &engine->registered_algs);
1770		ret = 0;
1771		dev_dbg(engine->dev, "registered alg \"%s\"\n",
1772			engine->algs[i].alg.cra_name);
1773 }
1774
1775 return ret;
1776}
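
spacc_probe() expects exactly one memory region and one IRQ from the platform device. A board file could instantiate the IPSEC engine roughly as in the sketch below; the base address and IRQ number are placeholders, not real picoXcell values.

    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static struct resource spacc_ipsec_resources[] = {
    	{
    		.start	= 0x80100000,		/* placeholder base */
    		.end	= 0x80100000 + 0xffff,
    		.flags	= IORESOURCE_MEM,
    	},
    	{
    		.start	= 24,			/* placeholder IRQ */
    		.end	= 24,
    		.flags	= IORESOURCE_IRQ,
    	},
    };

    static struct platform_device spacc_ipsec_device = {
    	.name		= "picoxcell-ipsec",	/* matches ipsec_driver */
    	.id		= -1,
    	.resource	= spacc_ipsec_resources,
    	.num_resources	= ARRAY_SIZE(spacc_ipsec_resources),
    };

    /* board init: platform_device_register(&spacc_ipsec_device); */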
1777
1778static int __devexit spacc_remove(struct platform_device *pdev)
1779{
1780 struct spacc_alg *alg, *next;
1781 struct spacc_engine *engine = platform_get_drvdata(pdev);
1782
1783 del_timer_sync(&engine->packet_timeout);
1784 device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
1785
1786 list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
1787 list_del(&alg->entry);
1788 crypto_unregister_alg(&alg->alg);
1789 }
1790
1791 clk_disable(engine->clk);
1792 clk_put(engine->clk);
1793
1794 return 0;
1795}
1796
1797static int __devinit ipsec_probe(struct platform_device *pdev)
1798{
1799 return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
1800 SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
1801 SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
1802 SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
1803 ARRAY_SIZE(ipsec_engine_algs));
1804}
1805
1806static struct platform_driver ipsec_driver = {
1807 .probe = ipsec_probe,
1808 .remove = __devexit_p(spacc_remove),
1809 .driver = {
1810 .name = "picoxcell-ipsec",
1811#ifdef CONFIG_PM
1812 .pm = &spacc_pm_ops,
1813#endif /* CONFIG_PM */
1814 },
1815};
1816
1817static int __devinit l2_probe(struct platform_device *pdev)
1818{
1819 return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
1820 SPACC_CRYPTO_L2_CIPHER_PG_SZ,
1821 SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
1822 l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
1823}
1824
1825static struct platform_driver l2_driver = {
1826 .probe = l2_probe,
1827 .remove = __devexit_p(spacc_remove),
1828 .driver = {
1829 .name = "picoxcell-l2",
1830#ifdef CONFIG_PM
1831 .pm = &spacc_pm_ops,
1832#endif /* CONFIG_PM */
1833 },
1834};
1835
1836static int __init spacc_init(void)
1837{
1838 int ret = platform_driver_register(&ipsec_driver);
1839 if (ret) {
1840		pr_err("failed to register ipsec spacc driver\n");
1841 goto out;
1842 }
1843
1844 ret = platform_driver_register(&l2_driver);
1845 if (ret) {
1846		pr_err("failed to register l2 spacc driver\n");
1847 goto l2_failed;
1848 }
1849
1850 return 0;
1851
1852l2_failed:
1853 platform_driver_unregister(&ipsec_driver);
1854out:
1855 return ret;
1856}
1857module_init(spacc_init);
1858
1859static void __exit spacc_exit(void)
1860{
1861 platform_driver_unregister(&ipsec_driver);
1862 platform_driver_unregister(&l2_driver);
1863}
1864module_exit(spacc_exit);
1865
1866MODULE_LICENSE("GPL");
1867MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h
new file mode 100644
index 00000000000..af93442564c
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto_regs.h
@@ -0,0 +1,128 @@
1/*
2 * Copyright (c) 2010 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef __PICOXCELL_CRYPTO_REGS_H__
19#define __PICOXCELL_CRYPTO_REGS_H__
20
21#define SPA_STATUS_OK 0
22#define SPA_STATUS_ICV_FAIL 1
23#define SPA_STATUS_MEMORY_ERROR 2
24#define SPA_STATUS_BLOCK_ERROR 3
25
26#define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16
27#define SPA_IRQ_STAT_STAT_MASK (1 << 4)
28#define SPA_FIFO_STAT_STAT_OFFSET 16
29#define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET)
30#define SPA_STATUS_RES_CODE_OFFSET 24
31#define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET)
32#define SPA_KEY_SZ_CTX_INDEX_OFFSET 8
33#define SPA_KEY_SZ_CIPHER_OFFSET 31
34
35#define SPA_IRQ_EN_REG_OFFSET 0x00000000
36#define SPA_IRQ_STAT_REG_OFFSET 0x00000004
37#define SPA_IRQ_CTRL_REG_OFFSET 0x00000008
38#define SPA_FIFO_STAT_REG_OFFSET 0x0000000C
39#define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010
40#define SPA_SRC_PTR_REG_OFFSET 0x00000020
41#define SPA_DST_PTR_REG_OFFSET 0x00000024
42#define SPA_OFFSET_REG_OFFSET 0x00000028
43#define SPA_AAD_LEN_REG_OFFSET 0x0000002C
44#define SPA_PROC_LEN_REG_OFFSET 0x00000030
45#define SPA_ICV_LEN_REG_OFFSET 0x00000034
46#define SPA_ICV_OFFSET_REG_OFFSET 0x00000038
47#define SPA_SW_CTRL_REG_OFFSET 0x0000003C
48#define SPA_CTRL_REG_OFFSET 0x00000040
49#define SPA_AUX_INFO_REG_OFFSET 0x0000004C
50#define SPA_STAT_POP_REG_OFFSET 0x00000050
51#define SPA_STATUS_REG_OFFSET 0x00000054
52#define SPA_KEY_SZ_REG_OFFSET 0x00000100
53#define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000
54#define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000
55#define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000
56
57#define SPA_IRQ_EN_REG_RESET 0x00000000
58#define SPA_IRQ_CTRL_REG_RESET 0x00000000
59#define SPA_FIFO_STAT_REG_RESET 0x00000000
60#define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000
61#define SPA_SRC_PTR_REG_RESET 0x00000000
62#define SPA_DST_PTR_REG_RESET 0x00000000
63#define SPA_OFFSET_REG_RESET 0x00000000
64#define SPA_AAD_LEN_REG_RESET 0x00000000
65#define SPA_PROC_LEN_REG_RESET 0x00000000
66#define SPA_ICV_LEN_REG_RESET 0x00000000
67#define SPA_ICV_OFFSET_REG_RESET 0x00000000
68#define SPA_SW_CTRL_REG_RESET 0x00000000
69#define SPA_CTRL_REG_RESET 0x00000000
70#define SPA_AUX_INFO_REG_RESET 0x00000000
71#define SPA_STAT_POP_REG_RESET 0x00000000
72#define SPA_STATUS_REG_RESET 0x00000000
73#define SPA_KEY_SZ_REG_RESET 0x00000000
74
75#define SPA_CTRL_HASH_ALG_IDX 4
76#define SPA_CTRL_CIPH_MODE_IDX 8
77#define SPA_CTRL_HASH_MODE_IDX 12
78#define SPA_CTRL_CTX_IDX 16
79#define SPA_CTRL_ENCRYPT_IDX 24
80#define SPA_CTRL_AAD_COPY 25
81#define SPA_CTRL_ICV_PT 26
82#define SPA_CTRL_ICV_ENC 27
83#define SPA_CTRL_ICV_APPEND 28
84#define SPA_CTRL_KEY_EXP 29
85
86#define SPA_KEY_SZ_CXT_IDX 8
87#define SPA_KEY_SZ_CIPHER_IDX 31
88
89#define SPA_IRQ_EN_CMD0_EN (1 << 0)
90#define SPA_IRQ_EN_STAT_EN (1 << 4)
91#define SPA_IRQ_EN_GLBL_EN (1 << 31)
92
93#define SPA_CTRL_CIPH_ALG_NULL 0x00
94#define SPA_CTRL_CIPH_ALG_DES 0x01
95#define SPA_CTRL_CIPH_ALG_AES 0x02
96#define SPA_CTRL_CIPH_ALG_RC4 0x03
97#define SPA_CTRL_CIPH_ALG_MULTI2 0x04
98#define SPA_CTRL_CIPH_ALG_KASUMI 0x05
99
100#define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX)
101#define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX)
102#define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX)
103#define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX)
104#define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX)
105#define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX)
106#define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX)
107#define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX)
108#define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX)
109#define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX)
110
111#define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX)
112#define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX)
113#define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX)
114#define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX)
115#define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX)
116#define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX)
117#define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX)
118#define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX)
119#define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX)
120
121#define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX)
122#define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX)
123#define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX)
124
125#define SPA_FIFO_STAT_EMPTY (1 << 31)
126#define SPA_FIFO_CMD_FULL (1 << 7)
127
128#endif /* __PICOXCELL_CRYPTO_REGS_H__ */
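
As an illustration of how the CTRL fields above combine (a sketch based only on the bit positions in this header, not code from the patch), an AES-CBC encrypt operation in hardware context 0 would be programmed roughly as follows; the "regs" pointer stands in for the mapped engine registers.

    #include <linux/io.h>
    #include <linux/types.h>

    static void demo_program_ctrl(void __iomem *regs)
    {
    	u32 ctrl = SPA_CTRL_CIPH_ALG_AES |	/* cipher algorithm */
    		   SPA_CTRL_CIPH_MODE_CBC |	/* cipher mode */
    		   SPA_CTRL_HASH_ALG_NULL |	/* cipher-only operation */
    		   (0 << SPA_CTRL_CTX_IDX) |	/* hardware context 0 */
    		   (1 << SPA_CTRL_ENCRYPT_IDX) |
    		   (1 << SPA_CTRL_KEY_EXP);	/* engine expands the key */

    	writel(ctrl, regs + SPA_CTRL_REG_OFFSET);
    }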