author     Grant Likely <grant.likely@secretlab.ca>  2010-05-22 02:36:56 -0400
committer  Grant Likely <grant.likely@secretlab.ca>  2010-05-22 02:36:56 -0400
commit     cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree       113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/crypto
parent     44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent     f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig      |   21
-rw-r--r--  drivers/crypto/Makefile     |    4
-rw-r--r--  drivers/crypto/geode-aes.c  |   36
-rw-r--r--  drivers/crypto/hifn_795x.c  |   18
-rw-r--r--  drivers/crypto/mv_cesa.c    |  692
-rw-r--r--  drivers/crypto/mv_cesa.h    |   40
-rw-r--r--  drivers/crypto/n2_asm.S     |   95
-rw-r--r--  drivers/crypto/n2_core.c    | 2083
-rw-r--r--  drivers/crypto/n2_core.h    |  231
-rw-r--r--  drivers/crypto/omap-sham.c  | 1259
-rw-r--r--  drivers/crypto/talitos.c    |  699
-rw-r--r--  drivers/crypto/talitos.h    |   12
12 files changed, 5039 insertions, 151 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1ca..fbf94cf496f0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
170 170
171 Currently the driver supports AES in ECB and CBC mode without DMA. 171 Currently the driver supports AES in ECB and CBC mode without DMA.
172 172
173config CRYPTO_DEV_NIAGARA2
174 tristate "Niagara2 Stream Processing Unit driver"
175 select CRYPTO_ALGAPI
176 depends on SPARC64
177 help
178 Each core of a Niagara2 processor contains a Stream
179 Processing Unit, which itself contains several cryptographic
180 sub-units. One set provides the Modular Arithmetic Unit,
181 used for SSL offload. The other set provides the Cipher
182 Group, which can perform encryption, decryption, hashing,
183 checksumming, and raw copies.
184
173config CRYPTO_DEV_HIFN_795X 185config CRYPTO_DEV_HIFN_795X
174 tristate "Driver HIFN 795x crypto accelerator chips" 186 tristate "Driver HIFN 795x crypto accelerator chips"
175 select CRYPTO_DES 187 select CRYPTO_DES
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
222 help 234 help
223 This option allows you to have support for AMCC crypto acceleration. 235 This option allows you to have support for AMCC crypto acceleration.
224 236
237config CRYPTO_DEV_OMAP_SHAM
238 tristate "Support for OMAP SHA1/MD5 hw accelerator"
239 depends on ARCH_OMAP2 || ARCH_OMAP3
240 select CRYPTO_SHA1
241 select CRYPTO_MD5
242 help
243 OMAP processors have SHA1/MD5 hw accelerator. Select this if you
244 want to use the OMAP module for SHA1/MD5 algorithms.
245
225endif # CRYPTO_HW 246endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f942..6dbbe00c4524 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@
1obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o 1obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
5n2_crypto-objs := n2_core.o n2_asm.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 6obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o 7obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
6obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 8obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
7obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 9obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
8obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ 10obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
11obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
12
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba691..09389dd2f96b 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
15#include <crypto/algapi.h> 15#include <crypto/algapi.h>
16#include <crypto/aes.h> 16#include <crypto/aes.h>
17 17
18#include <asm/io.h> 18#include <linux/io.h>
19#include <asm/delay.h> 19#include <linux/delay.h>
20 20
21#include "geode-aes.h" 21#include "geode-aes.h"
22 22
23/* Static structures */ 23/* Static structures */
24 24
25static void __iomem * _iobase; 25static void __iomem *_iobase;
26static spinlock_t lock; 26static spinlock_t lock;
27 27
28/* Write a 128 bit field (either a writable key or IV) */ 28/* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
30_writefield(u32 offset, void *value) 30_writefield(u32 offset, void *value)
31{ 31{
32 int i; 32 int i;
33 for(i = 0; i < 4; i++) 33 for (i = 0; i < 4; i++)
34 iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); 34 iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
35} 35}
36 36
@@ -39,7 +39,7 @@ static inline void
39_readfield(u32 offset, void *value) 39_readfield(u32 offset, void *value)
40{ 40{
41 int i; 41 int i;
42 for(i = 0; i < 4; i++) 42 for (i = 0; i < 4; i++)
43 ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4)); 43 ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
44} 44}
45 45
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
59 do { 59 do {
60 status = ioread32(_iobase + AES_INTR_REG); 60 status = ioread32(_iobase + AES_INTR_REG);
61 cpu_relax(); 61 cpu_relax();
62 } while(!(status & AES_INTRA_PENDING) && --counter); 62 } while (!(status & AES_INTRA_PENDING) && --counter);
63 63
64 /* Clear the event */ 64 /* Clear the event */
65 iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG); 65 iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
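The hunk above only touches whitespace, but the surrounding do_crypt() code is the usual bounded busy-wait on a status register. A minimal, hypothetical helper (not part of this patch) restating that pattern; the register and bit names are taken from the driver, everything else is illustrative:

#include <linux/io.h>
#include <linux/errno.h>
#include <asm/processor.h>	/* cpu_relax() */

/* Spin on a status register until the requested bit is set or the
 * retry budget (must be non-zero) runs out, as do_crypt() does with
 * AES_INTR_REG / AES_INTRA_PENDING.
 */
static int poll_status_bit(void __iomem *iobase, unsigned long reg, u32 bit,
			   unsigned int budget)
{
	u32 status;

	do {
		status = ioread32(iobase + reg);
		cpu_relax();
	} while (!(status & bit) && --budget);

	return (status & bit) ? 0 : -ETIMEDOUT;
}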
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
317 err = blkcipher_walk_virt(desc, &walk); 317 err = blkcipher_walk_virt(desc, &walk);
318 op->iv = walk.iv; 318 op->iv = walk.iv;
319 319
320 while((nbytes = walk.nbytes)) { 320 while ((nbytes = walk.nbytes)) {
321 op->src = walk.src.virt.addr, 321 op->src = walk.src.virt.addr,
322 op->dst = walk.dst.virt.addr; 322 op->dst = walk.dst.virt.addr;
323 op->mode = AES_MODE_CBC; 323 op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
349 err = blkcipher_walk_virt(desc, &walk); 349 err = blkcipher_walk_virt(desc, &walk);
350 op->iv = walk.iv; 350 op->iv = walk.iv;
351 351
352 while((nbytes = walk.nbytes)) { 352 while ((nbytes = walk.nbytes)) {
353 op->src = walk.src.virt.addr, 353 op->src = walk.src.virt.addr,
354 op->dst = walk.dst.virt.addr; 354 op->dst = walk.dst.virt.addr;
355 op->mode = AES_MODE_CBC; 355 op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
429 blkcipher_walk_init(&walk, dst, src, nbytes); 429 blkcipher_walk_init(&walk, dst, src, nbytes);
430 err = blkcipher_walk_virt(desc, &walk); 430 err = blkcipher_walk_virt(desc, &walk);
431 431
432 while((nbytes = walk.nbytes)) { 432 while ((nbytes = walk.nbytes)) {
433 op->src = walk.src.virt.addr, 433 op->src = walk.src.virt.addr,
434 op->dst = walk.dst.virt.addr; 434 op->dst = walk.dst.virt.addr;
435 op->mode = AES_MODE_ECB; 435 op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
459 blkcipher_walk_init(&walk, dst, src, nbytes); 459 blkcipher_walk_init(&walk, dst, src, nbytes);
460 err = blkcipher_walk_virt(desc, &walk); 460 err = blkcipher_walk_virt(desc, &walk);
461 461
462 while((nbytes = walk.nbytes)) { 462 while ((nbytes = walk.nbytes)) {
463 op->src = walk.src.virt.addr, 463 op->src = walk.src.virt.addr,
464 op->dst = walk.dst.virt.addr; 464 op->dst = walk.dst.virt.addr;
465 op->mode = AES_MODE_ECB; 465 op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
518geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) 518geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
519{ 519{
520 int ret; 520 int ret;
521 521 ret = pci_enable_device(dev);
522 if ((ret = pci_enable_device(dev))) 522 if (ret)
523 return ret; 523 return ret;
524 524
525 if ((ret = pci_request_regions(dev, "geode-aes"))) 525 ret = pci_request_regions(dev, "geode-aes");
526 if (ret)
526 goto eenable; 527 goto eenable;
527 528
528 _iobase = pci_iomap(dev, 0, 0); 529 _iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
537 /* Clear any pending activity */ 538 /* Clear any pending activity */
538 iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG); 539 iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
539 540
540 if ((ret = crypto_register_alg(&geode_alg))) 541 ret = crypto_register_alg(&geode_alg);
542 if (ret)
541 goto eiomap; 543 goto eiomap;
542 544
543 if ((ret = crypto_register_alg(&geode_ecb_alg))) 545 ret = crypto_register_alg(&geode_ecb_alg);
546 if (ret)
544 goto ealg; 547 goto ealg;
545 548
546 if ((ret = crypto_register_alg(&geode_cbc_alg))) 549 ret = crypto_register_alg(&geode_cbc_alg);
550 if (ret)
547 goto eecb; 551 goto eecb;
548 552
549 printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n"); 553 printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
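The probe conversion above splits assignments out of if-conditions while keeping the usual goto-based unwind. A condensed, hypothetical sketch of that register/unwind shape; the labels are illustrative and do not reuse the driver's eenable/eiomap/ealg/eecb names:

static int geode_register_algs(void)
{
	int ret;

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto out;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto err_unreg_base;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto err_unreg_ecb;

	return 0;

err_unreg_ecb:
	crypto_unregister_alg(&geode_ecb_alg);
err_unreg_base:
	crypto_unregister_alg(&geode_alg);
out:
	return ret;
}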
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b1713b54..16fce3aadf4d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@ struct hifn_crypto_alg
638 638
639#define ASYNC_FLAGS_MISALIGNED (1<<0) 639#define ASYNC_FLAGS_MISALIGNED (1<<0)
640 640
641struct ablkcipher_walk 641struct hifn_cipher_walk
642{ 642{
643 struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; 643 struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
644 u32 flags; 644 u32 flags;
@@ -657,7 +657,7 @@ struct hifn_request_context
657 u8 *iv; 657 u8 *iv;
658 unsigned int ivsize; 658 unsigned int ivsize;
659 u8 op, type, mode, unused; 659 u8 op, type, mode, unused;
660 struct ablkcipher_walk walk; 660 struct hifn_cipher_walk walk;
661}; 661};
662 662
663#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) 663#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
1417 return 0; 1417 return 0;
1418} 1418}
1419 1419
1420static int ablkcipher_walk_init(struct ablkcipher_walk *w, 1420static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
1421 int num, gfp_t gfp_flags) 1421 int num, gfp_t gfp_flags)
1422{ 1422{
1423 int i; 1423 int i;
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
1442 return i; 1442 return i;
1443} 1443}
1444 1444
1445static void ablkcipher_walk_exit(struct ablkcipher_walk *w) 1445static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
1446{ 1446{
1447 int i; 1447 int i;
1448 1448
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
1486 return idx; 1486 return idx;
1487} 1487}
1488 1488
1489static int ablkcipher_walk(struct ablkcipher_request *req, 1489static int hifn_cipher_walk(struct ablkcipher_request *req,
1490 struct ablkcipher_walk *w) 1490 struct hifn_cipher_walk *w)
1491{ 1491{
1492 struct scatterlist *dst, *t; 1492 struct scatterlist *dst, *t;
1493 unsigned int nbytes = req->nbytes, offset, copy, diff; 1493 unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
1600 } 1600 }
1601 1601
1602 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { 1602 if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1603 err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); 1603 err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
1604 if (err < 0) 1604 if (err < 0)
1605 return err; 1605 return err;
1606 } 1606 }
1607 1607
1608 sg_num = ablkcipher_walk(req, &rctx->walk); 1608 sg_num = hifn_cipher_walk(req, &rctx->walk);
1609 if (sg_num < 0) { 1609 if (sg_num < 0) {
1610 err = sg_num; 1610 err = sg_num;
1611 goto err_out_exit; 1611 goto err_out_exit;
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
1806 kunmap_atomic(saddr, KM_SOFTIRQ0); 1806 kunmap_atomic(saddr, KM_SOFTIRQ0);
1807 } 1807 }
1808 1808
1809 ablkcipher_walk_exit(&rctx->walk); 1809 hifn_cipher_walk_exit(&rctx->walk);
1810 } 1810 }
1811 1811
1812 req->base.complete(&req->base, error); 1812 req->base.complete(&req->base, error);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc43..e095422b58dd 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <crypto/internal/hash.h>
19#include <crypto/sha.h>
18 20
19#include "mv_cesa.h" 21#include "mv_cesa.h"
22
23#define MV_CESA "MV-CESA:"
24#define MAX_HW_HASH_SIZE 0xFFFF
25
20/* 26/*
21 * STM: 27 * STM:
22 * /---------------------------------------\ 28 * /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
39 * @dst_sg_it: sg iterator for dst 45 * @dst_sg_it: sg iterator for dst
40 * @sg_src_left: bytes left in src to process (scatter list) 46 * @sg_src_left: bytes left in src to process (scatter list)
41 * @src_start: offset to add to src start position (scatter list) 47 * @src_start: offset to add to src start position (scatter list)
42 * @crypt_len: length of current crypt process 48 * @crypt_len: length of current hw crypt/hash process
49 * @hw_nbytes: total bytes to process in hw for this request
50 * @copy_back: whether to copy data back (crypt) or not (hash)
43 * @sg_dst_left: bytes left dst to process in this scatter list 51 * @sg_dst_left: bytes left dst to process in this scatter list
44 * @dst_start: offset to add to dst start position (scatter list) 52 * @dst_start: offset to add to dst start position (scatter list)
45 * @total_req_bytes: total number of bytes processed (request). 53 * @hw_processed_bytes: number of bytes processed by hw (request).
46 * 54 *
47 * sg helper are used to iterate over the scatterlist. Since the size of the 55 * sg helper are used to iterate over the scatterlist. Since the size of the
48 * SRAM may be less than the scatter size, this struct struct is used to keep 56 * SRAM may be less than the scatter size, this struct struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
51struct req_progress { 59struct req_progress {
52 struct sg_mapping_iter src_sg_it; 60 struct sg_mapping_iter src_sg_it;
53 struct sg_mapping_iter dst_sg_it; 61 struct sg_mapping_iter dst_sg_it;
62 void (*complete) (void);
63 void (*process) (int is_first);
54 64
55 /* src mostly */ 65 /* src mostly */
56 int sg_src_left; 66 int sg_src_left;
57 int src_start; 67 int src_start;
58 int crypt_len; 68 int crypt_len;
69 int hw_nbytes;
59 /* dst mostly */ 70 /* dst mostly */
71 int copy_back;
60 int sg_dst_left; 72 int sg_dst_left;
61 int dst_start; 73 int dst_start;
62 int total_req_bytes; 74 int hw_processed_bytes;
63}; 75};
64 76
65struct crypto_priv { 77struct crypto_priv {
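The complete/process callbacks and the hw_nbytes/hw_processed_bytes counters added to struct req_progress above let one progress structure drive both cipher and hash requests. A schematic of the chunking loop they imply -- not the driver's literal dequeue_complete_req(), which additionally copies cipher output back to the destination scatterlist:

/* Each completion interrupt accounts for the chunk that was in SRAM,
 * then either feeds the next chunk or finishes the request.
 */
static void advance_request(struct req_progress *p)
{
	p->hw_processed_bytes += p->crypt_len;

	if (p->hw_processed_bytes < p->hw_nbytes)
		p->process(0);		/* submit next SRAM-sized chunk */
	else
		p->complete();		/* cipher: save IV; hash: read digest */
}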
@@ -72,10 +84,12 @@ struct crypto_priv {
72 spinlock_t lock; 84 spinlock_t lock;
73 struct crypto_queue queue; 85 struct crypto_queue queue;
74 enum engine_status eng_st; 86 enum engine_status eng_st;
75 struct ablkcipher_request *cur_req; 87 struct crypto_async_request *cur_req;
76 struct req_progress p; 88 struct req_progress p;
77 int max_req_size; 89 int max_req_size;
78 int sram_size; 90 int sram_size;
91 int has_sha1;
92 int has_hmac_sha1;
79}; 93};
80 94
81static struct crypto_priv *cpg; 95static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
97 int decrypt; 111 int decrypt;
98}; 112};
99 113
114enum hash_op {
115 COP_SHA1,
116 COP_HMAC_SHA1
117};
118
119struct mv_tfm_hash_ctx {
120 struct crypto_shash *fallback;
121 struct crypto_shash *base_hash;
122 u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
123 int count_add;
124 enum hash_op op;
125};
126
127struct mv_req_hash_ctx {
128 u64 count;
129 u32 state[SHA1_DIGEST_SIZE / 4];
130 u8 buffer[SHA1_BLOCK_SIZE];
131 int first_hash; /* marks that we don't have previous state */
132 int last_chunk; /* marks that this is the 'final' request */
133 int extra_bytes; /* unprocessed bytes in buffer */
134 enum hash_op op;
135 int count_add;
136 struct scatterlist dummysg;
137};
138
100static void compute_aes_dec_key(struct mv_ctx *ctx) 139static void compute_aes_dec_key(struct mv_ctx *ctx)
101{ 140{
102 struct crypto_aes_ctx gen_aes_key; 141 struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
144 return 0; 183 return 0;
145} 184}
146 185
147static void setup_data_in(struct ablkcipher_request *req) 186static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
148{ 187{
149 int ret; 188 int ret;
150 void *buf; 189 void *sbuf;
190 int copied = 0;
151 191
152 if (!cpg->p.sg_src_left) { 192 while (1) {
153 ret = sg_miter_next(&cpg->p.src_sg_it); 193 if (!p->sg_src_left) {
154 BUG_ON(!ret); 194 ret = sg_miter_next(&p->src_sg_it);
155 cpg->p.sg_src_left = cpg->p.src_sg_it.length; 195 BUG_ON(!ret);
156 cpg->p.src_start = 0; 196 p->sg_src_left = p->src_sg_it.length;
157 } 197 p->src_start = 0;
158 198 }
159 cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
160
161 buf = cpg->p.src_sg_it.addr;
162 buf += cpg->p.src_start;
163 199
164 memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len); 200 sbuf = p->src_sg_it.addr + p->src_start;
201
202 if (p->sg_src_left <= len - copied) {
203 memcpy(dbuf + copied, sbuf, p->sg_src_left);
204 copied += p->sg_src_left;
205 p->sg_src_left = 0;
206 if (copied >= len)
207 break;
208 } else {
209 int copy_len = len - copied;
210 memcpy(dbuf + copied, sbuf, copy_len);
211 p->src_start += copy_len;
212 p->sg_src_left -= copy_len;
213 break;
214 }
215 }
216}
165 217
166 cpg->p.sg_src_left -= cpg->p.crypt_len; 218static void setup_data_in(void)
167 cpg->p.src_start += cpg->p.crypt_len; 219{
220 struct req_progress *p = &cpg->p;
221 int data_in_sram =
222 min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
223 copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
224 data_in_sram - p->crypt_len);
225 p->crypt_len = data_in_sram;
168} 226}
169 227
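copy_src_to_buf() above leans on the sg_mapping_iter helpers to pull arbitrary-sized bites out of a scatterlist. A minimal illustration of that API (not from the patch, and simplified: unlike the driver it does not preserve its position across calls via src_start/sg_src_left):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Copy up to buf_len bytes out of a scatterlist into a flat buffer. */
static size_t sg_copy_flat(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buf_len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (copied < buf_len && sg_miter_next(&miter)) {
		size_t n = min(miter.length, buf_len - copied);

		memcpy(buf + copied, miter.addr, n);
		copied += n;
	}
	sg_miter_stop(&miter);

	return copied;
}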
170static void mv_process_current_q(int first_block) 228static void mv_process_current_q(int first_block)
171{ 229{
172 struct ablkcipher_request *req = cpg->cur_req; 230 struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
173 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 231 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
174 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 232 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
175 struct sec_accel_config op; 233 struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
179 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; 237 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
180 break; 238 break;
181 case COP_AES_CBC: 239 case COP_AES_CBC:
240 default:
182 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; 241 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
183 op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | 242 op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
184 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); 243 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
211 ENC_P_DST(SRAM_DATA_OUT_START); 270 ENC_P_DST(SRAM_DATA_OUT_START);
212 op.enc_key_p = SRAM_DATA_KEY_P; 271 op.enc_key_p = SRAM_DATA_KEY_P;
213 272
214 setup_data_in(req); 273 setup_data_in();
215 op.enc_len = cpg->p.crypt_len; 274 op.enc_len = cpg->p.crypt_len;
216 memcpy(cpg->sram + SRAM_CONFIG, &op, 275 memcpy(cpg->sram + SRAM_CONFIG, &op,
217 sizeof(struct sec_accel_config)); 276 sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
228 287
229static void mv_crypto_algo_completion(void) 288static void mv_crypto_algo_completion(void)
230{ 289{
231 struct ablkcipher_request *req = cpg->cur_req; 290 struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
232 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 291 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
233 292
293 sg_miter_stop(&cpg->p.src_sg_it);
294 sg_miter_stop(&cpg->p.dst_sg_it);
295
234 if (req_ctx->op != COP_AES_CBC) 296 if (req_ctx->op != COP_AES_CBC)
235 return ; 297 return ;
236 298
237 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); 299 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
238} 300}
239 301
302static void mv_process_hash_current(int first_block)
303{
304 struct ahash_request *req = ahash_request_cast(cpg->cur_req);
305 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
306 struct req_progress *p = &cpg->p;
307 struct sec_accel_config op = { 0 };
308 int is_last;
309
310 switch (req_ctx->op) {
311 case COP_SHA1:
312 default:
313 op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
314 break;
315 case COP_HMAC_SHA1:
316 op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
317 break;
318 }
319
320 op.mac_src_p =
321 MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
322 req_ctx->
323 count);
324
325 setup_data_in();
326
327 op.mac_digest =
328 MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
329 op.mac_iv =
330 MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
331 MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
332
333 is_last = req_ctx->last_chunk
334 && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
335 && (req_ctx->count <= MAX_HW_HASH_SIZE);
336 if (req_ctx->first_hash) {
337 if (is_last)
338 op.config |= CFG_NOT_FRAG;
339 else
340 op.config |= CFG_FIRST_FRAG;
341
342 req_ctx->first_hash = 0;
343 } else {
344 if (is_last)
345 op.config |= CFG_LAST_FRAG;
346 else
347 op.config |= CFG_MID_FRAG;
348 }
349
350 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
351
352 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
353 /* GO */
354 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
355
356 /*
357 * XXX: add timer if the interrupt does not occur for some mystery
358 * reason
359 */
360}
361
362static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
363 struct shash_desc *desc)
364{
365 int i;
366 struct sha1_state shash_state;
367
368 shash_state.count = ctx->count + ctx->count_add;
369 for (i = 0; i < 5; i++)
370 shash_state.state[i] = ctx->state[i];
371 memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
372 return crypto_shash_import(desc, &shash_state);
373}
374
375static int mv_hash_final_fallback(struct ahash_request *req)
376{
377 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
378 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
379 struct {
380 struct shash_desc shash;
381 char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
382 } desc;
383 int rc;
384
385 desc.shash.tfm = tfm_ctx->fallback;
386 desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
387 if (unlikely(req_ctx->first_hash)) {
388 crypto_shash_init(&desc.shash);
389 crypto_shash_update(&desc.shash, req_ctx->buffer,
390 req_ctx->extra_bytes);
391 } else {
392 /* only SHA1 for now....
393 */
394 rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
395 if (rc)
396 goto out;
397 }
398 rc = crypto_shash_final(&desc.shash, req->result);
399out:
400 return rc;
401}
402
403static void mv_hash_algo_completion(void)
404{
405 struct ahash_request *req = ahash_request_cast(cpg->cur_req);
406 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
407
408 if (ctx->extra_bytes)
409 copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
410 sg_miter_stop(&cpg->p.src_sg_it);
411
412 ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
413 ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
414 ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
415 ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
416 ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
417
418 if (likely(ctx->last_chunk)) {
419 if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
420 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
421 crypto_ahash_digestsize(crypto_ahash_reqtfm
422 (req)));
423 } else
424 mv_hash_final_fallback(req);
425 }
426}
427
240static void dequeue_complete_req(void) 428static void dequeue_complete_req(void)
241{ 429{
242 struct ablkcipher_request *req = cpg->cur_req; 430 struct crypto_async_request *req = cpg->cur_req;
243 void *buf; 431 void *buf;
244 int ret; 432 int ret;
433 cpg->p.hw_processed_bytes += cpg->p.crypt_len;
434 if (cpg->p.copy_back) {
435 int need_copy_len = cpg->p.crypt_len;
436 int sram_offset = 0;
437 do {
438 int dst_copy;
439
440 if (!cpg->p.sg_dst_left) {
441 ret = sg_miter_next(&cpg->p.dst_sg_it);
442 BUG_ON(!ret);
443 cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
444 cpg->p.dst_start = 0;
445 }
245 446
246 cpg->p.total_req_bytes += cpg->p.crypt_len; 447 buf = cpg->p.dst_sg_it.addr;
247 do { 448 buf += cpg->p.dst_start;
248 int dst_copy;
249
250 if (!cpg->p.sg_dst_left) {
251 ret = sg_miter_next(&cpg->p.dst_sg_it);
252 BUG_ON(!ret);
253 cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
254 cpg->p.dst_start = 0;
255 }
256
257 buf = cpg->p.dst_sg_it.addr;
258 buf += cpg->p.dst_start;
259 449
260 dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left); 450 dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
261 451
262 memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy); 452 memcpy(buf,
453 cpg->sram + SRAM_DATA_OUT_START + sram_offset,
454 dst_copy);
455 sram_offset += dst_copy;
456 cpg->p.sg_dst_left -= dst_copy;
457 need_copy_len -= dst_copy;
458 cpg->p.dst_start += dst_copy;
459 } while (need_copy_len > 0);
460 }
263 461
264 cpg->p.sg_dst_left -= dst_copy; 462 cpg->p.crypt_len = 0;
265 cpg->p.crypt_len -= dst_copy;
266 cpg->p.dst_start += dst_copy;
267 } while (cpg->p.crypt_len > 0);
268 463
269 BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); 464 BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
270 if (cpg->p.total_req_bytes < req->nbytes) { 465 if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
271 /* process next scatter list entry */ 466 /* process next scatter list entry */
272 cpg->eng_st = ENGINE_BUSY; 467 cpg->eng_st = ENGINE_BUSY;
273 mv_process_current_q(0); 468 cpg->p.process(0);
274 } else { 469 } else {
275 sg_miter_stop(&cpg->p.src_sg_it); 470 cpg->p.complete();
276 sg_miter_stop(&cpg->p.dst_sg_it);
277 mv_crypto_algo_completion();
278 cpg->eng_st = ENGINE_IDLE; 471 cpg->eng_st = ENGINE_IDLE;
279 req->base.complete(&req->base, 0); 472 local_bh_disable();
473 req->complete(req, 0);
474 local_bh_enable();
280 } 475 }
281} 476}
282 477
283static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) 478static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
284{ 479{
285 int i = 0; 480 int i = 0;
286 481 size_t cur_len;
287 do { 482
288 total_bytes -= sl[i].length; 483 while (1) {
289 i++; 484 cur_len = sl[i].length;
290 485 ++i;
291 } while (total_bytes > 0); 486 if (total_bytes > cur_len)
487 total_bytes -= cur_len;
488 else
489 break;
490 }
292 491
293 return i; 492 return i;
294} 493}
295 494
296static void mv_enqueue_new_req(struct ablkcipher_request *req) 495static void mv_start_new_crypt_req(struct ablkcipher_request *req)
297{ 496{
497 struct req_progress *p = &cpg->p;
298 int num_sgs; 498 int num_sgs;
299 499
300 cpg->cur_req = req; 500 cpg->cur_req = &req->base;
301 memset(&cpg->p, 0, sizeof(struct req_progress)); 501 memset(p, 0, sizeof(struct req_progress));
502 p->hw_nbytes = req->nbytes;
503 p->complete = mv_crypto_algo_completion;
504 p->process = mv_process_current_q;
505 p->copy_back = 1;
302 506
303 num_sgs = count_sgs(req->src, req->nbytes); 507 num_sgs = count_sgs(req->src, req->nbytes);
304 sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); 508 sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
305 509
306 num_sgs = count_sgs(req->dst, req->nbytes); 510 num_sgs = count_sgs(req->dst, req->nbytes);
307 sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); 511 sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
512
308 mv_process_current_q(1); 513 mv_process_current_q(1);
309} 514}
310 515
516static void mv_start_new_hash_req(struct ahash_request *req)
517{
518 struct req_progress *p = &cpg->p;
519 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
520 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
521 int num_sgs, hw_bytes, old_extra_bytes, rc;
522 cpg->cur_req = &req->base;
523 memset(p, 0, sizeof(struct req_progress));
524 hw_bytes = req->nbytes + ctx->extra_bytes;
525 old_extra_bytes = ctx->extra_bytes;
526
527 if (unlikely(ctx->extra_bytes)) {
528 memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
529 ctx->extra_bytes);
530 p->crypt_len = ctx->extra_bytes;
531 }
532
533 memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
534
535 if (unlikely(!ctx->first_hash)) {
536 writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
537 writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
538 writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
539 writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
540 writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
541 }
542
543 ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
544 if (ctx->extra_bytes != 0
545 && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
546 hw_bytes -= ctx->extra_bytes;
547 else
548 ctx->extra_bytes = 0;
549
550 num_sgs = count_sgs(req->src, req->nbytes);
551 sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
552
553 if (hw_bytes) {
554 p->hw_nbytes = hw_bytes;
555 p->complete = mv_hash_algo_completion;
556 p->process = mv_process_hash_current;
557
558 mv_process_hash_current(1);
559 } else {
560 copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
561 ctx->extra_bytes - old_extra_bytes);
562 sg_miter_stop(&p->src_sg_it);
563 if (ctx->last_chunk)
564 rc = mv_hash_final_fallback(req);
565 else
566 rc = 0;
567 cpg->eng_st = ENGINE_IDLE;
568 local_bh_disable();
569 req->base.complete(&req->base, rc);
570 local_bh_enable();
571 }
572}
573
311static int queue_manag(void *data) 574static int queue_manag(void *data)
312{ 575{
313 cpg->eng_st = ENGINE_IDLE; 576 cpg->eng_st = ENGINE_IDLE;
314 do { 577 do {
315 struct ablkcipher_request *req;
316 struct crypto_async_request *async_req = NULL; 578 struct crypto_async_request *async_req = NULL;
317 struct crypto_async_request *backlog; 579 struct crypto_async_request *backlog;
318 580
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
338 } 600 }
339 601
340 if (async_req) { 602 if (async_req) {
341 req = container_of(async_req, 603 if (async_req->tfm->__crt_alg->cra_type !=
342 struct ablkcipher_request, base); 604 &crypto_ahash_type) {
343 mv_enqueue_new_req(req); 605 struct ablkcipher_request *req =
606 container_of(async_req,
607 struct ablkcipher_request,
608 base);
609 mv_start_new_crypt_req(req);
610 } else {
611 struct ahash_request *req =
612 ahash_request_cast(async_req);
613 mv_start_new_hash_req(req);
614 }
344 async_req = NULL; 615 async_req = NULL;
345 } 616 }
346 617
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
350 return 0; 621 return 0;
351} 622}
352 623
353static int mv_handle_req(struct ablkcipher_request *req) 624static int mv_handle_req(struct crypto_async_request *req)
354{ 625{
355 unsigned long flags; 626 unsigned long flags;
356 int ret; 627 int ret;
357 628
358 spin_lock_irqsave(&cpg->lock, flags); 629 spin_lock_irqsave(&cpg->lock, flags);
359 ret = ablkcipher_enqueue_request(&cpg->queue, req); 630 ret = crypto_enqueue_request(&cpg->queue, req);
360 spin_unlock_irqrestore(&cpg->lock, flags); 631 spin_unlock_irqrestore(&cpg->lock, flags);
361 wake_up_process(cpg->queue_th); 632 wake_up_process(cpg->queue_th);
362 return ret; 633 return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
369 req_ctx->op = COP_AES_ECB; 640 req_ctx->op = COP_AES_ECB;
370 req_ctx->decrypt = 0; 641 req_ctx->decrypt = 0;
371 642
372 return mv_handle_req(req); 643 return mv_handle_req(&req->base);
373} 644}
374 645
375static int mv_dec_aes_ecb(struct ablkcipher_request *req) 646static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
381 req_ctx->decrypt = 1; 652 req_ctx->decrypt = 1;
382 653
383 compute_aes_dec_key(ctx); 654 compute_aes_dec_key(ctx);
384 return mv_handle_req(req); 655 return mv_handle_req(&req->base);
385} 656}
386 657
387static int mv_enc_aes_cbc(struct ablkcipher_request *req) 658static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
391 req_ctx->op = COP_AES_CBC; 662 req_ctx->op = COP_AES_CBC;
392 req_ctx->decrypt = 0; 663 req_ctx->decrypt = 0;
393 664
394 return mv_handle_req(req); 665 return mv_handle_req(&req->base);
395} 666}
396 667
397static int mv_dec_aes_cbc(struct ablkcipher_request *req) 668static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
403 req_ctx->decrypt = 1; 674 req_ctx->decrypt = 1;
404 675
405 compute_aes_dec_key(ctx); 676 compute_aes_dec_key(ctx);
406 return mv_handle_req(req); 677 return mv_handle_req(&req->base);
407} 678}
408 679
409static int mv_cra_init(struct crypto_tfm *tfm) 680static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
412 return 0; 683 return 0;
413} 684}
414 685
686static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
687 int is_last, unsigned int req_len,
688 int count_add)
689{
690 memset(ctx, 0, sizeof(*ctx));
691 ctx->op = op;
692 ctx->count = req_len;
693 ctx->first_hash = 1;
694 ctx->last_chunk = is_last;
695 ctx->count_add = count_add;
696}
697
698static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
699 unsigned req_len)
700{
701 ctx->last_chunk = is_last;
702 ctx->count += req_len;
703}
704
705static int mv_hash_init(struct ahash_request *req)
706{
707 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
708 mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
709 tfm_ctx->count_add);
710 return 0;
711}
712
713static int mv_hash_update(struct ahash_request *req)
714{
715 if (!req->nbytes)
716 return 0;
717
718 mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
719 return mv_handle_req(&req->base);
720}
721
722static int mv_hash_final(struct ahash_request *req)
723{
724 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
725 /* dummy buffer of 4 bytes */
726 sg_init_one(&ctx->dummysg, ctx->buffer, 4);
727 /* I think I'm allowed to do that... */
728 ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
729 mv_update_hash_req_ctx(ctx, 1, 0);
730 return mv_handle_req(&req->base);
731}
732
733static int mv_hash_finup(struct ahash_request *req)
734{
735 if (!req->nbytes)
736 return mv_hash_final(req);
737
738 mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
739 return mv_handle_req(&req->base);
740}
741
742static int mv_hash_digest(struct ahash_request *req)
743{
744 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
745 mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
746 req->nbytes, tfm_ctx->count_add);
747 return mv_handle_req(&req->base);
748}
749
750static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
751 const void *ostate)
752{
753 const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
754 int i;
755 for (i = 0; i < 5; i++) {
756 ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
757 ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
758 }
759}
760
761static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
762 unsigned int keylen)
763{
764 int rc;
765 struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
766 int bs, ds, ss;
767
768 if (!ctx->base_hash)
769 return 0;
770
771 rc = crypto_shash_setkey(ctx->fallback, key, keylen);
772 if (rc)
773 return rc;
774
775 /* Can't see a way to extract the ipad/opad from the fallback tfm
776 so I'm basically copying code from the hmac module */
777 bs = crypto_shash_blocksize(ctx->base_hash);
778 ds = crypto_shash_digestsize(ctx->base_hash);
779 ss = crypto_shash_statesize(ctx->base_hash);
780
781 {
782 struct {
783 struct shash_desc shash;
784 char ctx[crypto_shash_descsize(ctx->base_hash)];
785 } desc;
786 unsigned int i;
787 char ipad[ss];
788 char opad[ss];
789
790 desc.shash.tfm = ctx->base_hash;
791 desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
792 CRYPTO_TFM_REQ_MAY_SLEEP;
793
794 if (keylen > bs) {
795 int err;
796
797 err =
798 crypto_shash_digest(&desc.shash, key, keylen, ipad);
799 if (err)
800 return err;
801
802 keylen = ds;
803 } else
804 memcpy(ipad, key, keylen);
805
806 memset(ipad + keylen, 0, bs - keylen);
807 memcpy(opad, ipad, bs);
808
809 for (i = 0; i < bs; i++) {
810 ipad[i] ^= 0x36;
811 opad[i] ^= 0x5c;
812 }
813
814 rc = crypto_shash_init(&desc.shash) ? :
815 crypto_shash_update(&desc.shash, ipad, bs) ? :
816 crypto_shash_export(&desc.shash, ipad) ? :
817 crypto_shash_init(&desc.shash) ? :
818 crypto_shash_update(&desc.shash, opad, bs) ? :
819 crypto_shash_export(&desc.shash, opad);
820
821 if (rc == 0)
822 mv_hash_init_ivs(ctx, ipad, opad);
823
824 return rc;
825 }
826}
827
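The setkey path above rebuilds the standard HMAC key schedule by hand: the (possibly pre-hashed) key is padded to the block size, XORed with 0x36 and 0x5c, run through the base hash, and the two exported partial states become the engine's inner/outer IVs. A standalone sketch of just the pad derivation (RFC 2104 style; the hashing steps are omitted, and keys longer than the block size must already have been reduced, as the driver does with crypto_shash_digest()):

#include <stdint.h>
#include <string.h>

/* Derive the HMAC inner/outer pads for a key that fits the block size. */
static void hmac_pads(const uint8_t *key, size_t keylen, size_t block_size,
		      uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	memset(ipad, 0, block_size);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, block_size);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}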
828static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
829 enum hash_op op, int count_add)
830{
831 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
832 struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
833 struct crypto_shash *fallback_tfm = NULL;
834 struct crypto_shash *base_hash = NULL;
835 int err = -ENOMEM;
836
837 ctx->op = op;
838 ctx->count_add = count_add;
839
840 /* Allocate a fallback and abort if it failed. */
841 fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
842 CRYPTO_ALG_NEED_FALLBACK);
843 if (IS_ERR(fallback_tfm)) {
844 printk(KERN_WARNING MV_CESA
845 "Fallback driver '%s' could not be loaded!\n",
846 fallback_driver_name);
847 err = PTR_ERR(fallback_tfm);
848 goto out;
849 }
850 ctx->fallback = fallback_tfm;
851
852 if (base_hash_name) {
853 /* Allocate a hash to compute the ipad/opad of hmac. */
854 base_hash = crypto_alloc_shash(base_hash_name, 0,
855 CRYPTO_ALG_NEED_FALLBACK);
856 if (IS_ERR(base_hash)) {
857 printk(KERN_WARNING MV_CESA
858 "Base driver '%s' could not be loaded!\n",
859 base_hash_name);
860 err = PTR_ERR(fallback_tfm);
861 goto err_bad_base;
862 }
863 }
864 ctx->base_hash = base_hash;
865
866 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
867 sizeof(struct mv_req_hash_ctx) +
868 crypto_shash_descsize(ctx->fallback));
869 return 0;
870err_bad_base:
871 crypto_free_shash(fallback_tfm);
872out:
873 return err;
874}
875
876static void mv_cra_hash_exit(struct crypto_tfm *tfm)
877{
878 struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
879
880 crypto_free_shash(ctx->fallback);
881 if (ctx->base_hash)
882 crypto_free_shash(ctx->base_hash);
883}
884
885static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
886{
887 return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
888}
889
890static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
891{
892 return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
893}
894
415irqreturn_t crypto_int(int irq, void *priv) 895irqreturn_t crypto_int(int irq, void *priv)
416{ 896{
417 u32 val; 897 u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
474 }, 954 },
475}; 955};
476 956
957struct ahash_alg mv_sha1_alg = {
958 .init = mv_hash_init,
959 .update = mv_hash_update,
960 .final = mv_hash_final,
961 .finup = mv_hash_finup,
962 .digest = mv_hash_digest,
963 .halg = {
964 .digestsize = SHA1_DIGEST_SIZE,
965 .base = {
966 .cra_name = "sha1",
967 .cra_driver_name = "mv-sha1",
968 .cra_priority = 300,
969 .cra_flags =
970 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
971 .cra_blocksize = SHA1_BLOCK_SIZE,
972 .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
973 .cra_init = mv_cra_hash_sha1_init,
974 .cra_exit = mv_cra_hash_exit,
975 .cra_module = THIS_MODULE,
976 }
977 }
978};
979
980struct ahash_alg mv_hmac_sha1_alg = {
981 .init = mv_hash_init,
982 .update = mv_hash_update,
983 .final = mv_hash_final,
984 .finup = mv_hash_finup,
985 .digest = mv_hash_digest,
986 .setkey = mv_hash_setkey,
987 .halg = {
988 .digestsize = SHA1_DIGEST_SIZE,
989 .base = {
990 .cra_name = "hmac(sha1)",
991 .cra_driver_name = "mv-hmac-sha1",
992 .cra_priority = 300,
993 .cra_flags =
994 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
995 .cra_blocksize = SHA1_BLOCK_SIZE,
996 .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
997 .cra_init = mv_cra_hash_hmac_sha1_init,
998 .cra_exit = mv_cra_hash_exit,
999 .cra_module = THIS_MODULE,
1000 }
1001 }
1002};
1003
477static int mv_probe(struct platform_device *pdev) 1004static int mv_probe(struct platform_device *pdev)
478{ 1005{
479 struct crypto_priv *cp; 1006 struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
482 int ret; 1009 int ret;
483 1010
484 if (cpg) { 1011 if (cpg) {
485 printk(KERN_ERR "Second crypto dev?\n"); 1012 printk(KERN_ERR MV_CESA "Second crypto dev?\n");
486 return -EEXIST; 1013 return -EEXIST;
487 } 1014 }
488 1015
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev)
496 1023
497 spin_lock_init(&cp->lock); 1024 spin_lock_init(&cp->lock);
498 crypto_init_queue(&cp->queue, 50); 1025 crypto_init_queue(&cp->queue, 50);
499 cp->reg = ioremap(res->start, res->end - res->start + 1); 1026 cp->reg = ioremap(res->start, resource_size(res));
500 if (!cp->reg) { 1027 if (!cp->reg) {
501 ret = -ENOMEM; 1028 ret = -ENOMEM;
502 goto err; 1029 goto err;
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev)
507 ret = -ENXIO; 1034 ret = -ENXIO;
508 goto err_unmap_reg; 1035 goto err_unmap_reg;
509 } 1036 }
510 cp->sram_size = res->end - res->start + 1; 1037 cp->sram_size = resource_size(res);
511 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; 1038 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
512 cp->sram = ioremap(res->start, cp->sram_size); 1039 cp->sram = ioremap(res->start, cp->sram_size);
513 if (!cp->sram) { 1040 if (!cp->sram) {
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
546 ret = crypto_register_alg(&mv_aes_alg_cbc); 1073 ret = crypto_register_alg(&mv_aes_alg_cbc);
547 if (ret) 1074 if (ret)
548 goto err_unreg_ecb; 1075 goto err_unreg_ecb;
1076
1077 ret = crypto_register_ahash(&mv_sha1_alg);
1078 if (ret == 0)
1079 cpg->has_sha1 = 1;
1080 else
1081 printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
1082
1083 ret = crypto_register_ahash(&mv_hmac_sha1_alg);
1084 if (ret == 0) {
1085 cpg->has_hmac_sha1 = 1;
1086 } else {
1087 printk(KERN_WARNING MV_CESA
1088 "Could not register hmac-sha1 driver\n");
1089 }
1090
549 return 0; 1091 return 0;
550err_unreg_ecb: 1092err_unreg_ecb:
551 crypto_unregister_alg(&mv_aes_alg_ecb); 1093 crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
570 1112
571 crypto_unregister_alg(&mv_aes_alg_ecb); 1113 crypto_unregister_alg(&mv_aes_alg_ecb);
572 crypto_unregister_alg(&mv_aes_alg_cbc); 1114 crypto_unregister_alg(&mv_aes_alg_cbc);
1115 if (cp->has_sha1)
1116 crypto_unregister_ahash(&mv_sha1_alg);
1117 if (cp->has_hmac_sha1)
1118 crypto_unregister_ahash(&mv_hmac_sha1_alg);
573 kthread_stop(cp->queue_th); 1119 kthread_stop(cp->queue_th);
574 free_irq(cp->irq, cp); 1120 free_irq(cp->irq, cp);
575 memset(cp->sram, 0, cp->sram_size); 1121 memset(cp->sram, 0, cp->sram_size);
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3bb171..08fcb1116d90 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@
1#ifndef __MV_CRYPTO_H__ 1#ifndef __MV_CRYPTO_H__
2 2
3#define DIGEST_INITIAL_VAL_A 0xdd00 3#define DIGEST_INITIAL_VAL_A 0xdd00
4#define DIGEST_INITIAL_VAL_B 0xdd04
5#define DIGEST_INITIAL_VAL_C 0xdd08
6#define DIGEST_INITIAL_VAL_D 0xdd0c
7#define DIGEST_INITIAL_VAL_E 0xdd10
4#define DES_CMD_REG 0xdd58 8#define DES_CMD_REG 0xdd58
5 9
6#define SEC_ACCEL_CMD 0xde00 10#define SEC_ACCEL_CMD 0xde00
@@ -70,6 +74,10 @@ struct sec_accel_config {
70#define CFG_AES_LEN_128 (0 << 24) 74#define CFG_AES_LEN_128 (0 << 24)
71#define CFG_AES_LEN_192 (1 << 24) 75#define CFG_AES_LEN_192 (1 << 24)
72#define CFG_AES_LEN_256 (2 << 24) 76#define CFG_AES_LEN_256 (2 << 24)
77#define CFG_NOT_FRAG (0 << 30)
78#define CFG_FIRST_FRAG (1 << 30)
79#define CFG_LAST_FRAG (2 << 30)
80#define CFG_MID_FRAG (3 << 30)
73 81
74 u32 enc_p; 82 u32 enc_p;
75#define ENC_P_SRC(x) (x) 83#define ENC_P_SRC(x) (x)
@@ -90,7 +98,11 @@ struct sec_accel_config {
90#define MAC_SRC_TOTAL_LEN(x) ((x) << 16) 98#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
91 99
92 u32 mac_digest; 100 u32 mac_digest;
101#define MAC_DIGEST_P(x) (x)
102#define MAC_FRAG_LEN(x) ((x) << 16)
93 u32 mac_iv; 103 u32 mac_iv;
104#define MAC_INNER_IV_P(x) (x)
105#define MAC_OUTER_IV_P(x) ((x) << 16)
94}__attribute__ ((packed)); 106}__attribute__ ((packed));
95 /* 107 /*
96 * /-----------\ 0 108 * /-----------\ 0
@@ -101,19 +113,37 @@ struct sec_accel_config {
101 * | IV IN | 4 * 4 113 * | IV IN | 4 * 4
102 * |-----------| 0x40 (inplace) 114 * |-----------| 0x40 (inplace)
103 * | IV BUF | 4 * 4 115 * | IV BUF | 4 * 4
104 * |-----------| 0x50 116 * |-----------| 0x80
105 * | DATA IN | 16 * x (max ->max_req_size) 117 * | DATA IN | 16 * x (max ->max_req_size)
106 * |-----------| 0x50 (inplace operation) 118 * |-----------| 0x80 (inplace operation)
107 * | DATA OUT | 16 * x (max ->max_req_size) 119 * | DATA OUT | 16 * x (max ->max_req_size)
108 * \-----------/ SRAM size 120 * \-----------/ SRAM size
109 */ 121 */
122
123 /* Hashing memory map:
124 * /-----------\ 0
125 * | ACCEL CFG | 4 * 8
126 * |-----------| 0x20
127 * | Inner IV | 5 * 4
128 * |-----------| 0x34
129 * | Outer IV | 5 * 4
130 * |-----------| 0x48
131 * | Output BUF| 5 * 4
132 * |-----------| 0x80
133 * | DATA IN | 64 * x (max ->max_req_size)
134 * \-----------/ SRAM size
135 */
110#define SRAM_CONFIG 0x00 136#define SRAM_CONFIG 0x00
111#define SRAM_DATA_KEY_P 0x20 137#define SRAM_DATA_KEY_P 0x20
112#define SRAM_DATA_IV 0x40 138#define SRAM_DATA_IV 0x40
113#define SRAM_DATA_IV_BUF 0x40 139#define SRAM_DATA_IV_BUF 0x40
114#define SRAM_DATA_IN_START 0x50 140#define SRAM_DATA_IN_START 0x80
115#define SRAM_DATA_OUT_START 0x50 141#define SRAM_DATA_OUT_START 0x80
142
143#define SRAM_HMAC_IV_IN 0x20
144#define SRAM_HMAC_IV_OUT 0x34
145#define SRAM_DIGEST_BUF 0x48
116 146
117#define SRAM_CFG_SPACE 0x50 147#define SRAM_CFG_SPACE 0x80
118 148
119#endif 149#endif
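The data window moves from offset 0x50 to 0x80 because the reserved configuration area now also has to hold the HMAC inner/outer IVs (0x20/0x34) and the digest buffer (0x48). A hedged illustration of what that costs in per-request capacity; the SRAM window size is board-specific, so the 8 KiB figure here is only an example:

/* Usable data area = SRAM window - reserved space (mv_probe() computes
 * cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE).  Growing
 * SRAM_CFG_SPACE from 0x50 to 0x80 costs 48 bytes of data capacity.
 */
enum { EXAMPLE_SRAM_SIZE = 8192 };
static const int example_max_req_size = EXAMPLE_SRAM_SIZE - SRAM_CFG_SPACE; /* 8064 */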
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644
index 000000000000..f7c793745a1e
--- /dev/null
+++ b/drivers/crypto/n2_asm.S
@@ -0,0 +1,95 @@
1/* n2_asm.S: Hypervisor calls for NCS support.
2 *
3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/linkage.h>
7#include <asm/hypervisor.h>
8#include "n2_core.h"
9
10 /* o0: queue type
11 * o1: RA of queue
12 * o2: num entries in queue
13 * o3: address of queue handle return
14 */
15ENTRY(sun4v_ncs_qconf)
16 mov HV_FAST_NCS_QCONF, %o5
17 ta HV_FAST_TRAP
18 stx %o1, [%o3]
19 retl
20 nop
21ENDPROC(sun4v_ncs_qconf)
22
23 /* %o0: queue handle
24 * %o1: address of queue type return
25 * %o2: address of queue base address return
26 * %o3: address of queue num entries return
27 */
28ENTRY(sun4v_ncs_qinfo)
29 mov %o1, %g1
30 mov %o2, %g2
31 mov %o3, %g3
32 mov HV_FAST_NCS_QINFO, %o5
33 ta HV_FAST_TRAP
34 stx %o1, [%g1]
35 stx %o2, [%g2]
36 stx %o3, [%g3]
37 retl
38 nop
39ENDPROC(sun4v_ncs_qinfo)
40
41 /* %o0: queue handle
42 * %o1: address of head offset return
43 */
44ENTRY(sun4v_ncs_gethead)
45 mov %o1, %o2
46 mov HV_FAST_NCS_GETHEAD, %o5
47 ta HV_FAST_TRAP
48 stx %o1, [%o2]
49 retl
50 nop
51ENDPROC(sun4v_ncs_gethead)
52
53 /* %o0: queue handle
54 * %o1: address of tail offset return
55 */
56ENTRY(sun4v_ncs_gettail)
57 mov %o1, %o2
58 mov HV_FAST_NCS_GETTAIL, %o5
59 ta HV_FAST_TRAP
60 stx %o1, [%o2]
61 retl
62 nop
63ENDPROC(sun4v_ncs_gettail)
64
65 /* %o0: queue handle
66 * %o1: new tail offset
67 */
68ENTRY(sun4v_ncs_settail)
69 mov HV_FAST_NCS_SETTAIL, %o5
70 ta HV_FAST_TRAP
71 retl
72 nop
73ENDPROC(sun4v_ncs_settail)
74
75 /* %o0: queue handle
76 * %o1: address of devino return
77 */
78ENTRY(sun4v_ncs_qhandle_to_devino)
79 mov %o1, %o2
80 mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
81 ta HV_FAST_TRAP
82 stx %o1, [%o2]
83 retl
84 nop
85ENDPROC(sun4v_ncs_qhandle_to_devino)
86
87 /* %o0: queue handle
88 * %o1: new head offset
89 */
90ENTRY(sun4v_ncs_sethead_marker)
91 mov HV_FAST_NCS_SETHEAD_MARKER, %o5
92 ta HV_FAST_TRAP
93 retl
94 nop
95ENDPROC(sun4v_ncs_sethead_marker)
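Each stub above follows the same sun4v fast-trap convention: load the NCS function number into %o5, trap, and store any by-reference results through the saved pointer arguments. On the C side these presumably surface as ordinary functions returning the hypervisor status word; the prototypes below are an assumption inferred from the register comments, not copied from n2_core.h:

/* Assumed C-visible prototypes for the wrappers above (HV_EOK on success);
 * the remaining wrappers follow the same shape.
 */
extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
				     unsigned long queue_ra,
				     unsigned long num_entries,
				     unsigned long *qhandle);
extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
				       unsigned long *head);
extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
				       unsigned long new_tail);
extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
					      unsigned long new_head);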
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644
index 000000000000..8566be832f51
--- /dev/null
+++ b/drivers/crypto/n2_core.c
@@ -0,0 +1,2083 @@
1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2 *
3 * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/cpumask.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15#include <linux/crypto.h>
16#include <crypto/md5.h>
17#include <crypto/sha.h>
18#include <crypto/aes.h>
19#include <crypto/des.h>
20#include <linux/mutex.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23
24#include <crypto/internal/hash.h>
25#include <crypto/scatterwalk.h>
26#include <crypto/algapi.h>
27
28#include <asm/hypervisor.h>
29#include <asm/mdesc.h>
30
31#include "n2_core.h"
32
33#define DRV_MODULE_NAME "n2_crypto"
34#define DRV_MODULE_VERSION "0.1"
35#define DRV_MODULE_RELDATE "April 29, 2010"
36
37static char version[] __devinitdata =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41MODULE_DESCRIPTION("Niagara2 Crypto driver");
42MODULE_LICENSE("GPL");
43MODULE_VERSION(DRV_MODULE_VERSION);
44
45#define N2_CRA_PRIORITY 300
46
47static DEFINE_MUTEX(spu_lock);
48
49struct spu_queue {
50 cpumask_t sharing;
51 unsigned long qhandle;
52
53 spinlock_t lock;
54 u8 q_type;
55 void *q;
56 unsigned long head;
57 unsigned long tail;
58 struct list_head jobs;
59
60 unsigned long devino;
61
62 char irq_name[32];
63 unsigned int irq;
64
65 struct list_head list;
66};
67
68static struct spu_queue **cpu_to_cwq;
69static struct spu_queue **cpu_to_mau;
70
71static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72{
73 if (q->q_type == HV_NCS_QTYPE_MAU) {
74 off += MAU_ENTRY_SIZE;
75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76 off = 0;
77 } else {
78 off += CWQ_ENTRY_SIZE;
79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80 off = 0;
81 }
82 return off;
83}
84
85struct n2_request_common {
86 struct list_head entry;
87 unsigned int offset;
88};
89#define OFFSET_NOT_RUNNING (~(unsigned int)0)
90
91/* An async job request records the final tail value it used in
92 * n2_request_common->offset, test to see if that offset is in
93 * the range old_head, new_head, inclusive.
94 */
95static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96 unsigned long old_head, unsigned long new_head)
97{
98 if (old_head <= new_head) {
99 if (offset > old_head && offset <= new_head)
100 return true;
101 } else {
102 if (offset > old_head || offset <= new_head)
103 return true;
104 }
105 return false;
106}
107
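Because the queue is a ring, the (old_head, new_head] interval can wrap past the end of the buffer, which is why job_finished() needs two cases. A standalone check of both cases; the byte offsets are made up for illustration:

#include <assert.h>
#include <stdbool.h>

/* Same range test as job_finished(): is 'offset' in (old_head, new_head]
 * when the queue is treated as a ring?
 */
static bool in_completed_range(unsigned int offset,
			       unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head)
		return offset > old_head && offset <= new_head;
	return offset > old_head || offset <= new_head;
}

int main(void)
{
	assert(in_completed_range(128, 64, 192));	/* plain interval   */
	assert(in_completed_range(32, 448, 64));	/* wrapped interval  */
	assert(!in_completed_range(256, 448, 64));	/* still outstanding */
	return 0;
}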
108/* When the HEAD marker is unequal to the actual HEAD, we get
109 * a virtual device INO interrupt. We should process the
110 * completed CWQ entries and adjust the HEAD marker to clear
111 * the IRQ.
112 */
113static irqreturn_t cwq_intr(int irq, void *dev_id)
114{
115 unsigned long off, new_head, hv_ret;
116 struct spu_queue *q = dev_id;
117
118 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119 smp_processor_id(), q->qhandle);
120
121 spin_lock(&q->lock);
122
123 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
124
125 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126 smp_processor_id(), new_head, hv_ret);
127
128 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129 /* XXX ... XXX */
130 }
131
132 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133 if (hv_ret == HV_EOK)
134 q->head = new_head;
135
136 spin_unlock(&q->lock);
137
138 return IRQ_HANDLED;
139}
140
141static irqreturn_t mau_intr(int irq, void *dev_id)
142{
143 struct spu_queue *q = dev_id;
144 unsigned long head, hv_ret;
145
146 spin_lock(&q->lock);
147
148 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149 smp_processor_id(), q->qhandle);
150
151 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
152
153 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154 smp_processor_id(), head, hv_ret);
155
156 sun4v_ncs_sethead_marker(q->qhandle, head);
157
158 spin_unlock(&q->lock);
159
160 return IRQ_HANDLED;
161}
162
163static void *spu_queue_next(struct spu_queue *q, void *cur)
164{
165 return q->q + spu_next_offset(q, cur - q->q);
166}
167
168static int spu_queue_num_free(struct spu_queue *q)
169{
170 unsigned long head = q->head;
171 unsigned long tail = q->tail;
172 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173 unsigned long diff;
174
175 if (head > tail)
176 diff = head - tail;
177 else
178 diff = (end - tail) + head;
179
180 return (diff / CWQ_ENTRY_SIZE) - 1;
181}
182
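spu_queue_num_free() reserves one slot so a full ring is never confused with an empty one: head == tail means empty, and at most NUM_ENTRIES - 1 jobs may be outstanding. A standalone restatement with a few worked cases; the entry size and count are illustrative stand-ins for the CWQ values in n2_core.h:

#include <assert.h>

enum {
	EX_ENTRY_SIZE  = 64,
	EX_NUM_ENTRIES = 64,
	EX_QUEUE_BYTES = EX_ENTRY_SIZE * EX_NUM_ENTRIES,
};

/* Free entries in a byte-offset ring, keeping one slot unused. */
static int ring_num_free(unsigned long head, unsigned long tail)
{
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (EX_QUEUE_BYTES - tail) + head;

	return (int)(diff / EX_ENTRY_SIZE) - 1;
}

int main(void)
{
	assert(ring_num_free(0, 0) == EX_NUM_ENTRIES - 1);	/* empty          */
	assert(ring_num_free(0, 64) == EX_NUM_ENTRIES - 2);	/* one job queued */
	assert(ring_num_free(64, 0) == 0);			/* ring full      */
	return 0;
}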
183static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184{
185 int avail = spu_queue_num_free(q);
186
187 if (avail >= num_entries)
188 return q->q + q->tail;
189
190 return NULL;
191}
192
193static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
194{
195 unsigned long hv_ret, new_tail;
196
197 new_tail = spu_next_offset(q, last - q->q);
198
199 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200 if (hv_ret == HV_EOK)
201 q->tail = new_tail;
202 return hv_ret;
203}
204
205static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206 int enc_type, int auth_type,
207 unsigned int hash_len,
208 bool sfas, bool sob, bool eob, bool encrypt,
209 int opcode)
210{
211 u64 word = (len - 1) & CONTROL_LEN;
212
213 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216 if (sfas)
217 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218 if (sob)
219 word |= CONTROL_START_OF_BLOCK;
220 if (eob)
221 word |= CONTROL_END_OF_BLOCK;
222 if (encrypt)
223 word |= CONTROL_ENCRYPT;
224 if (hmac_key_len)
225 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226 if (hash_len)
227 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228
229 return word;
230}
231
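control_word_base() packs the whole CWQ control word: the biased length in the low bits, then opcode, encryption/auth type, block-boundary flags, and the biased HMAC-key and hash lengths. A hedged sketch of one call shape for a single-block, non-HMAC hash; the auth_type and opcode values are deliberately left as parameters because their names are not assumed here:

/* Hypothetical call shape: hash 'len' bytes as one complete block
 * (start and end of block set, no HMAC key, no encryption).
 */
static u64 example_hash_control_word(unsigned int len, int auth_type,
				     int opcode, unsigned int digest_len)
{
	return control_word_base(len, 0 /* hmac_key_len */, 0 /* enc_type */,
				 auth_type, digest_len,
				 false /* sfas */, true /* sob */,
				 true /* eob */, false /* encrypt */,
				 opcode);
}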
232#if 0
233static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
234{
235 if (this_len >= 64 ||
236 qp->head != qp->tail)
237 return true;
238 return false;
239}
240#endif
241
242struct n2_base_ctx {
243 struct list_head list;
244};
245
246static void n2_base_ctx_init(struct n2_base_ctx *ctx)
247{
248 INIT_LIST_HEAD(&ctx->list);
249}
250
251struct n2_hash_ctx {
252 struct n2_base_ctx base;
253
254 struct crypto_ahash *fallback;
255
256 /* These next three members must match the layout created by
257 * crypto_init_shash_ops_async. This allows us to properly
258 * plumb requests we can't do in hardware down to the fallback
259 * operation, providing all of the data structures and layouts
260 * expected by those paths.
261 */
262 struct ahash_request fallback_req;
263 struct shash_desc fallback_desc;
264 union {
265 struct md5_state md5;
266 struct sha1_state sha1;
267 struct sha256_state sha256;
268 } u;
269
270 unsigned char hash_key[64];
271 unsigned char keyed_zero_hash[32];
272};
273
274static int n2_hash_async_init(struct ahash_request *req)
275{
276 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
277 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
278
279 ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
280 ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
281
282 return crypto_ahash_init(&ctx->fallback_req);
283}
284
285static int n2_hash_async_update(struct ahash_request *req)
286{
287 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
288 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
289
290 ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
291 ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
292 ctx->fallback_req.nbytes = req->nbytes;
293 ctx->fallback_req.src = req->src;
294
295 return crypto_ahash_update(&ctx->fallback_req);
296}
297
298static int n2_hash_async_final(struct ahash_request *req)
299{
300 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
301 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
302
303 ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
304 ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
305 ctx->fallback_req.result = req->result;
306
307 return crypto_ahash_final(&ctx->fallback_req);
308}
309
310static int n2_hash_async_finup(struct ahash_request *req)
311{
312 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
313 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
314
315 ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
316 ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
317 ctx->fallback_req.nbytes = req->nbytes;
318 ctx->fallback_req.src = req->src;
319 ctx->fallback_req.result = req->result;
320
321 return crypto_ahash_finup(&ctx->fallback_req);
322}
323
324static int n2_hash_cra_init(struct crypto_tfm *tfm)
325{
326 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
327 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
328 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
329 struct crypto_ahash *fallback_tfm;
330 int err;
331
332 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
333 CRYPTO_ALG_NEED_FALLBACK);
334 if (IS_ERR(fallback_tfm)) {
335 pr_warning("Fallback driver '%s' could not be loaded!\n",
336 fallback_driver_name);
337 err = PTR_ERR(fallback_tfm);
338 goto out;
339 }
340
341 ctx->fallback = fallback_tfm;
342 return 0;
343
344out:
345 return err;
346}
347
348static void n2_hash_cra_exit(struct crypto_tfm *tfm)
349{
350 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
351 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
352
353 crypto_free_ahash(ctx->fallback);
354}
355
356static unsigned long wait_for_tail(struct spu_queue *qp)
357{
358 unsigned long head, hv_ret;
359
360 do {
361 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
362 if (hv_ret != HV_EOK) {
363 pr_err("Hypervisor error on gethead\n");
364 break;
365 }
366 if (head == qp->tail) {
367 qp->head = head;
368 break;
369 }
370 } while (1);
371 return hv_ret;
372}
373
374static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
375 struct cwq_initial_entry *ent)
376{
377 unsigned long hv_ret = spu_queue_submit(qp, ent);
378
379 if (hv_ret == HV_EOK)
380 hv_ret = wait_for_tail(qp);
381
382 return hv_ret;
383}
384
385static int n2_hash_async_digest(struct ahash_request *req,
386 unsigned int auth_type, unsigned int digest_size,
387 unsigned int result_size, void *hash_loc)
388{
389 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
390 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
391 struct cwq_initial_entry *ent;
392 struct crypto_hash_walk walk;
393 struct spu_queue *qp;
394 unsigned long flags;
395 int err = -ENODEV;
396 int nbytes, cpu;
397
398 /* The total effective length of the operation may not
399 * exceed 2^16.
400 */
401 if (unlikely(req->nbytes > (1 << 16))) {
402 ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
403 ctx->fallback_req.base.flags =
404 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
405 ctx->fallback_req.nbytes = req->nbytes;
406 ctx->fallback_req.src = req->src;
407 ctx->fallback_req.result = req->result;
408
409 return crypto_ahash_digest(&ctx->fallback_req);
410 }
411
412 n2_base_ctx_init(&ctx->base);
413
414 nbytes = crypto_hash_walk_first(req, &walk);
415
416 cpu = get_cpu();
417 qp = cpu_to_cwq[cpu];
418 if (!qp)
419 goto out;
420
421 spin_lock_irqsave(&qp->lock, flags);
422
423 /* XXX can do better, improve this later by doing a by-hand scatterlist
424 * XXX walk, etc.
425 */
426 ent = qp->q + qp->tail;
427
428 ent->control = control_word_base(nbytes, 0, 0,
429 auth_type, digest_size,
430 false, true, false, false,
431 OPCODE_INPLACE_BIT |
432 OPCODE_AUTH_MAC);
433 ent->src_addr = __pa(walk.data);
434 ent->auth_key_addr = 0UL;
435 ent->auth_iv_addr = __pa(hash_loc);
436 ent->final_auth_state_addr = 0UL;
437 ent->enc_key_addr = 0UL;
438 ent->enc_iv_addr = 0UL;
439 ent->dest_addr = __pa(hash_loc);
440
441 nbytes = crypto_hash_walk_done(&walk, 0);
442 while (nbytes > 0) {
443 ent = spu_queue_next(qp, ent);
444
445 ent->control = (nbytes - 1);
446 ent->src_addr = __pa(walk.data);
447 ent->auth_key_addr = 0UL;
448 ent->auth_iv_addr = 0UL;
449 ent->final_auth_state_addr = 0UL;
450 ent->enc_key_addr = 0UL;
451 ent->enc_iv_addr = 0UL;
452 ent->dest_addr = 0UL;
453
454 nbytes = crypto_hash_walk_done(&walk, 0);
455 }
456 ent->control |= CONTROL_END_OF_BLOCK;
457
458 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
459 err = -EINVAL;
460 else
461 err = 0;
462
463 spin_unlock_irqrestore(&qp->lock, flags);
464
465 if (!err)
466 memcpy(req->result, hash_loc, result_size);
467out:
468 put_cpu();
469
470 return err;
471}
472
473static int n2_md5_async_digest(struct ahash_request *req)
474{
475 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
476 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
477 struct md5_state *m = &ctx->u.md5;
478
479 if (unlikely(req->nbytes == 0)) {
480 static const char md5_zero[MD5_DIGEST_SIZE] = {
481 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
482 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
483 };
484
485 memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
486 return 0;
487 }
488 m->hash[0] = cpu_to_le32(0x67452301);
489 m->hash[1] = cpu_to_le32(0xefcdab89);
490 m->hash[2] = cpu_to_le32(0x98badcfe);
491 m->hash[3] = cpu_to_le32(0x10325476);
492
493 return n2_hash_async_digest(req, AUTH_TYPE_MD5,
494 MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
495 m->hash);
496}
497
498static int n2_sha1_async_digest(struct ahash_request *req)
499{
500 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
501 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
502 struct sha1_state *s = &ctx->u.sha1;
503
504 if (unlikely(req->nbytes == 0)) {
505 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
506 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
507 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
508 0x07, 0x09
509 };
510
511 memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
512 return 0;
513 }
514 s->state[0] = SHA1_H0;
515 s->state[1] = SHA1_H1;
516 s->state[2] = SHA1_H2;
517 s->state[3] = SHA1_H3;
518 s->state[4] = SHA1_H4;
519
520 return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
521 SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
522 s->state);
523}
524
525static int n2_sha256_async_digest(struct ahash_request *req)
526{
527 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
528 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
529 struct sha256_state *s = &ctx->u.sha256;
530
531 if (req->nbytes == 0) {
532 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
533 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
534 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
535 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
536 0x1b, 0x78, 0x52, 0xb8, 0x55
537 };
538
539 memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
540 return 0;
541 }
542 s->state[0] = SHA256_H0;
543 s->state[1] = SHA256_H1;
544 s->state[2] = SHA256_H2;
545 s->state[3] = SHA256_H3;
546 s->state[4] = SHA256_H4;
547 s->state[5] = SHA256_H5;
548 s->state[6] = SHA256_H6;
549 s->state[7] = SHA256_H7;
550
551 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
552 SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
553 s->state);
554}
555
556static int n2_sha224_async_digest(struct ahash_request *req)
557{
558 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
559 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
560 struct sha256_state *s = &ctx->u.sha256;
561
562 if (req->nbytes == 0) {
563 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
564 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
565 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
566 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
567 0x2f
568 };
569
570 memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
571 return 0;
572 }
573 s->state[0] = SHA224_H0;
574 s->state[1] = SHA224_H1;
575 s->state[2] = SHA224_H2;
576 s->state[3] = SHA224_H3;
577 s->state[4] = SHA224_H4;
578 s->state[5] = SHA224_H5;
579 s->state[6] = SHA224_H6;
580 s->state[7] = SHA224_H7;
581
582 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
583 SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
584 s->state);
585}
586
587struct n2_cipher_context {
588 int key_len;
589 int enc_type;
590 union {
591 u8 aes[AES_MAX_KEY_SIZE];
592 u8 des[DES_KEY_SIZE];
593 u8 des3[3 * DES_KEY_SIZE];
594 u8 arc4[258]; /* S-box, X, Y */
595 } key;
596};
597
598#define N2_CHUNK_ARR_LEN 16
599
600struct n2_crypto_chunk {
601 struct list_head entry;
602 unsigned long iv_paddr : 44;
603 unsigned long arr_len : 20;
604 unsigned long dest_paddr;
605 unsigned long dest_final;
606 struct {
607 unsigned long src_paddr : 44;
608 unsigned long src_len : 20;
609 } arr[N2_CHUNK_ARR_LEN];
610};
611
612struct n2_request_context {
613 struct ablkcipher_walk walk;
614 struct list_head chunk_list;
615 struct n2_crypto_chunk chunk;
616 u8 temp_iv[16];
617};
618
619/* The SPU allows some level of flexibility for partial cipher blocks
620 * being specified in a descriptor.
621 *
622 * It merely requires that every descriptor's length field is at least
623 * as large as the cipher block size. This means that a cipher block
624 * can span at most 2 descriptors. However, this does not allow a
625 * partial block to span into the final descriptor as that would
626 * violate the rule (since every descriptor's length must be at least
627 * the block size). So, for example, assuming an 8 byte block size:
628 *
629 * 0xe --> 0xa --> 0x8
630 *
631 * is a valid length sequence, whereas:
632 *
633 * 0xe --> 0xb --> 0x7
634 *
635 * is not a valid sequence.
636 */
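To make the rule above concrete, here is an editorial sketch (not part of the driver; the helper name is hypothetical) that accepts a sequence of per-descriptor lengths only when every entry is at least the cipher block size. With an 8-byte block it accepts 0xe, 0xa, 0x8 and rejects 0xe, 0xb, 0x7:

static bool desc_lengths_valid(const unsigned int *lens, int n,
			       unsigned int block_size)
{
	int i;

	/* Every descriptor must carry at least one full cipher block. */
	for (i = 0; i < n; i++) {
		if (lens[i] < block_size)
			return false;
	}
	return true;
}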
637
638struct n2_cipher_alg {
639 struct list_head entry;
640 u8 enc_type;
641 struct crypto_alg alg;
642};
643
644static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
645{
646 struct crypto_alg *alg = tfm->__crt_alg;
647
648 return container_of(alg, struct n2_cipher_alg, alg);
649}
650
651struct n2_cipher_request_context {
652 struct ablkcipher_walk walk;
653};
654
655static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
656 unsigned int keylen)
657{
658 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
659 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
660 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
661
662 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
663
664 switch (keylen) {
665 case AES_KEYSIZE_128:
666 ctx->enc_type |= ENC_TYPE_ALG_AES128;
667 break;
668 case AES_KEYSIZE_192:
669 ctx->enc_type |= ENC_TYPE_ALG_AES192;
670 break;
671 case AES_KEYSIZE_256:
672 ctx->enc_type |= ENC_TYPE_ALG_AES256;
673 break;
674 default:
675 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
676 return -EINVAL;
677 }
678
679 ctx->key_len = keylen;
680 memcpy(ctx->key.aes, key, keylen);
681 return 0;
682}
683
684static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
685 unsigned int keylen)
686{
687 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
688 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
689 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
690 u32 tmp[DES_EXPKEY_WORDS];
691 int err;
692
693 ctx->enc_type = n2alg->enc_type;
694
695 if (keylen != DES_KEY_SIZE) {
696 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
697 return -EINVAL;
698 }
699
700 err = des_ekey(tmp, key);
701 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
702 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
703 return -EINVAL;
704 }
705
706 ctx->key_len = keylen;
707 memcpy(ctx->key.des, key, keylen);
708 return 0;
709}
710
711static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
712 unsigned int keylen)
713{
714 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
715 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
716 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
717
718 ctx->enc_type = n2alg->enc_type;
719
720 if (keylen != (3 * DES_KEY_SIZE)) {
721 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
722 return -EINVAL;
723 }
724 ctx->key_len = keylen;
725 memcpy(ctx->key.des3, key, keylen);
726 return 0;
727}
728
729static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
730 unsigned int keylen)
731{
732 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
733 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
734 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
735 u8 *s = ctx->key.arc4;
736 u8 *x = s + 256;
737 u8 *y = x + 1;
738 int i, j, k;
739
740 ctx->enc_type = n2alg->enc_type;
741
742 j = k = 0;
743 *x = 0;
744 *y = 0;
745 for (i = 0; i < 256; i++)
746 s[i] = i;
747 for (i = 0; i < 256; i++) {
748 u8 a = s[i];
749 j = (j + key[k] + a) & 0xff;
750 s[i] = s[j];
751 s[j] = a;
752 if (++k >= keylen)
753 k = 0;
754 }
755
756 return 0;
757}
758
759static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
760{
761 int this_len = nbytes;
762
763 this_len -= (nbytes & (block_size - 1));
764 return this_len > (1 << 16) ? (1 << 16) : this_len;
765}
766
767static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
768 struct spu_queue *qp, bool encrypt)
769{
770 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
771 struct cwq_initial_entry *ent;
772 bool in_place;
773 int i;
774
775 ent = spu_queue_alloc(qp, cp->arr_len);
776 if (!ent) {
777 pr_info("queue_alloc() of %d fails\n",
778 cp->arr_len);
779 return -EBUSY;
780 }
781
782 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
783
784 ent->control = control_word_base(cp->arr[0].src_len,
785 0, ctx->enc_type, 0, 0,
786 false, true, false, encrypt,
787 OPCODE_ENCRYPT |
788 (in_place ? OPCODE_INPLACE_BIT : 0));
789 ent->src_addr = cp->arr[0].src_paddr;
790 ent->auth_key_addr = 0UL;
791 ent->auth_iv_addr = 0UL;
792 ent->final_auth_state_addr = 0UL;
793 ent->enc_key_addr = __pa(&ctx->key);
794 ent->enc_iv_addr = cp->iv_paddr;
795 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
796
797 for (i = 1; i < cp->arr_len; i++) {
798 ent = spu_queue_next(qp, ent);
799
800 ent->control = cp->arr[i].src_len - 1;
801 ent->src_addr = cp->arr[i].src_paddr;
802 ent->auth_key_addr = 0UL;
803 ent->auth_iv_addr = 0UL;
804 ent->final_auth_state_addr = 0UL;
805 ent->enc_key_addr = 0UL;
806 ent->enc_iv_addr = 0UL;
807 ent->dest_addr = 0UL;
808 }
809 ent->control |= CONTROL_END_OF_BLOCK;
810
811 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
812}
813
814static int n2_compute_chunks(struct ablkcipher_request *req)
815{
816 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
817 struct ablkcipher_walk *walk = &rctx->walk;
818 struct n2_crypto_chunk *chunk;
819 unsigned long dest_prev;
820 unsigned int tot_len;
821 bool prev_in_place;
822 int err, nbytes;
823
824 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
825 err = ablkcipher_walk_phys(req, walk);
826 if (err)
827 return err;
828
829 INIT_LIST_HEAD(&rctx->chunk_list);
830
831 chunk = &rctx->chunk;
832 INIT_LIST_HEAD(&chunk->entry);
833
834 chunk->iv_paddr = 0UL;
835 chunk->arr_len = 0;
836 chunk->dest_paddr = 0UL;
837
838 prev_in_place = false;
839 dest_prev = ~0UL;
840 tot_len = 0;
841
842 while ((nbytes = walk->nbytes) != 0) {
843 unsigned long dest_paddr, src_paddr;
844 bool in_place;
845 int this_len;
846
847 src_paddr = (page_to_phys(walk->src.page) +
848 walk->src.offset);
849 dest_paddr = (page_to_phys(walk->dst.page) +
850 walk->dst.offset);
851 in_place = (src_paddr == dest_paddr);
852 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
853
854 if (chunk->arr_len != 0) {
855 if (in_place != prev_in_place ||
856 (!prev_in_place &&
857 dest_paddr != dest_prev) ||
858 chunk->arr_len == N2_CHUNK_ARR_LEN ||
859 tot_len + this_len > (1 << 16)) {
860 chunk->dest_final = dest_prev;
861 list_add_tail(&chunk->entry,
862 &rctx->chunk_list);
863 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
864 if (!chunk) {
865 err = -ENOMEM;
866 break;
867 }
868 INIT_LIST_HEAD(&chunk->entry);
869 }
870 }
871 if (chunk->arr_len == 0) {
872 chunk->dest_paddr = dest_paddr;
873 tot_len = 0;
874 }
875 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
876 chunk->arr[chunk->arr_len].src_len = this_len;
877 chunk->arr_len++;
878
879 dest_prev = dest_paddr + this_len;
880 prev_in_place = in_place;
881 tot_len += this_len;
882
883 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
884 if (err)
885 break;
886 }
887 if (!err && chunk->arr_len != 0) {
888 chunk->dest_final = dest_prev;
889 list_add_tail(&chunk->entry, &rctx->chunk_list);
890 }
891
892 return err;
893}
894
895static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
896{
897 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
898 struct n2_crypto_chunk *c, *tmp;
899
900 if (final_iv)
901 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
902
903 ablkcipher_walk_complete(&rctx->walk);
904 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
905 list_del(&c->entry);
906 if (unlikely(c != &rctx->chunk))
907 kfree(c);
908 }
909
910}
911
912static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
913{
914 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
915 struct crypto_tfm *tfm = req->base.tfm;
916 int err = n2_compute_chunks(req);
917 struct n2_crypto_chunk *c, *tmp;
918 unsigned long flags, hv_ret;
919 struct spu_queue *qp;
920
921 if (err)
922 return err;
923
924 qp = cpu_to_cwq[get_cpu()];
925 err = -ENODEV;
926 if (!qp)
927 goto out;
928
929 spin_lock_irqsave(&qp->lock, flags);
930
931 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
932 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
933 if (err)
934 break;
935 list_del(&c->entry);
936 if (unlikely(c != &rctx->chunk))
937 kfree(c);
938 }
939 if (!err) {
940 hv_ret = wait_for_tail(qp);
941 if (hv_ret != HV_EOK)
942 err = -EINVAL;
943 }
944
945 spin_unlock_irqrestore(&qp->lock, flags);
946
947 put_cpu();
948
949out:
950 n2_chunk_complete(req, NULL);
951 return err;
952}
953
954static int n2_encrypt_ecb(struct ablkcipher_request *req)
955{
956 return n2_do_ecb(req, true);
957}
958
959static int n2_decrypt_ecb(struct ablkcipher_request *req)
960{
961 return n2_do_ecb(req, false);
962}
963
964static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
965{
966 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
967 struct crypto_tfm *tfm = req->base.tfm;
968 unsigned long flags, hv_ret, iv_paddr;
969 int err = n2_compute_chunks(req);
970 struct n2_crypto_chunk *c, *tmp;
971 struct spu_queue *qp;
972 void *final_iv_addr;
973
974 final_iv_addr = NULL;
975
976 if (err)
977 return err;
978
979 qp = cpu_to_cwq[get_cpu()];
980 err = -ENODEV;
981 if (!qp)
982 goto out;
983
984 spin_lock_irqsave(&qp->lock, flags);
985
986 if (encrypt) {
987 iv_paddr = __pa(rctx->walk.iv);
988 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
989 entry) {
990 c->iv_paddr = iv_paddr;
991 err = __n2_crypt_chunk(tfm, c, qp, true);
992 if (err)
993 break;
994 iv_paddr = c->dest_final - rctx->walk.blocksize;
995 list_del(&c->entry);
996 if (unlikely(c != &rctx->chunk))
997 kfree(c);
998 }
999 final_iv_addr = __va(iv_paddr);
1000 } else {
1001 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1002 entry) {
1003 if (c == &rctx->chunk) {
1004 iv_paddr = __pa(rctx->walk.iv);
1005 } else {
1006 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1007 tmp->arr[tmp->arr_len-1].src_len -
1008 rctx->walk.blocksize);
1009 }
1010 if (!final_iv_addr) {
1011 unsigned long pa;
1012
1013 pa = (c->arr[c->arr_len-1].src_paddr +
1014 c->arr[c->arr_len-1].src_len -
1015 rctx->walk.blocksize);
1016 final_iv_addr = rctx->temp_iv;
1017 memcpy(rctx->temp_iv, __va(pa),
1018 rctx->walk.blocksize);
1019 }
1020 c->iv_paddr = iv_paddr;
1021 err = __n2_crypt_chunk(tfm, c, qp, false);
1022 if (err)
1023 break;
1024 list_del(&c->entry);
1025 if (unlikely(c != &rctx->chunk))
1026 kfree(c);
1027 }
1028 }
1029 if (!err) {
1030 hv_ret = wait_for_tail(qp);
1031 if (hv_ret != HV_EOK)
1032 err = -EINVAL;
1033 }
1034
1035 spin_unlock_irqrestore(&qp->lock, flags);
1036
1037 put_cpu();
1038
1039out:
1040 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1041 return err;
1042}
1043
1044static int n2_encrypt_chaining(struct ablkcipher_request *req)
1045{
1046 return n2_do_chaining(req, true);
1047}
1048
1049static int n2_decrypt_chaining(struct ablkcipher_request *req)
1050{
1051 return n2_do_chaining(req, false);
1052}
1053
1054struct n2_cipher_tmpl {
1055 const char *name;
1056 const char *drv_name;
1057 u8 block_size;
1058 u8 enc_type;
1059 struct ablkcipher_alg ablkcipher;
1060};
1061
1062static const struct n2_cipher_tmpl cipher_tmpls[] = {
1063 /* ARC4: only ECB is supported (chaining bits ignored) */
1064 { .name = "ecb(arc4)",
1065 .drv_name = "ecb-arc4",
1066 .block_size = 1,
1067 .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1068 ENC_TYPE_CHAINING_ECB),
1069 .ablkcipher = {
1070 .min_keysize = 1,
1071 .max_keysize = 256,
1072 .setkey = n2_arc4_setkey,
1073 .encrypt = n2_encrypt_ecb,
1074 .decrypt = n2_decrypt_ecb,
1075 },
1076 },
1077
1078 /* DES: ECB CBC and CFB are supported */
1079 { .name = "ecb(des)",
1080 .drv_name = "ecb-des",
1081 .block_size = DES_BLOCK_SIZE,
1082 .enc_type = (ENC_TYPE_ALG_DES |
1083 ENC_TYPE_CHAINING_ECB),
1084 .ablkcipher = {
1085 .min_keysize = DES_KEY_SIZE,
1086 .max_keysize = DES_KEY_SIZE,
1087 .setkey = n2_des_setkey,
1088 .encrypt = n2_encrypt_ecb,
1089 .decrypt = n2_decrypt_ecb,
1090 },
1091 },
1092 { .name = "cbc(des)",
1093 .drv_name = "cbc-des",
1094 .block_size = DES_BLOCK_SIZE,
1095 .enc_type = (ENC_TYPE_ALG_DES |
1096 ENC_TYPE_CHAINING_CBC),
1097 .ablkcipher = {
1098 .ivsize = DES_BLOCK_SIZE,
1099 .min_keysize = DES_KEY_SIZE,
1100 .max_keysize = DES_KEY_SIZE,
1101 .setkey = n2_des_setkey,
1102 .encrypt = n2_encrypt_chaining,
1103 .decrypt = n2_decrypt_chaining,
1104 },
1105 },
1106 { .name = "cfb(des)",
1107 .drv_name = "cfb-des",
1108 .block_size = DES_BLOCK_SIZE,
1109 .enc_type = (ENC_TYPE_ALG_DES |
1110 ENC_TYPE_CHAINING_CFB),
1111 .ablkcipher = {
1112 .min_keysize = DES_KEY_SIZE,
1113 .max_keysize = DES_KEY_SIZE,
1114 .setkey = n2_des_setkey,
1115 .encrypt = n2_encrypt_chaining,
1116 .decrypt = n2_decrypt_chaining,
1117 },
1118 },
1119
1120 /* 3DES: ECB CBC and CFB are supported */
1121 { .name = "ecb(des3_ede)",
1122 .drv_name = "ecb-3des",
1123 .block_size = DES_BLOCK_SIZE,
1124 .enc_type = (ENC_TYPE_ALG_3DES |
1125 ENC_TYPE_CHAINING_ECB),
1126 .ablkcipher = {
1127 .min_keysize = 3 * DES_KEY_SIZE,
1128 .max_keysize = 3 * DES_KEY_SIZE,
1129 .setkey = n2_3des_setkey,
1130 .encrypt = n2_encrypt_ecb,
1131 .decrypt = n2_decrypt_ecb,
1132 },
1133 },
1134 { .name = "cbc(des3_ede)",
1135 .drv_name = "cbc-3des",
1136 .block_size = DES_BLOCK_SIZE,
1137 .enc_type = (ENC_TYPE_ALG_3DES |
1138 ENC_TYPE_CHAINING_CBC),
1139 .ablkcipher = {
1140 .ivsize = DES_BLOCK_SIZE,
1141 .min_keysize = 3 * DES_KEY_SIZE,
1142 .max_keysize = 3 * DES_KEY_SIZE,
1143 .setkey = n2_3des_setkey,
1144 .encrypt = n2_encrypt_chaining,
1145 .decrypt = n2_decrypt_chaining,
1146 },
1147 },
1148 { .name = "cfb(des3_ede)",
1149 .drv_name = "cfb-3des",
1150 .block_size = DES_BLOCK_SIZE,
1151 .enc_type = (ENC_TYPE_ALG_3DES |
1152 ENC_TYPE_CHAINING_CFB),
1153 .ablkcipher = {
1154 .min_keysize = 3 * DES_KEY_SIZE,
1155 .max_keysize = 3 * DES_KEY_SIZE,
1156 .setkey = n2_3des_setkey,
1157 .encrypt = n2_encrypt_chaining,
1158 .decrypt = n2_decrypt_chaining,
1159 },
1160 },
1161 /* AES: ECB CBC and CTR are supported */
1162 { .name = "ecb(aes)",
1163 .drv_name = "ecb-aes",
1164 .block_size = AES_BLOCK_SIZE,
1165 .enc_type = (ENC_TYPE_ALG_AES128 |
1166 ENC_TYPE_CHAINING_ECB),
1167 .ablkcipher = {
1168 .min_keysize = AES_MIN_KEY_SIZE,
1169 .max_keysize = AES_MAX_KEY_SIZE,
1170 .setkey = n2_aes_setkey,
1171 .encrypt = n2_encrypt_ecb,
1172 .decrypt = n2_decrypt_ecb,
1173 },
1174 },
1175 { .name = "cbc(aes)",
1176 .drv_name = "cbc-aes",
1177 .block_size = AES_BLOCK_SIZE,
1178 .enc_type = (ENC_TYPE_ALG_AES128 |
1179 ENC_TYPE_CHAINING_CBC),
1180 .ablkcipher = {
1181 .ivsize = AES_BLOCK_SIZE,
1182 .min_keysize = AES_MIN_KEY_SIZE,
1183 .max_keysize = AES_MAX_KEY_SIZE,
1184 .setkey = n2_aes_setkey,
1185 .encrypt = n2_encrypt_chaining,
1186 .decrypt = n2_decrypt_chaining,
1187 },
1188 },
1189 { .name = "ctr(aes)",
1190 .drv_name = "ctr-aes",
1191 .block_size = AES_BLOCK_SIZE,
1192 .enc_type = (ENC_TYPE_ALG_AES128 |
1193 ENC_TYPE_CHAINING_COUNTER),
1194 .ablkcipher = {
1195 .ivsize = AES_BLOCK_SIZE,
1196 .min_keysize = AES_MIN_KEY_SIZE,
1197 .max_keysize = AES_MAX_KEY_SIZE,
1198 .setkey = n2_aes_setkey,
1199 .encrypt = n2_encrypt_chaining,
1200 .decrypt = n2_encrypt_chaining,
1201 },
1202 },
1203
1204};
1205#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1206
1207static LIST_HEAD(cipher_algs);
1208
1209struct n2_hash_tmpl {
1210 const char *name;
1211 int (*digest)(struct ahash_request *req);
1212 u8 digest_size;
1213 u8 block_size;
1214};
1215static const struct n2_hash_tmpl hash_tmpls[] = {
1216 { .name = "md5",
1217 .digest = n2_md5_async_digest,
1218 .digest_size = MD5_DIGEST_SIZE,
1219 .block_size = MD5_HMAC_BLOCK_SIZE },
1220 { .name = "sha1",
1221 .digest = n2_sha1_async_digest,
1222 .digest_size = SHA1_DIGEST_SIZE,
1223 .block_size = SHA1_BLOCK_SIZE },
1224 { .name = "sha256",
1225 .digest = n2_sha256_async_digest,
1226 .digest_size = SHA256_DIGEST_SIZE,
1227 .block_size = SHA256_BLOCK_SIZE },
1228 { .name = "sha224",
1229 .digest = n2_sha224_async_digest,
1230 .digest_size = SHA224_DIGEST_SIZE,
1231 .block_size = SHA224_BLOCK_SIZE },
1232};
1233#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1234
1235struct n2_ahash_alg {
1236 struct list_head entry;
1237 struct ahash_alg alg;
1238};
1239static LIST_HEAD(ahash_algs);
1240
1241static int algs_registered;
1242
1243static void __n2_unregister_algs(void)
1244{
1245 struct n2_cipher_alg *cipher, *cipher_tmp;
1246 struct n2_ahash_alg *alg, *alg_tmp;
1247
1248 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1249 crypto_unregister_alg(&cipher->alg);
1250 list_del(&cipher->entry);
1251 kfree(cipher);
1252 }
1253 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1254 crypto_unregister_ahash(&alg->alg);
1255 list_del(&alg->entry);
1256 kfree(alg);
1257 }
1258}
1259
1260static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1261{
1262 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1263 return 0;
1264}
1265
1266static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1267{
1268 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1269 struct crypto_alg *alg;
1270 int err;
1271
1272 if (!p)
1273 return -ENOMEM;
1274
1275 alg = &p->alg;
1276
1277 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1278 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1279 alg->cra_priority = N2_CRA_PRIORITY;
1280 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1281 alg->cra_blocksize = tmpl->block_size;
1282 p->enc_type = tmpl->enc_type;
1283 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1284 alg->cra_type = &crypto_ablkcipher_type;
1285 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1286 alg->cra_init = n2_cipher_cra_init;
1287 alg->cra_module = THIS_MODULE;
1288
1289 list_add(&p->entry, &cipher_algs);
1290 err = crypto_register_alg(alg);
1291 if (err) {
1292 list_del(&p->entry);
1293 kfree(p);
1294 }
1295 return err;
1296}
1297
1298static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1299{
1300 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1301 struct hash_alg_common *halg;
1302 struct crypto_alg *base;
1303 struct ahash_alg *ahash;
1304 int err;
1305
1306 if (!p)
1307 return -ENOMEM;
1308
1309 ahash = &p->alg;
1310 ahash->init = n2_hash_async_init;
1311 ahash->update = n2_hash_async_update;
1312 ahash->final = n2_hash_async_final;
1313 ahash->finup = n2_hash_async_finup;
1314 ahash->digest = tmpl->digest;
1315
1316 halg = &ahash->halg;
1317 halg->digestsize = tmpl->digest_size;
1318
1319 base = &halg->base;
1320 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1321 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1322 base->cra_priority = N2_CRA_PRIORITY;
1323 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
1324 base->cra_blocksize = tmpl->block_size;
1325 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1326 base->cra_module = THIS_MODULE;
1327 base->cra_init = n2_hash_cra_init;
1328 base->cra_exit = n2_hash_cra_exit;
1329
1330 list_add(&p->entry, &ahash_algs);
1331 err = crypto_register_ahash(ahash);
1332 if (err) {
1333 list_del(&p->entry);
1334 kfree(p);
1335 }
1336 return err;
1337}
1338
1339static int __devinit n2_register_algs(void)
1340{
1341 int i, err = 0;
1342
1343 mutex_lock(&spu_lock);
1344 if (algs_registered++)
1345 goto out;
1346
1347 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1348 err = __n2_register_one_ahash(&hash_tmpls[i]);
1349 if (err) {
1350 __n2_unregister_algs();
1351 goto out;
1352 }
1353 }
1354 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1355 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1356 if (err) {
1357 __n2_unregister_algs();
1358 goto out;
1359 }
1360 }
1361
1362out:
1363 mutex_unlock(&spu_lock);
1364 return err;
1365}
1366
1367static void __exit n2_unregister_algs(void)
1368{
1369 mutex_lock(&spu_lock);
1370 if (!--algs_registered)
1371 __n2_unregister_algs();
1372 mutex_unlock(&spu_lock);
1373}
1374
1375/* To map CWQ queues to interrupt sources, the hypervisor API provides
1376 * a devino. This isn't very useful to us because all of the
1377 * interrupts listed in the of_device node have been translated to
1378 * Linux virtual IRQ cookie numbers.
1379 *
1380 * So we have to back-translate, going through the 'intr' and 'ino'
1381 * property tables of the n2cp MDESC node, matching them with the OF
1382 * 'interrupts' property entries, in order to figure out which
1383 * devino goes to which already-translated IRQ.
1384 */
1385static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
1386 unsigned long dev_ino)
1387{
1388 const unsigned int *dev_intrs;
1389 unsigned int intr;
1390 int i;
1391
1392 for (i = 0; i < ip->num_intrs; i++) {
1393 if (ip->ino_table[i].ino == dev_ino)
1394 break;
1395 }
1396 if (i == ip->num_intrs)
1397 return -ENODEV;
1398
1399 intr = ip->ino_table[i].intr;
1400
1401 dev_intrs = of_get_property(dev->node, "interrupts", NULL);
1402 if (!dev_intrs)
1403 return -ENODEV;
1404
1405 for (i = 0; i < dev->num_irqs; i++) {
1406 if (dev_intrs[i] == intr)
1407 return i;
1408 }
1409
1410 return -ENODEV;
1411}
1412
1413static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
1414 const char *irq_name, struct spu_queue *p,
1415 irq_handler_t handler)
1416{
1417 unsigned long herr;
1418 int index;
1419
1420 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1421 if (herr)
1422 return -EINVAL;
1423
1424 index = find_devino_index(dev, ip, p->devino);
1425 if (index < 0)
1426 return index;
1427
1428 p->irq = dev->irqs[index];
1429
1430 sprintf(p->irq_name, "%s-%d", irq_name, index);
1431
1432 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
1433 p->irq_name, p);
1434}
1435
1436static struct kmem_cache *queue_cache[2];
1437
1438static void *new_queue(unsigned long q_type)
1439{
1440 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1441}
1442
1443static void free_queue(void *p, unsigned long q_type)
1444{
1445 return kmem_cache_free(queue_cache[q_type - 1], p);
1446}
1447
1448static int queue_cache_init(void)
1449{
1450 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1451 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1452 kmem_cache_create("cwq_queue",
1453 (MAU_NUM_ENTRIES *
1454 MAU_ENTRY_SIZE),
1455 MAU_ENTRY_SIZE, 0, NULL);
1456 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1457 return -ENOMEM;
1458
1459 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1460 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1461 kmem_cache_create("cwq_queue",
1462 (CWQ_NUM_ENTRIES *
1463 CWQ_ENTRY_SIZE),
1464 CWQ_ENTRY_SIZE, 0, NULL);
1465 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1466 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1467 return -ENOMEM;
1468 }
1469 return 0;
1470}
1471
1472static void queue_cache_destroy(void)
1473{
1474 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1475 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1476}
1477
1478static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1479{
1480 cpumask_var_t old_allowed;
1481 unsigned long hv_ret;
1482
1483 if (cpumask_empty(&p->sharing))
1484 return -EINVAL;
1485
1486 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1487 return -ENOMEM;
1488
1489 cpumask_copy(old_allowed, &current->cpus_allowed);
1490
1491 set_cpus_allowed_ptr(current, &p->sharing);
1492
1493 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1494 CWQ_NUM_ENTRIES, &p->qhandle);
1495 if (!hv_ret)
1496 sun4v_ncs_sethead_marker(p->qhandle, 0);
1497
1498 set_cpus_allowed_ptr(current, old_allowed);
1499
1500 free_cpumask_var(old_allowed);
1501
1502 return (hv_ret ? -EINVAL : 0);
1503}
1504
1505static int spu_queue_setup(struct spu_queue *p)
1506{
1507 int err;
1508
1509 p->q = new_queue(p->q_type);
1510 if (!p->q)
1511 return -ENOMEM;
1512
1513 err = spu_queue_register(p, p->q_type);
1514 if (err) {
1515 free_queue(p->q, p->q_type);
1516 p->q = NULL;
1517 }
1518
1519 return err;
1520}
1521
1522static void spu_queue_destroy(struct spu_queue *p)
1523{
1524 unsigned long hv_ret;
1525
1526 if (!p->q)
1527 return;
1528
1529 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1530
1531 if (!hv_ret)
1532 free_queue(p->q, p->q_type);
1533}
1534
1535static void spu_list_destroy(struct list_head *list)
1536{
1537 struct spu_queue *p, *n;
1538
1539 list_for_each_entry_safe(p, n, list, list) {
1540 int i;
1541
1542 for (i = 0; i < NR_CPUS; i++) {
1543 if (cpu_to_cwq[i] == p)
1544 cpu_to_cwq[i] = NULL;
1545 }
1546
1547 if (p->irq) {
1548 free_irq(p->irq, p);
1549 p->irq = 0;
1550 }
1551 spu_queue_destroy(p);
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555}
1556
1557/* Walk the backward arcs of a CWQ 'exec-unit' node,
1558 * gathering cpu membership information.
1559 */
1560static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1561 struct of_device *dev,
1562 u64 node, struct spu_queue *p,
1563 struct spu_queue **table)
1564{
1565 u64 arc;
1566
1567 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1568 u64 tgt = mdesc_arc_target(mdesc, arc);
1569 const char *name = mdesc_node_name(mdesc, tgt);
1570 const u64 *id;
1571
1572 if (strcmp(name, "cpu"))
1573 continue;
1574 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1575 if (table[*id] != NULL) {
1576 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
1577 dev->node->full_name);
1578 return -EINVAL;
1579 }
1580 cpu_set(*id, p->sharing);
1581 table[*id] = p;
1582 }
1583 return 0;
1584}
1585
1586/* Process an 'exec-unit' MDESC node of type 'cwq'. */
1587static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1588 struct of_device *dev, struct mdesc_handle *mdesc,
1589 u64 node, const char *iname, unsigned long q_type,
1590 irq_handler_t handler, struct spu_queue **table)
1591{
1592 struct spu_queue *p;
1593 int err;
1594
1595 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1596 if (!p) {
1597 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
1598 dev->node->full_name);
1599 return -ENOMEM;
1600 }
1601
1602 cpus_clear(p->sharing);
1603 spin_lock_init(&p->lock);
1604 p->q_type = q_type;
1605 INIT_LIST_HEAD(&p->jobs);
1606 list_add(&p->list, list);
1607
1608 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1609 if (err)
1610 return err;
1611
1612 err = spu_queue_setup(p);
1613 if (err)
1614 return err;
1615
1616 return spu_map_ino(dev, ip, iname, p, handler);
1617}
1618
1619static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
1620 struct spu_mdesc_info *ip, struct list_head *list,
1621 const char *exec_name, unsigned long q_type,
1622 irq_handler_t handler, struct spu_queue **table)
1623{
1624 int err = 0;
1625 u64 node;
1626
1627 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1628 const char *type;
1629
1630 type = mdesc_get_property(mdesc, node, "type", NULL);
1631 if (!type || strcmp(type, exec_name))
1632 continue;
1633
1634 err = handle_exec_unit(ip, list, dev, mdesc, node,
1635 exec_name, q_type, handler, table);
1636 if (err) {
1637 spu_list_destroy(list);
1638 break;
1639 }
1640 }
1641
1642 return err;
1643}
1644
1645static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1646 struct spu_mdesc_info *ip)
1647{
1648 const u64 *intr, *ino;
1649 int intr_len, ino_len;
1650 int i;
1651
1652 intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
1653 if (!intr)
1654 return -ENODEV;
1655
1656 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1657 if (!ino)
1658 return -ENODEV;
1659
1660 if (intr_len != ino_len)
1661 return -EINVAL;
1662
1663 ip->num_intrs = intr_len / sizeof(u64);
1664 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1665 ip->num_intrs),
1666 GFP_KERNEL);
1667 if (!ip->ino_table)
1668 return -ENOMEM;
1669
1670 for (i = 0; i < ip->num_intrs; i++) {
1671 struct ino_blob *b = &ip->ino_table[i];
1672 b->intr = intr[i];
1673 b->ino = ino[i];
1674 }
1675
1676 return 0;
1677}
1678
1679static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1680 struct of_device *dev,
1681 struct spu_mdesc_info *ip,
1682 const char *node_name)
1683{
1684 const unsigned int *reg;
1685 u64 node;
1686
1687 reg = of_get_property(dev->node, "reg", NULL);
1688 if (!reg)
1689 return -ENODEV;
1690
1691 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1692 const char *name;
1693 const u64 *chdl;
1694
1695 name = mdesc_get_property(mdesc, node, "name", NULL);
1696 if (!name || strcmp(name, node_name))
1697 continue;
1698 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1699 if (!chdl || (*chdl != *reg))
1700 continue;
1701 ip->cfg_handle = *chdl;
1702 return get_irq_props(mdesc, node, ip);
1703 }
1704
1705 return -ENODEV;
1706}
1707
1708static unsigned long n2_spu_hvapi_major;
1709static unsigned long n2_spu_hvapi_minor;
1710
1711static int __devinit n2_spu_hvapi_register(void)
1712{
1713 int err;
1714
1715 n2_spu_hvapi_major = 2;
1716 n2_spu_hvapi_minor = 0;
1717
1718 err = sun4v_hvapi_register(HV_GRP_NCS,
1719 n2_spu_hvapi_major,
1720 &n2_spu_hvapi_minor);
1721
1722 if (!err)
1723 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1724 n2_spu_hvapi_major,
1725 n2_spu_hvapi_minor);
1726
1727 return err;
1728}
1729
1730static void n2_spu_hvapi_unregister(void)
1731{
1732 sun4v_hvapi_unregister(HV_GRP_NCS);
1733}
1734
1735static int global_ref;
1736
1737static int __devinit grab_global_resources(void)
1738{
1739 int err = 0;
1740
1741 mutex_lock(&spu_lock);
1742
1743 if (global_ref++)
1744 goto out;
1745
1746 err = n2_spu_hvapi_register();
1747 if (err)
1748 goto out;
1749
1750 err = queue_cache_init();
1751 if (err)
1752 goto out_hvapi_release;
1753
1754 err = -ENOMEM;
1755 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1756 GFP_KERNEL);
1757 if (!cpu_to_cwq)
1758 goto out_queue_cache_destroy;
1759
1760 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1761 GFP_KERNEL);
1762 if (!cpu_to_mau)
1763 goto out_free_cwq_table;
1764
1765 err = 0;
1766
1767out:
1768 if (err)
1769 global_ref--;
1770 mutex_unlock(&spu_lock);
1771 return err;
1772
1773out_free_cwq_table:
1774 kfree(cpu_to_cwq);
1775 cpu_to_cwq = NULL;
1776
1777out_queue_cache_destroy:
1778 queue_cache_destroy();
1779
1780out_hvapi_release:
1781 n2_spu_hvapi_unregister();
1782 goto out;
1783}
1784
1785static void release_global_resources(void)
1786{
1787 mutex_lock(&spu_lock);
1788 if (!--global_ref) {
1789 kfree(cpu_to_cwq);
1790 cpu_to_cwq = NULL;
1791
1792 kfree(cpu_to_mau);
1793 cpu_to_mau = NULL;
1794
1795 queue_cache_destroy();
1796 n2_spu_hvapi_unregister();
1797 }
1798 mutex_unlock(&spu_lock);
1799}
1800
1801static struct n2_crypto * __devinit alloc_n2cp(void)
1802{
1803 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1804
1805 if (np)
1806 INIT_LIST_HEAD(&np->cwq_list);
1807
1808 return np;
1809}
1810
1811static void free_n2cp(struct n2_crypto *np)
1812{
1813 if (np->cwq_info.ino_table) {
1814 kfree(np->cwq_info.ino_table);
1815 np->cwq_info.ino_table = NULL;
1816 }
1817
1818 kfree(np);
1819}
1820
1821static void __devinit n2_spu_driver_version(void)
1822{
1823 static int n2_spu_version_printed;
1824
1825 if (n2_spu_version_printed++ == 0)
1826 pr_info("%s", version);
1827}
1828
1829static int __devinit n2_crypto_probe(struct of_device *dev,
1830 const struct of_device_id *match)
1831{
1832 struct mdesc_handle *mdesc;
1833 const char *full_name;
1834 struct n2_crypto *np;
1835 int err;
1836
1837 n2_spu_driver_version();
1838
1839 full_name = dev->node->full_name;
1840 pr_info("Found N2CP at %s\n", full_name);
1841
1842 np = alloc_n2cp();
1843 if (!np) {
1844 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
1845 full_name);
1846 return -ENOMEM;
1847 }
1848
1849 err = grab_global_resources();
1850 if (err) {
1851 dev_err(&dev->dev, "%s: Unable to grab "
1852 "global resources.\n", full_name);
1853 goto out_free_n2cp;
1854 }
1855
1856 mdesc = mdesc_grab();
1857
1858 if (!mdesc) {
1859 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1860 full_name);
1861 err = -ENODEV;
1862 goto out_free_global;
1863 }
1864 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1865 if (err) {
1866 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1867 full_name);
1868 mdesc_release(mdesc);
1869 goto out_free_global;
1870 }
1871
1872 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1873 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1874 cpu_to_cwq);
1875 mdesc_release(mdesc);
1876
1877 if (err) {
1878 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
1879 full_name);
1880 goto out_free_global;
1881 }
1882
1883 err = n2_register_algs();
1884 if (err) {
1885 dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
1886 full_name);
1887 goto out_free_spu_list;
1888 }
1889
1890 dev_set_drvdata(&dev->dev, np);
1891
1892 return 0;
1893
1894out_free_spu_list:
1895 spu_list_destroy(&np->cwq_list);
1896
1897out_free_global:
1898 release_global_resources();
1899
1900out_free_n2cp:
1901 free_n2cp(np);
1902
1903 return err;
1904}
1905
1906static int __devexit n2_crypto_remove(struct of_device *dev)
1907{
1908 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
1909
1910 n2_unregister_algs();
1911
1912 spu_list_destroy(&np->cwq_list);
1913
1914 release_global_resources();
1915
1916 free_n2cp(np);
1917
1918 return 0;
1919}
1920
1921static struct n2_mau * __devinit alloc_ncp(void)
1922{
1923 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
1924
1925 if (mp)
1926 INIT_LIST_HEAD(&mp->mau_list);
1927
1928 return mp;
1929}
1930
1931static void free_ncp(struct n2_mau *mp)
1932{
1933 if (mp->mau_info.ino_table) {
1934 kfree(mp->mau_info.ino_table);
1935 mp->mau_info.ino_table = NULL;
1936 }
1937
1938 kfree(mp);
1939}
1940
1941static int __devinit n2_mau_probe(struct of_device *dev,
1942 const struct of_device_id *match)
1943{
1944 struct mdesc_handle *mdesc;
1945 const char *full_name;
1946 struct n2_mau *mp;
1947 int err;
1948
1949 n2_spu_driver_version();
1950
1951 full_name = dev->node->full_name;
1952 pr_info("Found NCP at %s\n", full_name);
1953
1954 mp = alloc_ncp();
1955 if (!mp) {
1956 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
1957 full_name);
1958 return -ENOMEM;
1959 }
1960
1961 err = grab_global_resources();
1962 if (err) {
1963 dev_err(&dev->dev, "%s: Unable to grab "
1964 "global resources.\n", full_name);
1965 goto out_free_ncp;
1966 }
1967
1968 mdesc = mdesc_grab();
1969
1970 if (!mdesc) {
1971 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1972 full_name);
1973 err = -ENODEV;
1974 goto out_free_global;
1975 }
1976
1977 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
1978 if (err) {
1979 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1980 full_name);
1981 mdesc_release(mdesc);
1982 goto out_free_global;
1983 }
1984
1985 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
1986 "mau", HV_NCS_QTYPE_MAU, mau_intr,
1987 cpu_to_mau);
1988 mdesc_release(mdesc);
1989
1990 if (err) {
1991 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
1992 full_name);
1993 goto out_free_global;
1994 }
1995
1996 dev_set_drvdata(&dev->dev, mp);
1997
1998 return 0;
1999
2000out_free_global:
2001 release_global_resources();
2002
2003out_free_ncp:
2004 free_ncp(mp);
2005
2006 return err;
2007}
2008
2009static int __devexit n2_mau_remove(struct of_device *dev)
2010{
2011 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2012
2013 spu_list_destroy(&mp->mau_list);
2014
2015 release_global_resources();
2016
2017 free_ncp(mp);
2018
2019 return 0;
2020}
2021
2022static struct of_device_id n2_crypto_match[] = {
2023 {
2024 .name = "n2cp",
2025 .compatible = "SUNW,n2-cwq",
2026 },
2027 {
2028 .name = "n2cp",
2029 .compatible = "SUNW,vf-cwq",
2030 },
2031 {},
2032};
2033
2034MODULE_DEVICE_TABLE(of, n2_crypto_match);
2035
2036static struct of_platform_driver n2_crypto_driver = {
2037 .name = "n2cp",
2038 .match_table = n2_crypto_match,
2039 .probe = n2_crypto_probe,
2040 .remove = __devexit_p(n2_crypto_remove),
2041};
2042
2043static struct of_device_id n2_mau_match[] = {
2044 {
2045 .name = "ncp",
2046 .compatible = "SUNW,n2-mau",
2047 },
2048 {
2049 .name = "ncp",
2050 .compatible = "SUNW,vf-mau",
2051 },
2052 {},
2053};
2054
2055MODULE_DEVICE_TABLE(of, n2_mau_match);
2056
2057static struct of_platform_driver n2_mau_driver = {
2058 .name = "ncp",
2059 .match_table = n2_mau_match,
2060 .probe = n2_mau_probe,
2061 .remove = __devexit_p(n2_mau_remove),
2062};
2063
2064static int __init n2_init(void)
2065{
2066 int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
2067
2068 if (!err) {
2069 err = of_register_driver(&n2_mau_driver, &of_bus_type);
2070 if (err)
2071 of_unregister_driver(&n2_crypto_driver);
2072 }
2073 return err;
2074}
2075
2076static void __exit n2_exit(void)
2077{
2078 of_unregister_driver(&n2_mau_driver);
2079 of_unregister_driver(&n2_crypto_driver);
2080}
2081
2082module_init(n2_init);
2083module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644
index 000000000000..4bcbbeae98f5
--- /dev/null
+++ b/drivers/crypto/n2_core.h
@@ -0,0 +1,231 @@
1#ifndef _N2_CORE_H
2#define _N2_CORE_H
3
4#ifndef __ASSEMBLY__
5
6struct ino_blob {
7 u64 intr;
8 u64 ino;
9};
10
11struct spu_mdesc_info {
12 u64 cfg_handle;
13 struct ino_blob *ino_table;
14 int num_intrs;
15};
16
17struct n2_crypto {
18 struct spu_mdesc_info cwq_info;
19 struct list_head cwq_list;
20};
21
22struct n2_mau {
23 struct spu_mdesc_info mau_info;
24 struct list_head mau_list;
25};
26
27#define CWQ_ENTRY_SIZE 64
28#define CWQ_NUM_ENTRIES 64
29
30#define MAU_ENTRY_SIZE 64
31#define MAU_NUM_ENTRIES 64
32
33struct cwq_initial_entry {
34 u64 control;
35 u64 src_addr;
36 u64 auth_key_addr;
37 u64 auth_iv_addr;
38 u64 final_auth_state_addr;
39 u64 enc_key_addr;
40 u64 enc_iv_addr;
41 u64 dest_addr;
42};
43
44struct cwq_ext_entry {
45 u64 len;
46 u64 src_addr;
47 u64 resv1;
48 u64 resv2;
49 u64 resv3;
50 u64 resv4;
51 u64 resv5;
52 u64 resv6;
53};
54
55struct cwq_final_entry {
56 u64 control;
57 u64 src_addr;
58 u64 resv1;
59 u64 resv2;
60 u64 resv3;
61 u64 resv4;
62 u64 resv5;
63 u64 resv6;
64};
65
66#define CONTROL_LEN 0x000000000000ffffULL
67#define CONTROL_LEN_SHIFT 0
68#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
69#define CONTROL_HMAC_KEY_LEN_SHIFT 16
70#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
71#define CONTROL_ENC_TYPE_SHIFT 24
72#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
73#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
74#define ENC_TYPE_ALG_DES 0x08ULL
75#define ENC_TYPE_ALG_3DES 0x0cULL
76#define ENC_TYPE_ALG_AES128 0x10ULL
77#define ENC_TYPE_ALG_AES192 0x14ULL
78#define ENC_TYPE_ALG_AES256 0x18ULL
79#define ENC_TYPE_ALG_RESERVED 0x1cULL
80#define ENC_TYPE_ALG_MASK 0x1cULL
81#define ENC_TYPE_CHAINING_ECB 0x00ULL
82#define ENC_TYPE_CHAINING_CBC 0x01ULL
83#define ENC_TYPE_CHAINING_CFB 0x02ULL
84#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
85#define ENC_TYPE_CHAINING_MASK 0x03ULL
86#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
87#define CONTROL_AUTH_TYPE_SHIFT 32
88#define AUTH_TYPE_RESERVED 0x00ULL
89#define AUTH_TYPE_MD5 0x01ULL
90#define AUTH_TYPE_SHA1 0x02ULL
91#define AUTH_TYPE_SHA256 0x03ULL
92#define AUTH_TYPE_CRC32 0x04ULL
93#define AUTH_TYPE_HMAC_MD5 0x05ULL
94#define AUTH_TYPE_HMAC_SHA1 0x06ULL
95#define AUTH_TYPE_HMAC_SHA256 0x07ULL
96#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
97#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
98#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
99#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
100#define CONTROL_STRAND 0x000000e000000000ULL
101#define CONTROL_STRAND_SHIFT 37
102#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
103#define CONTROL_HASH_LEN_SHIFT 40
104#define CONTROL_INTERRUPT 0x0001000000000000ULL
105#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
106#define CONTROL_RESERVED 0x001c000000000000ULL
107#define CONTROL_HV_DONE 0x0004000000000000ULL
108#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
109#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
110#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
111#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
112#define CONTROL_ENCRYPT 0x0080000000000000ULL
113#define CONTROL_OPCODE 0xff00000000000000ULL
114#define CONTROL_OPCODE_SHIFT 56
115#define OPCODE_INPLACE_BIT 0x80ULL
116#define OPCODE_SSL_KEYBLOCK 0x10ULL
117#define OPCODE_COPY 0x20ULL
118#define OPCODE_ENCRYPT 0x40ULL
119#define OPCODE_AUTH_MAC 0x41ULL
120
121#endif /* !(__ASSEMBLY__) */
122
123/* NCS v2.0 hypervisor interfaces */
124#define HV_NCS_QTYPE_MAU 0x01
125#define HV_NCS_QTYPE_CWQ 0x02
126
127/* ncs_qconf()
128 * TRAP: HV_FAST_TRAP
129 * FUNCTION: HV_FAST_NCS_QCONF
130 * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
131 * ARG1: Real address of queue, or handle for unconfigure
132 * ARG2: Number of entries in queue, zero for unconfigure
133 * RET0: status
134 * RET1: queue handle
135 *
136 * Configure a queue in the stream processing unit.
137 *
138 * The real address given as the base must be 64-byte
139 * aligned.
140 *
141 * The queue size can range from a minimum of 2 to a maximum
142 * of 64. The queue size must be a power of two.
143 *
144 * To unconfigure a queue, specify a length of zero and place
145 * the queue handle into ARG1.
146 *
147 * On configure success the hypervisor will set the FIRST, HEAD,
148 * and TAIL registers to the address of the first entry in the
149 * queue. The LAST register will be set to point to the last
150 * entry in the queue.
151 */
152#define HV_FAST_NCS_QCONF 0x111
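As an editorial illustration of the calling convention documented above (a sketch only, relying on the sun4v_ncs_qconf() wrapper declared near the end of this header; the helper names and the queue_mem argument are hypothetical), configuring and tearing down a CWQ could look like:

/* Editorial sketch, not driver code: "queue_mem" stands in for
 * 64-byte aligned, physically contiguous queue memory owned by
 * the caller.
 */
static int example_cwq_configure(void *queue_mem, unsigned long *qhandle)
{
	unsigned long hv_ret;

	/* Entry count must be a power of two between 2 and 64. */
	hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(queue_mem),
				 CWQ_NUM_ENTRIES, qhandle);
	return (hv_ret == HV_EOK) ? 0 : -EINVAL;
}

static void example_cwq_unconfigure(unsigned long qhandle)
{
	/* Zero entries, with the queue handle passed in place of the
	 * real address, unconfigures the queue.
	 */
	sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
}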
153
154/* ncs_qinfo()
155 * TRAP: HV_FAST_TRAP
156 * FUNCTION: HV_FAST_NCS_QINFO
157 * ARG0: Queue handle
158 * RET0: status
159 * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
160 * RET2: Queue base address
161 * RET3: Number of entries
162 */
163#define HV_FAST_NCS_QINFO 0x112
164
165/* ncs_gethead()
166 * TRAP: HV_FAST_TRAP
167 * FUNCTION: HV_FAST_NCS_GETHEAD
168 * ARG0: Queue handle
169 * RET0: status
170 * RET1: queue head offset
171 */
172#define HV_FAST_NCS_GETHEAD 0x113
173
174/* ncs_gettail()
175 * TRAP: HV_FAST_TRAP
176 * FUNCTION: HV_FAST_NCS_GETTAIL
177 * ARG0: Queue handle
178 * RET0: status
179 * RET1: queue tail offset
180 */
181#define HV_FAST_NCS_GETTAIL 0x114
182
183/* ncs_settail()
184 * TRAP: HV_FAST_TRAP
185 * FUNCTION: HV_FAST_NCS_SETTAIL
186 * ARG0: Queue handle
187 * ARG1: New tail offset
188 * RET0: status
189 */
190#define HV_FAST_NCS_SETTAIL 0x115
191
192/* ncs_qhandle_to_devino()
193 * TRAP: HV_FAST_TRAP
194 * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
195 * ARG0: Queue handle
196 * RET0: status
197 * RET1: devino
198 */
199#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
200
201/* ncs_sethead_marker()
202 * TRAP: HV_FAST_TRAP
203 * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
204 * ARG0: Queue handle
205 * ARG1: New head offset
206 * RET0: status
207 */
208#define HV_FAST_NCS_SETHEAD_MARKER 0x117
209
210#ifndef __ASSEMBLY__
211extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
212 unsigned long queue_ra,
213 unsigned long num_entries,
214 unsigned long *qhandle);
215extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
216 unsigned long *queue_type,
217 unsigned long *queue_ra,
218 unsigned long *num_entries);
219extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
220 unsigned long *head);
221extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
222 unsigned long *tail);
223extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
224 unsigned long tail);
225extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
226 unsigned long *devino);
227extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
228 unsigned long head);
229#endif /* !(__ASSEMBLY__) */
230
231#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 000000000000..8b034337793f
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,1259 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for OMAP SHA1/MD5 HW acceleration.
5 *
6 * Copyright (c) 2010 Nokia Corporation
7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from the old omap-sha1-md5.c driver.
14 */
15
16#define pr_fmt(fmt) "%s: " fmt, __func__
17
18#include <linux/version.h>
19#include <linux/err.h>
20#include <linux/device.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/kernel.h>
26#include <linux/clk.h>
27#include <linux/irq.h>
28#include <linux/io.h>
29#include <linux/platform_device.h>
30#include <linux/scatterlist.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/crypto.h>
34#include <linux/cryptohash.h>
35#include <crypto/scatterwalk.h>
36#include <crypto/algapi.h>
37#include <crypto/sha.h>
38#include <crypto/hash.h>
39#include <crypto/internal/hash.h>
40
41#include <plat/cpu.h>
42#include <plat/dma.h>
43#include <mach/irqs.h>
44
45#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
46#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
47
48#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
49#define MD5_DIGEST_SIZE 16
50
51#define SHA_REG_DIGCNT 0x14
52
53#define SHA_REG_CTRL 0x18
54#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
55#define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
56#define SHA_REG_CTRL_ALGO_CONST (1 << 3)
57#define SHA_REG_CTRL_ALGO (1 << 2)
58#define SHA_REG_CTRL_INPUT_READY (1 << 1)
59#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
60
61#define SHA_REG_REV 0x5C
62#define SHA_REG_REV_MAJOR 0xF0
63#define SHA_REG_REV_MINOR 0x0F
64
65#define SHA_REG_MASK 0x60
66#define SHA_REG_MASK_DMA_EN (1 << 3)
67#define SHA_REG_MASK_IT_EN (1 << 2)
68#define SHA_REG_MASK_SOFTRESET (1 << 1)
69#define SHA_REG_AUTOIDLE (1 << 0)
70
71#define SHA_REG_SYSSTATUS 0x64
72#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
73
74#define DEFAULT_TIMEOUT_INTERVAL HZ
75
76#define FLAGS_FIRST 0x0001
77#define FLAGS_FINUP 0x0002
78#define FLAGS_FINAL 0x0004
79#define FLAGS_FAST 0x0008
80#define FLAGS_SHA1 0x0010
81#define FLAGS_DMA_ACTIVE 0x0020
82#define FLAGS_OUTPUT_READY 0x0040
83#define FLAGS_CLEAN 0x0080
84#define FLAGS_INIT 0x0100
85#define FLAGS_CPU 0x0200
86#define FLAGS_HMAC 0x0400
87
88/* 3rd byte */
89#define FLAGS_BUSY 16
90
91#define OP_UPDATE 1
92#define OP_FINAL 2
93
94struct omap_sham_dev;
95
96struct omap_sham_reqctx {
97 struct omap_sham_dev *dd;
98 unsigned long flags;
99 unsigned long op;
100
101 size_t digcnt;
102 u8 *buffer;
103 size_t bufcnt;
104 size_t buflen;
105 dma_addr_t dma_addr;
106
107 /* walk state */
108 struct scatterlist *sg;
109 unsigned int offset; /* offset in current sg */
110 unsigned int total; /* total request */
111};
112
113struct omap_sham_hmac_ctx {
114 struct crypto_shash *shash;
115 u8 ipad[SHA1_MD5_BLOCK_SIZE];
116 u8 opad[SHA1_MD5_BLOCK_SIZE];
117};
118
119struct omap_sham_ctx {
120 struct omap_sham_dev *dd;
121
122 unsigned long flags;
123
124 /* fallback stuff */
125 struct crypto_shash *fallback;
126
127 struct omap_sham_hmac_ctx base[0];
128};
129
130#define OMAP_SHAM_QUEUE_LENGTH 1
131
132struct omap_sham_dev {
133 struct list_head list;
134 unsigned long phys_base;
135 struct device *dev;
136 void __iomem *io_base;
137 int irq;
138 struct clk *iclk;
139 spinlock_t lock;
140 int dma;
141 int dma_lch;
142 struct tasklet_struct done_task;
143 struct tasklet_struct queue_task;
144
145 unsigned long flags;
146 struct crypto_queue queue;
147 struct ahash_request *req;
148};
149
150struct omap_sham_drv {
151 struct list_head dev_list;
152 spinlock_t lock;
153 unsigned long flags;
154};
155
156static struct omap_sham_drv sham = {
157 .dev_list = LIST_HEAD_INIT(sham.dev_list),
158 .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
159};
160
161static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
162{
163 return __raw_readl(dd->io_base + offset);
164}
165
166static inline void omap_sham_write(struct omap_sham_dev *dd,
167 u32 offset, u32 value)
168{
169 __raw_writel(value, dd->io_base + offset);
170}
171
172static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
173 u32 value, u32 mask)
174{
175 u32 val;
176
177 val = omap_sham_read(dd, address);
178 val &= ~mask;
179 val |= value;
180 omap_sham_write(dd, address, val);
181}
182
183static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
184{
185 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
186
187 while (!(omap_sham_read(dd, offset) & bit)) {
188 if (time_is_before_jiffies(timeout))
189 return -ETIMEDOUT;
190 }
191
192 return 0;
193}
194
195static void omap_sham_copy_hash(struct ahash_request *req, int out)
196{
197 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
198 u32 *hash = (u32 *)req->result;
199 int i;
200
201 if (likely(ctx->flags & FLAGS_SHA1)) {
202 /* SHA1 results are in big endian */
203 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
204 if (out)
205 hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
206 SHA_REG_DIGEST(i)));
207 else
208 omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
209 cpu_to_be32(hash[i]));
210 } else {
211 /* MD5 results are in little endian */
212 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
213 if (out)
214 hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
215 SHA_REG_DIGEST(i)));
216 else
217 omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
218 cpu_to_le32(hash[i]));
219 }
220}
221
222static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
223 int final, int dma)
224{
225 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
226 u32 val = length << 5, mask;
227
228 if (unlikely(!ctx->digcnt)) {
229
230 clk_enable(dd->iclk);
231
232 if (!(dd->flags & FLAGS_INIT)) {
233 omap_sham_write_mask(dd, SHA_REG_MASK,
234 SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
235
236 if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
237 SHA_REG_SYSSTATUS_RESETDONE))
238 return -ETIMEDOUT;
239
240 dd->flags |= FLAGS_INIT;
241 }
242 } else {
243 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
244 }
245
246 omap_sham_write_mask(dd, SHA_REG_MASK,
247 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
248 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
249 /*
250 * Setting ALGO_CONST only for the first iteration
251 * and CLOSE_HASH only for the last one.
252 */
253 if (ctx->flags & FLAGS_SHA1)
254 val |= SHA_REG_CTRL_ALGO;
255 if (!ctx->digcnt)
256 val |= SHA_REG_CTRL_ALGO_CONST;
257 if (final)
258 val |= SHA_REG_CTRL_CLOSE_HASH;
259
260 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
261 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
262
263 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
264
265 return 0;
266}
267
268static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
269 size_t length, int final)
270{
271 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
272 int err, count, len32;
273 const u32 *buffer = (const u32 *)buf;
274
275 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
276 ctx->digcnt, length, final);
277
278 err = omap_sham_write_ctrl(dd, length, final, 0);
279 if (err)
280 return err;
281
282 if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
283 return -ETIMEDOUT;
284
285 ctx->digcnt += length;
286
287 if (final)
288 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
289
290 len32 = DIV_ROUND_UP(length, sizeof(u32));
291
292 for (count = 0; count < len32; count++)
293 omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
294
295 return -EINPROGRESS;
296}
297
298static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
299 size_t length, int final)
300{
301 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
302 int err, len32;
303
304 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
305 ctx->digcnt, length, final);
306
307 /* flush cache entries related to our page */
308 if (dma_addr == ctx->dma_addr)
309 dma_sync_single_for_device(dd->dev, dma_addr, length,
310 DMA_TO_DEVICE);
311
312 len32 = DIV_ROUND_UP(length, sizeof(u32));
313
314 omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
315 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
316
317 omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
318 dma_addr, 0, 0);
319
320 err = omap_sham_write_ctrl(dd, length, final, 1);
321 if (err)
322 return err;
323
324 ctx->digcnt += length;
325
326 if (final)
327 ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
328
329 dd->flags |= FLAGS_DMA_ACTIVE;
330
331 omap_start_dma(dd->dma_lch);
332
333 return -EINPROGRESS;
334}
335
336static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
337 const u8 *data, size_t length)
338{
339 size_t count = min(length, ctx->buflen - ctx->bufcnt);
340
341 count = min(count, ctx->total);
342 if (count <= 0)
343 return 0;
344 memcpy(ctx->buffer + ctx->bufcnt, data, count);
345 ctx->bufcnt += count;
346
347 return count;
348}
349
350static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
351{
352 size_t count;
353
354 while (ctx->sg) {
355 count = omap_sham_append_buffer(ctx,
356 sg_virt(ctx->sg) + ctx->offset,
357 ctx->sg->length - ctx->offset);
358 if (!count)
359 break;
360 ctx->offset += count;
361 ctx->total -= count;
362 if (ctx->offset == ctx->sg->length) {
363 ctx->sg = sg_next(ctx->sg);
364 if (ctx->sg)
365 ctx->offset = 0;
366 else
367 ctx->total = 0;
368 }
369 }
370
371 return 0;
372}
373
374static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
375{
376 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
377 unsigned int final;
378 size_t count;
379
380 if (!ctx->total)
381 return 0;
382
383 omap_sham_append_sg(ctx);
384
385 final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
386
387 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
388 ctx->bufcnt, ctx->digcnt, final);
389
390 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
391 count = ctx->bufcnt;
392 ctx->bufcnt = 0;
393 return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
394 }
395
396 return 0;
397}
398
399static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
400{
401 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
402 unsigned int length;
403
404 ctx->flags |= FLAGS_FAST;
405
406 length = min(ctx->total, sg_dma_len(ctx->sg));
407 ctx->total = length;
408
409 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
410 dev_err(dd->dev, "dma_map_sg error\n");
411 return -EINVAL;
412 }
413
414 ctx->total -= length;
415
416 return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
417}
418
419static int omap_sham_update_cpu(struct omap_sham_dev *dd)
420{
421 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
422 int bufcnt;
423
424 omap_sham_append_sg(ctx);
425 bufcnt = ctx->bufcnt;
426 ctx->bufcnt = 0;
427
428 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
429}
430
431static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
432{
433 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
434
435 omap_stop_dma(dd->dma_lch);
436 if (ctx->flags & FLAGS_FAST)
437 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
438
439 return 0;
440}
441
442static void omap_sham_cleanup(struct ahash_request *req)
443{
444 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
445 struct omap_sham_dev *dd = ctx->dd;
446 unsigned long flags;
447
448 spin_lock_irqsave(&dd->lock, flags);
449 if (ctx->flags & FLAGS_CLEAN) {
450 spin_unlock_irqrestore(&dd->lock, flags);
451 return;
452 }
453 ctx->flags |= FLAGS_CLEAN;
454 spin_unlock_irqrestore(&dd->lock, flags);
455
456 if (ctx->digcnt)
457 clk_disable(dd->iclk);
458
459 if (ctx->dma_addr)
460 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
461 DMA_TO_DEVICE);
462
463 if (ctx->buffer)
464 free_page((unsigned long)ctx->buffer);
465
466 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
467}
468
469static int omap_sham_init(struct ahash_request *req)
470{
471 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
472 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
473 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
474 struct omap_sham_dev *dd = NULL, *tmp;
475
476 spin_lock_bh(&sham.lock);
477 if (!tctx->dd) {
478 list_for_each_entry(tmp, &sham.dev_list, list) {
479 dd = tmp;
480 break;
481 }
482 tctx->dd = dd;
483 } else {
484 dd = tctx->dd;
485 }
486 spin_unlock_bh(&sham.lock);
487
488 ctx->dd = dd;
489
490 ctx->flags = 0;
491
492 ctx->flags |= FLAGS_FIRST;
493
494 dev_dbg(dd->dev, "init: digest size: %d\n",
495 crypto_ahash_digestsize(tfm));
496
497 if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
498 ctx->flags |= FLAGS_SHA1;
499
500 ctx->bufcnt = 0;
501 ctx->digcnt = 0;
502
503 ctx->buflen = PAGE_SIZE;
504 ctx->buffer = (void *)__get_free_page(
505 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
506 GFP_KERNEL : GFP_ATOMIC);
507 if (!ctx->buffer)
508 return -ENOMEM;
509
510 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
511 DMA_TO_DEVICE);
512 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
513 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
514 free_page((unsigned long)ctx->buffer);
515 return -EINVAL;
516 }
517
518 if (tctx->flags & FLAGS_HMAC) {
519 struct omap_sham_hmac_ctx *bctx = tctx->base;
520
521 memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
522 ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
523 ctx->flags |= FLAGS_HMAC;
524 }
525
526 return 0;
527
528}
529
530static int omap_sham_update_req(struct omap_sham_dev *dd)
531{
532 struct ahash_request *req = dd->req;
533 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
534 int err;
535
536 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
537 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
538
539 if (ctx->flags & FLAGS_CPU)
540 err = omap_sham_update_cpu(dd);
541 else if (ctx->flags & FLAGS_FAST)
542 err = omap_sham_update_dma_fast(dd);
543 else
544 err = omap_sham_update_dma_slow(dd);
545
546	/* wait for dma completion before we can take more data */
547 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
548
549 return err;
550}
551
552static int omap_sham_final_req(struct omap_sham_dev *dd)
553{
554 struct ahash_request *req = dd->req;
555 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
556 int err = 0, use_dma = 1;
557
558 if (ctx->bufcnt <= 64)
559 /* faster to handle last block with cpu */
560 use_dma = 0;
561
562 if (use_dma)
563 err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
564 else
565 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
566
567 ctx->bufcnt = 0;
568
569 if (err != -EINPROGRESS)
570 omap_sham_cleanup(req);
571
572 dev_dbg(dd->dev, "final_req: err: %d\n", err);
573
574 return err;
575}
576
577static int omap_sham_finish_req_hmac(struct ahash_request *req)
578{
579 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
580 struct omap_sham_hmac_ctx *bctx = tctx->base;
581 int bs = crypto_shash_blocksize(bctx->shash);
582 int ds = crypto_shash_digestsize(bctx->shash);
583 struct {
584 struct shash_desc shash;
585 char ctx[crypto_shash_descsize(bctx->shash)];
586 } desc;
587
588 desc.shash.tfm = bctx->shash;
589 desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
590
591 return crypto_shash_init(&desc.shash) ?:
592 crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
593 crypto_shash_finup(&desc.shash, req->result, ds, req->result);
594}
595
596static void omap_sham_finish_req(struct ahash_request *req, int err)
597{
598 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
599
600 if (!err) {
601 omap_sham_copy_hash(ctx->dd->req, 1);
602 if (ctx->flags & FLAGS_HMAC)
603 err = omap_sham_finish_req_hmac(req);
604 }
605
606 if (ctx->flags & FLAGS_FINAL)
607 omap_sham_cleanup(req);
608
609 clear_bit(FLAGS_BUSY, &ctx->dd->flags);
610
611 if (req->base.complete)
612 req->base.complete(&req->base, err);
613}
614
615static int omap_sham_handle_queue(struct omap_sham_dev *dd)
616{
617 struct crypto_async_request *async_req, *backlog;
618 struct omap_sham_reqctx *ctx;
619 struct ahash_request *req, *prev_req;
620 unsigned long flags;
621 int err = 0;
622
623 if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
624 return 0;
625
626 spin_lock_irqsave(&dd->lock, flags);
627 backlog = crypto_get_backlog(&dd->queue);
628 async_req = crypto_dequeue_request(&dd->queue);
629 if (!async_req)
630 clear_bit(FLAGS_BUSY, &dd->flags);
631 spin_unlock_irqrestore(&dd->lock, flags);
632
633 if (!async_req)
634 return 0;
635
636 if (backlog)
637 backlog->complete(backlog, -EINPROGRESS);
638
639 req = ahash_request_cast(async_req);
640
641 prev_req = dd->req;
642 dd->req = req;
643
644 ctx = ahash_request_ctx(req);
645
646 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
647 ctx->op, req->nbytes);
648
649 if (req != prev_req && ctx->digcnt)
650 /* request has changed - restore hash */
651 omap_sham_copy_hash(req, 0);
652
653 if (ctx->op == OP_UPDATE) {
654 err = omap_sham_update_req(dd);
655 if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
656 /* no final() after finup() */
657 err = omap_sham_final_req(dd);
658 } else if (ctx->op == OP_FINAL) {
659 err = omap_sham_final_req(dd);
660 }
661
662 if (err != -EINPROGRESS) {
663 /* done_task will not finish it, so do it here */
664 omap_sham_finish_req(req, err);
665 tasklet_schedule(&dd->queue_task);
666 }
667
668 dev_dbg(dd->dev, "exit, err: %d\n", err);
669
670 return err;
671}
672
673static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
674{
675 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
676 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
677 struct omap_sham_dev *dd = tctx->dd;
678 unsigned long flags;
679 int err;
680
681 ctx->op = op;
682
683 spin_lock_irqsave(&dd->lock, flags);
684 err = ahash_enqueue_request(&dd->queue, req);
685 spin_unlock_irqrestore(&dd->lock, flags);
686
687 omap_sham_handle_queue(dd);
688
689 return err;
690}
691
692static int omap_sham_update(struct ahash_request *req)
693{
694 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
695
696 if (!req->nbytes)
697 return 0;
698
699 ctx->total = req->nbytes;
700 ctx->sg = req->src;
701 ctx->offset = 0;
702
703 if (ctx->flags & FLAGS_FINUP) {
704 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
705			/*
706			 * OMAP HW accel works only with buffers >= 9 bytes;
707			 * we will switch to the bypass (SW fallback) in final(),
708			 * which has the same request and data.
709			 */
710 omap_sham_append_sg(ctx);
711 return 0;
712 } else if (ctx->bufcnt + ctx->total <= 64) {
713 ctx->flags |= FLAGS_CPU;
714 } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
715			/* maybe we can use faster functions */
716 int aligned = IS_ALIGNED((u32)ctx->sg->offset,
717 sizeof(u32));
718
719 if (aligned && (ctx->flags & FLAGS_FIRST))
720 /* digest: first and final */
721 ctx->flags |= FLAGS_FAST;
722
723 ctx->flags &= ~FLAGS_FIRST;
724 }
725 } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
726		/* if not finup -> not fast */
727 omap_sham_append_sg(ctx);
728 return 0;
729 }
730
731 return omap_sham_enqueue(req, OP_UPDATE);
732}
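A condensed sketch of the path selection omap_sham_update() makes above; the enum and helper are hypothetical reading aids, not driver code, and the thresholds (9 bytes minimum for the HW, 64 bytes for the CPU/PIO cutoff, the PAGE_SIZE staging buffer) are taken from the code as written.

/* Hypothetical summary of update() routing, for illustration only. */
enum sham_path { SHAM_BUFFER_ONLY, SHAM_BYPASS, SHAM_CPU, SHAM_DMA_FAST, SHAM_DMA_SLOW };

static enum sham_path sham_pick_path(size_t digcnt, size_t bufcnt, size_t total,
				     size_t buflen, int finup, int first,
				     int single_aligned_sg)
{
	if (!finup)
		return (bufcnt + total <= buflen) ?
			SHAM_BUFFER_ONLY :	/* just append to the page buffer */
			SHAM_DMA_SLOW;		/* enqueue; DMA out of the page buffer */

	if (digcnt + bufcnt + total < 9)
		return SHAM_BYPASS;		/* too small for the HW; final() uses the SW fallback */
	if (bufcnt + total <= 64)
		return SHAM_CPU;		/* PIO the last block via SHA_REG_DIN */
	if (!bufcnt && single_aligned_sg && first)
		return SHAM_DMA_FAST;		/* digest in one go, DMA straight from the request sg */
	return SHAM_DMA_SLOW;
}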
733
734static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
735 const u8 *data, unsigned int len, u8 *out)
736{
737 struct {
738 struct shash_desc shash;
739 char ctx[crypto_shash_descsize(shash)];
740 } desc;
741
742 desc.shash.tfm = shash;
743 desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
744
745 return crypto_shash_digest(&desc.shash, data, len, out);
746}
747
748static int omap_sham_final_shash(struct ahash_request *req)
749{
750 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
751 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
752
753 return omap_sham_shash_digest(tctx->fallback, req->base.flags,
754 ctx->buffer, ctx->bufcnt, req->result);
755}
756
757static int omap_sham_final(struct ahash_request *req)
758{
759 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
760 int err = 0;
761
762 ctx->flags |= FLAGS_FINUP;
763
764	/* OMAP HW accel works only with buffers >= 9 bytes */
765	/* HMAC is always >= 9 bytes because of the ipad */
766 if ((ctx->digcnt + ctx->bufcnt) < 9)
767 err = omap_sham_final_shash(req);
768 else if (ctx->bufcnt)
769 return omap_sham_enqueue(req, OP_FINAL);
770
771 omap_sham_cleanup(req);
772
773 return err;
774}
775
776static int omap_sham_finup(struct ahash_request *req)
777{
778 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
779 int err1, err2;
780
781 ctx->flags |= FLAGS_FINUP;
782
783 err1 = omap_sham_update(req);
784 if (err1 == -EINPROGRESS)
785 return err1;
786 /*
787	 * final() always has to be called to clean up resources,
788	 * even if update() failed, except for EINPROGRESS
789 */
790 err2 = omap_sham_final(req);
791
792 return err1 ?: err2;
793}
794
795static int omap_sham_digest(struct ahash_request *req)
796{
797 return omap_sham_init(req) ?: omap_sham_finup(req);
798}
799
800static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
801 unsigned int keylen)
802{
803 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
804 struct omap_sham_hmac_ctx *bctx = tctx->base;
805 int bs = crypto_shash_blocksize(bctx->shash);
806 int ds = crypto_shash_digestsize(bctx->shash);
807 int err, i;
808 err = crypto_shash_setkey(tctx->fallback, key, keylen);
809 if (err)
810 return err;
811
812 if (keylen > bs) {
813 err = omap_sham_shash_digest(bctx->shash,
814 crypto_shash_get_flags(bctx->shash),
815 key, keylen, bctx->ipad);
816 if (err)
817 return err;
818 keylen = ds;
819 } else {
820 memcpy(bctx->ipad, key, keylen);
821 }
822
823 memset(bctx->ipad + keylen, 0, bs - keylen);
824 memcpy(bctx->opad, bctx->ipad, bs);
825
826 for (i = 0; i < bs; i++) {
827 bctx->ipad[i] ^= 0x36;
828 bctx->opad[i] ^= 0x5c;
829 }
830
831 return err;
832}
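The pad preparation above, the ipad preload in omap_sham_init() and the outer hash in omap_sham_finish_req_hmac() together realize the standard HMAC construction; the comment below (not from the original source) spells out the split, with K' being the key zero-padded, or first digested when longer than a block, to the block size.

/*
 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 *   - setkey() builds (K' ^ ipad) and (K' ^ opad);
 *   - init() preloads (K' ^ ipad) into the data buffer, so the HW
 *     computes the inner hash H((K' ^ ipad) || m);
 *   - omap_sham_finish_req_hmac() runs the outer hash over opad and
 *     the inner digest using the software shash.
 */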
833
834static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
835{
836 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
837 const char *alg_name = crypto_tfm_alg_name(tfm);
838
839 /* Allocate a fallback and abort if it failed. */
840 tctx->fallback = crypto_alloc_shash(alg_name, 0,
841 CRYPTO_ALG_NEED_FALLBACK);
842 if (IS_ERR(tctx->fallback)) {
843 pr_err("omap-sham: fallback driver '%s' "
844 "could not be loaded.\n", alg_name);
845 return PTR_ERR(tctx->fallback);
846 }
847
848 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
849 sizeof(struct omap_sham_reqctx));
850
851 if (alg_base) {
852 struct omap_sham_hmac_ctx *bctx = tctx->base;
853 tctx->flags |= FLAGS_HMAC;
854 bctx->shash = crypto_alloc_shash(alg_base, 0,
855 CRYPTO_ALG_NEED_FALLBACK);
856 if (IS_ERR(bctx->shash)) {
857 pr_err("omap-sham: base driver '%s' "
858 "could not be loaded.\n", alg_base);
859 crypto_free_shash(tctx->fallback);
860 return PTR_ERR(bctx->shash);
861 }
862
863 }
864
865 return 0;
866}
867
868static int omap_sham_cra_init(struct crypto_tfm *tfm)
869{
870 return omap_sham_cra_init_alg(tfm, NULL);
871}
872
873static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
874{
875 return omap_sham_cra_init_alg(tfm, "sha1");
876}
877
878static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
879{
880 return omap_sham_cra_init_alg(tfm, "md5");
881}
882
883static void omap_sham_cra_exit(struct crypto_tfm *tfm)
884{
885 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
886
887 crypto_free_shash(tctx->fallback);
888 tctx->fallback = NULL;
889
890 if (tctx->flags & FLAGS_HMAC) {
891 struct omap_sham_hmac_ctx *bctx = tctx->base;
892 crypto_free_shash(bctx->shash);
893 }
894}
895
896static struct ahash_alg algs[] = {
897{
898 .init = omap_sham_init,
899 .update = omap_sham_update,
900 .final = omap_sham_final,
901 .finup = omap_sham_finup,
902 .digest = omap_sham_digest,
903 .halg.digestsize = SHA1_DIGEST_SIZE,
904 .halg.base = {
905 .cra_name = "sha1",
906 .cra_driver_name = "omap-sha1",
907 .cra_priority = 100,
908 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
909 CRYPTO_ALG_ASYNC |
910 CRYPTO_ALG_NEED_FALLBACK,
911 .cra_blocksize = SHA1_BLOCK_SIZE,
912 .cra_ctxsize = sizeof(struct omap_sham_ctx),
913 .cra_alignmask = 0,
914 .cra_module = THIS_MODULE,
915 .cra_init = omap_sham_cra_init,
916 .cra_exit = omap_sham_cra_exit,
917 }
918},
919{
920 .init = omap_sham_init,
921 .update = omap_sham_update,
922 .final = omap_sham_final,
923 .finup = omap_sham_finup,
924 .digest = omap_sham_digest,
925 .halg.digestsize = MD5_DIGEST_SIZE,
926 .halg.base = {
927 .cra_name = "md5",
928 .cra_driver_name = "omap-md5",
929 .cra_priority = 100,
930 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
931 CRYPTO_ALG_ASYNC |
932 CRYPTO_ALG_NEED_FALLBACK,
933 .cra_blocksize = SHA1_BLOCK_SIZE,
934 .cra_ctxsize = sizeof(struct omap_sham_ctx),
935 .cra_alignmask = 0,
936 .cra_module = THIS_MODULE,
937 .cra_init = omap_sham_cra_init,
938 .cra_exit = omap_sham_cra_exit,
939 }
940},
941{
942 .init = omap_sham_init,
943 .update = omap_sham_update,
944 .final = omap_sham_final,
945 .finup = omap_sham_finup,
946 .digest = omap_sham_digest,
947 .setkey = omap_sham_setkey,
948 .halg.digestsize = SHA1_DIGEST_SIZE,
949 .halg.base = {
950 .cra_name = "hmac(sha1)",
951 .cra_driver_name = "omap-hmac-sha1",
952 .cra_priority = 100,
953 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
954 CRYPTO_ALG_ASYNC |
955 CRYPTO_ALG_NEED_FALLBACK,
956 .cra_blocksize = SHA1_BLOCK_SIZE,
957 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
958 sizeof(struct omap_sham_hmac_ctx),
959 .cra_alignmask = 0,
960 .cra_module = THIS_MODULE,
961 .cra_init = omap_sham_cra_sha1_init,
962 .cra_exit = omap_sham_cra_exit,
963 }
964},
965{
966 .init = omap_sham_init,
967 .update = omap_sham_update,
968 .final = omap_sham_final,
969 .finup = omap_sham_finup,
970 .digest = omap_sham_digest,
971 .setkey = omap_sham_setkey,
972 .halg.digestsize = MD5_DIGEST_SIZE,
973 .halg.base = {
974 .cra_name = "hmac(md5)",
975 .cra_driver_name = "omap-hmac-md5",
976 .cra_priority = 100,
977 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
978 CRYPTO_ALG_ASYNC |
979 CRYPTO_ALG_NEED_FALLBACK,
980 .cra_blocksize = SHA1_BLOCK_SIZE,
981 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
982 sizeof(struct omap_sham_hmac_ctx),
983 .cra_alignmask = 0,
984 .cra_module = THIS_MODULE,
985 .cra_init = omap_sham_cra_md5_init,
986 .cra_exit = omap_sham_cra_exit,
987 }
988}
989};
990
991static void omap_sham_done_task(unsigned long data)
992{
993 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
994 struct ahash_request *req = dd->req;
995 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
996 int ready = 1;
997
998 if (ctx->flags & FLAGS_OUTPUT_READY) {
999 ctx->flags &= ~FLAGS_OUTPUT_READY;
1000 ready = 1;
1001 }
1002
1003 if (dd->flags & FLAGS_DMA_ACTIVE) {
1004 dd->flags &= ~FLAGS_DMA_ACTIVE;
1005 omap_sham_update_dma_stop(dd);
1006 omap_sham_update_dma_slow(dd);
1007 }
1008
1009 if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
1010 dev_dbg(dd->dev, "update done\n");
1011		/* finish current request */
1012 omap_sham_finish_req(req, 0);
1013 /* start new request */
1014 omap_sham_handle_queue(dd);
1015 }
1016}
1017
1018static void omap_sham_queue_task(unsigned long data)
1019{
1020 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1021
1022 omap_sham_handle_queue(dd);
1023}
1024
1025static irqreturn_t omap_sham_irq(int irq, void *dev_id)
1026{
1027 struct omap_sham_dev *dd = dev_id;
1028 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
1029
1030 if (!ctx) {
1031 dev_err(dd->dev, "unknown interrupt.\n");
1032 return IRQ_HANDLED;
1033 }
1034
1035 if (unlikely(ctx->flags & FLAGS_FINAL))
1036 /* final -> allow device to go to power-saving mode */
1037 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1038
1039 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1040 SHA_REG_CTRL_OUTPUT_READY);
1041 omap_sham_read(dd, SHA_REG_CTRL);
1042
1043 ctx->flags |= FLAGS_OUTPUT_READY;
1044 tasklet_schedule(&dd->done_task);
1045
1046 return IRQ_HANDLED;
1047}
1048
1049static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
1050{
1051 struct omap_sham_dev *dd = data;
1052
1053 if (likely(lch == dd->dma_lch))
1054 tasklet_schedule(&dd->done_task);
1055}
1056
1057static int omap_sham_dma_init(struct omap_sham_dev *dd)
1058{
1059 int err;
1060
1061 dd->dma_lch = -1;
1062
1063 err = omap_request_dma(dd->dma, dev_name(dd->dev),
1064 omap_sham_dma_callback, dd, &dd->dma_lch);
1065 if (err) {
1066 dev_err(dd->dev, "Unable to request DMA channel\n");
1067 return err;
1068 }
1069 omap_set_dma_dest_params(dd->dma_lch, 0,
1070 OMAP_DMA_AMODE_CONSTANT,
1071 dd->phys_base + SHA_REG_DIN(0), 0, 16);
1072
1073 omap_set_dma_dest_burst_mode(dd->dma_lch,
1074 OMAP_DMA_DATA_BURST_16);
1075
1076 return 0;
1077}
1078
1079static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
1080{
1081 if (dd->dma_lch >= 0) {
1082 omap_free_dma(dd->dma_lch);
1083 dd->dma_lch = -1;
1084 }
1085}
1086
1087static int __devinit omap_sham_probe(struct platform_device *pdev)
1088{
1089 struct omap_sham_dev *dd;
1090 struct device *dev = &pdev->dev;
1091 struct resource *res;
1092 int err, i, j;
1093
1094 dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
1095 if (dd == NULL) {
1096 dev_err(dev, "unable to alloc data struct.\n");
1097 err = -ENOMEM;
1098 goto data_err;
1099 }
1100 dd->dev = dev;
1101 platform_set_drvdata(pdev, dd);
1102
1103 INIT_LIST_HEAD(&dd->list);
1104 spin_lock_init(&dd->lock);
1105 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
1106 tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
1107 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
1108
1109 dd->irq = -1;
1110
1111 /* Get the base address */
1112 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1113 if (!res) {
1114 dev_err(dev, "no MEM resource info\n");
1115 err = -ENODEV;
1116 goto res_err;
1117 }
1118 dd->phys_base = res->start;
1119
1120 /* Get the DMA */
1121 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1122 if (!res) {
1123 dev_err(dev, "no DMA resource info\n");
1124 err = -ENODEV;
1125 goto res_err;
1126 }
1127 dd->dma = res->start;
1128
1129 /* Get the IRQ */
1130 dd->irq = platform_get_irq(pdev, 0);
1131 if (dd->irq < 0) {
1132 dev_err(dev, "no IRQ resource info\n");
1133 err = dd->irq;
1134 goto res_err;
1135 }
1136
1137 err = request_irq(dd->irq, omap_sham_irq,
1138 IRQF_TRIGGER_LOW, dev_name(dev), dd);
1139 if (err) {
1140 dev_err(dev, "unable to request irq.\n");
1141 goto res_err;
1142 }
1143
1144 err = omap_sham_dma_init(dd);
1145 if (err)
1146 goto dma_err;
1147
1148 /* Initializing the clock */
1149 dd->iclk = clk_get(dev, "ick");
1150 if (!dd->iclk) {
1151		dev_err(dev, "clock initialization failed.\n");
1152 err = -ENODEV;
1153 goto clk_err;
1154 }
1155
1156 dd->io_base = ioremap(dd->phys_base, SZ_4K);
1157 if (!dd->io_base) {
1158 dev_err(dev, "can't ioremap\n");
1159 err = -ENOMEM;
1160 goto io_err;
1161 }
1162
1163 clk_enable(dd->iclk);
1164 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
1165 (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
1166 omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
1167 clk_disable(dd->iclk);
1168
1169 spin_lock(&sham.lock);
1170 list_add_tail(&dd->list, &sham.dev_list);
1171 spin_unlock(&sham.lock);
1172
1173 for (i = 0; i < ARRAY_SIZE(algs); i++) {
1174 err = crypto_register_ahash(&algs[i]);
1175 if (err)
1176 goto err_algs;
1177 }
1178
1179 return 0;
1180
1181err_algs:
1182 for (j = 0; j < i; j++)
1183 crypto_unregister_ahash(&algs[j]);
1184 iounmap(dd->io_base);
1185io_err:
1186 clk_put(dd->iclk);
1187clk_err:
1188 omap_sham_dma_cleanup(dd);
1189dma_err:
1190 if (dd->irq >= 0)
1191 free_irq(dd->irq, dd);
1192res_err:
1193 kfree(dd);
1194 dd = NULL;
1195data_err:
1196 dev_err(dev, "initialization failed.\n");
1197
1198 return err;
1199}
1200
1201static int __devexit omap_sham_remove(struct platform_device *pdev)
1202{
1203 static struct omap_sham_dev *dd;
1204 int i;
1205
1206 dd = platform_get_drvdata(pdev);
1207 if (!dd)
1208 return -ENODEV;
1209 spin_lock(&sham.lock);
1210 list_del(&dd->list);
1211 spin_unlock(&sham.lock);
1212 for (i = 0; i < ARRAY_SIZE(algs); i++)
1213 crypto_unregister_ahash(&algs[i]);
1214 tasklet_kill(&dd->done_task);
1215 tasklet_kill(&dd->queue_task);
1216 iounmap(dd->io_base);
1217 clk_put(dd->iclk);
1218 omap_sham_dma_cleanup(dd);
1219 if (dd->irq >= 0)
1220 free_irq(dd->irq, dd);
1221 kfree(dd);
1222 dd = NULL;
1223
1224 return 0;
1225}
1226
1227static struct platform_driver omap_sham_driver = {
1228 .probe = omap_sham_probe,
1229 .remove = omap_sham_remove,
1230 .driver = {
1231 .name = "omap-sham",
1232 .owner = THIS_MODULE,
1233 },
1234};
1235
1236static int __init omap_sham_mod_init(void)
1237{
1238 pr_info("loading %s driver\n", "omap-sham");
1239
1240 if (!cpu_class_is_omap2() ||
1241 omap_type() != OMAP2_DEVICE_TYPE_SEC) {
1242 pr_err("Unsupported cpu\n");
1243 return -ENODEV;
1244 }
1245
1246 return platform_driver_register(&omap_sham_driver);
1247}
1248
1249static void __exit omap_sham_mod_exit(void)
1250{
1251 platform_driver_unregister(&omap_sham_driver);
1252}
1253
1254module_init(omap_sham_mod_init);
1255module_exit(omap_sham_mod_exit);
1256
1257MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
1258MODULE_LICENSE("GPL v2");
1259MODULE_AUTHOR("Dmitry Kasatkin");
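A minimal sketch of how a kernel caller might exercise one of the ahash algorithms this driver registers, using the generic crypto API with a completion callback. The function and struct names are invented for illustration; error handling is abbreviated, and the buffer passed to sg_init_one() is assumed to be a linear, DMA-able (kmalloc'd) buffer.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/hash.h>

struct sham_wait {
	struct completion done;
	int err;
};

static void sham_example_cb(struct crypto_async_request *req, int err)
{
	struct sham_wait *w = req->data;

	if (err == -EINPROGRESS)
		return;		/* request left the backlog; keep waiting */
	w->err = err;
	complete(&w->done);
}

static int sham_example_digest(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct sham_wait w;
	int ret;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&w.done);
	w.err = 0;
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sham_example_cb, &w);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&w.done);
		ret = w.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}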
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 7e4e42d85fe4..637c105f53d2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver 2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 * 3 *
4 * Copyright (c) 2008 Freescale Semiconductor, Inc. 4 * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
5 * 5 *
6 * Scatterlist Crypto API glue code copied from files with the following: 6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> 7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -43,9 +43,12 @@
43#include <crypto/aes.h> 43#include <crypto/aes.h>
44#include <crypto/des.h> 44#include <crypto/des.h>
45#include <crypto/sha.h> 45#include <crypto/sha.h>
46#include <crypto/md5.h>
46#include <crypto/aead.h> 47#include <crypto/aead.h>
47#include <crypto/authenc.h> 48#include <crypto/authenc.h>
48#include <crypto/skcipher.h> 49#include <crypto/skcipher.h>
50#include <crypto/hash.h>
51#include <crypto/internal/hash.h>
49#include <crypto/scatterwalk.h> 52#include <crypto/scatterwalk.h>
50 53
51#include "talitos.h" 54#include "talitos.h"
@@ -65,6 +68,13 @@ struct talitos_ptr {
65 __be32 ptr; /* address */ 68 __be32 ptr; /* address */
66}; 69};
67 70
71static const struct talitos_ptr zero_entry = {
72 .len = 0,
73 .j_extent = 0,
74 .eptr = 0,
75 .ptr = 0
76};
77
68/* descriptor */ 78/* descriptor */
69struct talitos_desc { 79struct talitos_desc {
70 __be32 hdr; /* header high bits */ 80 __be32 hdr; /* header high bits */
@@ -146,6 +156,7 @@ struct talitos_private {
146/* .features flag */ 156/* .features flag */
147#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 157#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
148#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 158#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
159#define TALITOS_FTR_SHA224_HWINIT 0x00000004
149 160
150static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) 161static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
151{ 162{
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
692#define TALITOS_MAX_KEY_SIZE 64 703#define TALITOS_MAX_KEY_SIZE 64
693#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 704#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
694 705
695#define MD5_DIGEST_SIZE 16 706#define MD5_BLOCK_SIZE 64
696 707
697struct talitos_ctx { 708struct talitos_ctx {
698 struct device *dev; 709 struct device *dev;
@@ -705,6 +716,23 @@ struct talitos_ctx {
705 unsigned int authsize; 716 unsigned int authsize;
706}; 717};
707 718
719#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
720#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
721
722struct talitos_ahash_req_ctx {
723 u64 count;
724 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
725 unsigned int hw_context_size;
726 u8 buf[HASH_MAX_BLOCK_SIZE];
727 u8 bufnext[HASH_MAX_BLOCK_SIZE];
728 unsigned int swinit;
729 unsigned int first;
730 unsigned int last;
731 unsigned int to_hash_later;
732 struct scatterlist bufsl[2];
733 struct scatterlist *psrc;
734};
735
708static int aead_setauthsize(struct crypto_aead *authenc, 736static int aead_setauthsize(struct crypto_aead *authenc,
709 unsigned int authsize) 737 unsigned int authsize)
710{ 738{
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev,
821 else 849 else
822 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 850 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
823 851
824 if (edesc->dst_is_chained) 852 if (dst) {
825 talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE); 853 if (edesc->dst_is_chained)
826 else 854 talitos_unmap_sg_chain(dev, dst,
827 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 855 DMA_FROM_DEVICE);
856 else
857 dma_unmap_sg(dev, dst, dst_nents,
858 DMA_FROM_DEVICE);
859 }
828 } else 860 } else
829 if (edesc->src_is_chained) 861 if (edesc->src_is_chained)
830 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); 862 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1114 return sg_nents; 1146 return sg_nents;
1115} 1147}
1116 1148
1149/**
1150 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1151 * @sgl: The SG list
1152 * @nents: Number of SG entries
1153 * @buf: Where to copy to
1154 * @buflen: The number of bytes to copy
1155 * @skip: The number of bytes to skip before copying.
1156 * Note: skip + buflen should equal SG total size.
1157 *
1158 * Returns the number of copied bytes.
1159 *
1160 **/
1161static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1162 void *buf, size_t buflen, unsigned int skip)
1163{
1164 unsigned int offset = 0;
1165 unsigned int boffset = 0;
1166 struct sg_mapping_iter miter;
1167 unsigned long flags;
1168 unsigned int sg_flags = SG_MITER_ATOMIC;
1169 size_t total_buffer = buflen + skip;
1170
1171 sg_flags |= SG_MITER_FROM_SG;
1172
1173 sg_miter_start(&miter, sgl, nents, sg_flags);
1174
1175 local_irq_save(flags);
1176
1177 while (sg_miter_next(&miter) && offset < total_buffer) {
1178 unsigned int len;
1179 unsigned int ignore;
1180
1181 if ((offset + miter.length) > skip) {
1182 if (offset < skip) {
1183 /* Copy part of this segment */
1184 ignore = skip - offset;
1185 len = miter.length - ignore;
1186 memcpy(buf + boffset, miter.addr + ignore, len);
1187 } else {
1188 /* Copy all of this segment */
1189 len = miter.length;
1190 memcpy(buf + boffset, miter.addr, len);
1191 }
1192 boffset += len;
1193 }
1194 offset += miter.length;
1195 }
1196
1197 sg_miter_stop(&miter);
1198
1199 local_irq_restore(flags);
1200 return boffset;
1201}
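As a concrete illustration of the skip/buflen contract (skip + buflen equals the total SG size), this is roughly how ahash_process_req() further below uses the helper to carry a trailing partial block over to the next update; the byte counts are made up.

/*
 * Example: a 100-byte request hashed with a 64-byte block size keeps
 * the trailing 36 bytes for later:
 *
 *   to_hash_later = 100 & (64 - 1);               // = 36
 *   sg_copy_end_to_buffer(areq->src, nents,
 *                         req_ctx->bufnext,
 *                         to_hash_later,          // buflen = 36
 *                         100 - to_hash_later);   // skip   = 64
 *   // skip + buflen == 100 == total SG size, as required above
 */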
1202
1117/* 1203/*
1118 * allocate and map the extended descriptor 1204 * allocate and map the extended descriptor
1119 */ 1205 */
1120static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, 1206static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1121 struct scatterlist *src, 1207 struct scatterlist *src,
1122 struct scatterlist *dst, 1208 struct scatterlist *dst,
1209 int hash_result,
1123 unsigned int cryptlen, 1210 unsigned int cryptlen,
1124 unsigned int authsize, 1211 unsigned int authsize,
1125 int icv_stashing, 1212 int icv_stashing,
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1139 src_nents = sg_count(src, cryptlen + authsize, &src_chained); 1226 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1140 src_nents = (src_nents == 1) ? 0 : src_nents; 1227 src_nents = (src_nents == 1) ? 0 : src_nents;
1141 1228
1142 if (dst == src) { 1229 if (hash_result) {
1143 dst_nents = src_nents; 1230 dst_nents = 0;
1144 } else { 1231 } else {
1145 dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained); 1232 if (dst == src) {
1146 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1233 dst_nents = src_nents;
1234 } else {
1235 dst_nents = sg_count(dst, cryptlen + authsize,
1236 &dst_chained);
1237 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1238 }
1147 } 1239 }
1148 1240
1149 /* 1241 /*
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1172 edesc->src_is_chained = src_chained; 1264 edesc->src_is_chained = src_chained;
1173 edesc->dst_is_chained = dst_chained; 1265 edesc->dst_is_chained = dst_chained;
1174 edesc->dma_len = dma_len; 1266 edesc->dma_len = dma_len;
1175 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], 1267 if (dma_len)
1176 edesc->dma_len, DMA_BIDIRECTIONAL); 1268 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1269 edesc->dma_len,
1270 DMA_BIDIRECTIONAL);
1177 1271
1178 return edesc; 1272 return edesc;
1179} 1273}
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1184 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1278 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1185 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1279 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1186 1280
1187 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 1281 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1188 areq->cryptlen, ctx->authsize, icv_stashing, 1282 areq->cryptlen, ctx->authsize, icv_stashing,
1189 areq->base.flags); 1283 areq->base.flags);
1190} 1284}
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1441 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1535 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1442 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1536 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1443 1537
1444 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes, 1538 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1445 0, 0, areq->base.flags); 1539 areq->nbytes, 0, 0, areq->base.flags);
1446} 1540}
1447 1541
1448static int ablkcipher_encrypt(struct ablkcipher_request *areq) 1542static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1478 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); 1572 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1479} 1573}
1480 1574
1575static void common_nonsnoop_hash_unmap(struct device *dev,
1576 struct talitos_edesc *edesc,
1577 struct ahash_request *areq)
1578{
1579 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1580
1581 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1582
1583 /* When using hashctx-in, must unmap it. */
1584 if (edesc->desc.ptr[1].len)
1585 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1586 DMA_TO_DEVICE);
1587
1588 if (edesc->desc.ptr[2].len)
1589 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1590 DMA_TO_DEVICE);
1591
1592 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1593
1594 if (edesc->dma_len)
1595 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1596 DMA_BIDIRECTIONAL);
1597
1598}
1599
1600static void ahash_done(struct device *dev,
1601 struct talitos_desc *desc, void *context,
1602 int err)
1603{
1604 struct ahash_request *areq = context;
1605 struct talitos_edesc *edesc =
1606 container_of(desc, struct talitos_edesc, desc);
1607 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1608
1609 if (!req_ctx->last && req_ctx->to_hash_later) {
1610 /* Position any partial block for next update/final/finup */
1611 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1612 }
1613 common_nonsnoop_hash_unmap(dev, edesc, areq);
1614
1615 kfree(edesc);
1616
1617 areq->base.complete(&areq->base, err);
1618}
1619
1620static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1621 struct ahash_request *areq, unsigned int length,
1622 void (*callback) (struct device *dev,
1623 struct talitos_desc *desc,
1624 void *context, int error))
1625{
1626 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1627 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1628 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1629 struct device *dev = ctx->dev;
1630 struct talitos_desc *desc = &edesc->desc;
1631 int sg_count, ret;
1632
1633 /* first DWORD empty */
1634 desc->ptr[0] = zero_entry;
1635
1636 /* hash context in */
1637 if (!req_ctx->first || req_ctx->swinit) {
1638 map_single_talitos_ptr(dev, &desc->ptr[1],
1639 req_ctx->hw_context_size,
1640 (char *)req_ctx->hw_context, 0,
1641 DMA_TO_DEVICE);
1642 req_ctx->swinit = 0;
1643 } else {
1644 desc->ptr[1] = zero_entry;
1645 /* Indicate next op is not the first. */
1646 req_ctx->first = 0;
1647 }
1648
1649 /* HMAC key */
1650 if (ctx->keylen)
1651 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1652 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1653 else
1654 desc->ptr[2] = zero_entry;
1655
1656 /*
1657 * data in
1658 */
1659 desc->ptr[3].len = cpu_to_be16(length);
1660 desc->ptr[3].j_extent = 0;
1661
1662 sg_count = talitos_map_sg(dev, req_ctx->psrc,
1663 edesc->src_nents ? : 1,
1664 DMA_TO_DEVICE,
1665 edesc->src_is_chained);
1666
1667 if (sg_count == 1) {
1668 to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1669 } else {
1670 sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1671 &edesc->link_tbl[0]);
1672 if (sg_count > 1) {
1673 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1674 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1675 dma_sync_single_for_device(ctx->dev,
1676 edesc->dma_link_tbl,
1677 edesc->dma_len,
1678 DMA_BIDIRECTIONAL);
1679 } else {
1680 /* Only one segment now, so no link tbl needed */
1681 to_talitos_ptr(&desc->ptr[3],
1682 sg_dma_address(req_ctx->psrc));
1683 }
1684 }
1685
1686 /* fifth DWORD empty */
1687 desc->ptr[4] = zero_entry;
1688
1689 /* hash/HMAC out -or- hash context out */
1690 if (req_ctx->last)
1691 map_single_talitos_ptr(dev, &desc->ptr[5],
1692 crypto_ahash_digestsize(tfm),
1693 areq->result, 0, DMA_FROM_DEVICE);
1694 else
1695 map_single_talitos_ptr(dev, &desc->ptr[5],
1696 req_ctx->hw_context_size,
1697 req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1698
1699 /* last DWORD empty */
1700 desc->ptr[6] = zero_entry;
1701
1702 ret = talitos_submit(dev, desc, callback, areq);
1703 if (ret != -EINPROGRESS) {
1704 common_nonsnoop_hash_unmap(dev, edesc, areq);
1705 kfree(edesc);
1706 }
1707 return ret;
1708}
1709
1710static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1711 unsigned int nbytes)
1712{
1713 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1714 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1715 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1716
1717 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1718 nbytes, 0, 0, areq->base.flags);
1719}
1720
1721static int ahash_init(struct ahash_request *areq)
1722{
1723 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1724 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1725
1726 /* Initialize the context */
1727 req_ctx->count = 0;
1728 req_ctx->first = 1; /* first indicates h/w must init its context */
1729 req_ctx->swinit = 0; /* assume h/w init of context */
1730 req_ctx->hw_context_size =
1731 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1732 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1733 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1734
1735 return 0;
1736}
1737
1738/*
1739 * on h/w without explicit sha224 support, we initialize h/w context
1740 * manually with sha224 constants, and tell it to run sha256.
1741 */
1742static int ahash_init_sha224_swinit(struct ahash_request *areq)
1743{
1744 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1745
1746 ahash_init(areq);
1747	req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */
1748
1749 req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
1750 req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
1751 req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
1752 req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
1753 req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
1754 req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
1755 req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
1756 req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
1757
1758 /* init 64-bit count */
1759 req_ctx->hw_context[8] = 0;
1760 req_ctx->hw_context[9] = 0;
1761
1762 return 0;
1763}
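A note on why the trick above is sufficient (not in the original source): SHA-224 is SHA-256 with different initial hash values and the output truncated to 28 bytes, so seeding the context with the SHA-224 constants and letting the MDEU run its SHA-256 datapath produces the right digest; the 28-byte halg.digestsize handles the truncation.

/*
 * FIPS 180-2 SHA-224 initial hash values loaded above:
 *   H0..H7 = c1059ed8 367cd507 3070dd17 f70e5939
 *            ffc00b31 68581511 64f98fa7 befa4fa4
 * The compression function is identical to SHA-256; only the IV and
 * the truncated output differ.
 */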
1764
1765static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1766{
1767 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1768 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1769 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1770 struct talitos_edesc *edesc;
1771 unsigned int blocksize =
1772 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1773 unsigned int nbytes_to_hash;
1774 unsigned int to_hash_later;
1775 unsigned int index;
1776 int chained;
1777
1778 index = req_ctx->count & (blocksize - 1);
1779 req_ctx->count += nbytes;
1780
1781 if (!req_ctx->last && (index + nbytes) < blocksize) {
1782 /* Buffer the partial block */
1783 sg_copy_to_buffer(areq->src,
1784 sg_count(areq->src, nbytes, &chained),
1785 req_ctx->buf + index, nbytes);
1786 return 0;
1787 }
1788
1789 if (index) {
1790 /* partial block from previous update; chain it in. */
1791 sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
1792 sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
1793 if (nbytes)
1794 scatterwalk_sg_chain(req_ctx->bufsl, 2,
1795 areq->src);
1796 req_ctx->psrc = req_ctx->bufsl;
1797 } else {
1798 req_ctx->psrc = areq->src;
1799 }
1800 nbytes_to_hash = index + nbytes;
1801 if (!req_ctx->last) {
1802 to_hash_later = (nbytes_to_hash & (blocksize - 1));
1803 if (to_hash_later) {
1804 int nents;
1805 /* Must copy to_hash_later bytes from the end
1806 * to bufnext (a partial block) for later.
1807 */
1808 nents = sg_count(areq->src, nbytes, &chained);
1809 sg_copy_end_to_buffer(areq->src, nents,
1810 req_ctx->bufnext,
1811 to_hash_later,
1812 nbytes - to_hash_later);
1813
1814 /* Adjust count for what will be hashed now */
1815 nbytes_to_hash -= to_hash_later;
1816 }
1817 req_ctx->to_hash_later = to_hash_later;
1818 }
1819
1820 /* allocate extended descriptor */
1821 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1822 if (IS_ERR(edesc))
1823 return PTR_ERR(edesc);
1824
1825 edesc->desc.hdr = ctx->desc_hdr_template;
1826
1827 /* On last one, request SEC to pad; otherwise continue */
1828 if (req_ctx->last)
1829 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1830 else
1831 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1832
1833 /* request SEC to INIT hash. */
1834 if (req_ctx->first && !req_ctx->swinit)
1835 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1836
1837 /* When the tfm context has a keylen, it's an HMAC.
1838 * A first or last (ie. not middle) descriptor must request HMAC.
1839 */
1840 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1841 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1842
1843 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1844 ahash_done);
1845}
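A short worked example of the block bookkeeping in ahash_process_req() above, assuming a 64-byte block size; the numbers are illustrative only.

/*
 * Prior count 40, update of 100 bytes:
 *   index          = 40 & 63    = 40   (bytes already buffered in buf)
 *   nbytes_to_hash = 40 + 100   = 140  (buffered block chained in front of src)
 *   to_hash_later  = 140 & 63   = 12   (copied to bufnext for the next call)
 *   hashed now     = 140 - 12   = 128  (two full 64-byte blocks to the SEC)
 */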
1846
1847static int ahash_update(struct ahash_request *areq)
1848{
1849 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1850
1851 req_ctx->last = 0;
1852
1853 return ahash_process_req(areq, areq->nbytes);
1854}
1855
1856static int ahash_final(struct ahash_request *areq)
1857{
1858 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1859
1860 req_ctx->last = 1;
1861
1862 return ahash_process_req(areq, 0);
1863}
1864
1865static int ahash_finup(struct ahash_request *areq)
1866{
1867 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1868
1869 req_ctx->last = 1;
1870
1871 return ahash_process_req(areq, areq->nbytes);
1872}
1873
1874static int ahash_digest(struct ahash_request *areq)
1875{
1876 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1877 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1878
1879 ahash->init(areq);
1880 req_ctx->last = 1;
1881
1882 return ahash_process_req(areq, areq->nbytes);
1883}
1884
1481struct talitos_alg_template { 1885struct talitos_alg_template {
1482 struct crypto_alg alg; 1886 u32 type;
1887 union {
1888 struct crypto_alg crypto;
1889 struct ahash_alg hash;
1890 } alg;
1483 __be32 desc_hdr_template; 1891 __be32 desc_hdr_template;
1484}; 1892};
1485 1893
1486static struct talitos_alg_template driver_algs[] = { 1894static struct talitos_alg_template driver_algs[] = {
1487 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ 1895 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1488 { 1896 { .type = CRYPTO_ALG_TYPE_AEAD,
1489 .alg = { 1897 .alg.crypto = {
1490 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1898 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1491 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", 1899 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1492 .cra_blocksize = AES_BLOCK_SIZE, 1900 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = {
1511 DESC_HDR_MODE1_MDEU_PAD | 1919 DESC_HDR_MODE1_MDEU_PAD |
1512 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 1920 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1513 }, 1921 },
1514 { 1922 { .type = CRYPTO_ALG_TYPE_AEAD,
1515 .alg = { 1923 .alg.crypto = {
1516 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", 1924 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1517 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", 1925 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1518 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1926 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = {
1538 DESC_HDR_MODE1_MDEU_PAD | 1946 DESC_HDR_MODE1_MDEU_PAD |
1539 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 1947 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1540 }, 1948 },
1541 { 1949 { .type = CRYPTO_ALG_TYPE_AEAD,
1542 .alg = { 1950 .alg.crypto = {
1543 .cra_name = "authenc(hmac(sha256),cbc(aes))", 1951 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1544 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", 1952 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1545 .cra_blocksize = AES_BLOCK_SIZE, 1953 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = {
1564 DESC_HDR_MODE1_MDEU_PAD | 1972 DESC_HDR_MODE1_MDEU_PAD |
1565 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 1973 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1566 }, 1974 },
1567 { 1975 { .type = CRYPTO_ALG_TYPE_AEAD,
1568 .alg = { 1976 .alg.crypto = {
1569 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", 1977 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1570 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", 1978 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1571 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1979 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = {
1591 DESC_HDR_MODE1_MDEU_PAD | 1999 DESC_HDR_MODE1_MDEU_PAD |
1592 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2000 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1593 }, 2001 },
1594 { 2002 { .type = CRYPTO_ALG_TYPE_AEAD,
1595 .alg = { 2003 .alg.crypto = {
1596 .cra_name = "authenc(hmac(md5),cbc(aes))", 2004 .cra_name = "authenc(hmac(md5),cbc(aes))",
1597 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", 2005 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1598 .cra_blocksize = AES_BLOCK_SIZE, 2006 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = {
1617 DESC_HDR_MODE1_MDEU_PAD | 2025 DESC_HDR_MODE1_MDEU_PAD |
1618 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2026 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1619 }, 2027 },
1620 { 2028 { .type = CRYPTO_ALG_TYPE_AEAD,
1621 .alg = { 2029 .alg.crypto = {
1622 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2030 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1623 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", 2031 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1624 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2032 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = {
1645 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2053 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1646 }, 2054 },
1647 /* ABLKCIPHER algorithms. */ 2055 /* ABLKCIPHER algorithms. */
1648 { 2056 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1649 .alg = { 2057 .alg.crypto = {
1650 .cra_name = "cbc(aes)", 2058 .cra_name = "cbc(aes)",
1651 .cra_driver_name = "cbc-aes-talitos", 2059 .cra_driver_name = "cbc-aes-talitos",
1652 .cra_blocksize = AES_BLOCK_SIZE, 2060 .cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = {
1667 DESC_HDR_SEL0_AESU | 2075 DESC_HDR_SEL0_AESU |
1668 DESC_HDR_MODE0_AESU_CBC, 2076 DESC_HDR_MODE0_AESU_CBC,
1669 }, 2077 },
1670 { 2078 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1671 .alg = { 2079 .alg.crypto = {
1672 .cra_name = "cbc(des3_ede)", 2080 .cra_name = "cbc(des3_ede)",
1673 .cra_driver_name = "cbc-3des-talitos", 2081 .cra_driver_name = "cbc-3des-talitos",
1674 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2082 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = {
1689 DESC_HDR_SEL0_DEU | 2097 DESC_HDR_SEL0_DEU |
1690 DESC_HDR_MODE0_DEU_CBC | 2098 DESC_HDR_MODE0_DEU_CBC |
1691 DESC_HDR_MODE0_DEU_3DES, 2099 DESC_HDR_MODE0_DEU_3DES,
1692 } 2100 },
2101 /* AHASH algorithms. */
2102 { .type = CRYPTO_ALG_TYPE_AHASH,
2103 .alg.hash = {
2104 .init = ahash_init,
2105 .update = ahash_update,
2106 .final = ahash_final,
2107 .finup = ahash_finup,
2108 .digest = ahash_digest,
2109 .halg.digestsize = MD5_DIGEST_SIZE,
2110 .halg.base = {
2111 .cra_name = "md5",
2112 .cra_driver_name = "md5-talitos",
2113 .cra_blocksize = MD5_BLOCK_SIZE,
2114 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2115 CRYPTO_ALG_ASYNC,
2116 .cra_type = &crypto_ahash_type
2117 }
2118 },
2119 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2120 DESC_HDR_SEL0_MDEUA |
2121 DESC_HDR_MODE0_MDEU_MD5,
2122 },
2123 { .type = CRYPTO_ALG_TYPE_AHASH,
2124 .alg.hash = {
2125 .init = ahash_init,
2126 .update = ahash_update,
2127 .final = ahash_final,
2128 .finup = ahash_finup,
2129 .digest = ahash_digest,
2130 .halg.digestsize = SHA1_DIGEST_SIZE,
2131 .halg.base = {
2132 .cra_name = "sha1",
2133 .cra_driver_name = "sha1-talitos",
2134 .cra_blocksize = SHA1_BLOCK_SIZE,
2135 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2136 CRYPTO_ALG_ASYNC,
2137 .cra_type = &crypto_ahash_type
2138 }
2139 },
2140 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2141 DESC_HDR_SEL0_MDEUA |
2142 DESC_HDR_MODE0_MDEU_SHA1,
2143 },
2144 { .type = CRYPTO_ALG_TYPE_AHASH,
2145 .alg.hash = {
2146 .init = ahash_init,
2147 .update = ahash_update,
2148 .final = ahash_final,
2149 .finup = ahash_finup,
2150 .digest = ahash_digest,
2151 .halg.digestsize = SHA224_DIGEST_SIZE,
2152 .halg.base = {
2153 .cra_name = "sha224",
2154 .cra_driver_name = "sha224-talitos",
2155 .cra_blocksize = SHA224_BLOCK_SIZE,
2156 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2157 CRYPTO_ALG_ASYNC,
2158 .cra_type = &crypto_ahash_type
2159 }
2160 },
2161 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2162 DESC_HDR_SEL0_MDEUA |
2163 DESC_HDR_MODE0_MDEU_SHA224,
2164 },
2165 { .type = CRYPTO_ALG_TYPE_AHASH,
2166 .alg.hash = {
2167 .init = ahash_init,
2168 .update = ahash_update,
2169 .final = ahash_final,
2170 .finup = ahash_finup,
2171 .digest = ahash_digest,
2172 .halg.digestsize = SHA256_DIGEST_SIZE,
2173 .halg.base = {
2174 .cra_name = "sha256",
2175 .cra_driver_name = "sha256-talitos",
2176 .cra_blocksize = SHA256_BLOCK_SIZE,
2177 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2178 CRYPTO_ALG_ASYNC,
2179 .cra_type = &crypto_ahash_type
2180 }
2181 },
2182 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2183 DESC_HDR_SEL0_MDEUA |
2184 DESC_HDR_MODE0_MDEU_SHA256,
2185 },
2186 { .type = CRYPTO_ALG_TYPE_AHASH,
2187 .alg.hash = {
2188 .init = ahash_init,
2189 .update = ahash_update,
2190 .final = ahash_final,
2191 .finup = ahash_finup,
2192 .digest = ahash_digest,
2193 .halg.digestsize = SHA384_DIGEST_SIZE,
2194 .halg.base = {
2195 .cra_name = "sha384",
2196 .cra_driver_name = "sha384-talitos",
2197 .cra_blocksize = SHA384_BLOCK_SIZE,
2198 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2199 CRYPTO_ALG_ASYNC,
2200 .cra_type = &crypto_ahash_type
2201 }
2202 },
2203 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2204 DESC_HDR_SEL0_MDEUB |
2205 DESC_HDR_MODE0_MDEUB_SHA384,
2206 },
2207 { .type = CRYPTO_ALG_TYPE_AHASH,
2208 .alg.hash = {
2209 .init = ahash_init,
2210 .update = ahash_update,
2211 .final = ahash_final,
2212 .finup = ahash_finup,
2213 .digest = ahash_digest,
2214 .halg.digestsize = SHA512_DIGEST_SIZE,
2215 .halg.base = {
2216 .cra_name = "sha512",
2217 .cra_driver_name = "sha512-talitos",
2218 .cra_blocksize = SHA512_BLOCK_SIZE,
2219 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2220 CRYPTO_ALG_ASYNC,
2221 .cra_type = &crypto_ahash_type
2222 }
2223 },
2224 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2225 DESC_HDR_SEL0_MDEUB |
2226 DESC_HDR_MODE0_MDEUB_SHA512,
2227 },
1693}; 2228};
1694 2229
1695struct talitos_crypto_alg { 2230struct talitos_crypto_alg {
1696 struct list_head entry; 2231 struct list_head entry;
1697 struct device *dev; 2232 struct device *dev;
1698 __be32 desc_hdr_template; 2233 struct talitos_alg_template algt;
1699 struct crypto_alg crypto_alg;
1700}; 2234};
1701 2235
1702static int talitos_cra_init(struct crypto_tfm *tfm) 2236static int talitos_cra_init(struct crypto_tfm *tfm)
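The .type / .alg.crypto / .alg.hash designators above, together with the reworked struct talitos_crypto_alg that now carries a whole template (algt) instead of a bare crypto_alg plus header word, imply roughly the following layout (a sketch inferred from the initializers; the authoritative definition appears earlier in this patch):

struct talitos_alg_template {
	u32 type;				/* CRYPTO_ALG_TYPE_{ABLKCIPHER,AEAD,AHASH} */
	union {
		struct crypto_alg crypto;	/* ablkcipher and aead entries */
		struct ahash_alg hash;		/* ahash entries */
	} alg;
	__be32 desc_hdr_template;		/* SEC descriptor header bits */
};

Keeping the full template inside talitos_crypto_alg is what lets talitos_remove() and talitos_probe(), further down, simply switch on algt.type when unregistering and registering.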
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
1705 struct talitos_crypto_alg *talitos_alg; 2239 struct talitos_crypto_alg *talitos_alg;
1706 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 2240 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1707 2241
1708 talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg); 2242 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2243 talitos_alg = container_of(__crypto_ahash_alg(alg),
2244 struct talitos_crypto_alg,
2245 algt.alg.hash);
2246 else
2247 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2248 algt.alg.crypto);
1709 2249
1710 /* update context with ptr to dev */ 2250 /* update context with ptr to dev */
1711 ctx->dev = talitos_alg->dev; 2251 ctx->dev = talitos_alg->dev;
1712 2252
1713 /* copy descriptor header template value */ 2253 /* copy descriptor header template value */
1714 ctx->desc_hdr_template = talitos_alg->desc_hdr_template; 2254 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2255
2256 return 0;
2257}
2258
2259static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2260{
2261 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2262
2263 talitos_cra_init(tfm);
1715 2264
1716 /* random first IV */ 2265 /* random first IV */
1717 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); 2266 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
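A note on the ahash branch of talitos_cra_init() above: for hash algorithms the crypto_alg handed to cra_init is the halg.base member nested inside a struct ahash_alg, so the driver must step out to the ahash_alg first (which is what __crypto_ahash_alg() does) and only then to the enclosing talitos_crypto_alg. A standalone illustration of that two-step container_of walk, using hypothetical stand-in types rather than the real crypto structures:

#include <linux/kernel.h>	/* container_of() */

struct fake_crypto_alg { int priority; };
struct fake_ahash_alg  { struct fake_crypto_alg base; };	/* plays the role of halg.base */
struct fake_template   { struct fake_ahash_alg hash; };	/* plays the role of algt.alg.hash */

static struct fake_template *template_from_base(struct fake_crypto_alg *alg)
{
	/* step 1: crypto_alg -> enclosing ahash_alg */
	struct fake_ahash_alg *hash = container_of(alg, struct fake_ahash_alg, base);

	/* step 2: ahash_alg -> enclosing driver template */
	return container_of(hash, struct fake_template, hash);
}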
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
1719 return 0; 2268 return 0;
1720} 2269}
1721 2270
2271static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2272{
2273 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2274
2275 talitos_cra_init(tfm);
2276
2277 ctx->keylen = 0;
2278 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2279 sizeof(struct talitos_ahash_req_ctx));
2280
2281 return 0;
2282}
2283
1722/* 2284/*
1723 * given the alg's descriptor header template, determine whether descriptor 2285 * given the alg's descriptor header template, determine whether descriptor
1724 * type and primary/secondary execution units required match the hw 2286 * type and primary/secondary execution units required match the hw
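For context, a sketch (not part of the patch) of how the request-private area sized by crypto_ahash_set_reqsize() in talitos_cra_init_ahash() is consumed: the ahash entry points retrieve it with ahash_request_ctx(), so a minimal init handler, assuming the struct talitos_ahash_req_ctx defined earlier in this patch, would look along these lines:

#include <crypto/hash.h>	/* ahash_request_ctx() */
#include <linux/string.h>

static int example_hash_init(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	memset(req_ctx, 0, sizeof(*req_ctx));	/* fresh hash state per request */
	return 0;
}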
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev)
1747 int i; 2309 int i;
1748 2310
1749 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 2311 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
1750 crypto_unregister_alg(&t_alg->crypto_alg); 2312 switch (t_alg->algt.type) {
2313 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2314 case CRYPTO_ALG_TYPE_AEAD:
2315 crypto_unregister_alg(&t_alg->algt.alg.crypto);
2316 break;
2317 case CRYPTO_ALG_TYPE_AHASH:
2318 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2319 break;
2320 }
1751 list_del(&t_alg->entry); 2321 list_del(&t_alg->entry);
1752 kfree(t_alg); 2322 kfree(t_alg);
1753 } 2323 }
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1781 struct talitos_alg_template 2351 struct talitos_alg_template
1782 *template) 2352 *template)
1783{ 2353{
2354 struct talitos_private *priv = dev_get_drvdata(dev);
1784 struct talitos_crypto_alg *t_alg; 2355 struct talitos_crypto_alg *t_alg;
1785 struct crypto_alg *alg; 2356 struct crypto_alg *alg;
1786 2357
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1788 if (!t_alg) 2359 if (!t_alg)
1789 return ERR_PTR(-ENOMEM); 2360 return ERR_PTR(-ENOMEM);
1790 2361
1791 alg = &t_alg->crypto_alg; 2362 t_alg->algt = *template;
1792 *alg = template->alg; 2363
2364 switch (t_alg->algt.type) {
2365 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2366 alg = &t_alg->algt.alg.crypto;
2367 alg->cra_init = talitos_cra_init;
2368 break;
2369 case CRYPTO_ALG_TYPE_AEAD:
2370 alg = &t_alg->algt.alg.crypto;
2371 alg->cra_init = talitos_cra_init_aead;
2372 break;
2373 case CRYPTO_ALG_TYPE_AHASH:
2374 alg = &t_alg->algt.alg.hash.halg.base;
2375 alg->cra_init = talitos_cra_init_ahash;
2376 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2377 !strcmp(alg->cra_name, "sha224")) {
2378 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2379 t_alg->algt.desc_hdr_template =
2380 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2381 DESC_HDR_SEL0_MDEUA |
2382 DESC_HDR_MODE0_MDEU_SHA256;
2383 }
2384 break;
2385 }
1793 2386
1794 alg->cra_module = THIS_MODULE; 2387 alg->cra_module = THIS_MODULE;
1795 alg->cra_init = talitos_cra_init;
1796 alg->cra_priority = TALITOS_CRA_PRIORITY; 2388 alg->cra_priority = TALITOS_CRA_PRIORITY;
1797 alg->cra_alignmask = 0; 2389 alg->cra_alignmask = 0;
1798 alg->cra_ctxsize = sizeof(struct talitos_ctx); 2390 alg->cra_ctxsize = sizeof(struct talitos_ctx);
1799 2391
1800 t_alg->desc_hdr_template = template->desc_hdr_template;
1801 t_alg->dev = dev; 2392 t_alg->dev = dev;
1802 2393
1803 return t_alg; 2394 return t_alg;
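The sha224 special case above deserves a word: on parts without TALITOS_FTR_SHA224_HWINIT the MDEU cannot load the SHA-224 initial values itself, so the template is rewritten to use ahash_init_sha224_swinit together with plain SHA-256 mode. Roughly, such a software init seeds the hardware context with the FIPS 180-3 SHA-224 H0..H7 constants and records that the hardware must not be asked to initialise the context again for this request. A hedged sketch follows; the request-context type and its fields are assumptions for illustration, not a copy of the driver code:

#include <crypto/hash.h>
#include <crypto/sha.h>		/* SHA224_H0..SHA224_H7, SHA256_DIGEST_SIZE */
#include <linux/types.h>

struct sha224_swinit_ctx {				/* hypothetical, pared down */
	u32 hw_context[SHA256_DIGEST_SIZE / sizeof(u32)];
	bool swinit;					/* hardware init must be skipped */
};

static int sha224_swinit_sketch(struct ahash_request *areq)
{
	struct sha224_swinit_ctx *ctx = ahash_request_ctx(areq);

	ctx->hw_context[0] = SHA224_H0;
	ctx->hw_context[1] = SHA224_H1;
	ctx->hw_context[2] = SHA224_H2;
	ctx->hw_context[3] = SHA224_H3;
	ctx->hw_context[4] = SHA224_H4;
	ctx->hw_context[5] = SHA224_H5;
	ctx->hw_context[6] = SHA224_H6;
	ctx->hw_context[7] = SHA224_H7;
	ctx->swinit = true;

	return 0;
}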
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev,
1877 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; 2468 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
1878 2469
1879 if (of_device_is_compatible(np, "fsl,sec2.1")) 2470 if (of_device_is_compatible(np, "fsl,sec2.1"))
1880 priv->features |= TALITOS_FTR_HW_AUTH_CHECK; 2471 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2472 TALITOS_FTR_SHA224_HWINIT;
1881 2473
1882 priv->chan = kzalloc(sizeof(struct talitos_channel) * 2474 priv->chan = kzalloc(sizeof(struct talitos_channel) *
1883 priv->num_channels, GFP_KERNEL); 2475 priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev,
1931 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2523 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1932 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { 2524 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
1933 struct talitos_crypto_alg *t_alg; 2525 struct talitos_crypto_alg *t_alg;
2526 char *name = NULL;
1934 2527
1935 t_alg = talitos_alg_alloc(dev, &driver_algs[i]); 2528 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
1936 if (IS_ERR(t_alg)) { 2529 if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev,
1938 goto err_out; 2531 goto err_out;
1939 } 2532 }
1940 2533
1941 err = crypto_register_alg(&t_alg->crypto_alg); 2534 switch (t_alg->algt.type) {
2535 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2536 case CRYPTO_ALG_TYPE_AEAD:
2537 err = crypto_register_alg(
2538 &t_alg->algt.alg.crypto);
2539 name = t_alg->algt.alg.crypto.cra_driver_name;
2540 break;
2541 case CRYPTO_ALG_TYPE_AHASH:
2542 err = crypto_register_ahash(
2543 &t_alg->algt.alg.hash);
2544 name =
2545 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2546 break;
2547 }
1942 if (err) { 2548 if (err) {
1943 dev_err(dev, "%s alg registration failed\n", 2549 dev_err(dev, "%s alg registration failed\n",
1944 t_alg->crypto_alg.cra_driver_name); 2550 name);
1945 kfree(t_alg); 2551 kfree(t_alg);
1946 } else { 2552 } else {
1947 list_add_tail(&t_alg->entry, &priv->alg_list); 2553 list_add_tail(&t_alg->entry, &priv->alg_list);
1948 dev_info(dev, "%s\n", 2554 dev_info(dev, "%s\n", name);
1949 t_alg->crypto_alg.cra_driver_name);
1950 } 2555 }
1951 } 2556 }
1952 } 2557 }
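Before moving on to the header changes, a consumer-side sketch (not part of the patch) of what the registrations above enable: once crypto_register_ahash() has run, any kernel code allocating a matching ahash transform can be served by this driver. The helper below is hypothetical and uses only the standard async-hash API of this kernel generation; note that the input buffer must be DMA-able (not on the stack) for a hardware backend such as this one.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

struct one_shot_wait {
	struct completion done;
	int err;
};

static void one_shot_complete(struct crypto_async_request *req, int err)
{
	struct one_shot_wait *w = req->data;

	if (err == -EINPROGRESS)	/* backlog notification, keep waiting */
		return;
	w->err = err;
	complete(&w->done);
}

static int sha256_one_shot(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct one_shot_wait wait = { .err = 0 };
	int err;

	init_completion(&wait.done);

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   one_shot_complete, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {	/* async completion */
		wait_for_completion(&wait.done);
		err = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

Because the driver registers with an elevated cra_priority, a plain "sha256" allocation normally resolves to sha256-talitos rather than the generic software implementation whenever the hardware supports it.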
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index ff5a1450e145..0b746aca4587 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale SEC (talitos) device register and descriptor header defines 2 * Freescale SEC (talitos) device register and descriptor header defines
3 * 3 *
4 * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. 4 * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
@@ -130,6 +130,9 @@
130#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ 130#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
131#define TALITOS_CRCUISR_LO 0xf034 131#define TALITOS_CRCUISR_LO 0xf034
132 132
133#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28
134#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48
135
133/* 136/*
134 * talitos descriptor header (hdr) bits 137 * talitos descriptor header (hdr) bits
135 */ 138 */
@@ -157,12 +160,16 @@
157#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) 160#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
158#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) 161#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
159#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) 162#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000)
163#define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000)
160#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) 164#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000)
161#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) 165#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000)
162#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) 166#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000)
167#define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000)
163#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) 168#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000)
164#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) 169#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000)
165#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) 170#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000)
171#define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000)
172#define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000)
166#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ 173#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
167 DESC_HDR_MODE0_MDEU_HMAC) 174 DESC_HDR_MODE0_MDEU_HMAC)
168#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ 175#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -181,9 +188,12 @@
181#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) 188#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000)
182#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) 189#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800)
183#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) 190#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400)
191#define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300)
184#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) 192#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200)
185#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) 193#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100)
186#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) 194#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000)
195#define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000)
196#define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200)
187#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ 197#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
188 DESC_HDR_MODE1_MDEU_HMAC) 198 DESC_HDR_MODE1_MDEU_HMAC)
189#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ 199#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
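A closing note on the new mode bits: SHA-384/512 require the MDEU-B execution-unit select, so their templates combine DESC_HDR_SEL0_MDEUB with the MDEUB_* encodings above, while MD5/SHA-1/SHA-224/SHA-256 use MDEU-A; SHA-224 shares the 0x28-byte context size with MD5/SHA-1/SHA-256 and, on pre-sec2.1 parts, falls back to SHA-256 mode with a software-seeded context as shown earlier. A hypothetical helper (not driver code), assuming the digest-size constants already used by driver_algs[], showing how these bits compose into the desc_hdr_template values:

static __be32 talitos_hash_hdr_sketch(unsigned int digestsize)
{
	__be32 hdr = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU;

	if (digestsize > SHA256_DIGEST_SIZE)		/* SHA-384/512: MDEU-B */
		return hdr | DESC_HDR_SEL0_MDEUB |
		       (digestsize == SHA512_DIGEST_SIZE ?
				DESC_HDR_MODE0_MDEUB_SHA512 :
				DESC_HDR_MODE0_MDEUB_SHA384);

	hdr |= DESC_HDR_SEL0_MDEUA;			/* everything else: MDEU-A */
	switch (digestsize) {
	case MD5_DIGEST_SIZE:
		return hdr | DESC_HDR_MODE0_MDEU_MD5;
	case SHA1_DIGEST_SIZE:
		return hdr | DESC_HDR_MODE0_MDEU_SHA1;
	case SHA224_DIGEST_SIZE:
		return hdr | DESC_HDR_MODE0_MDEU_SHA224;
	default:
		return hdr | DESC_HDR_MODE0_MDEU_SHA256;
	}
}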