author     Jiri Kosina <jkosina@suse.cz>   2010-06-16 12:08:13 -0400
committer  Jiri Kosina <jkosina@suse.cz>   2010-06-16 12:08:13 -0400
commit     f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b (patch)
tree       c2c130a74be25b0b2dff992e1a195e2728bdaadd /drivers/crypto
parent     fd0961ff67727482bb20ca7e8ea97b83e9de2ddb (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)

Merge branch 'master' into for-next
Diffstat (limited to 'drivers/crypto')

-rw-r--r--  drivers/crypto/Kconfig                |   21
-rw-r--r--  drivers/crypto/Makefile               |    4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c  |   13
-rw-r--r--  drivers/crypto/geode-aes.c            |   36
-rw-r--r--  drivers/crypto/hifn_795x.c            |   18
-rw-r--r--  drivers/crypto/mv_cesa.c              |  692
-rw-r--r--  drivers/crypto/mv_cesa.h              |   40
-rw-r--r--  drivers/crypto/n2_asm.S               |   95
-rw-r--r--  drivers/crypto/n2_core.c              | 2090
-rw-r--r--  drivers/crypto/n2_core.h              |  231
-rw-r--r--  drivers/crypto/omap-sham.c            | 1259
-rw-r--r--  drivers/crypto/talitos.c              |  708
-rw-r--r--  drivers/crypto/talitos.h              |   12

13 files changed, 5060 insertions, 159 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1ca..fbf94cf496f0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA | |||
170 | 170 | ||
171 | Currently the driver supports AES in ECB and CBC mode without DMA. | 171 | Currently the driver supports AES in ECB and CBC mode without DMA. |
172 | 172 | ||
173 | config CRYPTO_DEV_NIAGARA2 | ||
174 | tristate "Niagara2 Stream Processing Unit driver" | ||
175 | select CRYPTO_ALGAPI | ||
176 | depends on SPARC64 | ||
177 | help | ||
178 | Each core of a Niagara2 processor contains a Stream | ||
179 | Processing Unit, which itself contains several cryptographic | ||
180 | sub-units. One set provides the Modular Arithmetic Unit, | ||
181 | used for SSL offload. The other set provides the Cipher | ||
182 | Group, which can perform encryption, decryption, hashing, | ||
183 | checksumming, and raw copies. | ||
184 | |||
173 | config CRYPTO_DEV_HIFN_795X | 185 | config CRYPTO_DEV_HIFN_795X |
174 | tristate "Driver HIFN 795x crypto accelerator chips" | 186 | tristate "Driver HIFN 795x crypto accelerator chips" |
175 | select CRYPTO_DES | 187 | select CRYPTO_DES |
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX | |||
222 | help | 234 | help |
223 | This option allows you to have support for AMCC crypto acceleration. | 235 | This option allows you to have support for AMCC crypto acceleration. |
224 | 236 | ||
237 | config CRYPTO_DEV_OMAP_SHAM | ||
238 | tristate "Support for OMAP SHA1/MD5 hw accelerator" | ||
239 | depends on ARCH_OMAP2 || ARCH_OMAP3 | ||
240 | select CRYPTO_SHA1 | ||
241 | select CRYPTO_MD5 | ||
242 | help | ||
243 | OMAP processors have SHA1/MD5 hw accelerator. Select this if you | ||
244 | want to use the OMAP module for SHA1/MD5 algorithms. | ||
245 | |||
225 | endif # CRYPTO_HW | 246 | endif # CRYPTO_HW |
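
Both of the new Kconfig entries hook hardware engines into the generic kernel crypto API rather than exposing a device-specific interface, so a consumer simply asks for an algorithm by name and the highest-priority registered implementation (the hardware one, when its driver is loaded) services the request. As a rough illustration only — this module is not part of the merge, and the `ahash_demo_*`/`demo_*` names are made up — a caller written against the crypto API of this era might look like the sketch below.

```c
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/sha.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* Completion callback used when the selected "sha1" implementation is
 * asynchronous (as the hardware drivers added here are). */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int __init ahash_demo_init(void)
{
	static u8 buf[] = "some data to hash";
	static u8 digest[SHA1_DIGEST_SIZE];
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int err;

	/* Highest-priority "sha1" wins: a hardware ahash when its driver
	 * is registered, otherwise the generic software implementation. */
	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_complete, &res);
	sg_init_one(&sg, buf, sizeof(buf) - 1);
	ahash_request_set_crypt(req, &sg, digest, sizeof(buf) - 1);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

static void __exit ahash_demo_exit(void)
{
}

module_init(ahash_demo_init);
module_exit(ahash_demo_exit);
MODULE_LICENSE("GPL");
```

The same request-by-name pattern applies to the "hmac(sha1)" ahash that the mv_cesa changes below register.
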
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f942..6dbbe00c4524 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o | 1 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o |
2 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o | 2 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o |
3 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o | 3 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o |
4 | obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o | ||
5 | n2_crypto-objs := n2_core.o n2_asm.o | ||
4 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o | 6 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o |
5 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o | 7 | obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o |
6 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o | 8 | obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o |
7 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o | 9 | obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o |
8 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ | 10 | obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ |
11 | obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o | ||
12 | |||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3aa..983530ba04a7 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1158,7 +1158,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev, | |||
1158 | struct device *dev = &ofdev->dev; | 1158 | struct device *dev = &ofdev->dev; |
1159 | struct crypto4xx_core_device *core_dev; | 1159 | struct crypto4xx_core_device *core_dev; |
1160 | 1160 | ||
1161 | rc = of_address_to_resource(ofdev->node, 0, &res); | 1161 | rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); |
1162 | if (rc) | 1162 | if (rc) |
1163 | return -ENODEV; | 1163 | return -ENODEV; |
1164 | 1164 | ||
@@ -1215,13 +1215,13 @@ static int __init crypto4xx_probe(struct of_device *ofdev, | |||
1215 | (unsigned long) dev); | 1215 | (unsigned long) dev); |
1216 | 1216 | ||
1217 | /* Register for Crypto isr, Crypto Engine IRQ */ | 1217 | /* Register for Crypto isr, Crypto Engine IRQ */ |
1218 | core_dev->irq = irq_of_parse_and_map(ofdev->node, 0); | 1218 | core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); |
1219 | rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, | 1219 | rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, |
1220 | core_dev->dev->name, dev); | 1220 | core_dev->dev->name, dev); |
1221 | if (rc) | 1221 | if (rc) |
1222 | goto err_request_irq; | 1222 | goto err_request_irq; |
1223 | 1223 | ||
1224 | core_dev->dev->ce_base = of_iomap(ofdev->node, 0); | 1224 | core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); |
1225 | if (!core_dev->dev->ce_base) { | 1225 | if (!core_dev->dev->ce_base) { |
1226 | dev_err(dev, "failed to of_iomap\n"); | 1226 | dev_err(dev, "failed to of_iomap\n"); |
1227 | goto err_iomap; | 1227 | goto err_iomap; |
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = { | |||
1281 | }; | 1281 | }; |
1282 | 1282 | ||
1283 | static struct of_platform_driver crypto4xx_driver = { | 1283 | static struct of_platform_driver crypto4xx_driver = { |
1284 | .name = "crypto4xx", | 1284 | .driver = { |
1285 | .match_table = crypto4xx_match, | 1285 | .name = "crypto4xx", |
1286 | .owner = THIS_MODULE, | ||
1287 | .of_match_table = crypto4xx_match, | ||
1288 | }, | ||
1286 | .probe = crypto4xx_probe, | 1289 | .probe = crypto4xx_probe, |
1287 | .remove = crypto4xx_remove, | 1290 | .remove = crypto4xx_remove, |
1288 | }; | 1291 | }; |
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba691..09389dd2f96b 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@ | |||
15 | #include <crypto/algapi.h> | 15 | #include <crypto/algapi.h> |
16 | #include <crypto/aes.h> | 16 | #include <crypto/aes.h> |
17 | 17 | ||
18 | #include <asm/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/delay.h> | 19 | #include <linux/delay.h> |
20 | 20 | ||
21 | #include "geode-aes.h" | 21 | #include "geode-aes.h" |
22 | 22 | ||
23 | /* Static structures */ | 23 | /* Static structures */ |
24 | 24 | ||
25 | static void __iomem * _iobase; | 25 | static void __iomem *_iobase; |
26 | static spinlock_t lock; | 26 | static spinlock_t lock; |
27 | 27 | ||
28 | /* Write a 128 bit field (either a writable key or IV) */ | 28 | /* Write a 128 bit field (either a writable key or IV) */ |
@@ -30,7 +30,7 @@ static inline void | |||
30 | _writefield(u32 offset, void *value) | 30 | _writefield(u32 offset, void *value) |
31 | { | 31 | { |
32 | int i; | 32 | int i; |
33 | for(i = 0; i < 4; i++) | 33 | for (i = 0; i < 4; i++) |
34 | iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); | 34 | iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); |
35 | } | 35 | } |
36 | 36 | ||
@@ -39,7 +39,7 @@ static inline void | |||
39 | _readfield(u32 offset, void *value) | 39 | _readfield(u32 offset, void *value) |
40 | { | 40 | { |
41 | int i; | 41 | int i; |
42 | for(i = 0; i < 4; i++) | 42 | for (i = 0; i < 4; i++) |
43 | ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4)); | 43 | ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4)); |
44 | } | 44 | } |
45 | 45 | ||
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags) | |||
59 | do { | 59 | do { |
60 | status = ioread32(_iobase + AES_INTR_REG); | 60 | status = ioread32(_iobase + AES_INTR_REG); |
61 | cpu_relax(); | 61 | cpu_relax(); |
62 | } while(!(status & AES_INTRA_PENDING) && --counter); | 62 | } while (!(status & AES_INTRA_PENDING) && --counter); |
63 | 63 | ||
64 | /* Clear the event */ | 64 | /* Clear the event */ |
65 | iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG); | 65 | iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG); |
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc, | |||
317 | err = blkcipher_walk_virt(desc, &walk); | 317 | err = blkcipher_walk_virt(desc, &walk); |
318 | op->iv = walk.iv; | 318 | op->iv = walk.iv; |
319 | 319 | ||
320 | while((nbytes = walk.nbytes)) { | 320 | while ((nbytes = walk.nbytes)) { |
321 | op->src = walk.src.virt.addr, | 321 | op->src = walk.src.virt.addr, |
322 | op->dst = walk.dst.virt.addr; | 322 | op->dst = walk.dst.virt.addr; |
323 | op->mode = AES_MODE_CBC; | 323 | op->mode = AES_MODE_CBC; |
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, | |||
349 | err = blkcipher_walk_virt(desc, &walk); | 349 | err = blkcipher_walk_virt(desc, &walk); |
350 | op->iv = walk.iv; | 350 | op->iv = walk.iv; |
351 | 351 | ||
352 | while((nbytes = walk.nbytes)) { | 352 | while ((nbytes = walk.nbytes)) { |
353 | op->src = walk.src.virt.addr, | 353 | op->src = walk.src.virt.addr, |
354 | op->dst = walk.dst.virt.addr; | 354 | op->dst = walk.dst.virt.addr; |
355 | op->mode = AES_MODE_CBC; | 355 | op->mode = AES_MODE_CBC; |
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc, | |||
429 | blkcipher_walk_init(&walk, dst, src, nbytes); | 429 | blkcipher_walk_init(&walk, dst, src, nbytes); |
430 | err = blkcipher_walk_virt(desc, &walk); | 430 | err = blkcipher_walk_virt(desc, &walk); |
431 | 431 | ||
432 | while((nbytes = walk.nbytes)) { | 432 | while ((nbytes = walk.nbytes)) { |
433 | op->src = walk.src.virt.addr, | 433 | op->src = walk.src.virt.addr, |
434 | op->dst = walk.dst.virt.addr; | 434 | op->dst = walk.dst.virt.addr; |
435 | op->mode = AES_MODE_ECB; | 435 | op->mode = AES_MODE_ECB; |
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc, | |||
459 | blkcipher_walk_init(&walk, dst, src, nbytes); | 459 | blkcipher_walk_init(&walk, dst, src, nbytes); |
460 | err = blkcipher_walk_virt(desc, &walk); | 460 | err = blkcipher_walk_virt(desc, &walk); |
461 | 461 | ||
462 | while((nbytes = walk.nbytes)) { | 462 | while ((nbytes = walk.nbytes)) { |
463 | op->src = walk.src.virt.addr, | 463 | op->src = walk.src.virt.addr, |
464 | op->dst = walk.dst.virt.addr; | 464 | op->dst = walk.dst.virt.addr; |
465 | op->mode = AES_MODE_ECB; | 465 | op->mode = AES_MODE_ECB; |
@@ -518,11 +518,12 @@ static int __devinit | |||
518 | geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) | 518 | geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) |
519 | { | 519 | { |
520 | int ret; | 520 | int ret; |
521 | 521 | ret = pci_enable_device(dev); | |
522 | if ((ret = pci_enable_device(dev))) | 522 | if (ret) |
523 | return ret; | 523 | return ret; |
524 | 524 | ||
525 | if ((ret = pci_request_regions(dev, "geode-aes"))) | 525 | ret = pci_request_regions(dev, "geode-aes"); |
526 | if (ret) | ||
526 | goto eenable; | 527 | goto eenable; |
527 | 528 | ||
528 | _iobase = pci_iomap(dev, 0, 0); | 529 | _iobase = pci_iomap(dev, 0, 0); |
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
537 | /* Clear any pending activity */ | 538 | /* Clear any pending activity */ |
538 | iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG); | 539 | iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG); |
539 | 540 | ||
540 | if ((ret = crypto_register_alg(&geode_alg))) | 541 | ret = crypto_register_alg(&geode_alg); |
542 | if (ret) | ||
541 | goto eiomap; | 543 | goto eiomap; |
542 | 544 | ||
543 | if ((ret = crypto_register_alg(&geode_ecb_alg))) | 545 | ret = crypto_register_alg(&geode_ecb_alg); |
546 | if (ret) | ||
544 | goto ealg; | 547 | goto ealg; |
545 | 548 | ||
546 | if ((ret = crypto_register_alg(&geode_cbc_alg))) | 549 | ret = crypto_register_alg(&geode_cbc_alg); |
550 | if (ret) | ||
547 | goto eecb; | 551 | goto eecb; |
548 | 552 | ||
549 | printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n"); | 553 | printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n"); |
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b1713b54..16fce3aadf4d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@ struct hifn_crypto_alg | |||
638 | 638 | ||
639 | #define ASYNC_FLAGS_MISALIGNED (1<<0) | 639 | #define ASYNC_FLAGS_MISALIGNED (1<<0) |
640 | 640 | ||
641 | struct ablkcipher_walk | 641 | struct hifn_cipher_walk |
642 | { | 642 | { |
643 | struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; | 643 | struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; |
644 | u32 flags; | 644 | u32 flags; |
@@ -657,7 +657,7 @@ struct hifn_request_context | |||
657 | u8 *iv; | 657 | u8 *iv; |
658 | unsigned int ivsize; | 658 | unsigned int ivsize; |
659 | u8 op, type, mode, unused; | 659 | u8 op, type, mode, unused; |
660 | struct ablkcipher_walk walk; | 660 | struct hifn_cipher_walk walk; |
661 | }; | 661 | }; |
662 | 662 | ||
663 | #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) | 663 | #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) |
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev, | |||
1417 | return 0; | 1417 | return 0; |
1418 | } | 1418 | } |
1419 | 1419 | ||
1420 | static int ablkcipher_walk_init(struct ablkcipher_walk *w, | 1420 | static int hifn_cipher_walk_init(struct hifn_cipher_walk *w, |
1421 | int num, gfp_t gfp_flags) | 1421 | int num, gfp_t gfp_flags) |
1422 | { | 1422 | { |
1423 | int i; | 1423 | int i; |
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w, | |||
1442 | return i; | 1442 | return i; |
1443 | } | 1443 | } |
1444 | 1444 | ||
1445 | static void ablkcipher_walk_exit(struct ablkcipher_walk *w) | 1445 | static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w) |
1446 | { | 1446 | { |
1447 | int i; | 1447 | int i; |
1448 | 1448 | ||
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, | |||
1486 | return idx; | 1486 | return idx; |
1487 | } | 1487 | } |
1488 | 1488 | ||
1489 | static int ablkcipher_walk(struct ablkcipher_request *req, | 1489 | static int hifn_cipher_walk(struct ablkcipher_request *req, |
1490 | struct ablkcipher_walk *w) | 1490 | struct hifn_cipher_walk *w) |
1491 | { | 1491 | { |
1492 | struct scatterlist *dst, *t; | 1492 | struct scatterlist *dst, *t; |
1493 | unsigned int nbytes = req->nbytes, offset, copy, diff; | 1493 | unsigned int nbytes = req->nbytes, offset, copy, diff; |
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req) | |||
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { | 1602 | if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { |
1603 | err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); | 1603 | err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC); |
1604 | if (err < 0) | 1604 | if (err < 0) |
1605 | return err; | 1605 | return err; |
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | sg_num = ablkcipher_walk(req, &rctx->walk); | 1608 | sg_num = hifn_cipher_walk(req, &rctx->walk); |
1609 | if (sg_num < 0) { | 1609 | if (sg_num < 0) { |
1610 | err = sg_num; | 1610 | err = sg_num; |
1611 | goto err_out_exit; | 1611 | goto err_out_exit; |
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error) | |||
1806 | kunmap_atomic(saddr, KM_SOFTIRQ0); | 1806 | kunmap_atomic(saddr, KM_SOFTIRQ0); |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | ablkcipher_walk_exit(&rctx->walk); | 1809 | hifn_cipher_walk_exit(&rctx->walk); |
1810 | } | 1810 | } |
1811 | 1811 | ||
1812 | req->base.complete(&req->base, error); | 1812 | req->base.complete(&req->base, error); |
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc43..e095422b58dd 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/scatterlist.h> | 16 | #include <linux/scatterlist.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <crypto/internal/hash.h> | ||
19 | #include <crypto/sha.h> | ||
18 | 20 | ||
19 | #include "mv_cesa.h" | 21 | #include "mv_cesa.h" |
22 | |||
23 | #define MV_CESA "MV-CESA:" | ||
24 | #define MAX_HW_HASH_SIZE 0xFFFF | ||
25 | |||
20 | /* | 26 | /* |
21 | * STM: | 27 | * STM: |
22 | * /---------------------------------------\ | 28 | * /---------------------------------------\ |
@@ -39,10 +45,12 @@ enum engine_status { | |||
39 | * @dst_sg_it: sg iterator for dst | 45 | * @dst_sg_it: sg iterator for dst |
40 | * @sg_src_left: bytes left in src to process (scatter list) | 46 | * @sg_src_left: bytes left in src to process (scatter list) |
41 | * @src_start: offset to add to src start position (scatter list) | 47 | * @src_start: offset to add to src start position (scatter list) |
42 | * @crypt_len: length of current crypt process | 48 | * @crypt_len: length of current hw crypt/hash process |
49 | * @hw_nbytes: total bytes to process in hw for this request | ||
50 | * @copy_back: whether to copy data back (crypt) or not (hash) | ||
43 | * @sg_dst_left: bytes left dst to process in this scatter list | 51 | * @sg_dst_left: bytes left dst to process in this scatter list |
44 | * @dst_start: offset to add to dst start position (scatter list) | 52 | * @dst_start: offset to add to dst start position (scatter list) |
45 | * @total_req_bytes: total number of bytes processed (request). | 53 | * @hw_processed_bytes: number of bytes processed by hw (request). |
46 | * | 54 | * |
47 | * sg helper are used to iterate over the scatterlist. Since the size of the | 55 | * sg helper are used to iterate over the scatterlist. Since the size of the |
48 | * SRAM may be less than the scatter size, this struct struct is used to keep | 56 | * SRAM may be less than the scatter size, this struct struct is used to keep |
@@ -51,15 +59,19 @@ enum engine_status { | |||
51 | struct req_progress { | 59 | struct req_progress { |
52 | struct sg_mapping_iter src_sg_it; | 60 | struct sg_mapping_iter src_sg_it; |
53 | struct sg_mapping_iter dst_sg_it; | 61 | struct sg_mapping_iter dst_sg_it; |
62 | void (*complete) (void); | ||
63 | void (*process) (int is_first); | ||
54 | 64 | ||
55 | /* src mostly */ | 65 | /* src mostly */ |
56 | int sg_src_left; | 66 | int sg_src_left; |
57 | int src_start; | 67 | int src_start; |
58 | int crypt_len; | 68 | int crypt_len; |
69 | int hw_nbytes; | ||
59 | /* dst mostly */ | 70 | /* dst mostly */ |
71 | int copy_back; | ||
60 | int sg_dst_left; | 72 | int sg_dst_left; |
61 | int dst_start; | 73 | int dst_start; |
62 | int total_req_bytes; | 74 | int hw_processed_bytes; |
63 | }; | 75 | }; |
64 | 76 | ||
65 | struct crypto_priv { | 77 | struct crypto_priv { |
@@ -72,10 +84,12 @@ struct crypto_priv { | |||
72 | spinlock_t lock; | 84 | spinlock_t lock; |
73 | struct crypto_queue queue; | 85 | struct crypto_queue queue; |
74 | enum engine_status eng_st; | 86 | enum engine_status eng_st; |
75 | struct ablkcipher_request *cur_req; | 87 | struct crypto_async_request *cur_req; |
76 | struct req_progress p; | 88 | struct req_progress p; |
77 | int max_req_size; | 89 | int max_req_size; |
78 | int sram_size; | 90 | int sram_size; |
91 | int has_sha1; | ||
92 | int has_hmac_sha1; | ||
79 | }; | 93 | }; |
80 | 94 | ||
81 | static struct crypto_priv *cpg; | 95 | static struct crypto_priv *cpg; |
@@ -97,6 +111,31 @@ struct mv_req_ctx { | |||
97 | int decrypt; | 111 | int decrypt; |
98 | }; | 112 | }; |
99 | 113 | ||
114 | enum hash_op { | ||
115 | COP_SHA1, | ||
116 | COP_HMAC_SHA1 | ||
117 | }; | ||
118 | |||
119 | struct mv_tfm_hash_ctx { | ||
120 | struct crypto_shash *fallback; | ||
121 | struct crypto_shash *base_hash; | ||
122 | u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; | ||
123 | int count_add; | ||
124 | enum hash_op op; | ||
125 | }; | ||
126 | |||
127 | struct mv_req_hash_ctx { | ||
128 | u64 count; | ||
129 | u32 state[SHA1_DIGEST_SIZE / 4]; | ||
130 | u8 buffer[SHA1_BLOCK_SIZE]; | ||
131 | int first_hash; /* marks that we don't have previous state */ | ||
132 | int last_chunk; /* marks that this is the 'final' request */ | ||
133 | int extra_bytes; /* unprocessed bytes in buffer */ | ||
134 | enum hash_op op; | ||
135 | int count_add; | ||
136 | struct scatterlist dummysg; | ||
137 | }; | ||
138 | |||
100 | static void compute_aes_dec_key(struct mv_ctx *ctx) | 139 | static void compute_aes_dec_key(struct mv_ctx *ctx) |
101 | { | 140 | { |
102 | struct crypto_aes_ctx gen_aes_key; | 141 | struct crypto_aes_ctx gen_aes_key; |
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | |||
144 | return 0; | 183 | return 0; |
145 | } | 184 | } |
146 | 185 | ||
147 | static void setup_data_in(struct ablkcipher_request *req) | 186 | static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) |
148 | { | 187 | { |
149 | int ret; | 188 | int ret; |
150 | void *buf; | 189 | void *sbuf; |
190 | int copied = 0; | ||
151 | 191 | ||
152 | if (!cpg->p.sg_src_left) { | 192 | while (1) { |
153 | ret = sg_miter_next(&cpg->p.src_sg_it); | 193 | if (!p->sg_src_left) { |
154 | BUG_ON(!ret); | 194 | ret = sg_miter_next(&p->src_sg_it); |
155 | cpg->p.sg_src_left = cpg->p.src_sg_it.length; | 195 | BUG_ON(!ret); |
156 | cpg->p.src_start = 0; | 196 | p->sg_src_left = p->src_sg_it.length; |
157 | } | 197 | p->src_start = 0; |
158 | 198 | } | |
159 | cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size); | ||
160 | |||
161 | buf = cpg->p.src_sg_it.addr; | ||
162 | buf += cpg->p.src_start; | ||
163 | 199 | ||
164 | memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len); | 200 | sbuf = p->src_sg_it.addr + p->src_start; |
201 | |||
202 | if (p->sg_src_left <= len - copied) { | ||
203 | memcpy(dbuf + copied, sbuf, p->sg_src_left); | ||
204 | copied += p->sg_src_left; | ||
205 | p->sg_src_left = 0; | ||
206 | if (copied >= len) | ||
207 | break; | ||
208 | } else { | ||
209 | int copy_len = len - copied; | ||
210 | memcpy(dbuf + copied, sbuf, copy_len); | ||
211 | p->src_start += copy_len; | ||
212 | p->sg_src_left -= copy_len; | ||
213 | break; | ||
214 | } | ||
215 | } | ||
216 | } | ||
165 | 217 | ||
166 | cpg->p.sg_src_left -= cpg->p.crypt_len; | 218 | static void setup_data_in(void) |
167 | cpg->p.src_start += cpg->p.crypt_len; | 219 | { |
220 | struct req_progress *p = &cpg->p; | ||
221 | int data_in_sram = | ||
222 | min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); | ||
223 | copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, | ||
224 | data_in_sram - p->crypt_len); | ||
225 | p->crypt_len = data_in_sram; | ||
168 | } | 226 | } |
169 | 227 | ||
170 | static void mv_process_current_q(int first_block) | 228 | static void mv_process_current_q(int first_block) |
171 | { | 229 | { |
172 | struct ablkcipher_request *req = cpg->cur_req; | 230 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); |
173 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 231 | struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
174 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 232 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
175 | struct sec_accel_config op; | 233 | struct sec_accel_config op; |
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block) | |||
179 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; | 237 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; |
180 | break; | 238 | break; |
181 | case COP_AES_CBC: | 239 | case COP_AES_CBC: |
240 | default: | ||
182 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; | 241 | op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; |
183 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | | 242 | op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | |
184 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); | 243 | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); |
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block) | |||
211 | ENC_P_DST(SRAM_DATA_OUT_START); | 270 | ENC_P_DST(SRAM_DATA_OUT_START); |
212 | op.enc_key_p = SRAM_DATA_KEY_P; | 271 | op.enc_key_p = SRAM_DATA_KEY_P; |
213 | 272 | ||
214 | setup_data_in(req); | 273 | setup_data_in(); |
215 | op.enc_len = cpg->p.crypt_len; | 274 | op.enc_len = cpg->p.crypt_len; |
216 | memcpy(cpg->sram + SRAM_CONFIG, &op, | 275 | memcpy(cpg->sram + SRAM_CONFIG, &op, |
217 | sizeof(struct sec_accel_config)); | 276 | sizeof(struct sec_accel_config)); |
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block) | |||
228 | 287 | ||
229 | static void mv_crypto_algo_completion(void) | 288 | static void mv_crypto_algo_completion(void) |
230 | { | 289 | { |
231 | struct ablkcipher_request *req = cpg->cur_req; | 290 | struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); |
232 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 291 | struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); |
233 | 292 | ||
293 | sg_miter_stop(&cpg->p.src_sg_it); | ||
294 | sg_miter_stop(&cpg->p.dst_sg_it); | ||
295 | |||
234 | if (req_ctx->op != COP_AES_CBC) | 296 | if (req_ctx->op != COP_AES_CBC) |
235 | return ; | 297 | return ; |
236 | 298 | ||
237 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | 299 | memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); |
238 | } | 300 | } |
239 | 301 | ||
302 | static void mv_process_hash_current(int first_block) | ||
303 | { | ||
304 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | ||
305 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | ||
306 | struct req_progress *p = &cpg->p; | ||
307 | struct sec_accel_config op = { 0 }; | ||
308 | int is_last; | ||
309 | |||
310 | switch (req_ctx->op) { | ||
311 | case COP_SHA1: | ||
312 | default: | ||
313 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; | ||
314 | break; | ||
315 | case COP_HMAC_SHA1: | ||
316 | op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | op.mac_src_p = | ||
321 | MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) | ||
322 | req_ctx-> | ||
323 | count); | ||
324 | |||
325 | setup_data_in(); | ||
326 | |||
327 | op.mac_digest = | ||
328 | MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); | ||
329 | op.mac_iv = | ||
330 | MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | | ||
331 | MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); | ||
332 | |||
333 | is_last = req_ctx->last_chunk | ||
334 | && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) | ||
335 | && (req_ctx->count <= MAX_HW_HASH_SIZE); | ||
336 | if (req_ctx->first_hash) { | ||
337 | if (is_last) | ||
338 | op.config |= CFG_NOT_FRAG; | ||
339 | else | ||
340 | op.config |= CFG_FIRST_FRAG; | ||
341 | |||
342 | req_ctx->first_hash = 0; | ||
343 | } else { | ||
344 | if (is_last) | ||
345 | op.config |= CFG_LAST_FRAG; | ||
346 | else | ||
347 | op.config |= CFG_MID_FRAG; | ||
348 | } | ||
349 | |||
350 | memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | ||
351 | |||
352 | writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | ||
353 | /* GO */ | ||
354 | writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | ||
355 | |||
356 | /* | ||
357 | * XXX: add timer if the interrupt does not occur for some mystery | ||
358 | * reason | ||
359 | */ | ||
360 | } | ||
361 | |||
362 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, | ||
363 | struct shash_desc *desc) | ||
364 | { | ||
365 | int i; | ||
366 | struct sha1_state shash_state; | ||
367 | |||
368 | shash_state.count = ctx->count + ctx->count_add; | ||
369 | for (i = 0; i < 5; i++) | ||
370 | shash_state.state[i] = ctx->state[i]; | ||
371 | memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); | ||
372 | return crypto_shash_import(desc, &shash_state); | ||
373 | } | ||
374 | |||
375 | static int mv_hash_final_fallback(struct ahash_request *req) | ||
376 | { | ||
377 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
378 | struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | ||
379 | struct { | ||
380 | struct shash_desc shash; | ||
381 | char ctx[crypto_shash_descsize(tfm_ctx->fallback)]; | ||
382 | } desc; | ||
383 | int rc; | ||
384 | |||
385 | desc.shash.tfm = tfm_ctx->fallback; | ||
386 | desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
387 | if (unlikely(req_ctx->first_hash)) { | ||
388 | crypto_shash_init(&desc.shash); | ||
389 | crypto_shash_update(&desc.shash, req_ctx->buffer, | ||
390 | req_ctx->extra_bytes); | ||
391 | } else { | ||
392 | /* only SHA1 for now.... | ||
393 | */ | ||
394 | rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash); | ||
395 | if (rc) | ||
396 | goto out; | ||
397 | } | ||
398 | rc = crypto_shash_final(&desc.shash, req->result); | ||
399 | out: | ||
400 | return rc; | ||
401 | } | ||
402 | |||
403 | static void mv_hash_algo_completion(void) | ||
404 | { | ||
405 | struct ahash_request *req = ahash_request_cast(cpg->cur_req); | ||
406 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | ||
407 | |||
408 | if (ctx->extra_bytes) | ||
409 | copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); | ||
410 | sg_miter_stop(&cpg->p.src_sg_it); | ||
411 | |||
412 | ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); | ||
413 | ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); | ||
414 | ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); | ||
415 | ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); | ||
416 | ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); | ||
417 | |||
418 | if (likely(ctx->last_chunk)) { | ||
419 | if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { | ||
420 | memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, | ||
421 | crypto_ahash_digestsize(crypto_ahash_reqtfm | ||
422 | (req))); | ||
423 | } else | ||
424 | mv_hash_final_fallback(req); | ||
425 | } | ||
426 | } | ||
427 | |||
240 | static void dequeue_complete_req(void) | 428 | static void dequeue_complete_req(void) |
241 | { | 429 | { |
242 | struct ablkcipher_request *req = cpg->cur_req; | 430 | struct crypto_async_request *req = cpg->cur_req; |
243 | void *buf; | 431 | void *buf; |
244 | int ret; | 432 | int ret; |
433 | cpg->p.hw_processed_bytes += cpg->p.crypt_len; | ||
434 | if (cpg->p.copy_back) { | ||
435 | int need_copy_len = cpg->p.crypt_len; | ||
436 | int sram_offset = 0; | ||
437 | do { | ||
438 | int dst_copy; | ||
439 | |||
440 | if (!cpg->p.sg_dst_left) { | ||
441 | ret = sg_miter_next(&cpg->p.dst_sg_it); | ||
442 | BUG_ON(!ret); | ||
443 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | ||
444 | cpg->p.dst_start = 0; | ||
445 | } | ||
245 | 446 | ||
246 | cpg->p.total_req_bytes += cpg->p.crypt_len; | 447 | buf = cpg->p.dst_sg_it.addr; |
247 | do { | 448 | buf += cpg->p.dst_start; |
248 | int dst_copy; | ||
249 | |||
250 | if (!cpg->p.sg_dst_left) { | ||
251 | ret = sg_miter_next(&cpg->p.dst_sg_it); | ||
252 | BUG_ON(!ret); | ||
253 | cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | ||
254 | cpg->p.dst_start = 0; | ||
255 | } | ||
256 | |||
257 | buf = cpg->p.dst_sg_it.addr; | ||
258 | buf += cpg->p.dst_start; | ||
259 | 449 | ||
260 | dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left); | 450 | dst_copy = min(need_copy_len, cpg->p.sg_dst_left); |
261 | 451 | ||
262 | memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy); | 452 | memcpy(buf, |
453 | cpg->sram + SRAM_DATA_OUT_START + sram_offset, | ||
454 | dst_copy); | ||
455 | sram_offset += dst_copy; | ||
456 | cpg->p.sg_dst_left -= dst_copy; | ||
457 | need_copy_len -= dst_copy; | ||
458 | cpg->p.dst_start += dst_copy; | ||
459 | } while (need_copy_len > 0); | ||
460 | } | ||
263 | 461 | ||
264 | cpg->p.sg_dst_left -= dst_copy; | 462 | cpg->p.crypt_len = 0; |
265 | cpg->p.crypt_len -= dst_copy; | ||
266 | cpg->p.dst_start += dst_copy; | ||
267 | } while (cpg->p.crypt_len > 0); | ||
268 | 463 | ||
269 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); | 464 | BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); |
270 | if (cpg->p.total_req_bytes < req->nbytes) { | 465 | if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { |
271 | /* process next scatter list entry */ | 466 | /* process next scatter list entry */ |
272 | cpg->eng_st = ENGINE_BUSY; | 467 | cpg->eng_st = ENGINE_BUSY; |
273 | mv_process_current_q(0); | 468 | cpg->p.process(0); |
274 | } else { | 469 | } else { |
275 | sg_miter_stop(&cpg->p.src_sg_it); | 470 | cpg->p.complete(); |
276 | sg_miter_stop(&cpg->p.dst_sg_it); | ||
277 | mv_crypto_algo_completion(); | ||
278 | cpg->eng_st = ENGINE_IDLE; | 471 | cpg->eng_st = ENGINE_IDLE; |
279 | req->base.complete(&req->base, 0); | 472 | local_bh_disable(); |
473 | req->complete(req, 0); | ||
474 | local_bh_enable(); | ||
280 | } | 475 | } |
281 | } | 476 | } |
282 | 477 | ||
283 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | 478 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) |
284 | { | 479 | { |
285 | int i = 0; | 480 | int i = 0; |
286 | 481 | size_t cur_len; | |
287 | do { | 482 | |
288 | total_bytes -= sl[i].length; | 483 | while (1) { |
289 | i++; | 484 | cur_len = sl[i].length; |
290 | 485 | ++i; | |
291 | } while (total_bytes > 0); | 486 | if (total_bytes > cur_len) |
487 | total_bytes -= cur_len; | ||
488 | else | ||
489 | break; | ||
490 | } | ||
292 | 491 | ||
293 | return i; | 492 | return i; |
294 | } | 493 | } |
295 | 494 | ||
296 | static void mv_enqueue_new_req(struct ablkcipher_request *req) | 495 | static void mv_start_new_crypt_req(struct ablkcipher_request *req) |
297 | { | 496 | { |
497 | struct req_progress *p = &cpg->p; | ||
298 | int num_sgs; | 498 | int num_sgs; |
299 | 499 | ||
300 | cpg->cur_req = req; | 500 | cpg->cur_req = &req->base; |
301 | memset(&cpg->p, 0, sizeof(struct req_progress)); | 501 | memset(p, 0, sizeof(struct req_progress)); |
502 | p->hw_nbytes = req->nbytes; | ||
503 | p->complete = mv_crypto_algo_completion; | ||
504 | p->process = mv_process_current_q; | ||
505 | p->copy_back = 1; | ||
302 | 506 | ||
303 | num_sgs = count_sgs(req->src, req->nbytes); | 507 | num_sgs = count_sgs(req->src, req->nbytes); |
304 | sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | 508 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); |
305 | 509 | ||
306 | num_sgs = count_sgs(req->dst, req->nbytes); | 510 | num_sgs = count_sgs(req->dst, req->nbytes); |
307 | sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); | 511 | sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); |
512 | |||
308 | mv_process_current_q(1); | 513 | mv_process_current_q(1); |
309 | } | 514 | } |
310 | 515 | ||
516 | static void mv_start_new_hash_req(struct ahash_request *req) | ||
517 | { | ||
518 | struct req_progress *p = &cpg->p; | ||
519 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | ||
520 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
521 | int num_sgs, hw_bytes, old_extra_bytes, rc; | ||
522 | cpg->cur_req = &req->base; | ||
523 | memset(p, 0, sizeof(struct req_progress)); | ||
524 | hw_bytes = req->nbytes + ctx->extra_bytes; | ||
525 | old_extra_bytes = ctx->extra_bytes; | ||
526 | |||
527 | if (unlikely(ctx->extra_bytes)) { | ||
528 | memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | ||
529 | ctx->extra_bytes); | ||
530 | p->crypt_len = ctx->extra_bytes; | ||
531 | } | ||
532 | |||
533 | memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); | ||
534 | |||
535 | if (unlikely(!ctx->first_hash)) { | ||
536 | writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | ||
537 | writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | ||
538 | writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | ||
539 | writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | ||
540 | writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | ||
541 | } | ||
542 | |||
543 | ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; | ||
544 | if (ctx->extra_bytes != 0 | ||
545 | && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) | ||
546 | hw_bytes -= ctx->extra_bytes; | ||
547 | else | ||
548 | ctx->extra_bytes = 0; | ||
549 | |||
550 | num_sgs = count_sgs(req->src, req->nbytes); | ||
551 | sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | ||
552 | |||
553 | if (hw_bytes) { | ||
554 | p->hw_nbytes = hw_bytes; | ||
555 | p->complete = mv_hash_algo_completion; | ||
556 | p->process = mv_process_hash_current; | ||
557 | |||
558 | mv_process_hash_current(1); | ||
559 | } else { | ||
560 | copy_src_to_buf(p, ctx->buffer + old_extra_bytes, | ||
561 | ctx->extra_bytes - old_extra_bytes); | ||
562 | sg_miter_stop(&p->src_sg_it); | ||
563 | if (ctx->last_chunk) | ||
564 | rc = mv_hash_final_fallback(req); | ||
565 | else | ||
566 | rc = 0; | ||
567 | cpg->eng_st = ENGINE_IDLE; | ||
568 | local_bh_disable(); | ||
569 | req->base.complete(&req->base, rc); | ||
570 | local_bh_enable(); | ||
571 | } | ||
572 | } | ||
573 | |||
311 | static int queue_manag(void *data) | 574 | static int queue_manag(void *data) |
312 | { | 575 | { |
313 | cpg->eng_st = ENGINE_IDLE; | 576 | cpg->eng_st = ENGINE_IDLE; |
314 | do { | 577 | do { |
315 | struct ablkcipher_request *req; | ||
316 | struct crypto_async_request *async_req = NULL; | 578 | struct crypto_async_request *async_req = NULL; |
317 | struct crypto_async_request *backlog; | 579 | struct crypto_async_request *backlog; |
318 | 580 | ||
@@ -338,9 +600,18 @@ static int queue_manag(void *data) | |||
338 | } | 600 | } |
339 | 601 | ||
340 | if (async_req) { | 602 | if (async_req) { |
341 | req = container_of(async_req, | 603 | if (async_req->tfm->__crt_alg->cra_type != |
342 | struct ablkcipher_request, base); | 604 | &crypto_ahash_type) { |
343 | mv_enqueue_new_req(req); | 605 | struct ablkcipher_request *req = |
606 | container_of(async_req, | ||
607 | struct ablkcipher_request, | ||
608 | base); | ||
609 | mv_start_new_crypt_req(req); | ||
610 | } else { | ||
611 | struct ahash_request *req = | ||
612 | ahash_request_cast(async_req); | ||
613 | mv_start_new_hash_req(req); | ||
614 | } | ||
344 | async_req = NULL; | 615 | async_req = NULL; |
345 | } | 616 | } |
346 | 617 | ||
@@ -350,13 +621,13 @@ static int queue_manag(void *data) | |||
350 | return 0; | 621 | return 0; |
351 | } | 622 | } |
352 | 623 | ||
353 | static int mv_handle_req(struct ablkcipher_request *req) | 624 | static int mv_handle_req(struct crypto_async_request *req) |
354 | { | 625 | { |
355 | unsigned long flags; | 626 | unsigned long flags; |
356 | int ret; | 627 | int ret; |
357 | 628 | ||
358 | spin_lock_irqsave(&cpg->lock, flags); | 629 | spin_lock_irqsave(&cpg->lock, flags); |
359 | ret = ablkcipher_enqueue_request(&cpg->queue, req); | 630 | ret = crypto_enqueue_request(&cpg->queue, req); |
360 | spin_unlock_irqrestore(&cpg->lock, flags); | 631 | spin_unlock_irqrestore(&cpg->lock, flags); |
361 | wake_up_process(cpg->queue_th); | 632 | wake_up_process(cpg->queue_th); |
362 | return ret; | 633 | return ret; |
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req) | |||
369 | req_ctx->op = COP_AES_ECB; | 640 | req_ctx->op = COP_AES_ECB; |
370 | req_ctx->decrypt = 0; | 641 | req_ctx->decrypt = 0; |
371 | 642 | ||
372 | return mv_handle_req(req); | 643 | return mv_handle_req(&req->base); |
373 | } | 644 | } |
374 | 645 | ||
375 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | 646 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) |
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req) | |||
381 | req_ctx->decrypt = 1; | 652 | req_ctx->decrypt = 1; |
382 | 653 | ||
383 | compute_aes_dec_key(ctx); | 654 | compute_aes_dec_key(ctx); |
384 | return mv_handle_req(req); | 655 | return mv_handle_req(&req->base); |
385 | } | 656 | } |
386 | 657 | ||
387 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | 658 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) |
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req) | |||
391 | req_ctx->op = COP_AES_CBC; | 662 | req_ctx->op = COP_AES_CBC; |
392 | req_ctx->decrypt = 0; | 663 | req_ctx->decrypt = 0; |
393 | 664 | ||
394 | return mv_handle_req(req); | 665 | return mv_handle_req(&req->base); |
395 | } | 666 | } |
396 | 667 | ||
397 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | 668 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) |
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req) | |||
403 | req_ctx->decrypt = 1; | 674 | req_ctx->decrypt = 1; |
404 | 675 | ||
405 | compute_aes_dec_key(ctx); | 676 | compute_aes_dec_key(ctx); |
406 | return mv_handle_req(req); | 677 | return mv_handle_req(&req->base); |
407 | } | 678 | } |
408 | 679 | ||
409 | static int mv_cra_init(struct crypto_tfm *tfm) | 680 | static int mv_cra_init(struct crypto_tfm *tfm) |
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm) | |||
412 | return 0; | 683 | return 0; |
413 | } | 684 | } |
414 | 685 | ||
686 | static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, | ||
687 | int is_last, unsigned int req_len, | ||
688 | int count_add) | ||
689 | { | ||
690 | memset(ctx, 0, sizeof(*ctx)); | ||
691 | ctx->op = op; | ||
692 | ctx->count = req_len; | ||
693 | ctx->first_hash = 1; | ||
694 | ctx->last_chunk = is_last; | ||
695 | ctx->count_add = count_add; | ||
696 | } | ||
697 | |||
698 | static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, | ||
699 | unsigned req_len) | ||
700 | { | ||
701 | ctx->last_chunk = is_last; | ||
702 | ctx->count += req_len; | ||
703 | } | ||
704 | |||
705 | static int mv_hash_init(struct ahash_request *req) | ||
706 | { | ||
707 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
708 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, | ||
709 | tfm_ctx->count_add); | ||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | static int mv_hash_update(struct ahash_request *req) | ||
714 | { | ||
715 | if (!req->nbytes) | ||
716 | return 0; | ||
717 | |||
718 | mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); | ||
719 | return mv_handle_req(&req->base); | ||
720 | } | ||
721 | |||
722 | static int mv_hash_final(struct ahash_request *req) | ||
723 | { | ||
724 | struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | ||
725 | /* dummy buffer of 4 bytes */ | ||
726 | sg_init_one(&ctx->dummysg, ctx->buffer, 4); | ||
727 | /* I think I'm allowed to do that... */ | ||
728 | ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0); | ||
729 | mv_update_hash_req_ctx(ctx, 1, 0); | ||
730 | return mv_handle_req(&req->base); | ||
731 | } | ||
732 | |||
733 | static int mv_hash_finup(struct ahash_request *req) | ||
734 | { | ||
735 | if (!req->nbytes) | ||
736 | return mv_hash_final(req); | ||
737 | |||
738 | mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); | ||
739 | return mv_handle_req(&req->base); | ||
740 | } | ||
741 | |||
742 | static int mv_hash_digest(struct ahash_request *req) | ||
743 | { | ||
744 | const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | ||
745 | mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, | ||
746 | req->nbytes, tfm_ctx->count_add); | ||
747 | return mv_handle_req(&req->base); | ||
748 | } | ||
749 | |||
750 | static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, | ||
751 | const void *ostate) | ||
752 | { | ||
753 | const struct sha1_state *isha1_state = istate, *osha1_state = ostate; | ||
754 | int i; | ||
755 | for (i = 0; i < 5; i++) { | ||
756 | ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); | ||
757 | ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); | ||
758 | } | ||
759 | } | ||
760 | |||
761 | static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, | ||
762 | unsigned int keylen) | ||
763 | { | ||
764 | int rc; | ||
765 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); | ||
766 | int bs, ds, ss; | ||
767 | |||
768 | if (!ctx->base_hash) | ||
769 | return 0; | ||
770 | |||
771 | rc = crypto_shash_setkey(ctx->fallback, key, keylen); | ||
772 | if (rc) | ||
773 | return rc; | ||
774 | |||
775 | /* Can't see a way to extract the ipad/opad from the fallback tfm | ||
776 | so I'm basically copying code from the hmac module */ | ||
777 | bs = crypto_shash_blocksize(ctx->base_hash); | ||
778 | ds = crypto_shash_digestsize(ctx->base_hash); | ||
779 | ss = crypto_shash_statesize(ctx->base_hash); | ||
780 | |||
781 | { | ||
782 | struct { | ||
783 | struct shash_desc shash; | ||
784 | char ctx[crypto_shash_descsize(ctx->base_hash)]; | ||
785 | } desc; | ||
786 | unsigned int i; | ||
787 | char ipad[ss]; | ||
788 | char opad[ss]; | ||
789 | |||
790 | desc.shash.tfm = ctx->base_hash; | ||
791 | desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) & | ||
792 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
793 | |||
794 | if (keylen > bs) { | ||
795 | int err; | ||
796 | |||
797 | err = | ||
798 | crypto_shash_digest(&desc.shash, key, keylen, ipad); | ||
799 | if (err) | ||
800 | return err; | ||
801 | |||
802 | keylen = ds; | ||
803 | } else | ||
804 | memcpy(ipad, key, keylen); | ||
805 | |||
806 | memset(ipad + keylen, 0, bs - keylen); | ||
807 | memcpy(opad, ipad, bs); | ||
808 | |||
809 | for (i = 0; i < bs; i++) { | ||
810 | ipad[i] ^= 0x36; | ||
811 | opad[i] ^= 0x5c; | ||
812 | } | ||
813 | |||
814 | rc = crypto_shash_init(&desc.shash) ? : | ||
815 | crypto_shash_update(&desc.shash, ipad, bs) ? : | ||
816 | crypto_shash_export(&desc.shash, ipad) ? : | ||
817 | crypto_shash_init(&desc.shash) ? : | ||
818 | crypto_shash_update(&desc.shash, opad, bs) ? : | ||
819 | crypto_shash_export(&desc.shash, opad); | ||
820 | |||
821 | if (rc == 0) | ||
822 | mv_hash_init_ivs(ctx, ipad, opad); | ||
823 | |||
824 | return rc; | ||
825 | } | ||
826 | } | ||
827 | |||
828 | static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, | ||
829 | enum hash_op op, int count_add) | ||
830 | { | ||
831 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | ||
832 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
833 | struct crypto_shash *fallback_tfm = NULL; | ||
834 | struct crypto_shash *base_hash = NULL; | ||
835 | int err = -ENOMEM; | ||
836 | |||
837 | ctx->op = op; | ||
838 | ctx->count_add = count_add; | ||
839 | |||
840 | /* Allocate a fallback and abort if it failed. */ | ||
841 | fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, | ||
842 | CRYPTO_ALG_NEED_FALLBACK); | ||
843 | if (IS_ERR(fallback_tfm)) { | ||
844 | printk(KERN_WARNING MV_CESA | ||
845 | "Fallback driver '%s' could not be loaded!\n", | ||
846 | fallback_driver_name); | ||
847 | err = PTR_ERR(fallback_tfm); | ||
848 | goto out; | ||
849 | } | ||
850 | ctx->fallback = fallback_tfm; | ||
851 | |||
852 | if (base_hash_name) { | ||
853 | /* Allocate a hash to compute the ipad/opad of hmac. */ | ||
854 | base_hash = crypto_alloc_shash(base_hash_name, 0, | ||
855 | CRYPTO_ALG_NEED_FALLBACK); | ||
856 | if (IS_ERR(base_hash)) { | ||
857 | printk(KERN_WARNING MV_CESA | ||
858 | "Base driver '%s' could not be loaded!\n", | ||
859 | base_hash_name); | ||
860 | err = PTR_ERR(fallback_tfm); | ||
861 | goto err_bad_base; | ||
862 | } | ||
863 | } | ||
864 | ctx->base_hash = base_hash; | ||
865 | |||
866 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
867 | sizeof(struct mv_req_hash_ctx) + | ||
868 | crypto_shash_descsize(ctx->fallback)); | ||
869 | return 0; | ||
870 | err_bad_base: | ||
871 | crypto_free_shash(fallback_tfm); | ||
872 | out: | ||
873 | return err; | ||
874 | } | ||
875 | |||
876 | static void mv_cra_hash_exit(struct crypto_tfm *tfm) | ||
877 | { | ||
878 | struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
879 | |||
880 | crypto_free_shash(ctx->fallback); | ||
881 | if (ctx->base_hash) | ||
882 | crypto_free_shash(ctx->base_hash); | ||
883 | } | ||
884 | |||
885 | static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) | ||
886 | { | ||
887 | return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); | ||
888 | } | ||
889 | |||
890 | static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) | ||
891 | { | ||
892 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); | ||
893 | } | ||
894 | |||
415 | irqreturn_t crypto_int(int irq, void *priv) | 895 | irqreturn_t crypto_int(int irq, void *priv) |
416 | { | 896 | { |
417 | u32 val; | 897 | u32 val; |
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = { | |||
474 | }, | 954 | }, |
475 | }; | 955 | }; |
476 | 956 | ||
957 | struct ahash_alg mv_sha1_alg = { | ||
958 | .init = mv_hash_init, | ||
959 | .update = mv_hash_update, | ||
960 | .final = mv_hash_final, | ||
961 | .finup = mv_hash_finup, | ||
962 | .digest = mv_hash_digest, | ||
963 | .halg = { | ||
964 | .digestsize = SHA1_DIGEST_SIZE, | ||
965 | .base = { | ||
966 | .cra_name = "sha1", | ||
967 | .cra_driver_name = "mv-sha1", | ||
968 | .cra_priority = 300, | ||
969 | .cra_flags = | ||
970 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, | ||
971 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
972 | .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), | ||
973 | .cra_init = mv_cra_hash_sha1_init, | ||
974 | .cra_exit = mv_cra_hash_exit, | ||
975 | .cra_module = THIS_MODULE, | ||
976 | } | ||
977 | } | ||
978 | }; | ||
979 | |||
980 | struct ahash_alg mv_hmac_sha1_alg = { | ||
981 | .init = mv_hash_init, | ||
982 | .update = mv_hash_update, | ||
983 | .final = mv_hash_final, | ||
984 | .finup = mv_hash_finup, | ||
985 | .digest = mv_hash_digest, | ||
986 | .setkey = mv_hash_setkey, | ||
987 | .halg = { | ||
988 | .digestsize = SHA1_DIGEST_SIZE, | ||
989 | .base = { | ||
990 | .cra_name = "hmac(sha1)", | ||
991 | .cra_driver_name = "mv-hmac-sha1", | ||
992 | .cra_priority = 300, | ||
993 | .cra_flags = | ||
994 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, | ||
995 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
996 | .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), | ||
997 | .cra_init = mv_cra_hash_hmac_sha1_init, | ||
998 | .cra_exit = mv_cra_hash_exit, | ||
999 | .cra_module = THIS_MODULE, | ||
1000 | } | ||
1001 | } | ||
1002 | }; | ||
1003 | |||
477 | static int mv_probe(struct platform_device *pdev) | 1004 | static int mv_probe(struct platform_device *pdev) |
478 | { | 1005 | { |
479 | struct crypto_priv *cp; | 1006 | struct crypto_priv *cp; |
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev) | |||
482 | int ret; | 1009 | int ret; |
483 | 1010 | ||
484 | if (cpg) { | 1011 | if (cpg) { |
485 | printk(KERN_ERR "Second crypto dev?\n"); | 1012 | printk(KERN_ERR MV_CESA "Second crypto dev?\n"); |
486 | return -EEXIST; | 1013 | return -EEXIST; |
487 | } | 1014 | } |
488 | 1015 | ||
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev) | |||
496 | 1023 | ||
497 | spin_lock_init(&cp->lock); | 1024 | spin_lock_init(&cp->lock); |
498 | crypto_init_queue(&cp->queue, 50); | 1025 | crypto_init_queue(&cp->queue, 50); |
499 | cp->reg = ioremap(res->start, res->end - res->start + 1); | 1026 | cp->reg = ioremap(res->start, resource_size(res)); |
500 | if (!cp->reg) { | 1027 | if (!cp->reg) { |
501 | ret = -ENOMEM; | 1028 | ret = -ENOMEM; |
502 | goto err; | 1029 | goto err; |
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev) | |||
507 | ret = -ENXIO; | 1034 | ret = -ENXIO; |
508 | goto err_unmap_reg; | 1035 | goto err_unmap_reg; |
509 | } | 1036 | } |
510 | cp->sram_size = res->end - res->start + 1; | 1037 | cp->sram_size = resource_size(res); |
511 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | 1038 | cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; |
512 | cp->sram = ioremap(res->start, cp->sram_size); | 1039 | cp->sram = ioremap(res->start, cp->sram_size); |
513 | if (!cp->sram) { | 1040 | if (!cp->sram) { |
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev) | |||
546 | ret = crypto_register_alg(&mv_aes_alg_cbc); | 1073 | ret = crypto_register_alg(&mv_aes_alg_cbc); |
547 | if (ret) | 1074 | if (ret) |
548 | goto err_unreg_ecb; | 1075 | goto err_unreg_ecb; |
1076 | |||
1077 | ret = crypto_register_ahash(&mv_sha1_alg); | ||
1078 | if (ret == 0) | ||
1079 | cpg->has_sha1 = 1; | ||
1080 | else | ||
1081 | printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); | ||
1082 | |||
1083 | ret = crypto_register_ahash(&mv_hmac_sha1_alg); | ||
1084 | if (ret == 0) { | ||
1085 | cpg->has_hmac_sha1 = 1; | ||
1086 | } else { | ||
1087 | printk(KERN_WARNING MV_CESA | ||
1088 | "Could not register hmac-sha1 driver\n"); | ||
1089 | } | ||
1090 | |||
549 | return 0; | 1091 | return 0; |
550 | err_unreg_ecb: | 1092 | err_unreg_ecb: |
551 | crypto_unregister_alg(&mv_aes_alg_ecb); | 1093 | crypto_unregister_alg(&mv_aes_alg_ecb); |
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev) | |||
570 | 1112 | ||
571 | crypto_unregister_alg(&mv_aes_alg_ecb); | 1113 | crypto_unregister_alg(&mv_aes_alg_ecb); |
572 | crypto_unregister_alg(&mv_aes_alg_cbc); | 1114 | crypto_unregister_alg(&mv_aes_alg_cbc); |
1115 | if (cp->has_sha1) | ||
1116 | crypto_unregister_ahash(&mv_sha1_alg); | ||
1117 | if (cp->has_hmac_sha1) | ||
1118 | crypto_unregister_ahash(&mv_hmac_sha1_alg); | ||
573 | kthread_stop(cp->queue_th); | 1119 | kthread_stop(cp->queue_th); |
574 | free_irq(cp->irq, cp); | 1120 | free_irq(cp->irq, cp); |
575 | memset(cp->sram, 0, cp->sram_size); | 1121 | memset(cp->sram, 0, cp->sram_size); |
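
The mv_hash_setkey() path added above relies on the standard HMAC construction: with the key K zero-padded (or first hashed down, if longer than the block size B = 64 bytes) to exactly one SHA1 block,

$$\mathrm{HMAC}(K, m) = H\bigl((K \oplus \mathrm{opad}) \,\|\, H\bigl((K \oplus \mathrm{ipad}) \,\|\, m\bigr)\bigr), \qquad \mathrm{ipad} = \underbrace{\mathtt{0x36}\,\mathtt{0x36}\cdots}_{B\ \text{bytes}}, \quad \mathrm{opad} = \underbrace{\mathtt{0x5c}\,\mathtt{0x5c}\cdots}_{B\ \text{bytes}}.$$

Because K ⊕ ipad and K ⊕ opad are each exactly one block long, the setkey path runs the fallback shash over each pad once, exports the resulting five-word SHA1 state, and stores both states in ctx->ivs; mv_start_new_hash_req() later copies them to SRAM_HMAC_IV_IN/SRAM_HMAC_IV_OUT so the engine resumes from the precomputed inner/outer IVs instead of reprocessing the key on every request.
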
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3bb171..08fcb1116d90 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@ | |||
1 | #ifndef __MV_CRYPTO_H__ | 1 | #ifndef __MV_CRYPTO_H__ |
2 | 2 | ||
3 | #define DIGEST_INITIAL_VAL_A 0xdd00 | 3 | #define DIGEST_INITIAL_VAL_A 0xdd00 |
4 | #define DIGEST_INITIAL_VAL_B 0xdd04 | ||
5 | #define DIGEST_INITIAL_VAL_C 0xdd08 | ||
6 | #define DIGEST_INITIAL_VAL_D 0xdd0c | ||
7 | #define DIGEST_INITIAL_VAL_E 0xdd10 | ||
4 | #define DES_CMD_REG 0xdd58 | 8 | #define DES_CMD_REG 0xdd58 |
5 | 9 | ||
6 | #define SEC_ACCEL_CMD 0xde00 | 10 | #define SEC_ACCEL_CMD 0xde00 |
@@ -70,6 +74,10 @@ struct sec_accel_config { | |||
70 | #define CFG_AES_LEN_128 (0 << 24) | 74 | #define CFG_AES_LEN_128 (0 << 24) |
71 | #define CFG_AES_LEN_192 (1 << 24) | 75 | #define CFG_AES_LEN_192 (1 << 24) |
72 | #define CFG_AES_LEN_256 (2 << 24) | 76 | #define CFG_AES_LEN_256 (2 << 24) |
77 | #define CFG_NOT_FRAG (0 << 30) | ||
78 | #define CFG_FIRST_FRAG (1 << 30) | ||
79 | #define CFG_LAST_FRAG (2 << 30) | ||
80 | #define CFG_MID_FRAG (3 << 30) | ||
73 | 81 | ||
74 | u32 enc_p; | 82 | u32 enc_p; |
75 | #define ENC_P_SRC(x) (x) | 83 | #define ENC_P_SRC(x) (x) |
@@ -90,7 +98,11 @@ struct sec_accel_config { | |||
90 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) | 98 | #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) |
91 | 99 | ||
92 | u32 mac_digest; | 100 | u32 mac_digest; |
101 | #define MAC_DIGEST_P(x) (x) | ||
102 | #define MAC_FRAG_LEN(x) ((x) << 16) | ||
93 | u32 mac_iv; | 103 | u32 mac_iv; |
104 | #define MAC_INNER_IV_P(x) (x) | ||
105 | #define MAC_OUTER_IV_P(x) ((x) << 16) | ||
94 | }__attribute__ ((packed)); | 106 | }__attribute__ ((packed)); |
95 | /* | 107 | /* |
96 | * /-----------\ 0 | 108 | * /-----------\ 0 |
@@ -101,19 +113,37 @@ struct sec_accel_config { | |||
101 | * | IV IN | 4 * 4 | 113 | * | IV IN | 4 * 4 |
102 | * |-----------| 0x40 (inplace) | 114 | * |-----------| 0x40 (inplace) |
103 | * | IV BUF | 4 * 4 | 115 | * | IV BUF | 4 * 4 |
104 | * |-----------| 0x50 | 116 | * |-----------| 0x80 |
105 | * | DATA IN | 16 * x (max ->max_req_size) | 117 | * | DATA IN | 16 * x (max ->max_req_size) |
106 | * |-----------| 0x50 (inplace operation) | 118 | * |-----------| 0x80 (inplace operation) |
107 | * | DATA OUT | 16 * x (max ->max_req_size) | 119 | * | DATA OUT | 16 * x (max ->max_req_size) |
108 | * \-----------/ SRAM size | 120 | * \-----------/ SRAM size |
109 | */ | 121 | */ |
122 | |||
123 | /* Hashing memory map: | ||
124 | * /-----------\ 0 | ||
125 | * | ACCEL CFG | 4 * 8 | ||
126 | * |-----------| 0x20 | ||
127 | * | Inner IV | 5 * 4 | ||
128 | * |-----------| 0x34 | ||
129 | * | Outer IV | 5 * 4 | ||
130 | * |-----------| 0x48 | ||
131 | * | Output BUF| 5 * 4 | ||
132 | * |-----------| 0x80 | ||
133 | * | DATA IN | 64 * x (max ->max_req_size) | ||
134 | * \-----------/ SRAM size | ||
135 | */ | ||
110 | #define SRAM_CONFIG 0x00 | 136 | #define SRAM_CONFIG 0x00 |
111 | #define SRAM_DATA_KEY_P 0x20 | 137 | #define SRAM_DATA_KEY_P 0x20 |
112 | #define SRAM_DATA_IV 0x40 | 138 | #define SRAM_DATA_IV 0x40 |
113 | #define SRAM_DATA_IV_BUF 0x40 | 139 | #define SRAM_DATA_IV_BUF 0x40 |
114 | #define SRAM_DATA_IN_START 0x50 | 140 | #define SRAM_DATA_IN_START 0x80 |
115 | #define SRAM_DATA_OUT_START 0x50 | 141 | #define SRAM_DATA_OUT_START 0x80 |
142 | |||
143 | #define SRAM_HMAC_IV_IN 0x20 | ||
144 | #define SRAM_HMAC_IV_OUT 0x34 | ||
145 | #define SRAM_DIGEST_BUF 0x48 | ||
116 | 146 | ||
117 | #define SRAM_CFG_SPACE 0x50 | 147 | #define SRAM_CFG_SPACE 0x80 |
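Both memory maps above start the data window at 0x80, which is why SRAM_DATA_IN_START, SRAM_DATA_OUT_START and SRAM_CFG_SPACE all move from 0x50 to 0x80: the hashing layout needs room for the inner/outer IVs and the digest buffer ahead of the data. The offsets implied by the hashing map (a comment-only summary, not part of the header):

/* SRAM_CONFIG         0x00  0x20 bytes of accelerator config
 * SRAM_HMAC_IV_IN     0x20  5 * 4 bytes of inner IV
 * SRAM_HMAC_IV_OUT    0x34  5 * 4 bytes of outer IV
 * SRAM_DIGEST_BUF     0x48  5 * 4 bytes of digest output
 * SRAM_DATA_IN_START  0x80  start of the DATA IN window
 */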
118 | 148 | ||
119 | #endif | 149 | #endif |
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S new file mode 100644 index 000000000000..f7c793745a1e --- /dev/null +++ b/drivers/crypto/n2_asm.S | |||
@@ -0,0 +1,95 @@ | |||
1 | /* n2_asm.S: Hypervisor calls for NCS support. | ||
2 | * | ||
3 | * Copyright (C) 2009 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/linkage.h> | ||
7 | #include <asm/hypervisor.h> | ||
8 | #include "n2_core.h" | ||
9 | |||
10 | /* %o0: queue type | ||
11 | * %o1: RA of queue | ||
12 | * %o2: num entries in queue | ||
13 | * %o3: address of queue handle return | ||
14 | */ | ||
15 | ENTRY(sun4v_ncs_qconf) | ||
16 | mov HV_FAST_NCS_QCONF, %o5 | ||
17 | ta HV_FAST_TRAP | ||
18 | stx %o1, [%o3] | ||
19 | retl | ||
20 | nop | ||
21 | ENDPROC(sun4v_ncs_qconf) | ||
22 | |||
23 | /* %o0: queue handle | ||
24 | * %o1: address of queue type return | ||
25 | * %o2: address of queue base address return | ||
26 | * %o3: address of queue num entries return | ||
27 | */ | ||
28 | ENTRY(sun4v_ncs_qinfo) | ||
29 | mov %o1, %g1 | ||
30 | mov %o2, %g2 | ||
31 | mov %o3, %g3 | ||
32 | mov HV_FAST_NCS_QINFO, %o5 | ||
33 | ta HV_FAST_TRAP | ||
34 | stx %o1, [%g1] | ||
35 | stx %o2, [%g2] | ||
36 | stx %o3, [%g3] | ||
37 | retl | ||
38 | nop | ||
39 | ENDPROC(sun4v_ncs_qinfo) | ||
40 | |||
41 | /* %o0: queue handle | ||
42 | * %o1: address of head offset return | ||
43 | */ | ||
44 | ENTRY(sun4v_ncs_gethead) | ||
45 | mov %o1, %o2 | ||
46 | mov HV_FAST_NCS_GETHEAD, %o5 | ||
47 | ta HV_FAST_TRAP | ||
48 | stx %o1, [%o2] | ||
49 | retl | ||
50 | nop | ||
51 | ENDPROC(sun4v_ncs_gethead) | ||
52 | |||
53 | /* %o0: queue handle | ||
54 | * %o1: address of tail offset return | ||
55 | */ | ||
56 | ENTRY(sun4v_ncs_gettail) | ||
57 | mov %o1, %o2 | ||
58 | mov HV_FAST_NCS_GETTAIL, %o5 | ||
59 | ta HV_FAST_TRAP | ||
60 | stx %o1, [%o2] | ||
61 | retl | ||
62 | nop | ||
63 | ENDPROC(sun4v_ncs_gettail) | ||
64 | |||
65 | /* %o0: queue handle | ||
66 | * %o1: new tail offset | ||
67 | */ | ||
68 | ENTRY(sun4v_ncs_settail) | ||
69 | mov HV_FAST_NCS_SETTAIL, %o5 | ||
70 | ta HV_FAST_TRAP | ||
71 | retl | ||
72 | nop | ||
73 | ENDPROC(sun4v_ncs_settail) | ||
74 | |||
75 | /* %o0: queue handle | ||
76 | * %o1: address of devino return | ||
77 | */ | ||
78 | ENTRY(sun4v_ncs_qhandle_to_devino) | ||
79 | mov %o1, %o2 | ||
80 | mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5 | ||
81 | ta HV_FAST_TRAP | ||
82 | stx %o1, [%o2] | ||
83 | retl | ||
84 | nop | ||
85 | ENDPROC(sun4v_ncs_qhandle_to_devino) | ||
86 | |||
87 | /* %o0: queue handle | ||
88 | * %o1: new head offset | ||
89 | */ | ||
90 | ENTRY(sun4v_ncs_sethead_marker) | ||
91 | mov HV_FAST_NCS_SETHEAD_MARKER, %o5 | ||
92 | ta HV_FAST_TRAP | ||
93 | retl | ||
94 | nop | ||
95 | ENDPROC(sun4v_ncs_sethead_marker) | ||
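These wrappers are called from n2_core.c through C prototypes declared in n2_core.h (added by this patch but not shown here). Reconstructed from the call sites, they presumably look like the following sketch; each call returns the hypervisor status word, HV_EOK on success:

/* Hypothetical reconstruction of a few of the declarations; the
 * authoritative versions live in n2_core.h. */
extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
				     unsigned long queue_ra,
				     unsigned long num_entries,
				     unsigned long *qhandle);
extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
				       unsigned long *head);
extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
				       unsigned long new_tail);
extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
					      unsigned long new_head);
extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
						 unsigned long *devino);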
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c new file mode 100644 index 000000000000..23163fda5035 --- /dev/null +++ b/drivers/crypto/n2_core.c | |||
@@ -0,0 +1,2090 @@ | |||
1 | /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. | ||
2 | * | ||
3 | * Copyright (C) 2010 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/of.h> | ||
11 | #include <linux/of_device.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/crypto.h> | ||
16 | #include <crypto/md5.h> | ||
17 | #include <crypto/sha.h> | ||
18 | #include <crypto/aes.h> | ||
19 | #include <crypto/des.h> | ||
20 | #include <linux/mutex.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/sched.h> | ||
23 | |||
24 | #include <crypto/internal/hash.h> | ||
25 | #include <crypto/scatterwalk.h> | ||
26 | #include <crypto/algapi.h> | ||
27 | |||
28 | #include <asm/hypervisor.h> | ||
29 | #include <asm/mdesc.h> | ||
30 | |||
31 | #include "n2_core.h" | ||
32 | |||
33 | #define DRV_MODULE_NAME "n2_crypto" | ||
34 | #define DRV_MODULE_VERSION "0.1" | ||
35 | #define DRV_MODULE_RELDATE "April 29, 2010" | ||
36 | |||
37 | static char version[] __devinitdata = | ||
38 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
39 | |||
40 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
41 | MODULE_DESCRIPTION("Niagara2 Crypto driver"); | ||
42 | MODULE_LICENSE("GPL"); | ||
43 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
44 | |||
45 | #define N2_CRA_PRIORITY 300 | ||
46 | |||
47 | static DEFINE_MUTEX(spu_lock); | ||
48 | |||
49 | struct spu_queue { | ||
50 | cpumask_t sharing; | ||
51 | unsigned long qhandle; | ||
52 | |||
53 | spinlock_t lock; | ||
54 | u8 q_type; | ||
55 | void *q; | ||
56 | unsigned long head; | ||
57 | unsigned long tail; | ||
58 | struct list_head jobs; | ||
59 | |||
60 | unsigned long devino; | ||
61 | |||
62 | char irq_name[32]; | ||
63 | unsigned int irq; | ||
64 | |||
65 | struct list_head list; | ||
66 | }; | ||
67 | |||
68 | static struct spu_queue **cpu_to_cwq; | ||
69 | static struct spu_queue **cpu_to_mau; | ||
70 | |||
71 | static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) | ||
72 | { | ||
73 | if (q->q_type == HV_NCS_QTYPE_MAU) { | ||
74 | off += MAU_ENTRY_SIZE; | ||
75 | if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) | ||
76 | off = 0; | ||
77 | } else { | ||
78 | off += CWQ_ENTRY_SIZE; | ||
79 | if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) | ||
80 | off = 0; | ||
81 | } | ||
82 | return off; | ||
83 | } | ||
84 | |||
85 | struct n2_request_common { | ||
86 | struct list_head entry; | ||
87 | unsigned int offset; | ||
88 | }; | ||
89 | #define OFFSET_NOT_RUNNING (~(unsigned int)0) | ||
90 | |||
91 | /* An async job request records the final tail value it used in | ||
92 | * n2_request_common->offset; test to see if that offset is in | ||
93 | * the range (old_head, new_head], i.e. inclusive of new_head. | ||
94 | */ | ||
95 | static inline bool job_finished(struct spu_queue *q, unsigned int offset, | ||
96 | unsigned long old_head, unsigned long new_head) | ||
97 | { | ||
98 | if (old_head <= new_head) { | ||
99 | if (offset > old_head && offset <= new_head) | ||
100 | return true; | ||
101 | } else { | ||
102 | if (offset > old_head || offset <= new_head) | ||
103 | return true; | ||
104 | } | ||
105 | return false; | ||
106 | } | ||
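A worked example of the wrap-around test (offsets chosen purely for illustration): once the queue has wrapped, old_head is larger than new_head, so the second branch applies.

	/* wrapped case: old_head (0x1c0) > new_head (0x040) */
	job_finished(q, 0x1e0, 0x1c0, 0x040);	/* true:  offset > old_head  */
	job_finished(q, 0x040, 0x1c0, 0x040);	/* true:  offset <= new_head */
	job_finished(q, 0x100, 0x1c0, 0x040);	/* false: still in flight    */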
107 | |||
108 | /* When the HEAD marker is unequal to the actual HEAD, we get | ||
109 | * a virtual device INO interrupt. We should process the | ||
110 | * completed CWQ entries and adjust the HEAD marker to clear | ||
111 | * the IRQ. | ||
112 | */ | ||
113 | static irqreturn_t cwq_intr(int irq, void *dev_id) | ||
114 | { | ||
115 | unsigned long off, new_head, hv_ret; | ||
116 | struct spu_queue *q = dev_id; | ||
117 | |||
118 | pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", | ||
119 | smp_processor_id(), q->qhandle); | ||
120 | |||
121 | spin_lock(&q->lock); | ||
122 | |||
123 | hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); | ||
124 | |||
125 | pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", | ||
126 | smp_processor_id(), new_head, hv_ret); | ||
127 | |||
128 | for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { | ||
129 | /* XXX ... XXX */ | ||
130 | } | ||
131 | |||
132 | hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); | ||
133 | if (hv_ret == HV_EOK) | ||
134 | q->head = new_head; | ||
135 | |||
136 | spin_unlock(&q->lock); | ||
137 | |||
138 | return IRQ_HANDLED; | ||
139 | } | ||
140 | |||
141 | static irqreturn_t mau_intr(int irq, void *dev_id) | ||
142 | { | ||
143 | struct spu_queue *q = dev_id; | ||
144 | unsigned long head, hv_ret; | ||
145 | |||
146 | spin_lock(&q->lock); | ||
147 | |||
148 | pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", | ||
149 | smp_processor_id(), q->qhandle); | ||
150 | |||
151 | hv_ret = sun4v_ncs_gethead(q->qhandle, &head); | ||
152 | |||
153 | pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", | ||
154 | smp_processor_id(), head, hv_ret); | ||
155 | |||
156 | sun4v_ncs_sethead_marker(q->qhandle, head); | ||
157 | |||
158 | spin_unlock(&q->lock); | ||
159 | |||
160 | return IRQ_HANDLED; | ||
161 | } | ||
162 | |||
163 | static void *spu_queue_next(struct spu_queue *q, void *cur) | ||
164 | { | ||
165 | return q->q + spu_next_offset(q, cur - q->q); | ||
166 | } | ||
167 | |||
168 | static int spu_queue_num_free(struct spu_queue *q) | ||
169 | { | ||
170 | unsigned long head = q->head; | ||
171 | unsigned long tail = q->tail; | ||
172 | unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); | ||
173 | unsigned long diff; | ||
174 | |||
175 | if (head > tail) | ||
176 | diff = head - tail; | ||
177 | else | ||
178 | diff = (end - tail) + head; | ||
179 | |||
180 | return (diff / CWQ_ENTRY_SIZE) - 1; | ||
181 | } | ||
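The trailing "- 1" reserves one slot so that head == tail always means "empty" and never "full":

	/* With head == tail, diff = CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES, so an
	 * idle queue reports CWQ_NUM_ENTRIES - 1 free entries; the producer
	 * can therefore never advance tail back onto head. */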
182 | |||
183 | static void *spu_queue_alloc(struct spu_queue *q, int num_entries) | ||
184 | { | ||
185 | int avail = spu_queue_num_free(q); | ||
186 | |||
187 | if (avail >= num_entries) | ||
188 | return q->q + q->tail; | ||
189 | |||
190 | return NULL; | ||
191 | } | ||
192 | |||
193 | static unsigned long spu_queue_submit(struct spu_queue *q, void *last) | ||
194 | { | ||
195 | unsigned long hv_ret, new_tail; | ||
196 | |||
197 | new_tail = spu_next_offset(q, last - q->q); | ||
198 | |||
199 | hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); | ||
200 | if (hv_ret == HV_EOK) | ||
201 | q->tail = new_tail; | ||
202 | return hv_ret; | ||
203 | } | ||
204 | |||
205 | static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, | ||
206 | int enc_type, int auth_type, | ||
207 | unsigned int hash_len, | ||
208 | bool sfas, bool sob, bool eob, bool encrypt, | ||
209 | int opcode) | ||
210 | { | ||
211 | u64 word = (len - 1) & CONTROL_LEN; | ||
212 | |||
213 | word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); | ||
214 | word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); | ||
215 | word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); | ||
216 | if (sfas) | ||
217 | word |= CONTROL_STORE_FINAL_AUTH_STATE; | ||
218 | if (sob) | ||
219 | word |= CONTROL_START_OF_BLOCK; | ||
220 | if (eob) | ||
221 | word |= CONTROL_END_OF_BLOCK; | ||
222 | if (encrypt) | ||
223 | word |= CONTROL_ENCRYPT; | ||
224 | if (hmac_key_len) | ||
225 | word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; | ||
226 | if (hash_len) | ||
227 | word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; | ||
228 | |||
229 | return word; | ||
230 | } | ||
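As a concrete instance, the single-pass hash path below (n2_hash_async_digest) builds the control word of its first descriptor roughly as follows, then ORs CONTROL_END_OF_BLOCK into whichever descriptor ends up last:

	u64 cw = control_word_base(nbytes, 0, 0, auth_type, digest_size,
				   false, true, false, false,
				   OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
	/* final descriptor of the request: */
	cw |= CONTROL_END_OF_BLOCK;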
231 | |||
232 | #if 0 | ||
233 | static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) | ||
234 | { | ||
235 | if (this_len >= 64 || | ||
236 | qp->head != qp->tail) | ||
237 | return true; | ||
238 | return false; | ||
239 | } | ||
240 | #endif | ||
241 | |||
242 | struct n2_base_ctx { | ||
243 | struct list_head list; | ||
244 | }; | ||
245 | |||
246 | static void n2_base_ctx_init(struct n2_base_ctx *ctx) | ||
247 | { | ||
248 | INIT_LIST_HEAD(&ctx->list); | ||
249 | } | ||
250 | |||
251 | struct n2_hash_ctx { | ||
252 | struct n2_base_ctx base; | ||
253 | |||
254 | struct crypto_ahash *fallback_tfm; | ||
255 | }; | ||
256 | |||
257 | struct n2_hash_req_ctx { | ||
258 | union { | ||
259 | struct md5_state md5; | ||
260 | struct sha1_state sha1; | ||
261 | struct sha256_state sha256; | ||
262 | } u; | ||
263 | |||
264 | unsigned char hash_key[64]; | ||
265 | unsigned char keyed_zero_hash[32]; | ||
266 | |||
267 | struct ahash_request fallback_req; | ||
268 | }; | ||
269 | |||
270 | static int n2_hash_async_init(struct ahash_request *req) | ||
271 | { | ||
272 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
273 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
274 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
275 | |||
276 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
277 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
278 | |||
279 | return crypto_ahash_init(&rctx->fallback_req); | ||
280 | } | ||
281 | |||
282 | static int n2_hash_async_update(struct ahash_request *req) | ||
283 | { | ||
284 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
285 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
286 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
287 | |||
288 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
289 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
290 | rctx->fallback_req.nbytes = req->nbytes; | ||
291 | rctx->fallback_req.src = req->src; | ||
292 | |||
293 | return crypto_ahash_update(&rctx->fallback_req); | ||
294 | } | ||
295 | |||
296 | static int n2_hash_async_final(struct ahash_request *req) | ||
297 | { | ||
298 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
299 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
300 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
301 | |||
302 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
303 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
304 | rctx->fallback_req.result = req->result; | ||
305 | |||
306 | return crypto_ahash_final(&rctx->fallback_req); | ||
307 | } | ||
308 | |||
309 | static int n2_hash_async_finup(struct ahash_request *req) | ||
310 | { | ||
311 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
312 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
313 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
314 | |||
315 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
316 | rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
317 | rctx->fallback_req.nbytes = req->nbytes; | ||
318 | rctx->fallback_req.src = req->src; | ||
319 | rctx->fallback_req.result = req->result; | ||
320 | |||
321 | return crypto_ahash_finup(&rctx->fallback_req); | ||
322 | } | ||
323 | |||
324 | static int n2_hash_cra_init(struct crypto_tfm *tfm) | ||
325 | { | ||
326 | const char *fallback_driver_name = tfm->__crt_alg->cra_name; | ||
327 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
328 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
329 | struct crypto_ahash *fallback_tfm; | ||
330 | int err; | ||
331 | |||
332 | fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, | ||
333 | CRYPTO_ALG_NEED_FALLBACK); | ||
334 | if (IS_ERR(fallback_tfm)) { | ||
335 | pr_warning("Fallback driver '%s' could not be loaded!\n", | ||
336 | fallback_driver_name); | ||
337 | err = PTR_ERR(fallback_tfm); | ||
338 | goto out; | ||
339 | } | ||
340 | |||
341 | crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + | ||
342 | crypto_ahash_reqsize(fallback_tfm))); | ||
343 | |||
344 | ctx->fallback_tfm = fallback_tfm; | ||
345 | return 0; | ||
346 | |||
347 | out: | ||
348 | return err; | ||
349 | } | ||
350 | |||
351 | static void n2_hash_cra_exit(struct crypto_tfm *tfm) | ||
352 | { | ||
353 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
354 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
355 | |||
356 | crypto_free_ahash(ctx->fallback_tfm); | ||
357 | } | ||
358 | |||
359 | static unsigned long wait_for_tail(struct spu_queue *qp) | ||
360 | { | ||
361 | unsigned long head, hv_ret; | ||
362 | |||
363 | do { | ||
364 | hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); | ||
365 | if (hv_ret != HV_EOK) { | ||
366 | pr_err("Hypervisor error on gethead\n"); | ||
367 | break; | ||
368 | } | ||
369 | if (head == qp->tail) { | ||
370 | qp->head = head; | ||
371 | break; | ||
372 | } | ||
373 | } while (1); | ||
374 | return hv_ret; | ||
375 | } | ||
376 | |||
377 | static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, | ||
378 | struct cwq_initial_entry *ent) | ||
379 | { | ||
380 | unsigned long hv_ret = spu_queue_submit(qp, ent); | ||
381 | |||
382 | if (hv_ret == HV_EOK) | ||
383 | hv_ret = wait_for_tail(qp); | ||
384 | |||
385 | return hv_ret; | ||
386 | } | ||
387 | |||
388 | static int n2_hash_async_digest(struct ahash_request *req, | ||
389 | unsigned int auth_type, unsigned int digest_size, | ||
390 | unsigned int result_size, void *hash_loc) | ||
391 | { | ||
392 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
393 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
394 | struct cwq_initial_entry *ent; | ||
395 | struct crypto_hash_walk walk; | ||
396 | struct spu_queue *qp; | ||
397 | unsigned long flags; | ||
398 | int err = -ENODEV; | ||
399 | int nbytes, cpu; | ||
400 | |||
401 | /* The total effective length of the operation may not | ||
402 | * exceed 2^16. | ||
403 | */ | ||
404 | if (unlikely(req->nbytes > (1 << 16))) { | ||
405 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
406 | |||
407 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | ||
408 | rctx->fallback_req.base.flags = | ||
409 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
410 | rctx->fallback_req.nbytes = req->nbytes; | ||
411 | rctx->fallback_req.src = req->src; | ||
412 | rctx->fallback_req.result = req->result; | ||
413 | |||
414 | return crypto_ahash_digest(&rctx->fallback_req); | ||
415 | } | ||
416 | |||
417 | n2_base_ctx_init(&ctx->base); | ||
418 | |||
419 | nbytes = crypto_hash_walk_first(req, &walk); | ||
420 | |||
421 | cpu = get_cpu(); | ||
422 | qp = cpu_to_cwq[cpu]; | ||
423 | if (!qp) | ||
424 | goto out; | ||
425 | |||
426 | spin_lock_irqsave(&qp->lock, flags); | ||
427 | |||
428 | /* XXX can do better, improve this later by doing a by-hand scatterlist | ||
429 | * XXX walk, etc. | ||
430 | */ | ||
431 | ent = qp->q + qp->tail; | ||
432 | |||
433 | ent->control = control_word_base(nbytes, 0, 0, | ||
434 | auth_type, digest_size, | ||
435 | false, true, false, false, | ||
436 | OPCODE_INPLACE_BIT | | ||
437 | OPCODE_AUTH_MAC); | ||
438 | ent->src_addr = __pa(walk.data); | ||
439 | ent->auth_key_addr = 0UL; | ||
440 | ent->auth_iv_addr = __pa(hash_loc); | ||
441 | ent->final_auth_state_addr = 0UL; | ||
442 | ent->enc_key_addr = 0UL; | ||
443 | ent->enc_iv_addr = 0UL; | ||
444 | ent->dest_addr = __pa(hash_loc); | ||
445 | |||
446 | nbytes = crypto_hash_walk_done(&walk, 0); | ||
447 | while (nbytes > 0) { | ||
448 | ent = spu_queue_next(qp, ent); | ||
449 | |||
450 | ent->control = (nbytes - 1); | ||
451 | ent->src_addr = __pa(walk.data); | ||
452 | ent->auth_key_addr = 0UL; | ||
453 | ent->auth_iv_addr = 0UL; | ||
454 | ent->final_auth_state_addr = 0UL; | ||
455 | ent->enc_key_addr = 0UL; | ||
456 | ent->enc_iv_addr = 0UL; | ||
457 | ent->dest_addr = 0UL; | ||
458 | |||
459 | nbytes = crypto_hash_walk_done(&walk, 0); | ||
460 | } | ||
461 | ent->control |= CONTROL_END_OF_BLOCK; | ||
462 | |||
463 | if (submit_and_wait_for_tail(qp, ent) != HV_EOK) | ||
464 | err = -EINVAL; | ||
465 | else | ||
466 | err = 0; | ||
467 | |||
468 | spin_unlock_irqrestore(&qp->lock, flags); | ||
469 | |||
470 | if (!err) | ||
471 | memcpy(req->result, hash_loc, result_size); | ||
472 | out: | ||
473 | put_cpu(); | ||
474 | |||
475 | return err; | ||
476 | } | ||
477 | |||
478 | static int n2_md5_async_digest(struct ahash_request *req) | ||
479 | { | ||
480 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
481 | struct md5_state *m = &rctx->u.md5; | ||
482 | |||
483 | if (unlikely(req->nbytes == 0)) { | ||
484 | static const char md5_zero[MD5_DIGEST_SIZE] = { | ||
485 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, | ||
486 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | ||
487 | }; | ||
488 | |||
489 | memcpy(req->result, md5_zero, MD5_DIGEST_SIZE); | ||
490 | return 0; | ||
491 | } | ||
492 | m->hash[0] = cpu_to_le32(0x67452301); | ||
493 | m->hash[1] = cpu_to_le32(0xefcdab89); | ||
494 | m->hash[2] = cpu_to_le32(0x98badcfe); | ||
495 | m->hash[3] = cpu_to_le32(0x10325476); | ||
496 | |||
497 | return n2_hash_async_digest(req, AUTH_TYPE_MD5, | ||
498 | MD5_DIGEST_SIZE, MD5_DIGEST_SIZE, | ||
499 | m->hash); | ||
500 | } | ||
501 | |||
502 | static int n2_sha1_async_digest(struct ahash_request *req) | ||
503 | { | ||
504 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
505 | struct sha1_state *s = &rctx->u.sha1; | ||
506 | |||
507 | if (unlikely(req->nbytes == 0)) { | ||
508 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | ||
509 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | ||
510 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, | ||
511 | 0x07, 0x09 | ||
512 | }; | ||
513 | |||
514 | memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE); | ||
515 | return 0; | ||
516 | } | ||
517 | s->state[0] = SHA1_H0; | ||
518 | s->state[1] = SHA1_H1; | ||
519 | s->state[2] = SHA1_H2; | ||
520 | s->state[3] = SHA1_H3; | ||
521 | s->state[4] = SHA1_H4; | ||
522 | |||
523 | return n2_hash_async_digest(req, AUTH_TYPE_SHA1, | ||
524 | SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE, | ||
525 | s->state); | ||
526 | } | ||
527 | |||
528 | static int n2_sha256_async_digest(struct ahash_request *req) | ||
529 | { | ||
530 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
531 | struct sha256_state *s = &rctx->u.sha256; | ||
532 | |||
533 | if (req->nbytes == 0) { | ||
534 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | ||
535 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, | ||
536 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, | ||
537 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, | ||
538 | 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
539 | }; | ||
540 | |||
541 | memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE); | ||
542 | return 0; | ||
543 | } | ||
544 | s->state[0] = SHA256_H0; | ||
545 | s->state[1] = SHA256_H1; | ||
546 | s->state[2] = SHA256_H2; | ||
547 | s->state[3] = SHA256_H3; | ||
548 | s->state[4] = SHA256_H4; | ||
549 | s->state[5] = SHA256_H5; | ||
550 | s->state[6] = SHA256_H6; | ||
551 | s->state[7] = SHA256_H7; | ||
552 | |||
553 | return n2_hash_async_digest(req, AUTH_TYPE_SHA256, | ||
554 | SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE, | ||
555 | s->state); | ||
556 | } | ||
557 | |||
558 | static int n2_sha224_async_digest(struct ahash_request *req) | ||
559 | { | ||
560 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
561 | struct sha256_state *s = &rctx->u.sha256; | ||
562 | |||
563 | if (req->nbytes == 0) { | ||
564 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | ||
565 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, | ||
566 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, | ||
567 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, | ||
568 | 0x2f | ||
569 | }; | ||
570 | |||
571 | memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE); | ||
572 | return 0; | ||
573 | } | ||
574 | s->state[0] = SHA224_H0; | ||
575 | s->state[1] = SHA224_H1; | ||
576 | s->state[2] = SHA224_H2; | ||
577 | s->state[3] = SHA224_H3; | ||
578 | s->state[4] = SHA224_H4; | ||
579 | s->state[5] = SHA224_H5; | ||
580 | s->state[6] = SHA224_H6; | ||
581 | s->state[7] = SHA224_H7; | ||
582 | |||
583 | return n2_hash_async_digest(req, AUTH_TYPE_SHA256, | ||
584 | SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE, | ||
585 | s->state); | ||
586 | } | ||
587 | |||
588 | struct n2_cipher_context { | ||
589 | int key_len; | ||
590 | int enc_type; | ||
591 | union { | ||
592 | u8 aes[AES_MAX_KEY_SIZE]; | ||
593 | u8 des[DES_KEY_SIZE]; | ||
594 | u8 des3[3 * DES_KEY_SIZE]; | ||
595 | u8 arc4[258]; /* S-box, X, Y */ | ||
596 | } key; | ||
597 | }; | ||
598 | |||
599 | #define N2_CHUNK_ARR_LEN 16 | ||
600 | |||
601 | struct n2_crypto_chunk { | ||
602 | struct list_head entry; | ||
603 | unsigned long iv_paddr : 44; | ||
604 | unsigned long arr_len : 20; | ||
605 | unsigned long dest_paddr; | ||
606 | unsigned long dest_final; | ||
607 | struct { | ||
608 | unsigned long src_paddr : 44; | ||
609 | unsigned long src_len : 20; | ||
610 | } arr[N2_CHUNK_ARR_LEN]; | ||
611 | }; | ||
612 | |||
613 | struct n2_request_context { | ||
614 | struct ablkcipher_walk walk; | ||
615 | struct list_head chunk_list; | ||
616 | struct n2_crypto_chunk chunk; | ||
617 | u8 temp_iv[16]; | ||
618 | }; | ||
619 | |||
620 | /* The SPU allows some level of flexibility for partial cipher blocks | ||
621 | * being specified in a descriptor. | ||
622 | * | ||
623 | * It merely requires that every descriptor's length field is at least | ||
624 | * as large as the cipher block size. This means that a cipher block | ||
625 | * can span at most 2 descriptors. However, this does not allow a | ||
626 | * partial block to span into the final descriptor as that would | ||
627 | * violate the rule (since every descriptor's length must be at least | ||
628 | * the block size). So, for example, assuming an 8 byte block size: | ||
629 | * | ||
630 | * 0xe --> 0xa --> 0x8 | ||
631 | * | ||
632 | * is a valid length sequence, whereas: | ||
633 | * | ||
634 | * 0xe --> 0xb --> 0x7 | ||
635 | * | ||
636 | * is not a valid sequence. | ||
637 | */ | ||
638 | |||
639 | struct n2_cipher_alg { | ||
640 | struct list_head entry; | ||
641 | u8 enc_type; | ||
642 | struct crypto_alg alg; | ||
643 | }; | ||
644 | |||
645 | static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) | ||
646 | { | ||
647 | struct crypto_alg *alg = tfm->__crt_alg; | ||
648 | |||
649 | return container_of(alg, struct n2_cipher_alg, alg); | ||
650 | } | ||
651 | |||
652 | struct n2_cipher_request_context { | ||
653 | struct ablkcipher_walk walk; | ||
654 | }; | ||
655 | |||
656 | static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
657 | unsigned int keylen) | ||
658 | { | ||
659 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
660 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
661 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
662 | |||
663 | ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); | ||
664 | |||
665 | switch (keylen) { | ||
666 | case AES_KEYSIZE_128: | ||
667 | ctx->enc_type |= ENC_TYPE_ALG_AES128; | ||
668 | break; | ||
669 | case AES_KEYSIZE_192: | ||
670 | ctx->enc_type |= ENC_TYPE_ALG_AES192; | ||
671 | break; | ||
672 | case AES_KEYSIZE_256: | ||
673 | ctx->enc_type |= ENC_TYPE_ALG_AES256; | ||
674 | break; | ||
675 | default: | ||
676 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
677 | return -EINVAL; | ||
678 | } | ||
679 | |||
680 | ctx->key_len = keylen; | ||
681 | memcpy(ctx->key.aes, key, keylen); | ||
682 | return 0; | ||
683 | } | ||
684 | |||
685 | static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
686 | unsigned int keylen) | ||
687 | { | ||
688 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
689 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
690 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
691 | u32 tmp[DES_EXPKEY_WORDS]; | ||
692 | int err; | ||
693 | |||
694 | ctx->enc_type = n2alg->enc_type; | ||
695 | |||
696 | if (keylen != DES_KEY_SIZE) { | ||
697 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
698 | return -EINVAL; | ||
699 | } | ||
700 | |||
701 | err = des_ekey(tmp, key); | ||
702 | if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | ||
703 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
704 | return -EINVAL; | ||
705 | } | ||
706 | |||
707 | ctx->key_len = keylen; | ||
708 | memcpy(ctx->key.des, key, keylen); | ||
709 | return 0; | ||
710 | } | ||
711 | |||
712 | static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
713 | unsigned int keylen) | ||
714 | { | ||
715 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
716 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
717 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
718 | |||
719 | ctx->enc_type = n2alg->enc_type; | ||
720 | |||
721 | if (keylen != (3 * DES_KEY_SIZE)) { | ||
722 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
723 | return -EINVAL; | ||
724 | } | ||
725 | ctx->key_len = keylen; | ||
726 | memcpy(ctx->key.des3, key, keylen); | ||
727 | return 0; | ||
728 | } | ||
729 | |||
730 | static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
731 | unsigned int keylen) | ||
732 | { | ||
733 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
734 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
735 | struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); | ||
736 | u8 *s = ctx->key.arc4; | ||
737 | u8 *x = s + 256; | ||
738 | u8 *y = x + 1; | ||
739 | int i, j, k; | ||
740 | |||
741 | ctx->enc_type = n2alg->enc_type; | ||
742 | |||
743 | j = k = 0; | ||
744 | *x = 0; | ||
745 | *y = 0; | ||
746 | for (i = 0; i < 256; i++) | ||
747 | s[i] = i; | ||
748 | for (i = 0; i < 256; i++) { | ||
749 | u8 a = s[i]; | ||
750 | j = (j + key[k] + a) & 0xff; | ||
751 | s[i] = s[j]; | ||
752 | s[j] = a; | ||
753 | if (++k >= keylen) | ||
754 | k = 0; | ||
755 | } | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) | ||
761 | { | ||
762 | int this_len = nbytes; | ||
763 | |||
764 | this_len -= (nbytes & (block_size - 1)); | ||
765 | return this_len > (1 << 16) ? (1 << 16) : this_len; | ||
766 | } | ||
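Illustrative values (assuming an 8-byte DES block and a 16-byte AES block): the helper strips any trailing partial cipher block and clamps the result to the SPU's 2^16-byte limit mentioned above.

	cipher_descriptor_len(0xe, 8);		/* -> 0x8: trailing partial block stripped */
	cipher_descriptor_len(0x2000f, 16);	/* -> 0x10000: clamped to the 2^16 limit   */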
767 | |||
768 | static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, | ||
769 | struct spu_queue *qp, bool encrypt) | ||
770 | { | ||
771 | struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); | ||
772 | struct cwq_initial_entry *ent; | ||
773 | bool in_place; | ||
774 | int i; | ||
775 | |||
776 | ent = spu_queue_alloc(qp, cp->arr_len); | ||
777 | if (!ent) { | ||
778 | pr_info("queue_alloc() of %d fails\n", | ||
779 | cp->arr_len); | ||
780 | return -EBUSY; | ||
781 | } | ||
782 | |||
783 | in_place = (cp->dest_paddr == cp->arr[0].src_paddr); | ||
784 | |||
785 | ent->control = control_word_base(cp->arr[0].src_len, | ||
786 | 0, ctx->enc_type, 0, 0, | ||
787 | false, true, false, encrypt, | ||
788 | OPCODE_ENCRYPT | | ||
789 | (in_place ? OPCODE_INPLACE_BIT : 0)); | ||
790 | ent->src_addr = cp->arr[0].src_paddr; | ||
791 | ent->auth_key_addr = 0UL; | ||
792 | ent->auth_iv_addr = 0UL; | ||
793 | ent->final_auth_state_addr = 0UL; | ||
794 | ent->enc_key_addr = __pa(&ctx->key); | ||
795 | ent->enc_iv_addr = cp->iv_paddr; | ||
796 | ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); | ||
797 | |||
798 | for (i = 1; i < cp->arr_len; i++) { | ||
799 | ent = spu_queue_next(qp, ent); | ||
800 | |||
801 | ent->control = cp->arr[i].src_len - 1; | ||
802 | ent->src_addr = cp->arr[i].src_paddr; | ||
803 | ent->auth_key_addr = 0UL; | ||
804 | ent->auth_iv_addr = 0UL; | ||
805 | ent->final_auth_state_addr = 0UL; | ||
806 | ent->enc_key_addr = 0UL; | ||
807 | ent->enc_iv_addr = 0UL; | ||
808 | ent->dest_addr = 0UL; | ||
809 | } | ||
810 | ent->control |= CONTROL_END_OF_BLOCK; | ||
811 | |||
812 | return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; | ||
813 | } | ||
814 | |||
815 | static int n2_compute_chunks(struct ablkcipher_request *req) | ||
816 | { | ||
817 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
818 | struct ablkcipher_walk *walk = &rctx->walk; | ||
819 | struct n2_crypto_chunk *chunk; | ||
820 | unsigned long dest_prev; | ||
821 | unsigned int tot_len; | ||
822 | bool prev_in_place; | ||
823 | int err, nbytes; | ||
824 | |||
825 | ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); | ||
826 | err = ablkcipher_walk_phys(req, walk); | ||
827 | if (err) | ||
828 | return err; | ||
829 | |||
830 | INIT_LIST_HEAD(&rctx->chunk_list); | ||
831 | |||
832 | chunk = &rctx->chunk; | ||
833 | INIT_LIST_HEAD(&chunk->entry); | ||
834 | |||
835 | chunk->iv_paddr = 0UL; | ||
836 | chunk->arr_len = 0; | ||
837 | chunk->dest_paddr = 0UL; | ||
838 | |||
839 | prev_in_place = false; | ||
840 | dest_prev = ~0UL; | ||
841 | tot_len = 0; | ||
842 | |||
843 | while ((nbytes = walk->nbytes) != 0) { | ||
844 | unsigned long dest_paddr, src_paddr; | ||
845 | bool in_place; | ||
846 | int this_len; | ||
847 | |||
848 | src_paddr = (page_to_phys(walk->src.page) + | ||
849 | walk->src.offset); | ||
850 | dest_paddr = (page_to_phys(walk->dst.page) + | ||
851 | walk->dst.offset); | ||
852 | in_place = (src_paddr == dest_paddr); | ||
853 | this_len = cipher_descriptor_len(nbytes, walk->blocksize); | ||
854 | |||
855 | if (chunk->arr_len != 0) { | ||
856 | if (in_place != prev_in_place || | ||
857 | (!prev_in_place && | ||
858 | dest_paddr != dest_prev) || | ||
859 | chunk->arr_len == N2_CHUNK_ARR_LEN || | ||
860 | tot_len + this_len > (1 << 16)) { | ||
861 | chunk->dest_final = dest_prev; | ||
862 | list_add_tail(&chunk->entry, | ||
863 | &rctx->chunk_list); | ||
864 | chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); | ||
865 | if (!chunk) { | ||
866 | err = -ENOMEM; | ||
867 | break; | ||
868 | } | ||
869 | INIT_LIST_HEAD(&chunk->entry); | ||
870 | } | ||
871 | } | ||
872 | if (chunk->arr_len == 0) { | ||
873 | chunk->dest_paddr = dest_paddr; | ||
874 | tot_len = 0; | ||
875 | } | ||
876 | chunk->arr[chunk->arr_len].src_paddr = src_paddr; | ||
877 | chunk->arr[chunk->arr_len].src_len = this_len; | ||
878 | chunk->arr_len++; | ||
879 | |||
880 | dest_prev = dest_paddr + this_len; | ||
881 | prev_in_place = in_place; | ||
882 | tot_len += this_len; | ||
883 | |||
884 | err = ablkcipher_walk_done(req, walk, nbytes - this_len); | ||
885 | if (err) | ||
886 | break; | ||
887 | } | ||
888 | if (!err && chunk->arr_len != 0) { | ||
889 | chunk->dest_final = dest_prev; | ||
890 | list_add_tail(&chunk->entry, &rctx->chunk_list); | ||
891 | } | ||
892 | |||
893 | return err; | ||
894 | } | ||
895 | |||
896 | static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) | ||
897 | { | ||
898 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
899 | struct n2_crypto_chunk *c, *tmp; | ||
900 | |||
901 | if (final_iv) | ||
902 | memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); | ||
903 | |||
904 | ablkcipher_walk_complete(&rctx->walk); | ||
905 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { | ||
906 | list_del(&c->entry); | ||
907 | if (unlikely(c != &rctx->chunk)) | ||
908 | kfree(c); | ||
909 | } | ||
910 | |||
911 | } | ||
912 | |||
913 | static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) | ||
914 | { | ||
915 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
916 | struct crypto_tfm *tfm = req->base.tfm; | ||
917 | int err = n2_compute_chunks(req); | ||
918 | struct n2_crypto_chunk *c, *tmp; | ||
919 | unsigned long flags, hv_ret; | ||
920 | struct spu_queue *qp; | ||
921 | |||
922 | if (err) | ||
923 | return err; | ||
924 | |||
925 | qp = cpu_to_cwq[get_cpu()]; | ||
926 | err = -ENODEV; | ||
927 | if (!qp) | ||
928 | goto out; | ||
929 | |||
930 | spin_lock_irqsave(&qp->lock, flags); | ||
931 | |||
932 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { | ||
933 | err = __n2_crypt_chunk(tfm, c, qp, encrypt); | ||
934 | if (err) | ||
935 | break; | ||
936 | list_del(&c->entry); | ||
937 | if (unlikely(c != &rctx->chunk)) | ||
938 | kfree(c); | ||
939 | } | ||
940 | if (!err) { | ||
941 | hv_ret = wait_for_tail(qp); | ||
942 | if (hv_ret != HV_EOK) | ||
943 | err = -EINVAL; | ||
944 | } | ||
945 | |||
946 | spin_unlock_irqrestore(&qp->lock, flags); | ||
947 | |||
948 | put_cpu(); | ||
949 | |||
950 | out: | ||
951 | n2_chunk_complete(req, NULL); | ||
952 | return err; | ||
953 | } | ||
954 | |||
955 | static int n2_encrypt_ecb(struct ablkcipher_request *req) | ||
956 | { | ||
957 | return n2_do_ecb(req, true); | ||
958 | } | ||
959 | |||
960 | static int n2_decrypt_ecb(struct ablkcipher_request *req) | ||
961 | { | ||
962 | return n2_do_ecb(req, false); | ||
963 | } | ||
964 | |||
965 | static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) | ||
966 | { | ||
967 | struct n2_request_context *rctx = ablkcipher_request_ctx(req); | ||
968 | struct crypto_tfm *tfm = req->base.tfm; | ||
969 | unsigned long flags, hv_ret, iv_paddr; | ||
970 | int err = n2_compute_chunks(req); | ||
971 | struct n2_crypto_chunk *c, *tmp; | ||
972 | struct spu_queue *qp; | ||
973 | void *final_iv_addr; | ||
974 | |||
975 | final_iv_addr = NULL; | ||
976 | |||
977 | if (err) | ||
978 | return err; | ||
979 | |||
980 | qp = cpu_to_cwq[get_cpu()]; | ||
981 | err = -ENODEV; | ||
982 | if (!qp) | ||
983 | goto out; | ||
984 | |||
985 | spin_lock_irqsave(&qp->lock, flags); | ||
986 | |||
987 | if (encrypt) { | ||
988 | iv_paddr = __pa(rctx->walk.iv); | ||
989 | list_for_each_entry_safe(c, tmp, &rctx->chunk_list, | ||
990 | entry) { | ||
991 | c->iv_paddr = iv_paddr; | ||
992 | err = __n2_crypt_chunk(tfm, c, qp, true); | ||
993 | if (err) | ||
994 | break; | ||
995 | iv_paddr = c->dest_final - rctx->walk.blocksize; | ||
996 | list_del(&c->entry); | ||
997 | if (unlikely(c != &rctx->chunk)) | ||
998 | kfree(c); | ||
999 | } | ||
1000 | final_iv_addr = __va(iv_paddr); | ||
1001 | } else { | ||
1002 | list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, | ||
1003 | entry) { | ||
1004 | if (c == &rctx->chunk) { | ||
1005 | iv_paddr = __pa(rctx->walk.iv); | ||
1006 | } else { | ||
1007 | iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + | ||
1008 | tmp->arr[tmp->arr_len-1].src_len - | ||
1009 | rctx->walk.blocksize); | ||
1010 | } | ||
1011 | if (!final_iv_addr) { | ||
1012 | unsigned long pa; | ||
1013 | |||
1014 | pa = (c->arr[c->arr_len-1].src_paddr + | ||
1015 | c->arr[c->arr_len-1].src_len - | ||
1016 | rctx->walk.blocksize); | ||
1017 | final_iv_addr = rctx->temp_iv; | ||
1018 | memcpy(rctx->temp_iv, __va(pa), | ||
1019 | rctx->walk.blocksize); | ||
1020 | } | ||
1021 | c->iv_paddr = iv_paddr; | ||
1022 | err = __n2_crypt_chunk(tfm, c, qp, false); | ||
1023 | if (err) | ||
1024 | break; | ||
1025 | list_del(&c->entry); | ||
1026 | if (unlikely(c != &rctx->chunk)) | ||
1027 | kfree(c); | ||
1028 | } | ||
1029 | } | ||
1030 | if (!err) { | ||
1031 | hv_ret = wait_for_tail(qp); | ||
1032 | if (hv_ret != HV_EOK) | ||
1033 | err = -EINVAL; | ||
1034 | } | ||
1035 | |||
1036 | spin_unlock_irqrestore(&qp->lock, flags); | ||
1037 | |||
1038 | put_cpu(); | ||
1039 | |||
1040 | out: | ||
1041 | n2_chunk_complete(req, err ? NULL : final_iv_addr); | ||
1042 | return err; | ||
1043 | } | ||
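Why decryption iterates the chunks back-to-front:

	/* Note (inferred from the code, not stated in the patch): each
	 * chunk's IV is the last ciphertext block of the preceding chunk;
	 * for in-place operation that block must still be ciphertext when
	 * the chunk is processed, so the list is walked in reverse, and the
	 * request's final IV is captured into rctx->temp_iv before any
	 * block is overwritten with plaintext. */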
1044 | |||
1045 | static int n2_encrypt_chaining(struct ablkcipher_request *req) | ||
1046 | { | ||
1047 | return n2_do_chaining(req, true); | ||
1048 | } | ||
1049 | |||
1050 | static int n2_decrypt_chaining(struct ablkcipher_request *req) | ||
1051 | { | ||
1052 | return n2_do_chaining(req, false); | ||
1053 | } | ||
1054 | |||
1055 | struct n2_cipher_tmpl { | ||
1056 | const char *name; | ||
1057 | const char *drv_name; | ||
1058 | u8 block_size; | ||
1059 | u8 enc_type; | ||
1060 | struct ablkcipher_alg ablkcipher; | ||
1061 | }; | ||
1062 | |||
1063 | static const struct n2_cipher_tmpl cipher_tmpls[] = { | ||
1064 | /* ARC4: only ECB is supported (chaining bits ignored) */ | ||
1065 | { .name = "ecb(arc4)", | ||
1066 | .drv_name = "ecb-arc4", | ||
1067 | .block_size = 1, | ||
1068 | .enc_type = (ENC_TYPE_ALG_RC4_STREAM | | ||
1069 | ENC_TYPE_CHAINING_ECB), | ||
1070 | .ablkcipher = { | ||
1071 | .min_keysize = 1, | ||
1072 | .max_keysize = 256, | ||
1073 | .setkey = n2_arc4_setkey, | ||
1074 | .encrypt = n2_encrypt_ecb, | ||
1075 | .decrypt = n2_decrypt_ecb, | ||
1076 | }, | ||
1077 | }, | ||
1078 | |||
1079 | /* DES: ECB, CBC and CFB are supported */ | ||
1080 | { .name = "ecb(des)", | ||
1081 | .drv_name = "ecb-des", | ||
1082 | .block_size = DES_BLOCK_SIZE, | ||
1083 | .enc_type = (ENC_TYPE_ALG_DES | | ||
1084 | ENC_TYPE_CHAINING_ECB), | ||
1085 | .ablkcipher = { | ||
1086 | .min_keysize = DES_KEY_SIZE, | ||
1087 | .max_keysize = DES_KEY_SIZE, | ||
1088 | .setkey = n2_des_setkey, | ||
1089 | .encrypt = n2_encrypt_ecb, | ||
1090 | .decrypt = n2_decrypt_ecb, | ||
1091 | }, | ||
1092 | }, | ||
1093 | { .name = "cbc(des)", | ||
1094 | .drv_name = "cbc-des", | ||
1095 | .block_size = DES_BLOCK_SIZE, | ||
1096 | .enc_type = (ENC_TYPE_ALG_DES | | ||
1097 | ENC_TYPE_CHAINING_CBC), | ||
1098 | .ablkcipher = { | ||
1099 | .ivsize = DES_BLOCK_SIZE, | ||
1100 | .min_keysize = DES_KEY_SIZE, | ||
1101 | .max_keysize = DES_KEY_SIZE, | ||
1102 | .setkey = n2_des_setkey, | ||
1103 | .encrypt = n2_encrypt_chaining, | ||
1104 | .decrypt = n2_decrypt_chaining, | ||
1105 | }, | ||
1106 | }, | ||
1107 | { .name = "cfb(des)", | ||
1108 | .drv_name = "cfb-des", | ||
1109 | .block_size = DES_BLOCK_SIZE, | ||
1110 | .enc_type = (ENC_TYPE_ALG_DES | | ||
1111 | ENC_TYPE_CHAINING_CFB), | ||
1112 | .ablkcipher = { | ||
1113 | .min_keysize = DES_KEY_SIZE, | ||
1114 | .max_keysize = DES_KEY_SIZE, | ||
1115 | .setkey = n2_des_setkey, | ||
1116 | .encrypt = n2_encrypt_chaining, | ||
1117 | .decrypt = n2_decrypt_chaining, | ||
1118 | }, | ||
1119 | }, | ||
1120 | |||
1121 | /* 3DES: ECB, CBC and CFB are supported */ | ||
1122 | { .name = "ecb(des3_ede)", | ||
1123 | .drv_name = "ecb-3des", | ||
1124 | .block_size = DES_BLOCK_SIZE, | ||
1125 | .enc_type = (ENC_TYPE_ALG_3DES | | ||
1126 | ENC_TYPE_CHAINING_ECB), | ||
1127 | .ablkcipher = { | ||
1128 | .min_keysize = 3 * DES_KEY_SIZE, | ||
1129 | .max_keysize = 3 * DES_KEY_SIZE, | ||
1130 | .setkey = n2_3des_setkey, | ||
1131 | .encrypt = n2_encrypt_ecb, | ||
1132 | .decrypt = n2_decrypt_ecb, | ||
1133 | }, | ||
1134 | }, | ||
1135 | { .name = "cbc(des3_ede)", | ||
1136 | .drv_name = "cbc-3des", | ||
1137 | .block_size = DES_BLOCK_SIZE, | ||
1138 | .enc_type = (ENC_TYPE_ALG_3DES | | ||
1139 | ENC_TYPE_CHAINING_CBC), | ||
1140 | .ablkcipher = { | ||
1141 | .ivsize = DES_BLOCK_SIZE, | ||
1142 | .min_keysize = 3 * DES_KEY_SIZE, | ||
1143 | .max_keysize = 3 * DES_KEY_SIZE, | ||
1144 | .setkey = n2_3des_setkey, | ||
1145 | .encrypt = n2_encrypt_chaining, | ||
1146 | .decrypt = n2_decrypt_chaining, | ||
1147 | }, | ||
1148 | }, | ||
1149 | { .name = "cfb(des3_ede)", | ||
1150 | .drv_name = "cfb-3des", | ||
1151 | .block_size = DES_BLOCK_SIZE, | ||
1152 | .enc_type = (ENC_TYPE_ALG_3DES | | ||
1153 | ENC_TYPE_CHAINING_CFB), | ||
1154 | .ablkcipher = { | ||
1155 | .min_keysize = 3 * DES_KEY_SIZE, | ||
1156 | .max_keysize = 3 * DES_KEY_SIZE, | ||
1157 | .setkey = n2_3des_setkey, | ||
1158 | .encrypt = n2_encrypt_chaining, | ||
1159 | .decrypt = n2_decrypt_chaining, | ||
1160 | }, | ||
1161 | }, | ||
1162 | /* AES: ECB, CBC and CTR are supported */ | ||
1163 | { .name = "ecb(aes)", | ||
1164 | .drv_name = "ecb-aes", | ||
1165 | .block_size = AES_BLOCK_SIZE, | ||
1166 | .enc_type = (ENC_TYPE_ALG_AES128 | | ||
1167 | ENC_TYPE_CHAINING_ECB), | ||
1168 | .ablkcipher = { | ||
1169 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1170 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1171 | .setkey = n2_aes_setkey, | ||
1172 | .encrypt = n2_encrypt_ecb, | ||
1173 | .decrypt = n2_decrypt_ecb, | ||
1174 | }, | ||
1175 | }, | ||
1176 | { .name = "cbc(aes)", | ||
1177 | .drv_name = "cbc-aes", | ||
1178 | .block_size = AES_BLOCK_SIZE, | ||
1179 | .enc_type = (ENC_TYPE_ALG_AES128 | | ||
1180 | ENC_TYPE_CHAINING_CBC), | ||
1181 | .ablkcipher = { | ||
1182 | .ivsize = AES_BLOCK_SIZE, | ||
1183 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1184 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1185 | .setkey = n2_aes_setkey, | ||
1186 | .encrypt = n2_encrypt_chaining, | ||
1187 | .decrypt = n2_decrypt_chaining, | ||
1188 | }, | ||
1189 | }, | ||
1190 | { .name = "ctr(aes)", | ||
1191 | .drv_name = "ctr-aes", | ||
1192 | .block_size = AES_BLOCK_SIZE, | ||
1193 | .enc_type = (ENC_TYPE_ALG_AES128 | | ||
1194 | ENC_TYPE_CHAINING_COUNTER), | ||
1195 | .ablkcipher = { | ||
1196 | .ivsize = AES_BLOCK_SIZE, | ||
1197 | .min_keysize = AES_MIN_KEY_SIZE, | ||
1198 | .max_keysize = AES_MAX_KEY_SIZE, | ||
1199 | .setkey = n2_aes_setkey, | ||
1200 | .encrypt = n2_encrypt_chaining, | ||
1201 | .decrypt = n2_encrypt_chaining, | ||
1202 | }, | ||
1203 | }, | ||
1204 | |||
1205 | }; | ||
1206 | #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) | ||
1207 | |||
1208 | static LIST_HEAD(cipher_algs); | ||
1209 | |||
1210 | struct n2_hash_tmpl { | ||
1211 | const char *name; | ||
1212 | int (*digest)(struct ahash_request *req); | ||
1213 | u8 digest_size; | ||
1214 | u8 block_size; | ||
1215 | }; | ||
1216 | static const struct n2_hash_tmpl hash_tmpls[] = { | ||
1217 | { .name = "md5", | ||
1218 | .digest = n2_md5_async_digest, | ||
1219 | .digest_size = MD5_DIGEST_SIZE, | ||
1220 | .block_size = MD5_HMAC_BLOCK_SIZE }, | ||
1221 | { .name = "sha1", | ||
1222 | .digest = n2_sha1_async_digest, | ||
1223 | .digest_size = SHA1_DIGEST_SIZE, | ||
1224 | .block_size = SHA1_BLOCK_SIZE }, | ||
1225 | { .name = "sha256", | ||
1226 | .digest = n2_sha256_async_digest, | ||
1227 | .digest_size = SHA256_DIGEST_SIZE, | ||
1228 | .block_size = SHA256_BLOCK_SIZE }, | ||
1229 | { .name = "sha224", | ||
1230 | .digest = n2_sha224_async_digest, | ||
1231 | .digest_size = SHA224_DIGEST_SIZE, | ||
1232 | .block_size = SHA224_BLOCK_SIZE }, | ||
1233 | }; | ||
1234 | #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) | ||
1235 | |||
1236 | struct n2_ahash_alg { | ||
1237 | struct list_head entry; | ||
1238 | struct ahash_alg alg; | ||
1239 | }; | ||
1240 | static LIST_HEAD(ahash_algs); | ||
1241 | |||
1242 | static int algs_registered; | ||
1243 | |||
1244 | static void __n2_unregister_algs(void) | ||
1245 | { | ||
1246 | struct n2_cipher_alg *cipher, *cipher_tmp; | ||
1247 | struct n2_ahash_alg *alg, *alg_tmp; | ||
1248 | |||
1249 | list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { | ||
1250 | crypto_unregister_alg(&cipher->alg); | ||
1251 | list_del(&cipher->entry); | ||
1252 | kfree(cipher); | ||
1253 | } | ||
1254 | list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { | ||
1255 | crypto_unregister_ahash(&alg->alg); | ||
1256 | list_del(&alg->entry); | ||
1257 | kfree(alg); | ||
1258 | } | ||
1259 | } | ||
1260 | |||
1261 | static int n2_cipher_cra_init(struct crypto_tfm *tfm) | ||
1262 | { | ||
1263 | tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); | ||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1267 | static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) | ||
1268 | { | ||
1269 | struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
1270 | struct crypto_alg *alg; | ||
1271 | int err; | ||
1272 | |||
1273 | if (!p) | ||
1274 | return -ENOMEM; | ||
1275 | |||
1276 | alg = &p->alg; | ||
1277 | |||
1278 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); | ||
1279 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); | ||
1280 | alg->cra_priority = N2_CRA_PRIORITY; | ||
1281 | alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; | ||
1282 | alg->cra_blocksize = tmpl->block_size; | ||
1283 | p->enc_type = tmpl->enc_type; | ||
1284 | alg->cra_ctxsize = sizeof(struct n2_cipher_context); | ||
1285 | alg->cra_type = &crypto_ablkcipher_type; | ||
1286 | alg->cra_u.ablkcipher = tmpl->ablkcipher; | ||
1287 | alg->cra_init = n2_cipher_cra_init; | ||
1288 | alg->cra_module = THIS_MODULE; | ||
1289 | |||
1290 | list_add(&p->entry, &cipher_algs); | ||
1291 | err = crypto_register_alg(alg); | ||
1292 | if (err) { | ||
1293 | list_del(&p->entry); | ||
1294 | kfree(p); | ||
1295 | } | ||
1296 | return err; | ||
1297 | } | ||
1298 | |||
1299 | static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) | ||
1300 | { | ||
1301 | struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
1302 | struct hash_alg_common *halg; | ||
1303 | struct crypto_alg *base; | ||
1304 | struct ahash_alg *ahash; | ||
1305 | int err; | ||
1306 | |||
1307 | if (!p) | ||
1308 | return -ENOMEM; | ||
1309 | |||
1310 | ahash = &p->alg; | ||
1311 | ahash->init = n2_hash_async_init; | ||
1312 | ahash->update = n2_hash_async_update; | ||
1313 | ahash->final = n2_hash_async_final; | ||
1314 | ahash->finup = n2_hash_async_finup; | ||
1315 | ahash->digest = tmpl->digest; | ||
1316 | |||
1317 | halg = &ahash->halg; | ||
1318 | halg->digestsize = tmpl->digest_size; | ||
1319 | |||
1320 | base = &halg->base; | ||
1321 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); | ||
1322 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); | ||
1323 | base->cra_priority = N2_CRA_PRIORITY; | ||
1324 | base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK; | ||
1325 | base->cra_blocksize = tmpl->block_size; | ||
1326 | base->cra_ctxsize = sizeof(struct n2_hash_ctx); | ||
1327 | base->cra_module = THIS_MODULE; | ||
1328 | base->cra_init = n2_hash_cra_init; | ||
1329 | base->cra_exit = n2_hash_cra_exit; | ||
1330 | |||
1331 | list_add(&p->entry, &ahash_algs); | ||
1332 | err = crypto_register_ahash(ahash); | ||
1333 | if (err) { | ||
1334 | list_del(&p->entry); | ||
1335 | kfree(p); | ||
1336 | } | ||
1337 | return err; | ||
1338 | } | ||
1339 | |||
1340 | static int __devinit n2_register_algs(void) | ||
1341 | { | ||
1342 | int i, err = 0; | ||
1343 | |||
1344 | mutex_lock(&spu_lock); | ||
1345 | if (algs_registered++) | ||
1346 | goto out; | ||
1347 | |||
1348 | for (i = 0; i < NUM_HASH_TMPLS; i++) { | ||
1349 | err = __n2_register_one_ahash(&hash_tmpls[i]); | ||
1350 | if (err) { | ||
1351 | __n2_unregister_algs(); | ||
1352 | goto out; | ||
1353 | } | ||
1354 | } | ||
1355 | for (i = 0; i < NUM_CIPHER_TMPLS; i++) { | ||
1356 | err = __n2_register_one_cipher(&cipher_tmpls[i]); | ||
1357 | if (err) { | ||
1358 | __n2_unregister_algs(); | ||
1359 | goto out; | ||
1360 | } | ||
1361 | } | ||
1362 | |||
1363 | out: | ||
1364 | mutex_unlock(&spu_lock); | ||
1365 | return err; | ||
1366 | } | ||
1367 | |||
1368 | static void __exit n2_unregister_algs(void) | ||
1369 | { | ||
1370 | mutex_lock(&spu_lock); | ||
1371 | if (!--algs_registered) | ||
1372 | __n2_unregister_algs(); | ||
1373 | mutex_unlock(&spu_lock); | ||
1374 | } | ||
1375 | |||
1376 | /* To map CWQ queues to interrupt sources, the hypervisor API provides | ||
1377 | * a devino. This isn't very useful to us because all of the | ||
1378 | * interrupts listed in the of_device node have been translated to | ||
1379 | * Linux virtual IRQ cookie numbers. | ||
1380 | * | ||
1381 | * So we have to back-translate, going through the 'intr' and 'ino' | ||
1382 | * property tables of the n2cp MDESC node, matching it with the OF | ||
1383 | * 'interrupts' property entries, in order to figure out which | ||
1384 | * devino goes to which already-translated IRQ. | ||
1385 | */ | ||
1386 | static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip, | ||
1387 | unsigned long dev_ino) | ||
1388 | { | ||
1389 | const unsigned int *dev_intrs; | ||
1390 | unsigned int intr; | ||
1391 | int i; | ||
1392 | |||
1393 | for (i = 0; i < ip->num_intrs; i++) { | ||
1394 | if (ip->ino_table[i].ino == dev_ino) | ||
1395 | break; | ||
1396 | } | ||
1397 | if (i == ip->num_intrs) | ||
1398 | return -ENODEV; | ||
1399 | |||
1400 | intr = ip->ino_table[i].intr; | ||
1401 | |||
1402 | dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); | ||
1403 | if (!dev_intrs) | ||
1404 | return -ENODEV; | ||
1405 | |||
1406 | for (i = 0; i < dev->num_irqs; i++) { | ||
1407 | if (dev_intrs[i] == intr) | ||
1408 | return i; | ||
1409 | } | ||
1410 | |||
1411 | return -ENODEV; | ||
1412 | } | ||
1413 | |||
1414 | static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip, | ||
1415 | const char *irq_name, struct spu_queue *p, | ||
1416 | irq_handler_t handler) | ||
1417 | { | ||
1418 | unsigned long herr; | ||
1419 | int index; | ||
1420 | |||
1421 | herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); | ||
1422 | if (herr) | ||
1423 | return -EINVAL; | ||
1424 | |||
1425 | index = find_devino_index(dev, ip, p->devino); | ||
1426 | if (index < 0) | ||
1427 | return index; | ||
1428 | |||
1429 | p->irq = dev->irqs[index]; | ||
1430 | |||
1431 | sprintf(p->irq_name, "%s-%d", irq_name, index); | ||
1432 | |||
1433 | return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, | ||
1434 | p->irq_name, p); | ||
1435 | } | ||
1436 | |||
1437 | static struct kmem_cache *queue_cache[2]; | ||
1438 | |||
1439 | static void *new_queue(unsigned long q_type) | ||
1440 | { | ||
1441 | return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); | ||
1442 | } | ||
1443 | |||
1444 | static void free_queue(void *p, unsigned long q_type) | ||
1445 | { | ||
1446 | return kmem_cache_free(queue_cache[q_type - 1], p); | ||
1447 | } | ||
1448 | |||
1449 | static int queue_cache_init(void) | ||
1450 | { | ||
1451 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | ||
1452 | queue_cache[HV_NCS_QTYPE_MAU - 1] = | ||
1453 | kmem_cache_create("mau_queue", | ||
1454 | (MAU_NUM_ENTRIES * | ||
1455 | MAU_ENTRY_SIZE), | ||
1456 | MAU_ENTRY_SIZE, 0, NULL); | ||
1457 | if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) | ||
1458 | return -ENOMEM; | ||
1459 | |||
1460 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) | ||
1461 | queue_cache[HV_NCS_QTYPE_CWQ - 1] = | ||
1462 | kmem_cache_create("cwq_queue", | ||
1463 | (CWQ_NUM_ENTRIES * | ||
1464 | CWQ_ENTRY_SIZE), | ||
1465 | CWQ_ENTRY_SIZE, 0, NULL); | ||
1466 | if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { | ||
1467 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | ||
1468 | return -ENOMEM; | ||
1469 | } | ||
1470 | return 0; | ||
1471 | } | ||
1472 | |||
1473 | static void queue_cache_destroy(void) | ||
1474 | { | ||
1475 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); | ||
1476 | kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); | ||
1477 | } | ||
1478 | |||
1479 | static int spu_queue_register(struct spu_queue *p, unsigned long q_type) | ||
1480 | { | ||
1481 | cpumask_var_t old_allowed; | ||
1482 | unsigned long hv_ret; | ||
1483 | |||
1484 | if (cpumask_empty(&p->sharing)) | ||
1485 | return -EINVAL; | ||
1486 | |||
1487 | if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) | ||
1488 | return -ENOMEM; | ||
1489 | |||
1490 | cpumask_copy(old_allowed, ¤t->cpus_allowed); | ||
1491 | |||
1492 | set_cpus_allowed_ptr(current, &p->sharing); | ||
1493 | |||
1494 | hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), | ||
1495 | CWQ_NUM_ENTRIES, &p->qhandle); | ||
1496 | if (!hv_ret) | ||
1497 | sun4v_ncs_sethead_marker(p->qhandle, 0); | ||
1498 | |||
1499 | set_cpus_allowed_ptr(current, old_allowed); | ||
1500 | |||
1501 | free_cpumask_var(old_allowed); | ||
1502 | |||
1503 | return (hv_ret ? -EINVAL : 0); | ||
1504 | } | ||
1505 | |||
1506 | static int spu_queue_setup(struct spu_queue *p) | ||
1507 | { | ||
1508 | int err; | ||
1509 | |||
1510 | p->q = new_queue(p->q_type); | ||
1511 | if (!p->q) | ||
1512 | return -ENOMEM; | ||
1513 | |||
1514 | err = spu_queue_register(p, p->q_type); | ||
1515 | if (err) { | ||
1516 | free_queue(p->q, p->q_type); | ||
1517 | p->q = NULL; | ||
1518 | } | ||
1519 | |||
1520 | return err; | ||
1521 | } | ||
1522 | |||
1523 | static void spu_queue_destroy(struct spu_queue *p) | ||
1524 | { | ||
1525 | unsigned long hv_ret; | ||
1526 | |||
1527 | if (!p->q) | ||
1528 | return; | ||
1529 | |||
1530 | hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); | ||
1531 | |||
1532 | if (!hv_ret) | ||
1533 | free_queue(p->q, p->q_type); | ||
1534 | } | ||
1535 | |||
1536 | static void spu_list_destroy(struct list_head *list) | ||
1537 | { | ||
1538 | struct spu_queue *p, *n; | ||
1539 | |||
1540 | list_for_each_entry_safe(p, n, list, list) { | ||
1541 | int i; | ||
1542 | |||
1543 | for (i = 0; i < NR_CPUS; i++) { | ||
1544 | if (cpu_to_cwq[i] == p) | ||
1545 | cpu_to_cwq[i] = NULL; | ||
1546 | } | ||
1547 | |||
1548 | if (p->irq) { | ||
1549 | free_irq(p->irq, p); | ||
1550 | p->irq = 0; | ||
1551 | } | ||
1552 | spu_queue_destroy(p); | ||
1553 | list_del(&p->list); | ||
1554 | kfree(p); | ||
1555 | } | ||
1556 | } | ||
1557 | |||
1558 | /* Walk the backward arcs of a CWQ 'exec-unit' node, | ||
1559 | * gathering cpu membership information. | ||
1560 | */ | ||
1561 | static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, | ||
1562 | struct of_device *dev, | ||
1563 | u64 node, struct spu_queue *p, | ||
1564 | struct spu_queue **table) | ||
1565 | { | ||
1566 | u64 arc; | ||
1567 | |||
1568 | mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { | ||
1569 | u64 tgt = mdesc_arc_target(mdesc, arc); | ||
1570 | const char *name = mdesc_node_name(mdesc, tgt); | ||
1571 | const u64 *id; | ||
1572 | |||
1573 | if (strcmp(name, "cpu")) | ||
1574 | continue; | ||
1575 | id = mdesc_get_property(mdesc, tgt, "id", NULL); | ||
1576 | if (table[*id] != NULL) { | ||
1577 | dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", | ||
1578 | dev->dev.of_node->full_name); | ||
1579 | return -EINVAL; | ||
1580 | } | ||
1581 | cpu_set(*id, p->sharing); | ||
1582 | table[*id] = p; | ||
1583 | } | ||
1584 | return 0; | ||
1585 | } | ||
1586 | |||
1587 | /* Process an 'exec-unit' MDESC node of type 'cwq'. */ | ||
1588 | static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, | ||
1589 | struct of_device *dev, struct mdesc_handle *mdesc, | ||
1590 | u64 node, const char *iname, unsigned long q_type, | ||
1591 | irq_handler_t handler, struct spu_queue **table) | ||
1592 | { | ||
1593 | struct spu_queue *p; | ||
1594 | int err; | ||
1595 | |||
1596 | p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); | ||
1597 | if (!p) { | ||
1598 | dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", | ||
1599 | dev->dev.of_node->full_name); | ||
1600 | return -ENOMEM; | ||
1601 | } | ||
1602 | |||
1603 | cpus_clear(p->sharing); | ||
1604 | spin_lock_init(&p->lock); | ||
1605 | p->q_type = q_type; | ||
1606 | INIT_LIST_HEAD(&p->jobs); | ||
1607 | list_add(&p->list, list); | ||
1608 | |||
1609 | err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); | ||
1610 | if (err) | ||
1611 | return err; | ||
1612 | |||
1613 | err = spu_queue_setup(p); | ||
1614 | if (err) | ||
1615 | return err; | ||
1616 | |||
1617 | return spu_map_ino(dev, ip, iname, p, handler); | ||
1618 | } | ||
1619 | |||
1620 | static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev, | ||
1621 | struct spu_mdesc_info *ip, struct list_head *list, | ||
1622 | const char *exec_name, unsigned long q_type, | ||
1623 | irq_handler_t handler, struct spu_queue **table) | ||
1624 | { | ||
1625 | int err = 0; | ||
1626 | u64 node; | ||
1627 | |||
1628 | mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { | ||
1629 | const char *type; | ||
1630 | |||
1631 | type = mdesc_get_property(mdesc, node, "type", NULL); | ||
1632 | if (!type || strcmp(type, exec_name)) | ||
1633 | continue; | ||
1634 | |||
1635 | err = handle_exec_unit(ip, list, dev, mdesc, node, | ||
1636 | exec_name, q_type, handler, table); | ||
1637 | if (err) { | ||
1638 | spu_list_destroy(list); | ||
1639 | break; | ||
1640 | } | ||
1641 | } | ||
1642 | |||
1643 | return err; | ||
1644 | } | ||
1645 | |||
1646 | static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, | ||
1647 | struct spu_mdesc_info *ip) | ||
1648 | { | ||
1649 | const u64 *intr, *ino; | ||
1650 | int intr_len, ino_len; | ||
1651 | int i; | ||
1652 | |||
1653 | intr = mdesc_get_property(mdesc, node, "intr", &intr_len); | ||
1654 | if (!intr) | ||
1655 | return -ENODEV; | ||
1656 | |||
1657 | ino = mdesc_get_property(mdesc, node, "ino", &ino_len); | ||
1658 | 	if (!ino) | ||
1659 | return -ENODEV; | ||
1660 | |||
1661 | if (intr_len != ino_len) | ||
1662 | return -EINVAL; | ||
1663 | |||
1664 | ip->num_intrs = intr_len / sizeof(u64); | ||
1665 | ip->ino_table = kzalloc((sizeof(struct ino_blob) * | ||
1666 | ip->num_intrs), | ||
1667 | GFP_KERNEL); | ||
1668 | if (!ip->ino_table) | ||
1669 | return -ENOMEM; | ||
1670 | |||
1671 | for (i = 0; i < ip->num_intrs; i++) { | ||
1672 | struct ino_blob *b = &ip->ino_table[i]; | ||
1673 | b->intr = intr[i]; | ||
1674 | b->ino = ino[i]; | ||
1675 | } | ||
1676 | |||
1677 | return 0; | ||
1678 | } | ||
1679 | |||
1680 | static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, | ||
1681 | struct of_device *dev, | ||
1682 | struct spu_mdesc_info *ip, | ||
1683 | const char *node_name) | ||
1684 | { | ||
1685 | const unsigned int *reg; | ||
1686 | u64 node; | ||
1687 | |||
1688 | reg = of_get_property(dev->dev.of_node, "reg", NULL); | ||
1689 | if (!reg) | ||
1690 | return -ENODEV; | ||
1691 | |||
1692 | mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { | ||
1693 | const char *name; | ||
1694 | const u64 *chdl; | ||
1695 | |||
1696 | name = mdesc_get_property(mdesc, node, "name", NULL); | ||
1697 | if (!name || strcmp(name, node_name)) | ||
1698 | continue; | ||
1699 | chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); | ||
1700 | if (!chdl || (*chdl != *reg)) | ||
1701 | continue; | ||
1702 | ip->cfg_handle = *chdl; | ||
1703 | return get_irq_props(mdesc, node, ip); | ||
1704 | } | ||
1705 | |||
1706 | return -ENODEV; | ||
1707 | } | ||
1708 | |||
1709 | static unsigned long n2_spu_hvapi_major; | ||
1710 | static unsigned long n2_spu_hvapi_minor; | ||
1711 | |||
1712 | static int __devinit n2_spu_hvapi_register(void) | ||
1713 | { | ||
1714 | int err; | ||
1715 | |||
1716 | n2_spu_hvapi_major = 2; | ||
1717 | n2_spu_hvapi_minor = 0; | ||
1718 | |||
1719 | err = sun4v_hvapi_register(HV_GRP_NCS, | ||
1720 | n2_spu_hvapi_major, | ||
1721 | &n2_spu_hvapi_minor); | ||
1722 | |||
1723 | if (!err) | ||
1724 | pr_info("Registered NCS HVAPI version %lu.%lu\n", | ||
1725 | n2_spu_hvapi_major, | ||
1726 | n2_spu_hvapi_minor); | ||
1727 | |||
1728 | return err; | ||
1729 | } | ||
1730 | |||
1731 | static void n2_spu_hvapi_unregister(void) | ||
1732 | { | ||
1733 | sun4v_hvapi_unregister(HV_GRP_NCS); | ||
1734 | } | ||
1735 | |||
1736 | static int global_ref; | ||
1737 | |||
1738 | static int __devinit grab_global_resources(void) | ||
1739 | { | ||
1740 | int err = 0; | ||
1741 | |||
1742 | mutex_lock(&spu_lock); | ||
1743 | |||
1744 | if (global_ref++) | ||
1745 | goto out; | ||
1746 | |||
1747 | err = n2_spu_hvapi_register(); | ||
1748 | if (err) | ||
1749 | goto out; | ||
1750 | |||
1751 | err = queue_cache_init(); | ||
1752 | if (err) | ||
1753 | goto out_hvapi_release; | ||
1754 | |||
1755 | err = -ENOMEM; | ||
1756 | cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, | ||
1757 | GFP_KERNEL); | ||
1758 | if (!cpu_to_cwq) | ||
1759 | goto out_queue_cache_destroy; | ||
1760 | |||
1761 | cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, | ||
1762 | GFP_KERNEL); | ||
1763 | if (!cpu_to_mau) | ||
1764 | goto out_free_cwq_table; | ||
1765 | |||
1766 | err = 0; | ||
1767 | |||
1768 | out: | ||
1769 | if (err) | ||
1770 | global_ref--; | ||
1771 | mutex_unlock(&spu_lock); | ||
1772 | return err; | ||
1773 | |||
1774 | out_free_cwq_table: | ||
1775 | kfree(cpu_to_cwq); | ||
1776 | cpu_to_cwq = NULL; | ||
1777 | |||
1778 | out_queue_cache_destroy: | ||
1779 | queue_cache_destroy(); | ||
1780 | |||
1781 | out_hvapi_release: | ||
1782 | n2_spu_hvapi_unregister(); | ||
1783 | goto out; | ||
1784 | } | ||
1785 | |||
1786 | static void release_global_resources(void) | ||
1787 | { | ||
1788 | mutex_lock(&spu_lock); | ||
1789 | if (!--global_ref) { | ||
1790 | kfree(cpu_to_cwq); | ||
1791 | cpu_to_cwq = NULL; | ||
1792 | |||
1793 | kfree(cpu_to_mau); | ||
1794 | cpu_to_mau = NULL; | ||
1795 | |||
1796 | queue_cache_destroy(); | ||
1797 | n2_spu_hvapi_unregister(); | ||
1798 | } | ||
1799 | mutex_unlock(&spu_lock); | ||
1800 | } | ||
1801 | |||
1802 | static struct n2_crypto * __devinit alloc_n2cp(void) | ||
1803 | { | ||
1804 | struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); | ||
1805 | |||
1806 | if (np) | ||
1807 | INIT_LIST_HEAD(&np->cwq_list); | ||
1808 | |||
1809 | return np; | ||
1810 | } | ||
1811 | |||
1812 | static void free_n2cp(struct n2_crypto *np) | ||
1813 | { | ||
1814 | if (np->cwq_info.ino_table) { | ||
1815 | kfree(np->cwq_info.ino_table); | ||
1816 | np->cwq_info.ino_table = NULL; | ||
1817 | } | ||
1818 | |||
1819 | kfree(np); | ||
1820 | } | ||
1821 | |||
1822 | static void __devinit n2_spu_driver_version(void) | ||
1823 | { | ||
1824 | static int n2_spu_version_printed; | ||
1825 | |||
1826 | if (n2_spu_version_printed++ == 0) | ||
1827 | pr_info("%s", version); | ||
1828 | } | ||
1829 | |||
1830 | static int __devinit n2_crypto_probe(struct of_device *dev, | ||
1831 | const struct of_device_id *match) | ||
1832 | { | ||
1833 | struct mdesc_handle *mdesc; | ||
1834 | const char *full_name; | ||
1835 | struct n2_crypto *np; | ||
1836 | int err; | ||
1837 | |||
1838 | n2_spu_driver_version(); | ||
1839 | |||
1840 | full_name = dev->dev.of_node->full_name; | ||
1841 | pr_info("Found N2CP at %s\n", full_name); | ||
1842 | |||
1843 | np = alloc_n2cp(); | ||
1844 | if (!np) { | ||
1845 | dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", | ||
1846 | full_name); | ||
1847 | return -ENOMEM; | ||
1848 | } | ||
1849 | |||
1850 | err = grab_global_resources(); | ||
1851 | if (err) { | ||
1852 | dev_err(&dev->dev, "%s: Unable to grab " | ||
1853 | "global resources.\n", full_name); | ||
1854 | goto out_free_n2cp; | ||
1855 | } | ||
1856 | |||
1857 | mdesc = mdesc_grab(); | ||
1858 | |||
1859 | if (!mdesc) { | ||
1860 | dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", | ||
1861 | full_name); | ||
1862 | err = -ENODEV; | ||
1863 | goto out_free_global; | ||
1864 | } | ||
1865 | err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); | ||
1866 | if (err) { | ||
1867 | dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", | ||
1868 | full_name); | ||
1869 | mdesc_release(mdesc); | ||
1870 | goto out_free_global; | ||
1871 | } | ||
1872 | |||
1873 | err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, | ||
1874 | "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, | ||
1875 | cpu_to_cwq); | ||
1876 | mdesc_release(mdesc); | ||
1877 | |||
1878 | if (err) { | ||
1879 | dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", | ||
1880 | full_name); | ||
1881 | goto out_free_global; | ||
1882 | } | ||
1883 | |||
1884 | err = n2_register_algs(); | ||
1885 | if (err) { | ||
1886 | dev_err(&dev->dev, "%s: Unable to register algorithms.\n", | ||
1887 | full_name); | ||
1888 | goto out_free_spu_list; | ||
1889 | } | ||
1890 | |||
1891 | dev_set_drvdata(&dev->dev, np); | ||
1892 | |||
1893 | return 0; | ||
1894 | |||
1895 | out_free_spu_list: | ||
1896 | spu_list_destroy(&np->cwq_list); | ||
1897 | |||
1898 | out_free_global: | ||
1899 | release_global_resources(); | ||
1900 | |||
1901 | out_free_n2cp: | ||
1902 | free_n2cp(np); | ||
1903 | |||
1904 | return err; | ||
1905 | } | ||
1906 | |||
1907 | static int __devexit n2_crypto_remove(struct of_device *dev) | ||
1908 | { | ||
1909 | struct n2_crypto *np = dev_get_drvdata(&dev->dev); | ||
1910 | |||
1911 | n2_unregister_algs(); | ||
1912 | |||
1913 | spu_list_destroy(&np->cwq_list); | ||
1914 | |||
1915 | release_global_resources(); | ||
1916 | |||
1917 | free_n2cp(np); | ||
1918 | |||
1919 | return 0; | ||
1920 | } | ||
1921 | |||
1922 | static struct n2_mau * __devinit alloc_ncp(void) | ||
1923 | { | ||
1924 | struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); | ||
1925 | |||
1926 | if (mp) | ||
1927 | INIT_LIST_HEAD(&mp->mau_list); | ||
1928 | |||
1929 | return mp; | ||
1930 | } | ||
1931 | |||
1932 | static void free_ncp(struct n2_mau *mp) | ||
1933 | { | ||
1934 | if (mp->mau_info.ino_table) { | ||
1935 | kfree(mp->mau_info.ino_table); | ||
1936 | mp->mau_info.ino_table = NULL; | ||
1937 | } | ||
1938 | |||
1939 | kfree(mp); | ||
1940 | } | ||
1941 | |||
1942 | static int __devinit n2_mau_probe(struct of_device *dev, | ||
1943 | const struct of_device_id *match) | ||
1944 | { | ||
1945 | struct mdesc_handle *mdesc; | ||
1946 | const char *full_name; | ||
1947 | struct n2_mau *mp; | ||
1948 | int err; | ||
1949 | |||
1950 | n2_spu_driver_version(); | ||
1951 | |||
1952 | full_name = dev->dev.of_node->full_name; | ||
1953 | pr_info("Found NCP at %s\n", full_name); | ||
1954 | |||
1955 | mp = alloc_ncp(); | ||
1956 | if (!mp) { | ||
1957 | dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", | ||
1958 | full_name); | ||
1959 | return -ENOMEM; | ||
1960 | } | ||
1961 | |||
1962 | err = grab_global_resources(); | ||
1963 | if (err) { | ||
1964 | dev_err(&dev->dev, "%s: Unable to grab " | ||
1965 | "global resources.\n", full_name); | ||
1966 | goto out_free_ncp; | ||
1967 | } | ||
1968 | |||
1969 | mdesc = mdesc_grab(); | ||
1970 | |||
1971 | if (!mdesc) { | ||
1972 | dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", | ||
1973 | full_name); | ||
1974 | err = -ENODEV; | ||
1975 | goto out_free_global; | ||
1976 | } | ||
1977 | |||
1978 | err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); | ||
1979 | if (err) { | ||
1980 | dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", | ||
1981 | full_name); | ||
1982 | mdesc_release(mdesc); | ||
1983 | goto out_free_global; | ||
1984 | } | ||
1985 | |||
1986 | err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, | ||
1987 | "mau", HV_NCS_QTYPE_MAU, mau_intr, | ||
1988 | cpu_to_mau); | ||
1989 | mdesc_release(mdesc); | ||
1990 | |||
1991 | if (err) { | ||
1992 | dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", | ||
1993 | full_name); | ||
1994 | goto out_free_global; | ||
1995 | } | ||
1996 | |||
1997 | dev_set_drvdata(&dev->dev, mp); | ||
1998 | |||
1999 | return 0; | ||
2000 | |||
2001 | out_free_global: | ||
2002 | release_global_resources(); | ||
2003 | |||
2004 | out_free_ncp: | ||
2005 | free_ncp(mp); | ||
2006 | |||
2007 | return err; | ||
2008 | } | ||
2009 | |||
2010 | static int __devexit n2_mau_remove(struct of_device *dev) | ||
2011 | { | ||
2012 | struct n2_mau *mp = dev_get_drvdata(&dev->dev); | ||
2013 | |||
2014 | spu_list_destroy(&mp->mau_list); | ||
2015 | |||
2016 | release_global_resources(); | ||
2017 | |||
2018 | free_ncp(mp); | ||
2019 | |||
2020 | return 0; | ||
2021 | } | ||
2022 | |||
2023 | static struct of_device_id n2_crypto_match[] = { | ||
2024 | { | ||
2025 | .name = "n2cp", | ||
2026 | .compatible = "SUNW,n2-cwq", | ||
2027 | }, | ||
2028 | { | ||
2029 | .name = "n2cp", | ||
2030 | .compatible = "SUNW,vf-cwq", | ||
2031 | }, | ||
2032 | {}, | ||
2033 | }; | ||
2034 | |||
2035 | MODULE_DEVICE_TABLE(of, n2_crypto_match); | ||
2036 | |||
2037 | static struct of_platform_driver n2_crypto_driver = { | ||
2038 | .driver = { | ||
2039 | .name = "n2cp", | ||
2040 | .owner = THIS_MODULE, | ||
2041 | .of_match_table = n2_crypto_match, | ||
2042 | }, | ||
2043 | .probe = n2_crypto_probe, | ||
2044 | .remove = __devexit_p(n2_crypto_remove), | ||
2045 | }; | ||
2046 | |||
2047 | static struct of_device_id n2_mau_match[] = { | ||
2048 | { | ||
2049 | .name = "ncp", | ||
2050 | .compatible = "SUNW,n2-mau", | ||
2051 | }, | ||
2052 | { | ||
2053 | .name = "ncp", | ||
2054 | .compatible = "SUNW,vf-mau", | ||
2055 | }, | ||
2056 | {}, | ||
2057 | }; | ||
2058 | |||
2059 | MODULE_DEVICE_TABLE(of, n2_mau_match); | ||
2060 | |||
2061 | static struct of_platform_driver n2_mau_driver = { | ||
2062 | .driver = { | ||
2063 | .name = "ncp", | ||
2064 | .owner = THIS_MODULE, | ||
2065 | .of_match_table = n2_mau_match, | ||
2066 | }, | ||
2067 | .probe = n2_mau_probe, | ||
2068 | .remove = __devexit_p(n2_mau_remove), | ||
2069 | }; | ||
2070 | |||
2071 | static int __init n2_init(void) | ||
2072 | { | ||
2073 | int err = of_register_driver(&n2_crypto_driver, &of_bus_type); | ||
2074 | |||
2075 | if (!err) { | ||
2076 | err = of_register_driver(&n2_mau_driver, &of_bus_type); | ||
2077 | if (err) | ||
2078 | of_unregister_driver(&n2_crypto_driver); | ||
2079 | } | ||
2080 | return err; | ||
2081 | } | ||
2082 | |||
2083 | static void __exit n2_exit(void) | ||
2084 | { | ||
2085 | of_unregister_driver(&n2_mau_driver); | ||
2086 | of_unregister_driver(&n2_crypto_driver); | ||
2087 | } | ||
2088 | |||
2089 | module_init(n2_init); | ||
2090 | module_exit(n2_exit); | ||
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h new file mode 100644 index 000000000000..4bcbbeae98f5 --- /dev/null +++ b/drivers/crypto/n2_core.h | |||
@@ -0,0 +1,231 @@ | |||
1 | #ifndef _N2_CORE_H | ||
2 | #define _N2_CORE_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | struct ino_blob { | ||
7 | u64 intr; | ||
8 | u64 ino; | ||
9 | }; | ||
10 | |||
11 | struct spu_mdesc_info { | ||
12 | u64 cfg_handle; | ||
13 | struct ino_blob *ino_table; | ||
14 | int num_intrs; | ||
15 | }; | ||
16 | |||
17 | struct n2_crypto { | ||
18 | struct spu_mdesc_info cwq_info; | ||
19 | struct list_head cwq_list; | ||
20 | }; | ||
21 | |||
22 | struct n2_mau { | ||
23 | struct spu_mdesc_info mau_info; | ||
24 | struct list_head mau_list; | ||
25 | }; | ||
26 | |||
27 | #define CWQ_ENTRY_SIZE 64 | ||
28 | #define CWQ_NUM_ENTRIES 64 | ||
29 | |||
30 | #define MAU_ENTRY_SIZE 64 | ||
31 | #define MAU_NUM_ENTRIES 64 | ||
32 | |||
33 | struct cwq_initial_entry { | ||
34 | u64 control; | ||
35 | u64 src_addr; | ||
36 | u64 auth_key_addr; | ||
37 | u64 auth_iv_addr; | ||
38 | u64 final_auth_state_addr; | ||
39 | u64 enc_key_addr; | ||
40 | u64 enc_iv_addr; | ||
41 | u64 dest_addr; | ||
42 | }; | ||
43 | |||
44 | struct cwq_ext_entry { | ||
45 | u64 len; | ||
46 | u64 src_addr; | ||
47 | u64 resv1; | ||
48 | u64 resv2; | ||
49 | u64 resv3; | ||
50 | u64 resv4; | ||
51 | u64 resv5; | ||
52 | u64 resv6; | ||
53 | }; | ||
54 | |||
55 | struct cwq_final_entry { | ||
56 | u64 control; | ||
57 | u64 src_addr; | ||
58 | u64 resv1; | ||
59 | u64 resv2; | ||
60 | u64 resv3; | ||
61 | u64 resv4; | ||
62 | u64 resv5; | ||
63 | u64 resv6; | ||
64 | }; | ||
65 | |||
66 | #define CONTROL_LEN 0x000000000000ffffULL | ||
67 | #define CONTROL_LEN_SHIFT 0 | ||
68 | #define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL | ||
69 | #define CONTROL_HMAC_KEY_LEN_SHIFT 16 | ||
70 | #define CONTROL_ENC_TYPE 0x00000000ff000000ULL | ||
71 | #define CONTROL_ENC_TYPE_SHIFT 24 | ||
72 | #define ENC_TYPE_ALG_RC4_STREAM 0x00ULL | ||
73 | #define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL | ||
74 | #define ENC_TYPE_ALG_DES 0x08ULL | ||
75 | #define ENC_TYPE_ALG_3DES 0x0cULL | ||
76 | #define ENC_TYPE_ALG_AES128 0x10ULL | ||
77 | #define ENC_TYPE_ALG_AES192 0x14ULL | ||
78 | #define ENC_TYPE_ALG_AES256 0x18ULL | ||
79 | #define ENC_TYPE_ALG_RESERVED 0x1cULL | ||
80 | #define ENC_TYPE_ALG_MASK 0x1cULL | ||
81 | #define ENC_TYPE_CHAINING_ECB 0x00ULL | ||
82 | #define ENC_TYPE_CHAINING_CBC 0x01ULL | ||
83 | #define ENC_TYPE_CHAINING_CFB 0x02ULL | ||
84 | #define ENC_TYPE_CHAINING_COUNTER 0x03ULL | ||
85 | #define ENC_TYPE_CHAINING_MASK 0x03ULL | ||
86 | #define CONTROL_AUTH_TYPE 0x0000001f00000000ULL | ||
87 | #define CONTROL_AUTH_TYPE_SHIFT 32 | ||
88 | #define AUTH_TYPE_RESERVED 0x00ULL | ||
89 | #define AUTH_TYPE_MD5 0x01ULL | ||
90 | #define AUTH_TYPE_SHA1 0x02ULL | ||
91 | #define AUTH_TYPE_SHA256 0x03ULL | ||
92 | #define AUTH_TYPE_CRC32 0x04ULL | ||
93 | #define AUTH_TYPE_HMAC_MD5 0x05ULL | ||
94 | #define AUTH_TYPE_HMAC_SHA1 0x06ULL | ||
95 | #define AUTH_TYPE_HMAC_SHA256 0x07ULL | ||
96 | #define AUTH_TYPE_TCP_CHECKSUM 0x08ULL | ||
97 | #define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL | ||
98 | #define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL | ||
99 | #define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL | ||
100 | #define CONTROL_STRAND 0x000000e000000000ULL | ||
101 | #define CONTROL_STRAND_SHIFT 37 | ||
102 | #define CONTROL_HASH_LEN 0x0000ff0000000000ULL | ||
103 | #define CONTROL_HASH_LEN_SHIFT 40 | ||
104 | #define CONTROL_INTERRUPT 0x0001000000000000ULL | ||
105 | #define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL | ||
106 | #define CONTROL_RESERVED 0x001c000000000000ULL | ||
107 | #define CONTROL_HV_DONE 0x0004000000000000ULL | ||
108 | #define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL | ||
109 | #define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL | ||
110 | #define CONTROL_END_OF_BLOCK 0x0020000000000000ULL | ||
111 | #define CONTROL_START_OF_BLOCK 0x0040000000000000ULL | ||
112 | #define CONTROL_ENCRYPT 0x0080000000000000ULL | ||
113 | #define CONTROL_OPCODE 0xff00000000000000ULL | ||
114 | #define CONTROL_OPCODE_SHIFT 56 | ||
115 | #define OPCODE_INPLACE_BIT 0x80ULL | ||
116 | #define OPCODE_SSL_KEYBLOCK 0x10ULL | ||
117 | #define OPCODE_COPY 0x20ULL | ||
118 | #define OPCODE_ENCRYPT 0x40ULL | ||
119 | #define OPCODE_AUTH_MAC 0x41ULL | ||
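For orientation, here is a hedged sketch of how these field definitions combine into the 64-bit control word of a struct cwq_initial_entry. The particular choices (AES-128 in CBC mode, an in-place encrypt opcode, interrupt on completion) and the (len - 1) length encoding are assumptions made for the example, not something this commit spells out.

    /* Illustrative only: assemble a CWQ control word for an in-place
     * AES-128-CBC encrypt.  The (len - 1) length encoding is assumed;
     * consult the CWQ submit helpers in n2_core.c for the real rules. */
    static inline u64 example_cwq_control(unsigned int len)
    {
    	u64 control = 0;

    	control |= ((u64)(len - 1) << CONTROL_LEN_SHIFT) & CONTROL_LEN;
    	control |= (ENC_TYPE_ALG_AES128 | ENC_TYPE_CHAINING_CBC)
    			<< CONTROL_ENC_TYPE_SHIFT;
    	control |= CONTROL_ENCRYPT;		/* encrypt, not decrypt */
    	control |= CONTROL_START_OF_BLOCK | CONTROL_END_OF_BLOCK;
    	control |= CONTROL_INTERRUPT;		/* notify on completion */
    	control |= (OPCODE_ENCRYPT | OPCODE_INPLACE_BIT) << CONTROL_OPCODE_SHIFT;

    	return control;
    }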
120 | |||
121 | #endif /* !(__ASSEMBLY__) */ | ||
122 | |||
123 | /* NCS v2.0 hypervisor interfaces */ | ||
124 | #define HV_NCS_QTYPE_MAU 0x01 | ||
125 | #define HV_NCS_QTYPE_CWQ 0x02 | ||
126 | |||
127 | /* ncs_qconf() | ||
128 | * TRAP: HV_FAST_TRAP | ||
129 | * FUNCTION: HV_FAST_NCS_QCONF | ||
130 | * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) | ||
131 | * ARG1: Real address of queue, or handle for unconfigure | ||
132 | * ARG2: Number of entries in queue, zero for unconfigure | ||
133 | * RET0: status | ||
134 | * RET1: queue handle | ||
135 | * | ||
136 | * Configure a queue in the stream processing unit. | ||
137 | * | ||
138 | * The real address given as the base must be 64-byte | ||
139 | * aligned. | ||
140 | * | ||
141 | * The queue size can range from a minimum of 2 to a maximum | ||
142 | * of 64. The queue size must be a power of two. | ||
143 | * | ||
144 | * To unconfigure a queue, specify a length of zero and place | ||
145 | * the queue handle into ARG1. | ||
146 | * | ||
147 | * On configure success the hypervisor will set the FIRST, HEAD, | ||
148 | * and TAIL registers to the address of the first entry in the | ||
149 | * queue. The LAST register will be set to point to the last | ||
150 | * entry in the queue. | ||
151 | */ | ||
152 | #define HV_FAST_NCS_QCONF 0x111 | ||
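As a minimal sketch of the handshake described above (not the driver's actual path, which lives in spu_queue_register()/spu_queue_destroy()): configure a 64-entry queue from a 64-byte-aligned buffer, then unconfigure it by handing the handle back with a zero entry count. The helper name and the error mapping are assumptions for the example; the sun4v_ncs_qconf() wrapper and queue constants are declared later in this header.

    /* Sketch only: 'queue_base' must be 64-byte aligned and sized for
     * CWQ_NUM_ENTRIES entries (a power of two between 2 and 64). */
    static int example_qconf(void *queue_base)
    {
    	unsigned long qhandle, hv_ret;

    	hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(queue_base),
    				 CWQ_NUM_ENTRIES, &qhandle);
    	if (hv_ret)
    		return -EINVAL;

    	/* ... submit entries, wait for them to drain ... */

    	/* Unconfigure: queue handle in ARG1, zero entries. */
    	return sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle) ?
    		-EINVAL : 0;
    }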
153 | |||
154 | /* ncs_qinfo() | ||
155 | * TRAP: HV_FAST_TRAP | ||
156 | * FUNCTION: HV_FAST_NCS_QINFO | ||
157 | * ARG0: Queue handle | ||
158 | * RET0: status | ||
159 | * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ}) | ||
160 | * RET2: Queue base address | ||
161 | * RET3: Number of entries | ||
162 | */ | ||
163 | #define HV_FAST_NCS_QINFO 0x112 | ||
164 | |||
165 | /* ncs_gethead() | ||
166 | * TRAP: HV_FAST_TRAP | ||
167 | * FUNCTION: HV_FAST_NCS_GETHEAD | ||
168 | * ARG0: Queue handle | ||
169 | * RET0: status | ||
170 | * RET1: queue head offset | ||
171 | */ | ||
172 | #define HV_FAST_NCS_GETHEAD 0x113 | ||
173 | |||
174 | /* ncs_gettail() | ||
175 | * TRAP: HV_FAST_TRAP | ||
176 | * FUNCTION: HV_FAST_NCS_GETTAIL | ||
177 | * ARG0: Queue handle | ||
178 | * RET0: status | ||
179 | * RET1: queue tail offset | ||
180 | */ | ||
181 | #define HV_FAST_NCS_GETTAIL 0x114 | ||
182 | |||
183 | /* ncs_settail() | ||
184 | * TRAP: HV_FAST_TRAP | ||
185 | * FUNCTION: HV_FAST_NCS_SETTAIL | ||
186 | * ARG0: Queue handle | ||
187 | * ARG1: New tail offset | ||
188 | * RET0: status | ||
189 | */ | ||
190 | #define HV_FAST_NCS_SETTAIL 0x115 | ||
191 | |||
192 | /* ncs_qhandle_to_devino() | ||
193 | * TRAP: HV_FAST_TRAP | ||
194 | * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO | ||
195 | * ARG0: Queue handle | ||
196 | * RET0: status | ||
197 | * RET1: devino | ||
198 | */ | ||
199 | #define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116 | ||
200 | |||
201 | /* ncs_sethead_marker() | ||
202 | * TRAP: HV_FAST_TRAP | ||
203 | * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER | ||
204 | * ARG0: Queue handle | ||
205 | * ARG1: New head offset | ||
206 | * RET0: status | ||
207 | */ | ||
208 | #define HV_FAST_NCS_SETHEAD_MARKER 0x117 | ||
209 | |||
210 | #ifndef __ASSEMBLY__ | ||
211 | extern unsigned long sun4v_ncs_qconf(unsigned long queue_type, | ||
212 | unsigned long queue_ra, | ||
213 | unsigned long num_entries, | ||
214 | unsigned long *qhandle); | ||
215 | extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle, | ||
216 | unsigned long *queue_type, | ||
217 | unsigned long *queue_ra, | ||
218 | unsigned long *num_entries); | ||
219 | extern unsigned long sun4v_ncs_gethead(unsigned long qhandle, | ||
220 | unsigned long *head); | ||
221 | extern unsigned long sun4v_ncs_gettail(unsigned long qhandle, | ||
222 | unsigned long *tail); | ||
223 | extern unsigned long sun4v_ncs_settail(unsigned long qhandle, | ||
224 | unsigned long tail); | ||
225 | extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle, | ||
226 | unsigned long *devino); | ||
227 | extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle, | ||
228 | unsigned long head); | ||
229 | #endif /* !(__ASSEMBLY__) */ | ||
230 | |||
231 | #endif /* _N2_CORE_H */ | ||
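Taken together, these calls describe a simple ring: the guest writes entries and advances TAIL with ncs_settail(), while the hardware retires entries and advances HEAD. Below is a hedged sketch of one submit-and-poll cycle using the wrappers declared above; treating the offsets as byte offsets that wrap at the queue size, and polling instead of taking the devino interrupt, are simplifications made for brevity.

    /* Sketch only: push one entry into a configured CWQ and spin until
     * the hardware's HEAD catches up with our new TAIL. */
    static int example_submit_and_wait(unsigned long qhandle,
    				   struct cwq_initial_entry *q,
    				   const struct cwq_initial_entry *ent)
    {
    	unsigned long head, tail, hv_ret;

    	hv_ret = sun4v_ncs_gettail(qhandle, &tail);
    	if (hv_ret)
    		return -EINVAL;

    	q[tail / CWQ_ENTRY_SIZE] = *ent;	/* fill the slot at TAIL */

    	/* Advance TAIL past the new entry, wrapping at the queue end. */
    	tail = (tail + CWQ_ENTRY_SIZE) % (CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE);
    	hv_ret = sun4v_ncs_settail(qhandle, tail);
    	if (hv_ret)
    		return -EINVAL;

    	do {
    		hv_ret = sun4v_ncs_gethead(qhandle, &head);
    		if (hv_ret)
    			return -EINVAL;
    	} while (head != tail);

    	return 0;
    }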
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c new file mode 100644 index 000000000000..8b034337793f --- /dev/null +++ b/drivers/crypto/omap-sham.c | |||
@@ -0,0 +1,1259 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for OMAP SHA1/MD5 HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2010 Nokia Corporation | ||
7 | * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * Some ideas are from old omap-sha1-md5.c driver. | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
17 | |||
18 | #include <linux/version.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/irq.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/scatterlist.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/crypto.h> | ||
34 | #include <linux/cryptohash.h> | ||
35 | #include <crypto/scatterwalk.h> | ||
36 | #include <crypto/algapi.h> | ||
37 | #include <crypto/sha.h> | ||
38 | #include <crypto/hash.h> | ||
39 | #include <crypto/internal/hash.h> | ||
40 | |||
41 | #include <plat/cpu.h> | ||
42 | #include <plat/dma.h> | ||
43 | #include <mach/irqs.h> | ||
44 | |||
45 | #define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04)) | ||
46 | #define SHA_REG_DIN(x) (0x1C + ((x) * 0x04)) | ||
47 | |||
48 | #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE | ||
49 | #define MD5_DIGEST_SIZE 16 | ||
50 | |||
51 | #define SHA_REG_DIGCNT 0x14 | ||
52 | |||
53 | #define SHA_REG_CTRL 0x18 | ||
54 | #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) | ||
55 | #define SHA_REG_CTRL_CLOSE_HASH (1 << 4) | ||
56 | #define SHA_REG_CTRL_ALGO_CONST (1 << 3) | ||
57 | #define SHA_REG_CTRL_ALGO (1 << 2) | ||
58 | #define SHA_REG_CTRL_INPUT_READY (1 << 1) | ||
59 | #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) | ||
60 | |||
61 | #define SHA_REG_REV 0x5C | ||
62 | #define SHA_REG_REV_MAJOR 0xF0 | ||
63 | #define SHA_REG_REV_MINOR 0x0F | ||
64 | |||
65 | #define SHA_REG_MASK 0x60 | ||
66 | #define SHA_REG_MASK_DMA_EN (1 << 3) | ||
67 | #define SHA_REG_MASK_IT_EN (1 << 2) | ||
68 | #define SHA_REG_MASK_SOFTRESET (1 << 1) | ||
69 | #define SHA_REG_AUTOIDLE (1 << 0) | ||
70 | |||
71 | #define SHA_REG_SYSSTATUS 0x64 | ||
72 | #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) | ||
73 | |||
74 | #define DEFAULT_TIMEOUT_INTERVAL HZ | ||
75 | |||
76 | #define FLAGS_FIRST 0x0001 | ||
77 | #define FLAGS_FINUP 0x0002 | ||
78 | #define FLAGS_FINAL 0x0004 | ||
79 | #define FLAGS_FAST 0x0008 | ||
80 | #define FLAGS_SHA1 0x0010 | ||
81 | #define FLAGS_DMA_ACTIVE 0x0020 | ||
82 | #define FLAGS_OUTPUT_READY 0x0040 | ||
83 | #define FLAGS_CLEAN 0x0080 | ||
84 | #define FLAGS_INIT 0x0100 | ||
85 | #define FLAGS_CPU 0x0200 | ||
86 | #define FLAGS_HMAC 0x0400 | ||
87 | |||
88 | /* 3rd byte */ | ||
89 | #define FLAGS_BUSY 16 | ||
90 | |||
91 | #define OP_UPDATE 1 | ||
92 | #define OP_FINAL 2 | ||
93 | |||
94 | struct omap_sham_dev; | ||
95 | |||
96 | struct omap_sham_reqctx { | ||
97 | struct omap_sham_dev *dd; | ||
98 | unsigned long flags; | ||
99 | unsigned long op; | ||
100 | |||
101 | size_t digcnt; | ||
102 | u8 *buffer; | ||
103 | size_t bufcnt; | ||
104 | size_t buflen; | ||
105 | dma_addr_t dma_addr; | ||
106 | |||
107 | /* walk state */ | ||
108 | struct scatterlist *sg; | ||
109 | unsigned int offset; /* offset in current sg */ | ||
110 | unsigned int total; /* total request */ | ||
111 | }; | ||
112 | |||
113 | struct omap_sham_hmac_ctx { | ||
114 | struct crypto_shash *shash; | ||
115 | u8 ipad[SHA1_MD5_BLOCK_SIZE]; | ||
116 | u8 opad[SHA1_MD5_BLOCK_SIZE]; | ||
117 | }; | ||
118 | |||
119 | struct omap_sham_ctx { | ||
120 | struct omap_sham_dev *dd; | ||
121 | |||
122 | unsigned long flags; | ||
123 | |||
124 | /* fallback stuff */ | ||
125 | struct crypto_shash *fallback; | ||
126 | |||
127 | struct omap_sham_hmac_ctx base[0]; | ||
128 | }; | ||
129 | |||
130 | #define OMAP_SHAM_QUEUE_LENGTH 1 | ||
131 | |||
132 | struct omap_sham_dev { | ||
133 | struct list_head list; | ||
134 | unsigned long phys_base; | ||
135 | struct device *dev; | ||
136 | void __iomem *io_base; | ||
137 | int irq; | ||
138 | struct clk *iclk; | ||
139 | spinlock_t lock; | ||
140 | int dma; | ||
141 | int dma_lch; | ||
142 | struct tasklet_struct done_task; | ||
143 | struct tasklet_struct queue_task; | ||
144 | |||
145 | unsigned long flags; | ||
146 | struct crypto_queue queue; | ||
147 | struct ahash_request *req; | ||
148 | }; | ||
149 | |||
150 | struct omap_sham_drv { | ||
151 | struct list_head dev_list; | ||
152 | spinlock_t lock; | ||
153 | unsigned long flags; | ||
154 | }; | ||
155 | |||
156 | static struct omap_sham_drv sham = { | ||
157 | .dev_list = LIST_HEAD_INIT(sham.dev_list), | ||
158 | .lock = __SPIN_LOCK_UNLOCKED(sham.lock), | ||
159 | }; | ||
160 | |||
161 | static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) | ||
162 | { | ||
163 | return __raw_readl(dd->io_base + offset); | ||
164 | } | ||
165 | |||
166 | static inline void omap_sham_write(struct omap_sham_dev *dd, | ||
167 | u32 offset, u32 value) | ||
168 | { | ||
169 | __raw_writel(value, dd->io_base + offset); | ||
170 | } | ||
171 | |||
172 | static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, | ||
173 | u32 value, u32 mask) | ||
174 | { | ||
175 | u32 val; | ||
176 | |||
177 | val = omap_sham_read(dd, address); | ||
178 | val &= ~mask; | ||
179 | val |= value; | ||
180 | omap_sham_write(dd, address, val); | ||
181 | } | ||
182 | |||
183 | static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) | ||
184 | { | ||
185 | unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; | ||
186 | |||
187 | while (!(omap_sham_read(dd, offset) & bit)) { | ||
188 | if (time_is_before_jiffies(timeout)) | ||
189 | return -ETIMEDOUT; | ||
190 | } | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static void omap_sham_copy_hash(struct ahash_request *req, int out) | ||
196 | { | ||
197 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
198 | u32 *hash = (u32 *)req->result; | ||
199 | int i; | ||
200 | |||
201 | if (likely(ctx->flags & FLAGS_SHA1)) { | ||
202 | /* SHA1 results are in big endian */ | ||
203 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) | ||
204 | if (out) | ||
205 | hash[i] = be32_to_cpu(omap_sham_read(ctx->dd, | ||
206 | SHA_REG_DIGEST(i))); | ||
207 | else | ||
208 | omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), | ||
209 | cpu_to_be32(hash[i])); | ||
210 | } else { | ||
211 | /* MD5 results are in little endian */ | ||
212 | for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) | ||
213 | if (out) | ||
214 | hash[i] = le32_to_cpu(omap_sham_read(ctx->dd, | ||
215 | SHA_REG_DIGEST(i))); | ||
216 | else | ||
217 | omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), | ||
218 | cpu_to_le32(hash[i])); | ||
219 | } | ||
220 | } | ||
221 | |||
222 | static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, | ||
223 | int final, int dma) | ||
224 | { | ||
225 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
226 | u32 val = length << 5, mask; | ||
227 | |||
228 | if (unlikely(!ctx->digcnt)) { | ||
229 | |||
230 | clk_enable(dd->iclk); | ||
231 | |||
232 | if (!(dd->flags & FLAGS_INIT)) { | ||
233 | omap_sham_write_mask(dd, SHA_REG_MASK, | ||
234 | SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); | ||
235 | |||
236 | if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, | ||
237 | SHA_REG_SYSSTATUS_RESETDONE)) | ||
238 | return -ETIMEDOUT; | ||
239 | |||
240 | dd->flags |= FLAGS_INIT; | ||
241 | } | ||
242 | } else { | ||
243 | omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); | ||
244 | } | ||
245 | |||
246 | omap_sham_write_mask(dd, SHA_REG_MASK, | ||
247 | SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), | ||
248 | SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); | ||
249 | /* | ||
250 | * Setting ALGO_CONST only for the first iteration | ||
251 | * and CLOSE_HASH only for the last one. | ||
252 | */ | ||
253 | if (ctx->flags & FLAGS_SHA1) | ||
254 | val |= SHA_REG_CTRL_ALGO; | ||
255 | if (!ctx->digcnt) | ||
256 | val |= SHA_REG_CTRL_ALGO_CONST; | ||
257 | if (final) | ||
258 | val |= SHA_REG_CTRL_CLOSE_HASH; | ||
259 | |||
260 | mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | | ||
261 | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; | ||
262 | |||
263 | omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, | ||
269 | size_t length, int final) | ||
270 | { | ||
271 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
272 | int err, count, len32; | ||
273 | const u32 *buffer = (const u32 *)buf; | ||
274 | |||
275 | dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", | ||
276 | ctx->digcnt, length, final); | ||
277 | |||
278 | err = omap_sham_write_ctrl(dd, length, final, 0); | ||
279 | if (err) | ||
280 | return err; | ||
281 | |||
282 | if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) | ||
283 | return -ETIMEDOUT; | ||
284 | |||
285 | ctx->digcnt += length; | ||
286 | |||
287 | if (final) | ||
288 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | ||
289 | |||
290 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
291 | |||
292 | for (count = 0; count < len32; count++) | ||
293 | omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]); | ||
294 | |||
295 | return -EINPROGRESS; | ||
296 | } | ||
297 | |||
298 | static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, | ||
299 | size_t length, int final) | ||
300 | { | ||
301 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
302 | int err, len32; | ||
303 | |||
304 | dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", | ||
305 | ctx->digcnt, length, final); | ||
306 | |||
307 | /* flush cache entries related to our page */ | ||
308 | if (dma_addr == ctx->dma_addr) | ||
309 | dma_sync_single_for_device(dd->dev, dma_addr, length, | ||
310 | DMA_TO_DEVICE); | ||
311 | |||
312 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | ||
313 | |||
314 | omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32, | ||
315 | 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC); | ||
316 | |||
317 | omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, | ||
318 | dma_addr, 0, 0); | ||
319 | |||
320 | err = omap_sham_write_ctrl(dd, length, final, 1); | ||
321 | if (err) | ||
322 | return err; | ||
323 | |||
324 | ctx->digcnt += length; | ||
325 | |||
326 | if (final) | ||
327 | ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ | ||
328 | |||
329 | dd->flags |= FLAGS_DMA_ACTIVE; | ||
330 | |||
331 | omap_start_dma(dd->dma_lch); | ||
332 | |||
333 | return -EINPROGRESS; | ||
334 | } | ||
335 | |||
336 | static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, | ||
337 | const u8 *data, size_t length) | ||
338 | { | ||
339 | size_t count = min(length, ctx->buflen - ctx->bufcnt); | ||
340 | |||
341 | count = min(count, ctx->total); | ||
342 | if (count <= 0) | ||
343 | return 0; | ||
344 | memcpy(ctx->buffer + ctx->bufcnt, data, count); | ||
345 | ctx->bufcnt += count; | ||
346 | |||
347 | return count; | ||
348 | } | ||
349 | |||
350 | static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) | ||
351 | { | ||
352 | size_t count; | ||
353 | |||
354 | while (ctx->sg) { | ||
355 | count = omap_sham_append_buffer(ctx, | ||
356 | sg_virt(ctx->sg) + ctx->offset, | ||
357 | ctx->sg->length - ctx->offset); | ||
358 | if (!count) | ||
359 | break; | ||
360 | ctx->offset += count; | ||
361 | ctx->total -= count; | ||
362 | if (ctx->offset == ctx->sg->length) { | ||
363 | ctx->sg = sg_next(ctx->sg); | ||
364 | if (ctx->sg) | ||
365 | ctx->offset = 0; | ||
366 | else | ||
367 | ctx->total = 0; | ||
368 | } | ||
369 | } | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) | ||
375 | { | ||
376 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
377 | unsigned int final; | ||
378 | size_t count; | ||
379 | |||
380 | if (!ctx->total) | ||
381 | return 0; | ||
382 | |||
383 | omap_sham_append_sg(ctx); | ||
384 | |||
385 | final = (ctx->flags & FLAGS_FINUP) && !ctx->total; | ||
386 | |||
387 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", | ||
388 | ctx->bufcnt, ctx->digcnt, final); | ||
389 | |||
390 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | ||
391 | count = ctx->bufcnt; | ||
392 | ctx->bufcnt = 0; | ||
393 | return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final); | ||
394 | } | ||
395 | |||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) | ||
400 | { | ||
401 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
402 | unsigned int length; | ||
403 | |||
404 | ctx->flags |= FLAGS_FAST; | ||
405 | |||
406 | length = min(ctx->total, sg_dma_len(ctx->sg)); | ||
407 | ctx->total = length; | ||
408 | |||
409 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | ||
410 | dev_err(dd->dev, "dma_map_sg error\n"); | ||
411 | return -EINVAL; | ||
412 | } | ||
413 | |||
414 | ctx->total -= length; | ||
415 | |||
416 | return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); | ||
417 | } | ||
418 | |||
419 | static int omap_sham_update_cpu(struct omap_sham_dev *dd) | ||
420 | { | ||
421 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
422 | int bufcnt; | ||
423 | |||
424 | omap_sham_append_sg(ctx); | ||
425 | bufcnt = ctx->bufcnt; | ||
426 | ctx->bufcnt = 0; | ||
427 | |||
428 | return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); | ||
429 | } | ||
430 | |||
431 | static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) | ||
432 | { | ||
433 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
434 | |||
435 | omap_stop_dma(dd->dma_lch); | ||
436 | if (ctx->flags & FLAGS_FAST) | ||
437 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | ||
438 | |||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static void omap_sham_cleanup(struct ahash_request *req) | ||
443 | { | ||
444 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
445 | struct omap_sham_dev *dd = ctx->dd; | ||
446 | unsigned long flags; | ||
447 | |||
448 | spin_lock_irqsave(&dd->lock, flags); | ||
449 | if (ctx->flags & FLAGS_CLEAN) { | ||
450 | spin_unlock_irqrestore(&dd->lock, flags); | ||
451 | return; | ||
452 | } | ||
453 | ctx->flags |= FLAGS_CLEAN; | ||
454 | spin_unlock_irqrestore(&dd->lock, flags); | ||
455 | |||
456 | if (ctx->digcnt) | ||
457 | clk_disable(dd->iclk); | ||
458 | |||
459 | if (ctx->dma_addr) | ||
460 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, | ||
461 | DMA_TO_DEVICE); | ||
462 | |||
463 | if (ctx->buffer) | ||
464 | free_page((unsigned long)ctx->buffer); | ||
465 | |||
466 | dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); | ||
467 | } | ||
468 | |||
469 | static int omap_sham_init(struct ahash_request *req) | ||
470 | { | ||
471 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | ||
472 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | ||
473 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
474 | struct omap_sham_dev *dd = NULL, *tmp; | ||
475 | |||
476 | spin_lock_bh(&sham.lock); | ||
477 | if (!tctx->dd) { | ||
478 | list_for_each_entry(tmp, &sham.dev_list, list) { | ||
479 | dd = tmp; | ||
480 | break; | ||
481 | } | ||
482 | tctx->dd = dd; | ||
483 | } else { | ||
484 | dd = tctx->dd; | ||
485 | } | ||
486 | spin_unlock_bh(&sham.lock); | ||
487 | |||
488 | ctx->dd = dd; | ||
489 | |||
490 | ctx->flags = 0; | ||
491 | |||
492 | ctx->flags |= FLAGS_FIRST; | ||
493 | |||
494 | dev_dbg(dd->dev, "init: digest size: %d\n", | ||
495 | crypto_ahash_digestsize(tfm)); | ||
496 | |||
497 | if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) | ||
498 | ctx->flags |= FLAGS_SHA1; | ||
499 | |||
500 | ctx->bufcnt = 0; | ||
501 | ctx->digcnt = 0; | ||
502 | |||
503 | ctx->buflen = PAGE_SIZE; | ||
504 | ctx->buffer = (void *)__get_free_page( | ||
505 | (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
506 | GFP_KERNEL : GFP_ATOMIC); | ||
507 | if (!ctx->buffer) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, | ||
511 | DMA_TO_DEVICE); | ||
512 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { | ||
513 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); | ||
514 | free_page((unsigned long)ctx->buffer); | ||
515 | return -EINVAL; | ||
516 | } | ||
517 | |||
518 | if (tctx->flags & FLAGS_HMAC) { | ||
519 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
520 | |||
521 | memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); | ||
522 | ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; | ||
523 | ctx->flags |= FLAGS_HMAC; | ||
524 | } | ||
525 | |||
526 | return 0; | ||
527 | |||
528 | } | ||
529 | |||
530 | static int omap_sham_update_req(struct omap_sham_dev *dd) | ||
531 | { | ||
532 | struct ahash_request *req = dd->req; | ||
533 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
534 | int err; | ||
535 | |||
536 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", | ||
537 | ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0); | ||
538 | |||
539 | if (ctx->flags & FLAGS_CPU) | ||
540 | err = omap_sham_update_cpu(dd); | ||
541 | else if (ctx->flags & FLAGS_FAST) | ||
542 | err = omap_sham_update_dma_fast(dd); | ||
543 | else | ||
544 | err = omap_sham_update_dma_slow(dd); | ||
545 | |||
546 | 	/* wait for DMA completion before we can take more data */ | ||
547 | dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); | ||
548 | |||
549 | return err; | ||
550 | } | ||
551 | |||
552 | static int omap_sham_final_req(struct omap_sham_dev *dd) | ||
553 | { | ||
554 | struct ahash_request *req = dd->req; | ||
555 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
556 | int err = 0, use_dma = 1; | ||
557 | |||
558 | if (ctx->bufcnt <= 64) | ||
559 | /* faster to handle last block with cpu */ | ||
560 | use_dma = 0; | ||
561 | |||
562 | if (use_dma) | ||
563 | err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1); | ||
564 | else | ||
565 | err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); | ||
566 | |||
567 | ctx->bufcnt = 0; | ||
568 | |||
569 | if (err != -EINPROGRESS) | ||
570 | omap_sham_cleanup(req); | ||
571 | |||
572 | dev_dbg(dd->dev, "final_req: err: %d\n", err); | ||
573 | |||
574 | return err; | ||
575 | } | ||
576 | |||
577 | static int omap_sham_finish_req_hmac(struct ahash_request *req) | ||
578 | { | ||
579 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
580 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
581 | int bs = crypto_shash_blocksize(bctx->shash); | ||
582 | int ds = crypto_shash_digestsize(bctx->shash); | ||
583 | struct { | ||
584 | struct shash_desc shash; | ||
585 | char ctx[crypto_shash_descsize(bctx->shash)]; | ||
586 | } desc; | ||
587 | |||
588 | desc.shash.tfm = bctx->shash; | ||
589 | desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ | ||
590 | |||
591 | return crypto_shash_init(&desc.shash) ?: | ||
592 | crypto_shash_update(&desc.shash, bctx->opad, bs) ?: | ||
593 | crypto_shash_finup(&desc.shash, req->result, ds, req->result); | ||
594 | } | ||
595 | |||
596 | static void omap_sham_finish_req(struct ahash_request *req, int err) | ||
597 | { | ||
598 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
599 | |||
600 | if (!err) { | ||
601 | omap_sham_copy_hash(ctx->dd->req, 1); | ||
602 | if (ctx->flags & FLAGS_HMAC) | ||
603 | err = omap_sham_finish_req_hmac(req); | ||
604 | } | ||
605 | |||
606 | if (ctx->flags & FLAGS_FINAL) | ||
607 | omap_sham_cleanup(req); | ||
608 | |||
609 | clear_bit(FLAGS_BUSY, &ctx->dd->flags); | ||
610 | |||
611 | if (req->base.complete) | ||
612 | req->base.complete(&req->base, err); | ||
613 | } | ||
614 | |||
615 | static int omap_sham_handle_queue(struct omap_sham_dev *dd) | ||
616 | { | ||
617 | struct crypto_async_request *async_req, *backlog; | ||
618 | struct omap_sham_reqctx *ctx; | ||
619 | struct ahash_request *req, *prev_req; | ||
620 | unsigned long flags; | ||
621 | int err = 0; | ||
622 | |||
623 | if (test_and_set_bit(FLAGS_BUSY, &dd->flags)) | ||
624 | return 0; | ||
625 | |||
626 | spin_lock_irqsave(&dd->lock, flags); | ||
627 | backlog = crypto_get_backlog(&dd->queue); | ||
628 | async_req = crypto_dequeue_request(&dd->queue); | ||
629 | if (!async_req) | ||
630 | clear_bit(FLAGS_BUSY, &dd->flags); | ||
631 | spin_unlock_irqrestore(&dd->lock, flags); | ||
632 | |||
633 | if (!async_req) | ||
634 | return 0; | ||
635 | |||
636 | if (backlog) | ||
637 | backlog->complete(backlog, -EINPROGRESS); | ||
638 | |||
639 | req = ahash_request_cast(async_req); | ||
640 | |||
641 | prev_req = dd->req; | ||
642 | dd->req = req; | ||
643 | |||
644 | ctx = ahash_request_ctx(req); | ||
645 | |||
646 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | ||
647 | ctx->op, req->nbytes); | ||
648 | |||
649 | if (req != prev_req && ctx->digcnt) | ||
650 | /* request has changed - restore hash */ | ||
651 | omap_sham_copy_hash(req, 0); | ||
652 | |||
653 | if (ctx->op == OP_UPDATE) { | ||
654 | err = omap_sham_update_req(dd); | ||
655 | if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP)) | ||
656 | /* no final() after finup() */ | ||
657 | err = omap_sham_final_req(dd); | ||
658 | } else if (ctx->op == OP_FINAL) { | ||
659 | err = omap_sham_final_req(dd); | ||
660 | } | ||
661 | |||
662 | if (err != -EINPROGRESS) { | ||
663 | /* done_task will not finish it, so do it here */ | ||
664 | omap_sham_finish_req(req, err); | ||
665 | tasklet_schedule(&dd->queue_task); | ||
666 | } | ||
667 | |||
668 | dev_dbg(dd->dev, "exit, err: %d\n", err); | ||
669 | |||
670 | return err; | ||
671 | } | ||
672 | |||
673 | static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) | ||
674 | { | ||
675 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
676 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
677 | struct omap_sham_dev *dd = tctx->dd; | ||
678 | unsigned long flags; | ||
679 | int err; | ||
680 | |||
681 | ctx->op = op; | ||
682 | |||
683 | spin_lock_irqsave(&dd->lock, flags); | ||
684 | err = ahash_enqueue_request(&dd->queue, req); | ||
685 | spin_unlock_irqrestore(&dd->lock, flags); | ||
686 | |||
687 | omap_sham_handle_queue(dd); | ||
688 | |||
689 | return err; | ||
690 | } | ||
691 | |||
692 | static int omap_sham_update(struct ahash_request *req) | ||
693 | { | ||
694 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
695 | |||
696 | if (!req->nbytes) | ||
697 | return 0; | ||
698 | |||
699 | ctx->total = req->nbytes; | ||
700 | ctx->sg = req->src; | ||
701 | ctx->offset = 0; | ||
702 | |||
703 | if (ctx->flags & FLAGS_FINUP) { | ||
704 | if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { | ||
705 | 			/* | ||
706 | 			 * OMAP HW accel works only with buffers >= 9; | ||
707 | 			 * we will switch to the bypass path in final(), | ||
708 | 			 * which sees the same request and data. | ||
709 | 			 */ | ||
710 | omap_sham_append_sg(ctx); | ||
711 | return 0; | ||
712 | } else if (ctx->bufcnt + ctx->total <= 64) { | ||
713 | ctx->flags |= FLAGS_CPU; | ||
714 | } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) { | ||
715 | 			/* maybe we can use faster functions */ | ||
716 | int aligned = IS_ALIGNED((u32)ctx->sg->offset, | ||
717 | sizeof(u32)); | ||
718 | |||
719 | if (aligned && (ctx->flags & FLAGS_FIRST)) | ||
720 | /* digest: first and final */ | ||
721 | ctx->flags |= FLAGS_FAST; | ||
722 | |||
723 | ctx->flags &= ~FLAGS_FIRST; | ||
724 | } | ||
725 | } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { | ||
726 | 		/* if not finup -> not fast */ | ||
727 | omap_sham_append_sg(ctx); | ||
728 | return 0; | ||
729 | } | ||
730 | |||
731 | return omap_sham_enqueue(req, OP_UPDATE); | ||
732 | } | ||
733 | |||
734 | static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags, | ||
735 | const u8 *data, unsigned int len, u8 *out) | ||
736 | { | ||
737 | struct { | ||
738 | struct shash_desc shash; | ||
739 | char ctx[crypto_shash_descsize(shash)]; | ||
740 | } desc; | ||
741 | |||
742 | desc.shash.tfm = shash; | ||
743 | desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
744 | |||
745 | return crypto_shash_digest(&desc.shash, data, len, out); | ||
746 | } | ||
747 | |||
748 | static int omap_sham_final_shash(struct ahash_request *req) | ||
749 | { | ||
750 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | ||
751 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
752 | |||
753 | return omap_sham_shash_digest(tctx->fallback, req->base.flags, | ||
754 | ctx->buffer, ctx->bufcnt, req->result); | ||
755 | } | ||
756 | |||
757 | static int omap_sham_final(struct ahash_request *req) | ||
758 | { | ||
759 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
760 | int err = 0; | ||
761 | |||
762 | ctx->flags |= FLAGS_FINUP; | ||
763 | |||
764 | /* OMAP HW accel works only with buffers >= 9 */ | ||
765 | /* HMAC is always >= 9 because of ipad */ | ||
766 | if ((ctx->digcnt + ctx->bufcnt) < 9) | ||
767 | err = omap_sham_final_shash(req); | ||
768 | else if (ctx->bufcnt) | ||
769 | return omap_sham_enqueue(req, OP_FINAL); | ||
770 | |||
771 | omap_sham_cleanup(req); | ||
772 | |||
773 | return err; | ||
774 | } | ||
775 | |||
776 | static int omap_sham_finup(struct ahash_request *req) | ||
777 | { | ||
778 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
779 | int err1, err2; | ||
780 | |||
781 | ctx->flags |= FLAGS_FINUP; | ||
782 | |||
783 | err1 = omap_sham_update(req); | ||
784 | if (err1 == -EINPROGRESS) | ||
785 | return err1; | ||
786 | /* | ||
787 | 	 * final() always has to be called to clean up resources, | ||
788 | 	 * even if update() failed, except for -EINPROGRESS | ||
789 | */ | ||
790 | err2 = omap_sham_final(req); | ||
791 | |||
792 | return err1 ?: err2; | ||
793 | } | ||
794 | |||
795 | static int omap_sham_digest(struct ahash_request *req) | ||
796 | { | ||
797 | return omap_sham_init(req) ?: omap_sham_finup(req); | ||
798 | } | ||
799 | |||
800 | static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
801 | unsigned int keylen) | ||
802 | { | ||
803 | struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); | ||
804 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
805 | int bs = crypto_shash_blocksize(bctx->shash); | ||
806 | int ds = crypto_shash_digestsize(bctx->shash); | ||
807 | int err, i; | ||
808 | err = crypto_shash_setkey(tctx->fallback, key, keylen); | ||
809 | if (err) | ||
810 | return err; | ||
811 | |||
812 | if (keylen > bs) { | ||
813 | err = omap_sham_shash_digest(bctx->shash, | ||
814 | crypto_shash_get_flags(bctx->shash), | ||
815 | key, keylen, bctx->ipad); | ||
816 | if (err) | ||
817 | return err; | ||
818 | keylen = ds; | ||
819 | } else { | ||
820 | memcpy(bctx->ipad, key, keylen); | ||
821 | } | ||
822 | |||
823 | memset(bctx->ipad + keylen, 0, bs - keylen); | ||
824 | memcpy(bctx->opad, bctx->ipad, bs); | ||
825 | |||
826 | for (i = 0; i < bs; i++) { | ||
827 | bctx->ipad[i] ^= 0x36; | ||
828 | bctx->opad[i] ^= 0x5c; | ||
829 | } | ||
830 | |||
831 | return err; | ||
832 | } | ||
833 | |||
834 | static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) | ||
835 | { | ||
836 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); | ||
837 | const char *alg_name = crypto_tfm_alg_name(tfm); | ||
838 | |||
839 | /* Allocate a fallback and abort if it failed. */ | ||
840 | tctx->fallback = crypto_alloc_shash(alg_name, 0, | ||
841 | CRYPTO_ALG_NEED_FALLBACK); | ||
842 | if (IS_ERR(tctx->fallback)) { | ||
843 | pr_err("omap-sham: fallback driver '%s' " | ||
844 | "could not be loaded.\n", alg_name); | ||
845 | return PTR_ERR(tctx->fallback); | ||
846 | } | ||
847 | |||
848 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
849 | sizeof(struct omap_sham_reqctx)); | ||
850 | |||
851 | if (alg_base) { | ||
852 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
853 | tctx->flags |= FLAGS_HMAC; | ||
854 | bctx->shash = crypto_alloc_shash(alg_base, 0, | ||
855 | CRYPTO_ALG_NEED_FALLBACK); | ||
856 | if (IS_ERR(bctx->shash)) { | ||
857 | pr_err("omap-sham: base driver '%s' " | ||
858 | "could not be loaded.\n", alg_base); | ||
859 | crypto_free_shash(tctx->fallback); | ||
860 | return PTR_ERR(bctx->shash); | ||
861 | } | ||
862 | |||
863 | } | ||
864 | |||
865 | return 0; | ||
866 | } | ||
867 | |||
868 | static int omap_sham_cra_init(struct crypto_tfm *tfm) | ||
869 | { | ||
870 | return omap_sham_cra_init_alg(tfm, NULL); | ||
871 | } | ||
872 | |||
873 | static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) | ||
874 | { | ||
875 | return omap_sham_cra_init_alg(tfm, "sha1"); | ||
876 | } | ||
877 | |||
878 | static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) | ||
879 | { | ||
880 | return omap_sham_cra_init_alg(tfm, "md5"); | ||
881 | } | ||
882 | |||
883 | static void omap_sham_cra_exit(struct crypto_tfm *tfm) | ||
884 | { | ||
885 | struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); | ||
886 | |||
887 | crypto_free_shash(tctx->fallback); | ||
888 | tctx->fallback = NULL; | ||
889 | |||
890 | if (tctx->flags & FLAGS_HMAC) { | ||
891 | struct omap_sham_hmac_ctx *bctx = tctx->base; | ||
892 | crypto_free_shash(bctx->shash); | ||
893 | } | ||
894 | } | ||
895 | |||
896 | static struct ahash_alg algs[] = { | ||
897 | { | ||
898 | .init = omap_sham_init, | ||
899 | .update = omap_sham_update, | ||
900 | .final = omap_sham_final, | ||
901 | .finup = omap_sham_finup, | ||
902 | .digest = omap_sham_digest, | ||
903 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
904 | .halg.base = { | ||
905 | .cra_name = "sha1", | ||
906 | .cra_driver_name = "omap-sha1", | ||
907 | .cra_priority = 100, | ||
908 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
909 | CRYPTO_ALG_ASYNC | | ||
910 | CRYPTO_ALG_NEED_FALLBACK, | ||
911 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
912 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
913 | .cra_alignmask = 0, | ||
914 | .cra_module = THIS_MODULE, | ||
915 | .cra_init = omap_sham_cra_init, | ||
916 | .cra_exit = omap_sham_cra_exit, | ||
917 | } | ||
918 | }, | ||
919 | { | ||
920 | .init = omap_sham_init, | ||
921 | .update = omap_sham_update, | ||
922 | .final = omap_sham_final, | ||
923 | .finup = omap_sham_finup, | ||
924 | .digest = omap_sham_digest, | ||
925 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
926 | .halg.base = { | ||
927 | .cra_name = "md5", | ||
928 | .cra_driver_name = "omap-md5", | ||
929 | .cra_priority = 100, | ||
930 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
931 | CRYPTO_ALG_ASYNC | | ||
932 | CRYPTO_ALG_NEED_FALLBACK, | ||
933 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
934 | .cra_ctxsize = sizeof(struct omap_sham_ctx), | ||
935 | .cra_alignmask = 0, | ||
936 | .cra_module = THIS_MODULE, | ||
937 | .cra_init = omap_sham_cra_init, | ||
938 | .cra_exit = omap_sham_cra_exit, | ||
939 | } | ||
940 | }, | ||
941 | { | ||
942 | .init = omap_sham_init, | ||
943 | .update = omap_sham_update, | ||
944 | .final = omap_sham_final, | ||
945 | .finup = omap_sham_finup, | ||
946 | .digest = omap_sham_digest, | ||
947 | .setkey = omap_sham_setkey, | ||
948 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
949 | .halg.base = { | ||
950 | .cra_name = "hmac(sha1)", | ||
951 | .cra_driver_name = "omap-hmac-sha1", | ||
952 | .cra_priority = 100, | ||
953 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
954 | CRYPTO_ALG_ASYNC | | ||
955 | CRYPTO_ALG_NEED_FALLBACK, | ||
956 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
957 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
958 | sizeof(struct omap_sham_hmac_ctx), | ||
959 | .cra_alignmask = 0, | ||
960 | .cra_module = THIS_MODULE, | ||
961 | .cra_init = omap_sham_cra_sha1_init, | ||
962 | .cra_exit = omap_sham_cra_exit, | ||
963 | } | ||
964 | }, | ||
965 | { | ||
966 | .init = omap_sham_init, | ||
967 | .update = omap_sham_update, | ||
968 | .final = omap_sham_final, | ||
969 | .finup = omap_sham_finup, | ||
970 | .digest = omap_sham_digest, | ||
971 | .setkey = omap_sham_setkey, | ||
972 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
973 | .halg.base = { | ||
974 | .cra_name = "hmac(md5)", | ||
975 | .cra_driver_name = "omap-hmac-md5", | ||
976 | .cra_priority = 100, | ||
977 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
978 | CRYPTO_ALG_ASYNC | | ||
979 | CRYPTO_ALG_NEED_FALLBACK, | ||
980 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
981 | .cra_ctxsize = sizeof(struct omap_sham_ctx) + | ||
982 | sizeof(struct omap_sham_hmac_ctx), | ||
983 | .cra_alignmask = 0, | ||
984 | .cra_module = THIS_MODULE, | ||
985 | .cra_init = omap_sham_cra_md5_init, | ||
986 | .cra_exit = omap_sham_cra_exit, | ||
987 | } | ||
988 | } | ||
989 | }; | ||
990 | |||
991 | static void omap_sham_done_task(unsigned long data) | ||
992 | { | ||
993 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | ||
994 | struct ahash_request *req = dd->req; | ||
995 | struct omap_sham_reqctx *ctx = ahash_request_ctx(req); | ||
996 | int ready = 0; | ||
997 | |||
998 | if (ctx->flags & FLAGS_OUTPUT_READY) { | ||
999 | ctx->flags &= ~FLAGS_OUTPUT_READY; | ||
1000 | ready = 1; | ||
1001 | } | ||
1002 | |||
1003 | if (dd->flags & FLAGS_DMA_ACTIVE) { | ||
1004 | dd->flags &= ~FLAGS_DMA_ACTIVE; | ||
1005 | omap_sham_update_dma_stop(dd); | ||
1006 | omap_sham_update_dma_slow(dd); | ||
1007 | } | ||
1008 | |||
1009 | if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) { | ||
1010 | dev_dbg(dd->dev, "update done\n"); | ||
1011 | /* finish current request */ | ||
1012 | omap_sham_finish_req(req, 0); | ||
1013 | /* start new request */ | ||
1014 | omap_sham_handle_queue(dd); | ||
1015 | } | ||
1016 | } | ||
1017 | |||
1018 | static void omap_sham_queue_task(unsigned long data) | ||
1019 | { | ||
1020 | struct omap_sham_dev *dd = (struct omap_sham_dev *)data; | ||
1021 | |||
1022 | omap_sham_handle_queue(dd); | ||
1023 | } | ||
1024 | |||
1025 | static irqreturn_t omap_sham_irq(int irq, void *dev_id) | ||
1026 | { | ||
1027 | struct omap_sham_dev *dd = dev_id; | ||
1028 | struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); | ||
1029 | |||
1030 | if (!ctx) { | ||
1031 | dev_err(dd->dev, "unknown interrupt.\n"); | ||
1032 | return IRQ_HANDLED; | ||
1033 | } | ||
1034 | |||
1035 | if (unlikely(ctx->flags & FLAGS_FINAL)) | ||
1036 | /* final -> allow device to go to power-saving mode */ | ||
1037 | omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); | ||
1038 | |||
1039 | omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, | ||
1040 | SHA_REG_CTRL_OUTPUT_READY); | ||
1041 | omap_sham_read(dd, SHA_REG_CTRL); | ||
1042 | |||
1043 | ctx->flags |= FLAGS_OUTPUT_READY; | ||
1044 | tasklet_schedule(&dd->done_task); | ||
1045 | |||
1046 | return IRQ_HANDLED; | ||
1047 | } | ||
1048 | |||
1049 | static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) | ||
1050 | { | ||
1051 | struct omap_sham_dev *dd = data; | ||
1052 | |||
1053 | if (likely(lch == dd->dma_lch)) | ||
1054 | tasklet_schedule(&dd->done_task); | ||
1055 | } | ||
1056 | |||
1057 | static int omap_sham_dma_init(struct omap_sham_dev *dd) | ||
1058 | { | ||
1059 | int err; | ||
1060 | |||
1061 | dd->dma_lch = -1; | ||
1062 | |||
1063 | err = omap_request_dma(dd->dma, dev_name(dd->dev), | ||
1064 | omap_sham_dma_callback, dd, &dd->dma_lch); | ||
1065 | if (err) { | ||
1066 | dev_err(dd->dev, "Unable to request DMA channel\n"); | ||
1067 | return err; | ||
1068 | } | ||
1069 | omap_set_dma_dest_params(dd->dma_lch, 0, | ||
1070 | OMAP_DMA_AMODE_CONSTANT, | ||
1071 | dd->phys_base + SHA_REG_DIN(0), 0, 16); | ||
1072 | |||
1073 | omap_set_dma_dest_burst_mode(dd->dma_lch, | ||
1074 | OMAP_DMA_DATA_BURST_16); | ||
1075 | |||
1076 | return 0; | ||
1077 | } | ||
1078 | |||
1079 | static void omap_sham_dma_cleanup(struct omap_sham_dev *dd) | ||
1080 | { | ||
1081 | if (dd->dma_lch >= 0) { | ||
1082 | omap_free_dma(dd->dma_lch); | ||
1083 | dd->dma_lch = -1; | ||
1084 | } | ||
1085 | } | ||
1086 | |||
1087 | static int __devinit omap_sham_probe(struct platform_device *pdev) | ||
1088 | { | ||
1089 | struct omap_sham_dev *dd; | ||
1090 | struct device *dev = &pdev->dev; | ||
1091 | struct resource *res; | ||
1092 | int err, i, j; | ||
1093 | |||
1094 | dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); | ||
1095 | if (dd == NULL) { | ||
1096 | dev_err(dev, "unable to alloc data struct.\n"); | ||
1097 | err = -ENOMEM; | ||
1098 | goto data_err; | ||
1099 | } | ||
1100 | dd->dev = dev; | ||
1101 | platform_set_drvdata(pdev, dd); | ||
1102 | |||
1103 | INIT_LIST_HEAD(&dd->list); | ||
1104 | spin_lock_init(&dd->lock); | ||
1105 | tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); | ||
1106 | tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd); | ||
1107 | crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); | ||
1108 | |||
1109 | dd->irq = -1; | ||
1110 | |||
1111 | /* Get the base address */ | ||
1112 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1113 | if (!res) { | ||
1114 | dev_err(dev, "no MEM resource info\n"); | ||
1115 | err = -ENODEV; | ||
1116 | goto res_err; | ||
1117 | } | ||
1118 | dd->phys_base = res->start; | ||
1119 | |||
1120 | /* Get the DMA */ | ||
1121 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1122 | if (!res) { | ||
1123 | dev_err(dev, "no DMA resource info\n"); | ||
1124 | err = -ENODEV; | ||
1125 | goto res_err; | ||
1126 | } | ||
1127 | dd->dma = res->start; | ||
1128 | |||
1129 | /* Get the IRQ */ | ||
1130 | dd->irq = platform_get_irq(pdev, 0); | ||
1131 | if (dd->irq < 0) { | ||
1132 | dev_err(dev, "no IRQ resource info\n"); | ||
1133 | err = dd->irq; | ||
1134 | goto res_err; | ||
1135 | } | ||
1136 | |||
1137 | err = request_irq(dd->irq, omap_sham_irq, | ||
1138 | IRQF_TRIGGER_LOW, dev_name(dev), dd); | ||
1139 | if (err) { | ||
1140 | dev_err(dev, "unable to request irq.\n"); | ||
1141 | goto res_err; | ||
1142 | } | ||
1143 | |||
1144 | err = omap_sham_dma_init(dd); | ||
1145 | if (err) | ||
1146 | goto dma_err; | ||
1147 | |||
1148 | /* Initializing the clock */ | ||
1149 | dd->iclk = clk_get(dev, "ick"); | ||
1150 | if (IS_ERR(dd->iclk)) { | ||
1151 | dev_err(dev, "clock initialization failed.\n"); | ||
1152 | err = PTR_ERR(dd->iclk); | ||
1153 | goto clk_err; | ||
1154 | } | ||
1155 | |||
1156 | dd->io_base = ioremap(dd->phys_base, SZ_4K); | ||
1157 | if (!dd->io_base) { | ||
1158 | dev_err(dev, "can't ioremap\n"); | ||
1159 | err = -ENOMEM; | ||
1160 | goto io_err; | ||
1161 | } | ||
1162 | |||
1163 | clk_enable(dd->iclk); | ||
1164 | dev_info(dev, "hw accel on OMAP rev %u.%u\n", | ||
1165 | (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4, | ||
1166 | omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR); | ||
1167 | clk_disable(dd->iclk); | ||
1168 | |||
1169 | spin_lock(&sham.lock); | ||
1170 | list_add_tail(&dd->list, &sham.dev_list); | ||
1171 | spin_unlock(&sham.lock); | ||
1172 | |||
1173 | for (i = 0; i < ARRAY_SIZE(algs); i++) { | ||
1174 | err = crypto_register_ahash(&algs[i]); | ||
1175 | if (err) | ||
1176 | goto err_algs; | ||
1177 | } | ||
1178 | |||
1179 | return 0; | ||
1180 | |||
1181 | err_algs: | ||
1182 | for (j = 0; j < i; j++) | ||
1183 | crypto_unregister_ahash(&algs[j]); | ||
1184 | iounmap(dd->io_base); | ||
1185 | io_err: | ||
1186 | clk_put(dd->iclk); | ||
1187 | clk_err: | ||
1188 | omap_sham_dma_cleanup(dd); | ||
1189 | dma_err: | ||
1190 | if (dd->irq >= 0) | ||
1191 | free_irq(dd->irq, dd); | ||
1192 | res_err: | ||
1193 | kfree(dd); | ||
1194 | dd = NULL; | ||
1195 | data_err: | ||
1196 | dev_err(dev, "initialization failed.\n"); | ||
1197 | |||
1198 | return err; | ||
1199 | } | ||
1200 | |||
1201 | static int __devexit omap_sham_remove(struct platform_device *pdev) | ||
1202 | { | ||
1203 | struct omap_sham_dev *dd; | ||
1204 | int i; | ||
1205 | |||
1206 | dd = platform_get_drvdata(pdev); | ||
1207 | if (!dd) | ||
1208 | return -ENODEV; | ||
1209 | spin_lock(&sham.lock); | ||
1210 | list_del(&dd->list); | ||
1211 | spin_unlock(&sham.lock); | ||
1212 | for (i = 0; i < ARRAY_SIZE(algs); i++) | ||
1213 | crypto_unregister_ahash(&algs[i]); | ||
1214 | tasklet_kill(&dd->done_task); | ||
1215 | tasklet_kill(&dd->queue_task); | ||
1216 | iounmap(dd->io_base); | ||
1217 | clk_put(dd->iclk); | ||
1218 | omap_sham_dma_cleanup(dd); | ||
1219 | if (dd->irq >= 0) | ||
1220 | free_irq(dd->irq, dd); | ||
1221 | kfree(dd); | ||
1222 | dd = NULL; | ||
1223 | |||
1224 | return 0; | ||
1225 | } | ||
1226 | |||
1227 | static struct platform_driver omap_sham_driver = { | ||
1228 | .probe = omap_sham_probe, | ||
1229 | .remove = omap_sham_remove, | ||
1230 | .driver = { | ||
1231 | .name = "omap-sham", | ||
1232 | .owner = THIS_MODULE, | ||
1233 | }, | ||
1234 | }; | ||
1235 | |||
1236 | static int __init omap_sham_mod_init(void) | ||
1237 | { | ||
1238 | pr_info("loading %s driver\n", "omap-sham"); | ||
1239 | |||
1240 | if (!cpu_class_is_omap2() || | ||
1241 | omap_type() != OMAP2_DEVICE_TYPE_SEC) { | ||
1242 | pr_err("Unsupported cpu\n"); | ||
1243 | return -ENODEV; | ||
1244 | } | ||
1245 | |||
1246 | return platform_driver_register(&omap_sham_driver); | ||
1247 | } | ||
1248 | |||
1249 | static void __exit omap_sham_mod_exit(void) | ||
1250 | { | ||
1251 | platform_driver_unregister(&omap_sham_driver); | ||
1252 | } | ||
1253 | |||
1254 | module_init(omap_sham_mod_init); | ||
1255 | module_exit(omap_sham_mod_exit); | ||
1256 | |||
1257 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); | ||
1258 | MODULE_LICENSE("GPL v2"); | ||
1259 | MODULE_AUTHOR("Dmitry Kasatkin"); | ||
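The algorithms registered above ("sha1", "md5", "hmac(sha1)", "hmac(md5)") are reached through the kernel's asynchronous hash (ahash) API. The sketch below is illustrative only and not part of this commit: example_sha1_digest() is an invented name, it assumes the standard crypto API of this kernel generation, and whether the omap-sham implementation is actually selected depends on its priority relative to other registered "sha1" providers. A production caller would also wait on a completion when an asynchronous provider returns -EINPROGRESS or -EBUSY.

	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <crypto/hash.h>

	/* Hash one linearly mapped buffer; 'out' must hold SHA1_DIGEST_SIZE bytes. */
	static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_ahash("sha1", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
		ahash_request_set_crypt(req, &sg, out, len);

		/* async providers may return -EINPROGRESS/-EBUSY here */
		err = crypto_ahash_digest(req);

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}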
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dc558a097311..637c105f53d2 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * talitos - Freescale Integrated Security Engine (SEC) device driver | 2 | * talitos - Freescale Integrated Security Engine (SEC) device driver |
3 | * | 3 | * |
4 | * Copyright (c) 2008 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2008-2010 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Scatterlist Crypto API glue code copied from files with the following: | 6 | * Scatterlist Crypto API glue code copied from files with the following: |
7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> | 7 | * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> |
@@ -43,9 +43,12 @@ | |||
43 | #include <crypto/aes.h> | 43 | #include <crypto/aes.h> |
44 | #include <crypto/des.h> | 44 | #include <crypto/des.h> |
45 | #include <crypto/sha.h> | 45 | #include <crypto/sha.h> |
46 | #include <crypto/md5.h> | ||
46 | #include <crypto/aead.h> | 47 | #include <crypto/aead.h> |
47 | #include <crypto/authenc.h> | 48 | #include <crypto/authenc.h> |
48 | #include <crypto/skcipher.h> | 49 | #include <crypto/skcipher.h> |
50 | #include <crypto/hash.h> | ||
51 | #include <crypto/internal/hash.h> | ||
49 | #include <crypto/scatterwalk.h> | 52 | #include <crypto/scatterwalk.h> |
50 | 53 | ||
51 | #include "talitos.h" | 54 | #include "talitos.h" |
@@ -65,6 +68,13 @@ struct talitos_ptr { | |||
65 | __be32 ptr; /* address */ | 68 | __be32 ptr; /* address */ |
66 | }; | 69 | }; |
67 | 70 | ||
71 | static const struct talitos_ptr zero_entry = { | ||
72 | .len = 0, | ||
73 | .j_extent = 0, | ||
74 | .eptr = 0, | ||
75 | .ptr = 0 | ||
76 | }; | ||
77 | |||
68 | /* descriptor */ | 78 | /* descriptor */ |
69 | struct talitos_desc { | 79 | struct talitos_desc { |
70 | __be32 hdr; /* header high bits */ | 80 | __be32 hdr; /* header high bits */ |
@@ -146,6 +156,7 @@ struct talitos_private { | |||
146 | /* .features flag */ | 156 | /* .features flag */ |
147 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | 157 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
148 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | 158 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
159 | #define TALITOS_FTR_SHA224_HWINIT 0x00000004 | ||
149 | 160 | ||
150 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) | 161 | static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) |
151 | { | 162 | { |
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
692 | #define TALITOS_MAX_KEY_SIZE 64 | 703 | #define TALITOS_MAX_KEY_SIZE 64 |
693 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 704 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
694 | 705 | ||
695 | #define MD5_DIGEST_SIZE 16 | 706 | #define MD5_BLOCK_SIZE 64 |
696 | 707 | ||
697 | struct talitos_ctx { | 708 | struct talitos_ctx { |
698 | struct device *dev; | 709 | struct device *dev; |
@@ -705,6 +716,23 @@ struct talitos_ctx { | |||
705 | unsigned int authsize; | 716 | unsigned int authsize; |
706 | }; | 717 | }; |
707 | 718 | ||
719 | #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE | ||
720 | #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 | ||
721 | |||
722 | struct talitos_ahash_req_ctx { | ||
723 | u64 count; | ||
724 | u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; | ||
725 | unsigned int hw_context_size; | ||
726 | u8 buf[HASH_MAX_BLOCK_SIZE]; | ||
727 | u8 bufnext[HASH_MAX_BLOCK_SIZE]; | ||
728 | unsigned int swinit; | ||
729 | unsigned int first; | ||
730 | unsigned int last; | ||
731 | unsigned int to_hash_later; | ||
732 | struct scatterlist bufsl[2]; | ||
733 | struct scatterlist *psrc; | ||
734 | }; | ||
735 | |||
708 | static int aead_setauthsize(struct crypto_aead *authenc, | 736 | static int aead_setauthsize(struct crypto_aead *authenc, |
709 | unsigned int authsize) | 737 | unsigned int authsize) |
710 | { | 738 | { |
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev, | |||
821 | else | 849 | else |
822 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); | 850 | dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); |
823 | 851 | ||
824 | if (edesc->dst_is_chained) | 852 | if (dst) { |
825 | talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE); | 853 | if (edesc->dst_is_chained) |
826 | else | 854 | talitos_unmap_sg_chain(dev, dst, |
827 | dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); | 855 | DMA_FROM_DEVICE); |
856 | else | ||
857 | dma_unmap_sg(dev, dst, dst_nents, | ||
858 | DMA_FROM_DEVICE); | ||
859 | } | ||
828 | } else | 860 | } else |
829 | if (edesc->src_is_chained) | 861 | if (edesc->src_is_chained) |
830 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); | 862 | talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL); |
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | |||
1114 | return sg_nents; | 1146 | return sg_nents; |
1115 | } | 1147 | } |
1116 | 1148 | ||
1149 | /** | ||
1150 | * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer | ||
1151 | * @sgl: The SG list | ||
1152 | * @nents: Number of SG entries | ||
1153 | * @buf: Where to copy to | ||
1154 | * @buflen: The number of bytes to copy | ||
1155 | * @skip: The number of bytes to skip before copying. | ||
1156 | * Note: skip + buflen should equal SG total size. | ||
1157 | * | ||
1158 | * Returns the number of copied bytes. | ||
1159 | * | ||
1160 | **/ | ||
1161 | static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
1162 | void *buf, size_t buflen, unsigned int skip) | ||
1163 | { | ||
1164 | unsigned int offset = 0; | ||
1165 | unsigned int boffset = 0; | ||
1166 | struct sg_mapping_iter miter; | ||
1167 | unsigned long flags; | ||
1168 | unsigned int sg_flags = SG_MITER_ATOMIC; | ||
1169 | size_t total_buffer = buflen + skip; | ||
1170 | |||
1171 | sg_flags |= SG_MITER_FROM_SG; | ||
1172 | |||
1173 | sg_miter_start(&miter, sgl, nents, sg_flags); | ||
1174 | |||
1175 | local_irq_save(flags); | ||
1176 | |||
1177 | while (sg_miter_next(&miter) && offset < total_buffer) { | ||
1178 | unsigned int len; | ||
1179 | unsigned int ignore; | ||
1180 | |||
1181 | if ((offset + miter.length) > skip) { | ||
1182 | if (offset < skip) { | ||
1183 | /* Copy part of this segment */ | ||
1184 | ignore = skip - offset; | ||
1185 | len = miter.length - ignore; | ||
1186 | memcpy(buf + boffset, miter.addr + ignore, len); | ||
1187 | } else { | ||
1188 | /* Copy all of this segment */ | ||
1189 | len = miter.length; | ||
1190 | memcpy(buf + boffset, miter.addr, len); | ||
1191 | } | ||
1192 | boffset += len; | ||
1193 | } | ||
1194 | offset += miter.length; | ||
1195 | } | ||
1196 | |||
1197 | sg_miter_stop(&miter); | ||
1198 | |||
1199 | local_irq_restore(flags); | ||
1200 | return boffset; | ||
1201 | } | ||
1202 | |||
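To make the skip/buflen contract above concrete (skip + buflen must equal the scatterlist's total length), here is a hypothetical caller, invented for illustration, that keeps only the tail of a two-segment, 24-byte list:

	u8 seg0[16], seg1[8], tail[8];
	struct scatterlist sgl[2];

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], seg0, sizeof(seg0));
	sg_set_buf(&sgl[1], seg1, sizeof(seg1));

	/* total = 24 bytes: skip the first 16, copy the last 8 (all of seg1) */
	sg_copy_end_to_buffer(sgl, 2, tail, sizeof(tail), 16);

ahash_process_req() below uses the same pattern to stash the bytes of a trailing partial block in bufnext.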
1117 | /* | 1203 | /* |
1118 | * allocate and map the extended descriptor | 1204 | * allocate and map the extended descriptor |
1119 | */ | 1205 | */ |
1120 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | 1206 | static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1121 | struct scatterlist *src, | 1207 | struct scatterlist *src, |
1122 | struct scatterlist *dst, | 1208 | struct scatterlist *dst, |
1209 | int hash_result, | ||
1123 | unsigned int cryptlen, | 1210 | unsigned int cryptlen, |
1124 | unsigned int authsize, | 1211 | unsigned int authsize, |
1125 | int icv_stashing, | 1212 | int icv_stashing, |
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1139 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); | 1226 | src_nents = sg_count(src, cryptlen + authsize, &src_chained); |
1140 | src_nents = (src_nents == 1) ? 0 : src_nents; | 1227 | src_nents = (src_nents == 1) ? 0 : src_nents; |
1141 | 1228 | ||
1142 | if (dst == src) { | 1229 | if (hash_result) { |
1143 | dst_nents = src_nents; | 1230 | dst_nents = 0; |
1144 | } else { | 1231 | } else { |
1145 | dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained); | 1232 | if (dst == src) { |
1146 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | 1233 | dst_nents = src_nents; |
1234 | } else { | ||
1235 | dst_nents = sg_count(dst, cryptlen + authsize, | ||
1236 | &dst_chained); | ||
1237 | dst_nents = (dst_nents == 1) ? 0 : dst_nents; | ||
1238 | } | ||
1147 | } | 1239 | } |
1148 | 1240 | ||
1149 | /* | 1241 | /* |
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1172 | edesc->src_is_chained = src_chained; | 1264 | edesc->src_is_chained = src_chained; |
1173 | edesc->dst_is_chained = dst_chained; | 1265 | edesc->dst_is_chained = dst_chained; |
1174 | edesc->dma_len = dma_len; | 1266 | edesc->dma_len = dma_len; |
1175 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], | 1267 | if (dma_len) |
1176 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1268 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], |
1269 | edesc->dma_len, | ||
1270 | DMA_BIDIRECTIONAL); | ||
1177 | 1271 | ||
1178 | return edesc; | 1272 | return edesc; |
1179 | } | 1273 | } |
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, | |||
1184 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1278 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1185 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1279 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1186 | 1280 | ||
1187 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, | 1281 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, |
1188 | areq->cryptlen, ctx->authsize, icv_stashing, | 1282 | areq->cryptlen, ctx->authsize, icv_stashing, |
1189 | areq->base.flags); | 1283 | areq->base.flags); |
1190 | } | 1284 | } |
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * | |||
1441 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | 1535 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
1442 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1536 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1443 | 1537 | ||
1444 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes, | 1538 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0, |
1445 | 0, 0, areq->base.flags); | 1539 | areq->nbytes, 0, 0, areq->base.flags); |
1446 | } | 1540 | } |
1447 | 1541 | ||
1448 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) | 1542 | static int ablkcipher_encrypt(struct ablkcipher_request *areq) |
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) | |||
1478 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); | 1572 | return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); |
1479 | } | 1573 | } |
1480 | 1574 | ||
1575 | static void common_nonsnoop_hash_unmap(struct device *dev, | ||
1576 | struct talitos_edesc *edesc, | ||
1577 | struct ahash_request *areq) | ||
1578 | { | ||
1579 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1580 | |||
1581 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | ||
1582 | |||
1583 | /* When using hashctx-in, must unmap it. */ | ||
1584 | if (edesc->desc.ptr[1].len) | ||
1585 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], | ||
1586 | DMA_TO_DEVICE); | ||
1587 | |||
1588 | if (edesc->desc.ptr[2].len) | ||
1589 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], | ||
1590 | DMA_TO_DEVICE); | ||
1591 | |||
1592 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); | ||
1593 | |||
1594 | if (edesc->dma_len) | ||
1595 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | ||
1596 | DMA_BIDIRECTIONAL); | ||
1597 | |||
1598 | } | ||
1599 | |||
1600 | static void ahash_done(struct device *dev, | ||
1601 | struct talitos_desc *desc, void *context, | ||
1602 | int err) | ||
1603 | { | ||
1604 | struct ahash_request *areq = context; | ||
1605 | struct talitos_edesc *edesc = | ||
1606 | container_of(desc, struct talitos_edesc, desc); | ||
1607 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1608 | |||
1609 | if (!req_ctx->last && req_ctx->to_hash_later) { | ||
1610 | /* Position any partial block for next update/final/finup */ | ||
1611 | memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); | ||
1612 | } | ||
1613 | common_nonsnoop_hash_unmap(dev, edesc, areq); | ||
1614 | |||
1615 | kfree(edesc); | ||
1616 | |||
1617 | areq->base.complete(&areq->base, err); | ||
1618 | } | ||
1619 | |||
1620 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
1621 | struct ahash_request *areq, unsigned int length, | ||
1622 | void (*callback) (struct device *dev, | ||
1623 | struct talitos_desc *desc, | ||
1624 | void *context, int error)) | ||
1625 | { | ||
1626 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1627 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1628 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1629 | struct device *dev = ctx->dev; | ||
1630 | struct talitos_desc *desc = &edesc->desc; | ||
1631 | int sg_count, ret; | ||
1632 | |||
1633 | /* first DWORD empty */ | ||
1634 | desc->ptr[0] = zero_entry; | ||
1635 | |||
1636 | /* hash context in */ | ||
1637 | if (!req_ctx->first || req_ctx->swinit) { | ||
1638 | map_single_talitos_ptr(dev, &desc->ptr[1], | ||
1639 | req_ctx->hw_context_size, | ||
1640 | (char *)req_ctx->hw_context, 0, | ||
1641 | DMA_TO_DEVICE); | ||
1642 | req_ctx->swinit = 0; | ||
1643 | } else { | ||
1644 | desc->ptr[1] = zero_entry; | ||
1645 | /* Indicate next op is not the first. */ | ||
1646 | req_ctx->first = 0; | ||
1647 | } | ||
1648 | |||
1649 | /* HMAC key */ | ||
1650 | if (ctx->keylen) | ||
1651 | map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, | ||
1652 | (char *)&ctx->key, 0, DMA_TO_DEVICE); | ||
1653 | else | ||
1654 | desc->ptr[2] = zero_entry; | ||
1655 | |||
1656 | /* | ||
1657 | * data in | ||
1658 | */ | ||
1659 | desc->ptr[3].len = cpu_to_be16(length); | ||
1660 | desc->ptr[3].j_extent = 0; | ||
1661 | |||
1662 | sg_count = talitos_map_sg(dev, req_ctx->psrc, | ||
1663 | edesc->src_nents ? : 1, | ||
1664 | DMA_TO_DEVICE, | ||
1665 | edesc->src_is_chained); | ||
1666 | |||
1667 | if (sg_count == 1) { | ||
1668 | to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); | ||
1669 | } else { | ||
1670 | sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, | ||
1671 | &edesc->link_tbl[0]); | ||
1672 | if (sg_count > 1) { | ||
1673 | desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; | ||
1674 | to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); | ||
1675 | dma_sync_single_for_device(ctx->dev, | ||
1676 | edesc->dma_link_tbl, | ||
1677 | edesc->dma_len, | ||
1678 | DMA_BIDIRECTIONAL); | ||
1679 | } else { | ||
1680 | /* Only one segment now, so no link tbl needed */ | ||
1681 | to_talitos_ptr(&desc->ptr[3], | ||
1682 | sg_dma_address(req_ctx->psrc)); | ||
1683 | } | ||
1684 | } | ||
1685 | |||
1686 | /* fifth DWORD empty */ | ||
1687 | desc->ptr[4] = zero_entry; | ||
1688 | |||
1689 | /* hash/HMAC out -or- hash context out */ | ||
1690 | if (req_ctx->last) | ||
1691 | map_single_talitos_ptr(dev, &desc->ptr[5], | ||
1692 | crypto_ahash_digestsize(tfm), | ||
1693 | areq->result, 0, DMA_FROM_DEVICE); | ||
1694 | else | ||
1695 | map_single_talitos_ptr(dev, &desc->ptr[5], | ||
1696 | req_ctx->hw_context_size, | ||
1697 | req_ctx->hw_context, 0, DMA_FROM_DEVICE); | ||
1698 | |||
1699 | /* last DWORD empty */ | ||
1700 | desc->ptr[6] = zero_entry; | ||
1701 | |||
1702 | ret = talitos_submit(dev, desc, callback, areq); | ||
1703 | if (ret != -EINPROGRESS) { | ||
1704 | common_nonsnoop_hash_unmap(dev, edesc, areq); | ||
1705 | kfree(edesc); | ||
1706 | } | ||
1707 | return ret; | ||
1708 | } | ||
1709 | |||
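As a reading aid (derived from the code above, not from SEC hardware documentation), the seven descriptor pointers that common_nonsnoop_hash() fills in are:

	/*
	 * ptr[0]  unused (zero_entry)
	 * ptr[1]  hash context in; zero_entry on the first op unless swinit is
	 *         set, in which case the software-initialized context is loaded
	 * ptr[2]  HMAC key, only when ctx->keylen is non-zero
	 * ptr[3]  data in, either a direct pointer or a link table when the
	 *         source spans more than one segment
	 * ptr[4]  unused (zero_entry)
	 * ptr[5]  digest out on the last op, otherwise hash context out
	 * ptr[6]  unused (zero_entry)
	 */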
1710 | static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, | ||
1711 | unsigned int nbytes) | ||
1712 | { | ||
1713 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1714 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1715 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1716 | |||
1717 | return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1, | ||
1718 | nbytes, 0, 0, areq->base.flags); | ||
1719 | } | ||
1720 | |||
1721 | static int ahash_init(struct ahash_request *areq) | ||
1722 | { | ||
1723 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1724 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1725 | |||
1726 | /* Initialize the context */ | ||
1727 | req_ctx->count = 0; | ||
1728 | req_ctx->first = 1; /* first indicates h/w must init its context */ | ||
1729 | req_ctx->swinit = 0; /* assume h/w init of context */ | ||
1730 | req_ctx->hw_context_size = | ||
1731 | (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) | ||
1732 | ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 | ||
1733 | : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; | ||
1734 | |||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
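The two context sizes selected above come from the constants this patch adds to talitos.h; the breakdown below is a reading aid only, inferred from how hw_context[] is used in this file:

	/*
	 * TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 = 0x28 (40 bytes)
	 *     = eight 32-bit state words + a 64-bit message length
	 * TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512   = 0x48 (72 bytes)
	 *     = eight 64-bit state words + a 64-bit message length
	 */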
1738 | /* | ||
1739 | * on h/w without explicit sha224 support, we initialize h/w context | ||
1740 | * manually with sha224 constants, and tell it to run sha256. | ||
1741 | */ | ||
1742 | static int ahash_init_sha224_swinit(struct ahash_request *areq) | ||
1743 | { | ||
1744 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1745 | |||
1746 | ahash_init(areq); | ||
1747 | req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ | ||
1748 | |||
1749 | req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0); | ||
1750 | req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1); | ||
1751 | req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2); | ||
1752 | req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3); | ||
1753 | req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4); | ||
1754 | req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5); | ||
1755 | req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6); | ||
1756 | req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7); | ||
1757 | |||
1758 | /* init 64-bit count */ | ||
1759 | req_ctx->hw_context[8] = 0; | ||
1760 | req_ctx->hw_context[9] = 0; | ||
1761 | |||
1762 | return 0; | ||
1763 | } | ||
1764 | |||
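For reference, the SHA224_H0..SHA224_H7 constants used above are the standard SHA-224 initial hash values provided by the kernel's SHA headers (values per the SHA-224 specification):

	/*
	 * SHA224_H0 = 0xc1059ed8    SHA224_H4 = 0xffc00b31
	 * SHA224_H1 = 0x367cd507    SHA224_H5 = 0x68581511
	 * SHA224_H2 = 0x3070dd17    SHA224_H6 = 0x64f98fa7
	 * SHA224_H3 = 0xf70e5939    SHA224_H7 = 0xbefa4fa4
	 */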
1765 | static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | ||
1766 | { | ||
1767 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | ||
1768 | struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); | ||
1769 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1770 | struct talitos_edesc *edesc; | ||
1771 | unsigned int blocksize = | ||
1772 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
1773 | unsigned int nbytes_to_hash; | ||
1774 | unsigned int to_hash_later; | ||
1775 | unsigned int index; | ||
1776 | int chained; | ||
1777 | |||
1778 | index = req_ctx->count & (blocksize - 1); | ||
1779 | req_ctx->count += nbytes; | ||
1780 | |||
1781 | if (!req_ctx->last && (index + nbytes) < blocksize) { | ||
1782 | /* Buffer the partial block */ | ||
1783 | sg_copy_to_buffer(areq->src, | ||
1784 | sg_count(areq->src, nbytes, &chained), | ||
1785 | req_ctx->buf + index, nbytes); | ||
1786 | return 0; | ||
1787 | } | ||
1788 | |||
1789 | if (index) { | ||
1790 | /* partial block from previous update; chain it in. */ | ||
1791 | sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1); | ||
1792 | sg_set_buf(req_ctx->bufsl, req_ctx->buf, index); | ||
1793 | if (nbytes) | ||
1794 | scatterwalk_sg_chain(req_ctx->bufsl, 2, | ||
1795 | areq->src); | ||
1796 | req_ctx->psrc = req_ctx->bufsl; | ||
1797 | } else { | ||
1798 | req_ctx->psrc = areq->src; | ||
1799 | } | ||
1800 | nbytes_to_hash = index + nbytes; | ||
1801 | if (!req_ctx->last) { | ||
1802 | to_hash_later = (nbytes_to_hash & (blocksize - 1)); | ||
1803 | if (to_hash_later) { | ||
1804 | int nents; | ||
1805 | /* Must copy to_hash_later bytes from the end | ||
1806 | * to bufnext (a partial block) for later. | ||
1807 | */ | ||
1808 | nents = sg_count(areq->src, nbytes, &chained); | ||
1809 | sg_copy_end_to_buffer(areq->src, nents, | ||
1810 | req_ctx->bufnext, | ||
1811 | to_hash_later, | ||
1812 | nbytes - to_hash_later); | ||
1813 | |||
1814 | /* Adjust count for what will be hashed now */ | ||
1815 | nbytes_to_hash -= to_hash_later; | ||
1816 | } | ||
1817 | req_ctx->to_hash_later = to_hash_later; | ||
1818 | } | ||
1819 | |||
1820 | /* allocate extended descriptor */ | ||
1821 | edesc = ahash_edesc_alloc(areq, nbytes_to_hash); | ||
1822 | if (IS_ERR(edesc)) | ||
1823 | return PTR_ERR(edesc); | ||
1824 | |||
1825 | edesc->desc.hdr = ctx->desc_hdr_template; | ||
1826 | |||
1827 | /* On last one, request SEC to pad; otherwise continue */ | ||
1828 | if (req_ctx->last) | ||
1829 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; | ||
1830 | else | ||
1831 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; | ||
1832 | |||
1833 | /* request SEC to INIT hash. */ | ||
1834 | if (req_ctx->first && !req_ctx->swinit) | ||
1835 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; | ||
1836 | |||
1837 | /* When the tfm context has a keylen, it's an HMAC. | ||
1838 | * A first or last (i.e. not middle) descriptor must request HMAC. | ||
1839 | */ | ||
1840 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) | ||
1841 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; | ||
1842 | |||
1843 | return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, | ||
1844 | ahash_done); | ||
1845 | } | ||
1846 | |||
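A worked example of the bookkeeping above, assuming a 64-byte block size with 40 bytes already buffered (index = 40) when a 100-byte update arrives:

	/*
	 * nbytes_to_hash = index + nbytes      = 40 + 100 = 140
	 * to_hash_later  = nbytes_to_hash & 63 = 12
	 * hashed now     = 140 - 12            = 128 (two full blocks)
	 *
	 * The trailing 12 bytes are saved to bufnext via sg_copy_end_to_buffer()
	 * and copied back into buf by ahash_done() for the next update/final/finup.
	 */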
1847 | static int ahash_update(struct ahash_request *areq) | ||
1848 | { | ||
1849 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1850 | |||
1851 | req_ctx->last = 0; | ||
1852 | |||
1853 | return ahash_process_req(areq, areq->nbytes); | ||
1854 | } | ||
1855 | |||
1856 | static int ahash_final(struct ahash_request *areq) | ||
1857 | { | ||
1858 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1859 | |||
1860 | req_ctx->last = 1; | ||
1861 | |||
1862 | return ahash_process_req(areq, 0); | ||
1863 | } | ||
1864 | |||
1865 | static int ahash_finup(struct ahash_request *areq) | ||
1866 | { | ||
1867 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1868 | |||
1869 | req_ctx->last = 1; | ||
1870 | |||
1871 | return ahash_process_req(areq, areq->nbytes); | ||
1872 | } | ||
1873 | |||
1874 | static int ahash_digest(struct ahash_request *areq) | ||
1875 | { | ||
1876 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | ||
1877 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
1878 | |||
1879 | ahash->init(areq); | ||
1880 | req_ctx->last = 1; | ||
1881 | |||
1882 | return ahash_process_req(areq, areq->nbytes); | ||
1883 | } | ||
1884 | |||
1481 | struct talitos_alg_template { | 1885 | struct talitos_alg_template { |
1482 | struct crypto_alg alg; | 1886 | u32 type; |
1887 | union { | ||
1888 | struct crypto_alg crypto; | ||
1889 | struct ahash_alg hash; | ||
1890 | } alg; | ||
1483 | __be32 desc_hdr_template; | 1891 | __be32 desc_hdr_template; |
1484 | }; | 1892 | }; |
1485 | 1893 | ||
1486 | static struct talitos_alg_template driver_algs[] = { | 1894 | static struct talitos_alg_template driver_algs[] = { |
1487 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ | 1895 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ |
1488 | { | 1896 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1489 | .alg = { | 1897 | .alg.crypto = { |
1490 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1898 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
1491 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", | 1899 | .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos", |
1492 | .cra_blocksize = AES_BLOCK_SIZE, | 1900 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1511 | DESC_HDR_MODE1_MDEU_PAD | | 1919 | DESC_HDR_MODE1_MDEU_PAD | |
1512 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1920 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1513 | }, | 1921 | }, |
1514 | { | 1922 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1515 | .alg = { | 1923 | .alg.crypto = { |
1516 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | 1924 | .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", |
1517 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", | 1925 | .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos", |
1518 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1926 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1538 | DESC_HDR_MODE1_MDEU_PAD | | 1946 | DESC_HDR_MODE1_MDEU_PAD | |
1539 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, | 1947 | DESC_HDR_MODE1_MDEU_SHA1_HMAC, |
1540 | }, | 1948 | }, |
1541 | { | 1949 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1542 | .alg = { | 1950 | .alg.crypto = { |
1543 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 1951 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
1544 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", | 1952 | .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos", |
1545 | .cra_blocksize = AES_BLOCK_SIZE, | 1953 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1564 | DESC_HDR_MODE1_MDEU_PAD | | 1972 | DESC_HDR_MODE1_MDEU_PAD | |
1565 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 1973 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1566 | }, | 1974 | }, |
1567 | { | 1975 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1568 | .alg = { | 1976 | .alg.crypto = { |
1569 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | 1977 | .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", |
1570 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", | 1978 | .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos", |
1571 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 1979 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1591 | DESC_HDR_MODE1_MDEU_PAD | | 1999 | DESC_HDR_MODE1_MDEU_PAD | |
1592 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, | 2000 | DESC_HDR_MODE1_MDEU_SHA256_HMAC, |
1593 | }, | 2001 | }, |
1594 | { | 2002 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1595 | .alg = { | 2003 | .alg.crypto = { |
1596 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 2004 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
1597 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", | 2005 | .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos", |
1598 | .cra_blocksize = AES_BLOCK_SIZE, | 2006 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1617 | DESC_HDR_MODE1_MDEU_PAD | | 2025 | DESC_HDR_MODE1_MDEU_PAD | |
1618 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 2026 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1619 | }, | 2027 | }, |
1620 | { | 2028 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1621 | .alg = { | 2029 | .alg.crypto = { |
1622 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 2030 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
1623 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", | 2031 | .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos", |
1624 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2032 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1645 | DESC_HDR_MODE1_MDEU_MD5_HMAC, | 2053 | DESC_HDR_MODE1_MDEU_MD5_HMAC, |
1646 | }, | 2054 | }, |
1647 | /* ABLKCIPHER algorithms. */ | 2055 | /* ABLKCIPHER algorithms. */ |
1648 | { | 2056 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
1649 | .alg = { | 2057 | .alg.crypto = { |
1650 | .cra_name = "cbc(aes)", | 2058 | .cra_name = "cbc(aes)", |
1651 | .cra_driver_name = "cbc-aes-talitos", | 2059 | .cra_driver_name = "cbc-aes-talitos", |
1652 | .cra_blocksize = AES_BLOCK_SIZE, | 2060 | .cra_blocksize = AES_BLOCK_SIZE, |
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = { | |||
1667 | DESC_HDR_SEL0_AESU | | 2075 | DESC_HDR_SEL0_AESU | |
1668 | DESC_HDR_MODE0_AESU_CBC, | 2076 | DESC_HDR_MODE0_AESU_CBC, |
1669 | }, | 2077 | }, |
1670 | { | 2078 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, |
1671 | .alg = { | 2079 | .alg.crypto = { |
1672 | .cra_name = "cbc(des3_ede)", | 2080 | .cra_name = "cbc(des3_ede)", |
1673 | .cra_driver_name = "cbc-3des-talitos", | 2081 | .cra_driver_name = "cbc-3des-talitos", |
1674 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2082 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = { | |||
1689 | DESC_HDR_SEL0_DEU | | 2097 | DESC_HDR_SEL0_DEU | |
1690 | DESC_HDR_MODE0_DEU_CBC | | 2098 | DESC_HDR_MODE0_DEU_CBC | |
1691 | DESC_HDR_MODE0_DEU_3DES, | 2099 | DESC_HDR_MODE0_DEU_3DES, |
1692 | } | 2100 | }, |
2101 | /* AHASH algorithms. */ | ||
2102 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2103 | .alg.hash = { | ||
2104 | .init = ahash_init, | ||
2105 | .update = ahash_update, | ||
2106 | .final = ahash_final, | ||
2107 | .finup = ahash_finup, | ||
2108 | .digest = ahash_digest, | ||
2109 | .halg.digestsize = MD5_DIGEST_SIZE, | ||
2110 | .halg.base = { | ||
2111 | .cra_name = "md5", | ||
2112 | .cra_driver_name = "md5-talitos", | ||
2113 | .cra_blocksize = MD5_BLOCK_SIZE, | ||
2114 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2115 | CRYPTO_ALG_ASYNC, | ||
2116 | .cra_type = &crypto_ahash_type | ||
2117 | } | ||
2118 | }, | ||
2119 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2120 | DESC_HDR_SEL0_MDEUA | | ||
2121 | DESC_HDR_MODE0_MDEU_MD5, | ||
2122 | }, | ||
2123 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2124 | .alg.hash = { | ||
2125 | .init = ahash_init, | ||
2126 | .update = ahash_update, | ||
2127 | .final = ahash_final, | ||
2128 | .finup = ahash_finup, | ||
2129 | .digest = ahash_digest, | ||
2130 | .halg.digestsize = SHA1_DIGEST_SIZE, | ||
2131 | .halg.base = { | ||
2132 | .cra_name = "sha1", | ||
2133 | .cra_driver_name = "sha1-talitos", | ||
2134 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
2135 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2136 | CRYPTO_ALG_ASYNC, | ||
2137 | .cra_type = &crypto_ahash_type | ||
2138 | } | ||
2139 | }, | ||
2140 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2141 | DESC_HDR_SEL0_MDEUA | | ||
2142 | DESC_HDR_MODE0_MDEU_SHA1, | ||
2143 | }, | ||
2144 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2145 | .alg.hash = { | ||
2146 | .init = ahash_init, | ||
2147 | .update = ahash_update, | ||
2148 | .final = ahash_final, | ||
2149 | .finup = ahash_finup, | ||
2150 | .digest = ahash_digest, | ||
2151 | .halg.digestsize = SHA224_DIGEST_SIZE, | ||
2152 | .halg.base = { | ||
2153 | .cra_name = "sha224", | ||
2154 | .cra_driver_name = "sha224-talitos", | ||
2155 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
2156 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2157 | CRYPTO_ALG_ASYNC, | ||
2158 | .cra_type = &crypto_ahash_type | ||
2159 | } | ||
2160 | }, | ||
2161 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2162 | DESC_HDR_SEL0_MDEUA | | ||
2163 | DESC_HDR_MODE0_MDEU_SHA224, | ||
2164 | }, | ||
2165 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2166 | .alg.hash = { | ||
2167 | .init = ahash_init, | ||
2168 | .update = ahash_update, | ||
2169 | .final = ahash_final, | ||
2170 | .finup = ahash_finup, | ||
2171 | .digest = ahash_digest, | ||
2172 | .halg.digestsize = SHA256_DIGEST_SIZE, | ||
2173 | .halg.base = { | ||
2174 | .cra_name = "sha256", | ||
2175 | .cra_driver_name = "sha256-talitos", | ||
2176 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
2177 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2178 | CRYPTO_ALG_ASYNC, | ||
2179 | .cra_type = &crypto_ahash_type | ||
2180 | } | ||
2181 | }, | ||
2182 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2183 | DESC_HDR_SEL0_MDEUA | | ||
2184 | DESC_HDR_MODE0_MDEU_SHA256, | ||
2185 | }, | ||
2186 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2187 | .alg.hash = { | ||
2188 | .init = ahash_init, | ||
2189 | .update = ahash_update, | ||
2190 | .final = ahash_final, | ||
2191 | .finup = ahash_finup, | ||
2192 | .digest = ahash_digest, | ||
2193 | .halg.digestsize = SHA384_DIGEST_SIZE, | ||
2194 | .halg.base = { | ||
2195 | .cra_name = "sha384", | ||
2196 | .cra_driver_name = "sha384-talitos", | ||
2197 | .cra_blocksize = SHA384_BLOCK_SIZE, | ||
2198 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2199 | CRYPTO_ALG_ASYNC, | ||
2200 | .cra_type = &crypto_ahash_type | ||
2201 | } | ||
2202 | }, | ||
2203 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2204 | DESC_HDR_SEL0_MDEUB | | ||
2205 | DESC_HDR_MODE0_MDEUB_SHA384, | ||
2206 | }, | ||
2207 | { .type = CRYPTO_ALG_TYPE_AHASH, | ||
2208 | .alg.hash = { | ||
2209 | .init = ahash_init, | ||
2210 | .update = ahash_update, | ||
2211 | .final = ahash_final, | ||
2212 | .finup = ahash_finup, | ||
2213 | .digest = ahash_digest, | ||
2214 | .halg.digestsize = SHA512_DIGEST_SIZE, | ||
2215 | .halg.base = { | ||
2216 | .cra_name = "sha512", | ||
2217 | .cra_driver_name = "sha512-talitos", | ||
2218 | .cra_blocksize = SHA512_BLOCK_SIZE, | ||
2219 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | | ||
2220 | CRYPTO_ALG_ASYNC, | ||
2221 | .cra_type = &crypto_ahash_type | ||
2222 | } | ||
2223 | }, | ||
2224 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2225 | DESC_HDR_SEL0_MDEUB | | ||
2226 | DESC_HDR_MODE0_MDEUB_SHA512, | ||
2227 | }, | ||
1693 | }; | 2228 | }; |
1694 | 2229 | ||
1695 | struct talitos_crypto_alg { | 2230 | struct talitos_crypto_alg { |
1696 | struct list_head entry; | 2231 | struct list_head entry; |
1697 | struct device *dev; | 2232 | struct device *dev; |
1698 | __be32 desc_hdr_template; | 2233 | struct talitos_alg_template algt; |
1699 | struct crypto_alg crypto_alg; | ||
1700 | }; | 2234 | }; |
1701 | 2235 | ||
1702 | static int talitos_cra_init(struct crypto_tfm *tfm) | 2236 | static int talitos_cra_init(struct crypto_tfm *tfm) |
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
1705 | struct talitos_crypto_alg *talitos_alg; | 2239 | struct talitos_crypto_alg *talitos_alg; |
1706 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | 2240 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); |
1707 | 2241 | ||
1708 | talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg); | 2242 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) |
2243 | talitos_alg = container_of(__crypto_ahash_alg(alg), | ||
2244 | struct talitos_crypto_alg, | ||
2245 | algt.alg.hash); | ||
2246 | else | ||
2247 | talitos_alg = container_of(alg, struct talitos_crypto_alg, | ||
2248 | algt.alg.crypto); | ||
1709 | 2249 | ||
1710 | /* update context with ptr to dev */ | 2250 | /* update context with ptr to dev */ |
1711 | ctx->dev = talitos_alg->dev; | 2251 | ctx->dev = talitos_alg->dev; |
1712 | 2252 | ||
1713 | /* copy descriptor header template value */ | 2253 | /* copy descriptor header template value */ |
1714 | ctx->desc_hdr_template = talitos_alg->desc_hdr_template; | 2254 | ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; |
2255 | |||
2256 | return 0; | ||
2257 | } | ||
2258 | |||
2259 | static int talitos_cra_init_aead(struct crypto_tfm *tfm) | ||
2260 | { | ||
2261 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2262 | |||
2263 | talitos_cra_init(tfm); | ||
1715 | 2264 | ||
1716 | /* random first IV */ | 2265 | /* random first IV */ |
1717 | get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); | 2266 | get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH); |
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
1719 | return 0; | 2268 | return 0; |
1720 | } | 2269 | } |
1721 | 2270 | ||
2271 | static int talitos_cra_init_ahash(struct crypto_tfm *tfm) | ||
2272 | { | ||
2273 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2274 | |||
2275 | talitos_cra_init(tfm); | ||
2276 | |||
2277 | ctx->keylen = 0; | ||
2278 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
2279 | sizeof(struct talitos_ahash_req_ctx)); | ||
2280 | |||
2281 | return 0; | ||
2282 | } | ||
2283 | |||
1722 | /* | 2284 | /* |
1723 | * given the alg's descriptor header template, determine whether descriptor | 2285 | * given the alg's descriptor header template, determine whether descriptor |
1724 | * type and primary/secondary execution units required match the hw | 2286 | * type and primary/secondary execution units required match the hw |
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev) | |||
1747 | int i; | 2309 | int i; |
1748 | 2310 | ||
1749 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | 2311 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { |
1750 | crypto_unregister_alg(&t_alg->crypto_alg); | 2312 | switch (t_alg->algt.type) { |
2313 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2314 | case CRYPTO_ALG_TYPE_AEAD: | ||
2315 | crypto_unregister_alg(&t_alg->algt.alg.crypto); | ||
2316 | break; | ||
2317 | case CRYPTO_ALG_TYPE_AHASH: | ||
2318 | crypto_unregister_ahash(&t_alg->algt.alg.hash); | ||
2319 | break; | ||
2320 | } | ||
1751 | list_del(&t_alg->entry); | 2321 | list_del(&t_alg->entry); |
1752 | kfree(t_alg); | 2322 | kfree(t_alg); |
1753 | } | 2323 | } |
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
1781 | struct talitos_alg_template | 2351 | struct talitos_alg_template |
1782 | *template) | 2352 | *template) |
1783 | { | 2353 | { |
2354 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1784 | struct talitos_crypto_alg *t_alg; | 2355 | struct talitos_crypto_alg *t_alg; |
1785 | struct crypto_alg *alg; | 2356 | struct crypto_alg *alg; |
1786 | 2357 | ||
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
1788 | if (!t_alg) | 2359 | if (!t_alg) |
1789 | return ERR_PTR(-ENOMEM); | 2360 | return ERR_PTR(-ENOMEM); |
1790 | 2361 | ||
1791 | alg = &t_alg->crypto_alg; | 2362 | t_alg->algt = *template; |
1792 | *alg = template->alg; | 2363 | |
2364 | switch (t_alg->algt.type) { | ||
2365 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2366 | alg = &t_alg->algt.alg.crypto; | ||
2367 | alg->cra_init = talitos_cra_init; | ||
2368 | break; | ||
2369 | case CRYPTO_ALG_TYPE_AEAD: | ||
2370 | alg = &t_alg->algt.alg.crypto; | ||
2371 | alg->cra_init = talitos_cra_init_aead; | ||
2372 | break; | ||
2373 | case CRYPTO_ALG_TYPE_AHASH: | ||
2374 | alg = &t_alg->algt.alg.hash.halg.base; | ||
2375 | alg->cra_init = talitos_cra_init_ahash; | ||
2376 | if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && | ||
2377 | !strcmp(alg->cra_name, "sha224")) { | ||
2378 | t_alg->algt.alg.hash.init = ahash_init_sha224_swinit; | ||
2379 | t_alg->algt.desc_hdr_template = | ||
2380 | DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | ||
2381 | DESC_HDR_SEL0_MDEUA | | ||
2382 | DESC_HDR_MODE0_MDEU_SHA256; | ||
2383 | } | ||
2384 | break; | ||
2385 | } | ||
1793 | 2386 | ||
1794 | alg->cra_module = THIS_MODULE; | 2387 | alg->cra_module = THIS_MODULE; |
1795 | alg->cra_init = talitos_cra_init; | ||
1796 | alg->cra_priority = TALITOS_CRA_PRIORITY; | 2388 | alg->cra_priority = TALITOS_CRA_PRIORITY; |
1797 | alg->cra_alignmask = 0; | 2389 | alg->cra_alignmask = 0; |
1798 | alg->cra_ctxsize = sizeof(struct talitos_ctx); | 2390 | alg->cra_ctxsize = sizeof(struct talitos_ctx); |
1799 | 2391 | ||
1800 | t_alg->desc_hdr_template = template->desc_hdr_template; | ||
1801 | t_alg->dev = dev; | 2392 | t_alg->dev = dev; |
1802 | 2393 | ||
1803 | return t_alg; | 2394 | return t_alg; |
@@ -1807,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev, | |||
1807 | const struct of_device_id *match) | 2398 | const struct of_device_id *match) |
1808 | { | 2399 | { |
1809 | struct device *dev = &ofdev->dev; | 2400 | struct device *dev = &ofdev->dev; |
1810 | struct device_node *np = ofdev->node; | 2401 | struct device_node *np = ofdev->dev.of_node; |
1811 | struct talitos_private *priv; | 2402 | struct talitos_private *priv; |
1812 | const unsigned int *prop; | 2403 | const unsigned int *prop; |
1813 | int i, err; | 2404 | int i, err; |
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev, | |||
1877 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; | 2468 | priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; |
1878 | 2469 | ||
1879 | if (of_device_is_compatible(np, "fsl,sec2.1")) | 2470 | if (of_device_is_compatible(np, "fsl,sec2.1")) |
1880 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK; | 2471 | priv->features |= TALITOS_FTR_HW_AUTH_CHECK | |
2472 | TALITOS_FTR_SHA224_HWINIT; | ||
1881 | 2473 | ||
1882 | priv->chan = kzalloc(sizeof(struct talitos_channel) * | 2474 | priv->chan = kzalloc(sizeof(struct talitos_channel) * |
1883 | priv->num_channels, GFP_KERNEL); | 2475 | priv->num_channels, GFP_KERNEL); |
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev, | |||
1931 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2523 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
1932 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { | 2524 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { |
1933 | struct talitos_crypto_alg *t_alg; | 2525 | struct talitos_crypto_alg *t_alg; |
2526 | char *name = NULL; | ||
1934 | 2527 | ||
1935 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); | 2528 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); |
1936 | if (IS_ERR(t_alg)) { | 2529 | if (IS_ERR(t_alg)) { |
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev, | |||
1938 | goto err_out; | 2531 | goto err_out; |
1939 | } | 2532 | } |
1940 | 2533 | ||
1941 | err = crypto_register_alg(&t_alg->crypto_alg); | 2534 | switch (t_alg->algt.type) { |
2535 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | ||
2536 | case CRYPTO_ALG_TYPE_AEAD: | ||
2537 | err = crypto_register_alg( | ||
2538 | &t_alg->algt.alg.crypto); | ||
2539 | name = t_alg->algt.alg.crypto.cra_driver_name; | ||
2540 | break; | ||
2541 | case CRYPTO_ALG_TYPE_AHASH: | ||
2542 | err = crypto_register_ahash( | ||
2543 | &t_alg->algt.alg.hash); | ||
2544 | name = | ||
2545 | t_alg->algt.alg.hash.halg.base.cra_driver_name; | ||
2546 | break; | ||
2547 | } | ||
1942 | if (err) { | 2548 | if (err) { |
1943 | dev_err(dev, "%s alg registration failed\n", | 2549 | dev_err(dev, "%s alg registration failed\n", |
1944 | t_alg->crypto_alg.cra_driver_name); | 2550 | name); |
1945 | kfree(t_alg); | 2551 | kfree(t_alg); |
1946 | } else { | 2552 | } else { |
1947 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2553 | list_add_tail(&t_alg->entry, &priv->alg_list); |
1948 | dev_info(dev, "%s\n", | 2554 | dev_info(dev, "%s\n", name); |
1949 | t_alg->crypto_alg.cra_driver_name); | ||
1950 | } | 2555 | } |
1951 | } | 2556 | } |
1952 | } | 2557 | } |
@@ -1968,8 +2573,11 @@ static const struct of_device_id talitos_match[] = { | |||
1968 | MODULE_DEVICE_TABLE(of, talitos_match); | 2573 | MODULE_DEVICE_TABLE(of, talitos_match); |
1969 | 2574 | ||
1970 | static struct of_platform_driver talitos_driver = { | 2575 | static struct of_platform_driver talitos_driver = { |
1971 | .name = "talitos", | 2576 | .driver = { |
1972 | .match_table = talitos_match, | 2577 | .name = "talitos", |
2578 | .owner = THIS_MODULE, | ||
2579 | .of_match_table = talitos_match, | ||
2580 | }, | ||
1973 | .probe = talitos_probe, | 2581 | .probe = talitos_probe, |
1974 | .remove = talitos_remove, | 2582 | .remove = talitos_remove, |
1975 | }; | 2583 | }; |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index ff5a1450e145..0b746aca4587 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale SEC (talitos) device register and descriptor header defines | 2 | * Freescale SEC (talitos) device register and descriptor header defines |
3 | * | 3 | * |
4 | * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. | 4 | * Copyright (c) 2006-2010 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | 7 | * modification, are permitted provided that the following conditions |
@@ -130,6 +130,9 @@ | |||
130 | #define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ | 130 | #define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ |
131 | #define TALITOS_CRCUISR_LO 0xf034 | 131 | #define TALITOS_CRCUISR_LO 0xf034 |
132 | 132 | ||
133 | #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 | ||
134 | #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 | ||
135 | |||
133 | /* | 136 | /* |
134 | * talitos descriptor header (hdr) bits | 137 | * talitos descriptor header (hdr) bits |
135 | */ | 138 | */ |
@@ -157,12 +160,16 @@ | |||
157 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) | 160 | #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) |
158 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) | 161 | #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) |
159 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) | 162 | #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) |
163 | #define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) | ||
160 | #define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) | 164 | #define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000) |
161 | #define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) | 165 | #define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000) |
162 | #define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) | 166 | #define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000) |
167 | #define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000) | ||
163 | #define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) | 168 | #define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000) |
164 | #define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) | 169 | #define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000) |
165 | #define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) | 170 | #define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000) |
171 | #define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000) | ||
172 | #define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000) | ||
166 | #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ | 173 | #define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \ |
167 | DESC_HDR_MODE0_MDEU_HMAC) | 174 | DESC_HDR_MODE0_MDEU_HMAC) |
168 | #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ | 175 | #define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \ |
@@ -181,9 +188,12 @@ | |||
181 | #define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) | 188 | #define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000) |
182 | #define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) | 189 | #define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800) |
183 | #define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) | 190 | #define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400) |
191 | #define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300) | ||
184 | #define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) | 192 | #define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200) |
185 | #define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) | 193 | #define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100) |
186 | #define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) | 194 | #define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000) |
195 | #define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000) | ||
196 | #define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200) | ||
187 | #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ | 197 | #define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \ |
188 | DESC_HDR_MODE1_MDEU_HMAC) | 198 | DESC_HDR_MODE1_MDEU_HMAC) |
189 | #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ | 199 | #define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \ |